mirror of
https://github.com/PaddlePaddle/FastDeploy.git
synced 2025-09-26 20:41:53 +08:00
91 lines
2.9 KiB
Python
91 lines
2.9 KiB
Python
"""
|
|
# Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved.
|
|
#
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
# you may not use this file except in compliance with the License.
|
|
# You may obtain a copy of the License at
|
|
#
|
|
# http://www.apache.org/licenses/LICENSE-2.0
|
|
#
|
|
# Unless required by applicable law or agreed to in writing, software
|
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
# See the License for the specific language governing permissions and
|
|
# limitations under the License.
|
|
"""
|
|
|
|
# This file is modified from https://github.com/vllm-project/vllm/blob/main/benchmarks/benchmark_utils.py
|
|
|
|
|
|
import argparse
|
|
import json
|
|
import math
|
|
import os
|
|
from typing import Any
|
|
|
|
|
|
def convert_to_pytorch_benchmark_format(args: argparse.Namespace,
                                        metrics: dict[str, list],
                                        extra_info: dict[str, Any]) -> list:
    """
    Save the benchmark results in the format used by PyTorch OSS benchmark,
    with one metric per record.
    https://github.com/pytorch/pytorch/wiki/How-to-integrate-with-PyTorch-OSS-benchmark-database

    Args:
        args: Parsed CLI namespace; serialized via ``vars(args)`` into each
            record's ``benchmark.extra_info.args``. Must expose ``.model``.
        metrics: Mapping of metric name -> list of benchmark values.
        extra_info: Extra metadata attached to every metric record.

    Returns:
        One record dict per metric, or an empty list when the
        ``SAVE_TO_PYTORCH_BENCHMARK_FORMAT`` environment variable is
        unset or empty.
    """
    records = []
    # NOTE: os.environ.get returns a string, so ANY non-empty value (even
    # "0" or "false") enables saving; only an unset/empty variable disables.
    if not os.environ.get("SAVE_TO_PYTORCH_BENCHMARK_FORMAT", False):
        return records

    for name, benchmark_values in metrics.items():
        record = {
            "benchmark": {
                "name": "vLLM benchmark",
                "extra_info": {
                    "args": vars(args),
                },
            },
            "model": {
                "name": args.model,
            },
            "metric": {
                "name": name,
                "benchmark_values": benchmark_values,
                "extra_info": extra_info,
            },
        }

        # Save tensor_parallel_size parameter if it's part of the metadata
        # (i.e. it was not already supplied on the command line).
        tp = record["benchmark"]["extra_info"]["args"].get(
            "tensor_parallel_size")
        if not tp and "tensor_parallel_size" in extra_info:
            record["benchmark"]["extra_info"]["args"][
                "tensor_parallel_size"] = extra_info["tensor_parallel_size"]

        records.append(record)

    return records
|
|
|
|
|
|
class InfEncoder(json.JSONEncoder):
    """JSON encoder that serializes infinite floats as the string "inf"."""

    def clear_inf(self, o: Any):
        """Return a copy of *o* with every infinite float replaced by "inf",
        recursing through nested dicts and lists."""
        if isinstance(o, float) and math.isinf(o):
            return "inf"
        if isinstance(o, dict):
            return {key: self.clear_inf(item) for key, item in o.items()}
        if isinstance(o, list):
            return [self.clear_inf(item) for item in o]
        return o

    def iterencode(self, o: Any, *args, **kwargs) -> Any:
        """Sanitize infinities out of *o*, then delegate to the base encoder."""
        return super().iterencode(self.clear_inf(o), *args, **kwargs)
|
|
|
|
|
|
def write_to_json(filename: str, records: list) -> None:
    """Serialize *records* to *filename* as JSON, mapping infinite floats
    to the string "inf" via :class:`InfEncoder`.

    Args:
        filename: Destination path; the file is created or truncated.
        records: JSON-serializable list of benchmark records.
    """
    # Explicit UTF-8 so output does not depend on the platform's
    # default locale encoding.
    with open(filename, "w", encoding="utf-8") as f:
        json.dump(records, f, cls=InfEncoder)
|
|
|