[Model] Support Paddle3D PETR v2 model (#1863)

* Support PETR v2

* make petrv2 precision equal to the original repo

* delete extra func

* address review comments

* delete visualization code

* Update README_CN.md

* Update README.md

* Update README_CN.md

* fix build problem

* delete external variable and function

---------

Co-authored-by: DefTruth <31974251+DefTruth@users.noreply.github.com>
Author: CoolCola
Date: 2023-05-19 10:45:36 +08:00
Committed by: GitHub
Parent: c8ff8b63e8
Commit: e3b285c762
20 changed files with 1181 additions and 0 deletions


@@ -0,0 +1,45 @@
import fastdeploy as fd
import cv2
import os
from fastdeploy import ModelFormat


def parse_arguments():
    import argparse
    import ast
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model", required=True, help="Path of petr paddle model.")
    parser.add_argument(
        "--image", required=True, help="Path of test image file.")
    parser.add_argument(
        "--device",
        type=str,
        default='cpu',
        help="Type of inference device, support 'cpu' or 'gpu'.")
    return parser.parse_args()


def build_option(args):
    option = fd.RuntimeOption()
    if args.device.lower() == "gpu":
        option.use_gpu(0)
    if args.device.lower() == "cpu":
        option.use_cpu()
    return option


args = parse_arguments()

model_file = os.path.join(args.model, "petrv2_inference.pdmodel")
params_file = os.path.join(args.model, "petrv2_inference.pdiparams")
config_file = os.path.join(args.model, "infer_cfg.yml")

# Configure the runtime and load the model
runtime_option = build_option(args)
model = fd.vision.perception.Petr(
    model_file, params_file, config_file, runtime_option=runtime_option)

# Run prediction on the image and print the detection result
im = cv2.imread(args.image)
result = model.predict(im)
print(result)
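
When the model runs on GPU, the runtime option can also be pinned to a specific inference backend. The sketch below is not part of this commit: the --backend-style switch and the helper name build_option_with_backend are hypothetical, and it assumes fd.RuntimeOption exposes use_trt_backend() and use_paddle_infer_backend() as in other FastDeploy examples; verify against your installed version.

# Sketch only, not from this commit: build_option extended with a
# hypothetical "backend" argument for GPU inference.
def build_option_with_backend(device, backend="default"):
    option = fd.RuntimeOption()
    if device.lower() == "gpu":
        option.use_gpu(0)
        if backend == "trt":
            option.use_trt_backend()  # TensorRT backend on GPU
        elif backend == "paddle":
            option.use_paddle_infer_backend()  # Paddle Inference backend
    else:
        option.use_cpu()
    return option

With the file layout above, a typical invocation of the example would look like python infer.py --model ./petrv2_inference_model --image ./sample.png --device gpu, where the model directory and image path are placeholders.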