Mirror of https://github.com/PaddlePaddle/FastDeploy.git (synced 2025-10-26 10:00:33 +08:00)
[Backend] Support onnxruntime DirectML inference. (#1304)
Squashed commit history (duplicate messages collapsed, typos corrected):
* Fix links in readme
* Update PPOCRv2/v3 examples
* Update auto compression configs
* Add new quantization support for PaddleClas model
* Update quantized YOLOv6s model download link
* Improve PPOCR comments
* Add English doc for quantization
* Fix PPOCR rec model bug
* Add new PaddleSeg quantization support
* Add Ascend model list
* Support DirectML in onnxruntime
* Support OnnxRuntime DirectML
* Remove DirectML vision model example
* Improve OnnxRuntime DirectML
* Fix OpenCV CMake on Windows
* Recheck codestyle
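For background on what "Support onnxruntime DirectML" involves: in onnxruntime, DirectML is enabled by appending the DML execution provider to the session options before the session is created. The following is a minimal, illustrative C++ sketch of that mechanism using onnxruntime's public DML provider factory; it is not the FastDeploy integration added by this commit, and the model path is a placeholder.

// Minimal sketch: enabling the DirectML execution provider in onnxruntime.
// Illustrative only; not the FastDeploy code added by this commit.
// Requires an onnxruntime build with DirectML enabled.
#include <onnxruntime_cxx_api.h>
#include <dml_provider_factory.h>

int main() {
  Ort::Env env(ORT_LOGGING_LEVEL_WARNING, "dml_demo");
  Ort::SessionOptions session_options;
  // The DirectML EP requires memory pattern optimization to be disabled
  // and runs with sequential execution.
  session_options.DisableMemPattern();
  session_options.SetExecutionMode(ORT_SEQUENTIAL);
  // Append the DirectML EP on adapter 0; operators DirectML cannot run
  // fall back to the CPU provider.
  OrtSessionOptionsAppendExecutionProvider_DML(session_options, 0);
  Ort::Session session(env, L"model.onnx", session_options);  // placeholder path
  return 0;
}

FastDeploy hides this kind of backend selection behind its RuntimeOption, as the example diff below shows for the KunlunXin and Ascend devices.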
@@ -96,7 +96,8 @@ void IpuInfer(const std::string& model_dir, const std::string& image_file) {
   std::cout << res.Str() << std::endl;
 }
 
-void KunlunXinInfer(const std::string& model_dir, const std::string& image_file) {
+void KunlunXinInfer(const std::string& model_dir,
+                    const std::string& image_file) {
   auto model_file = model_dir + sep + "inference.pdmodel";
   auto params_file = model_dir + sep + "inference.pdiparams";
   auto config_file = model_dir + sep + "inference_cls.yaml";
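The reformatted KunlunXinInfer follows the same shape as the file's other *Infer helpers: compose the model/params/config paths, pick the device on a RuntimeOption, and hand everything to PaddleClasModel. A hedged sketch of the full flow, assuming the usual FastDeploy PaddleClas C++ API and the sep path separator defined earlier in the example file:

// Sketch of the complete KunlunXin flow in this example file.
// Assumes #include "fastdeploy/vision.h" (which brings in OpenCV)
// and the platform-specific sep separator from the example.
void KunlunXinInfer(const std::string& model_dir,
                    const std::string& image_file) {
  auto model_file = model_dir + sep + "inference.pdmodel";
  auto params_file = model_dir + sep + "inference.pdiparams";
  auto config_file = model_dir + sep + "inference_cls.yaml";

  auto option = fastdeploy::RuntimeOption();
  option.UseKunlunXin();  // select the KunlunXin XPU device

  auto model = fastdeploy::vision::classification::PaddleClasModel(
      model_file, params_file, config_file, option);
  if (!model.Initialized()) {
    std::cerr << "Failed to initialize." << std::endl;
    return;
  }

  auto im = cv::imread(image_file);
  fastdeploy::vision::ClassifyResult res;
  if (!model.Predict(&im, &res)) {
    std::cerr << "Failed to predict." << std::endl;
    return;
  }
  std::cout << res.Str() << std::endl;  // matches the res.Str() context above
}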
@@ -152,7 +153,7 @@ void AscendInfer(const std::string& model_dir, const std::string& image_file) {
   auto model_file = model_dir + sep + "inference.pdmodel";
   auto params_file = model_dir + sep + "inference.pdiparams";
   auto config_file = model_dir + sep + "inference_cls.yaml";
 
 
   auto option = fastdeploy::RuntimeOption();
   option.UseAscend();
@@ -172,14 +173,14 @@ void AscendInfer(const std::string& model_dir, const std::string& image_file) {
   std::cout << res.Str() << std::endl;
 }
 
 
 int main(int argc, char* argv[]) {
   if (argc < 4) {
     std::cout << "Usage: infer_demo path/to/model path/to/image run_option, "
                  "e.g ./infer_demo ./ResNet50_vd ./test.jpeg 0"
               << std::endl;
     std::cout << "The data type of run_option is int, 0: run with cpu; 1: run "
-                 "with gpu; 2: run with gpu and use tensorrt backend; 3: run with ipu; 4: run with kunlunxin."
+                 "with gpu; 2: run with gpu and use tensorrt backend; 3: run "
+                 "with ipu; 4: run with kunlunxin."
               << std::endl;
     return -1;
   }
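The usage text maps run_option integers to devices. The dispatch below the shown hunk plausibly looks like the following sketch; IpuInfer, KunlunXinInfer, and AscendInfer appear in this diff, while CpuInfer, GpuInfer, and TrtInfer are assumed names inferred from the "0: cpu; 1: gpu; 2: tensorrt" wording of the usage string.

// Sketch of the run_option dispatch implied by the usage string.
// CpuInfer/GpuInfer/TrtInfer are assumed names; the others appear above.
// Assumes <cstdlib> for std::atoi.
int main(int argc, char* argv[]) {
  if (argc < 4) {
    // ... usage message shown in the hunk above ...
    return -1;
  }
  int run_option = std::atoi(argv[3]);
  if (run_option == 0) {
    CpuInfer(argv[1], argv[2]);
  } else if (run_option == 1) {
    GpuInfer(argv[1], argv[2]);
  } else if (run_option == 2) {
    TrtInfer(argv[1], argv[2]);
  } else if (run_option == 3) {
    IpuInfer(argv[1], argv[2]);
  } else if (run_option == 4) {
    KunlunXinInfer(argv[1], argv[2]);
  }
  // AscendInfer (also in this file) presumably has its own option value
  // not visible in the shown hunks.
  return 0;
}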