[Backend] support ipu in paddle inference backend. (#437)
* feat(ipu): add ipu support for paddle_infer backend.
* fix(): remove unused env.
* fix(ipu): simplify user API for IPU.
* fix(cmake): fix merge conflict error in CMakeList.

Co-authored-by: Jason <jiangjiajun@baidu.com>
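For context, here is a minimal sketch of how the simplified IPU selection added by this commit sits next to the existing device selectors on fastdeploy::RuntimeOption. The BuildOption helper and the flag values are assumptions made for illustration (they mirror the dispatch in main() below); UseCpu()/UseGpu() are pre-existing selectors, and UseIpu() is the one introduced here.

#include "fastdeploy/vision.h"

// Hypothetical helper: choose a device from a run-mode flag.
fastdeploy::RuntimeOption BuildOption(int device_flag) {
  auto option = fastdeploy::RuntimeOption();
  if (device_flag == 1) {
    option.UseGpu();   // existing GPU path
  } else if (device_flag == 3) {
    option.UseIpu();   // new: run on IPU through the Paddle Inference backend
  } else {
    option.UseCpu();   // default CPU path
  }
  return option;
}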
@@ -70,6 +70,32 @@ void GpuInfer(const std::string& model_dir, const std::string& image_file) {
  std::cout << res.Str() << std::endl;
}

void IpuInfer(const std::string& model_dir, const std::string& image_file) {
  auto model_file = model_dir + sep + "inference.pdmodel";
  auto params_file = model_dir + sep + "inference.pdiparams";
  auto config_file = model_dir + sep + "inference_cls.yaml";

  auto option = fastdeploy::RuntimeOption();
  option.UseIpu();
  auto model = fastdeploy::vision::classification::PaddleClasModel(
      model_file, params_file, config_file, option);
  if (!model.Initialized()) {
    std::cerr << "Failed to initialize." << std::endl;
    return;
  }

  auto im = cv::imread(image_file);

  fastdeploy::vision::ClassifyResult res;
  if (!model.Predict(&im, &res)) {
    std::cerr << "Failed to predict." << std::endl;
    return;
  }

  // print res
  std::cout << res.Str() << std::endl;
}

void TrtInfer(const std::string& model_dir, const std::string& image_file) {
  auto model_file = model_dir + sep + "inference.pdmodel";
  auto params_file = model_dir + sep + "inference.pdiparams";
@@ -113,6 +139,8 @@ int main(int argc, char* argv[]) {
    GpuInfer(argv[1], argv[2]);
  } else if (std::atoi(argv[3]) == 2) {
    TrtInfer(argv[1], argv[2]);
  } else if (std::atoi(argv[3]) == 3) {
    IpuInfer(argv[1], argv[2]);
  }
  return 0;
}
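With this change, passing 3 as the run-mode argument dispatches to the new IpuInfer path, while the existing GPU and TensorRT modes keep their values. Assuming the demo binary built from this example is named infer_demo (the target name is not shown in this diff), an IPU run would look like:

./infer_demo path/to/model_dir path/to/image.jpg 3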