[Backend] Add OCR, Seg, KeypointDetection, Matting, ernie-3.0 and adaface models for XPU Deploy (#960)

* [FlyCV] Bump up FlyCV -> official release 1.0.0

* add seg models for XPU

* add ocr model for XPU

* add matting

* add matting python

* fix infer.cc

* add keypointdetection support for XPU

* Add adaface support for XPU

* add ernie-3.0

* fix doc

Co-authored-by: DefTruth <qiustudent_r@163.com>
Co-authored-by: DefTruth <31974251+DefTruth@users.noreply.github.com>
yeliang2258
2022-12-26 15:02:58 +08:00
committed by GitHub
parent 3b29e86add
commit 7b15f72516
39 changed files with 304 additions and 25 deletions

examples/vision/faceid/adaface/cpp/infer.cc Normal file → Executable file

@@ -47,6 +47,43 @@ void CpuInfer(const std::string &model_file, const std::string &params_file,
<< ", Cosine 02:" << cosine02 << std::endl;
}
void XpuInfer(const std::string &model_file, const std::string &params_file,
const std::vector<std::string> &image_file) {
auto option = fastdeploy::RuntimeOption();
option.UseXpu();
auto model = fastdeploy::vision::faceid::AdaFace(model_file, params_file);
if (!model.Initialized()) {
std::cerr << "Failed to initialize." << std::endl;
return;
}
cv::Mat face0 = cv::imread(image_file[0]);
cv::Mat face1 = cv::imread(image_file[1]);
cv::Mat face2 = cv::imread(image_file[2]);
fastdeploy::vision::FaceRecognitionResult res0;
fastdeploy::vision::FaceRecognitionResult res1;
fastdeploy::vision::FaceRecognitionResult res2;
if ((!model.Predict(&face0, &res0)) || (!model.Predict(&face1, &res1)) ||
(!model.Predict(&face2, &res2))) {
std::cerr << "Prediction Failed." << std::endl;
}
std::cout << "Prediction Done!" << std::endl;
std::cout << "--- [Face 0]:" << res0.Str();
std::cout << "--- [Face 1]:" << res1.Str();
std::cout << "--- [Face 2]:" << res2.Str();
float cosine01 = fastdeploy::vision::utils::CosineSimilarity(
res0.embedding, res1.embedding, model.l2_normalize);
float cosine02 = fastdeploy::vision::utils::CosineSimilarity(
res0.embedding, res2.embedding, model.l2_normalize);
std::cout << "Detect Done! Cosine 01: " << cosine01
<< ", Cosine 02:" << cosine02 << std::endl;
}
void GpuInfer(const std::string &model_file, const std::string &params_file,
              const std::vector<std::string> &image_file) {
  auto option = fastdeploy::RuntimeOption();
@@ -134,7 +171,7 @@ int main(int argc, char *argv[]) {
                 "test_lite_focal_AdaFace_2.JPG 0"
              << std::endl;
    std::cout << "The data type of run_option is int, 0: run with cpu; 1: run "
                 "with gpu; 2: run with gpu and use tensorrt backend."
                 "with gpu; 2: run with gpu and use tensorrt backend; 3: run with xpu."
              << std::endl;
    return -1;
  }
@@ -147,6 +184,8 @@ int main(int argc, char *argv[]) {
    GpuInfer(argv[1], argv[2], image_files);
  } else if (std::atoi(argv[6]) == 2) {
    TrtInfer(argv[1], argv[2], image_files);
  } else if (std::atoi(argv[6]) == 3) {
    // run_option 3 selects the newly added XPU path.
    XpuInfer(argv[1], argv[2], image_files);
  }
  return 0;
}
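
The new XPU examples in this commit appear to share one setup, visible in the XpuInfer routine above: build a fastdeploy::RuntimeOption, call UseXpu(), pass the option to the model constructor, and select the path at runtime with run_option 3 (for the AdaFace demo, something like ./infer_demo model params face0.jpg face1.jpg face2.jpg 3, file names illustrative). Below is a minimal sketch of the same pattern for one of the other model families named in the title, segmentation; the PaddleSegModel class, its argument order, and all file names are assumptions rather than code from this commit, and only the option handling mirrors the diff.

// A minimal sketch, not code from this commit: the shared XPU setup, shown
// here with a hypothetical Paddle segmentation model. Class name, argument
// order and file names are assumptions; only the option handling mirrors
// the XpuInfer() diff above.
#include <iostream>

#include "fastdeploy/vision.h"

int main() {
  auto option = fastdeploy::RuntimeOption();
  option.UseXpu();  // same call as in XpuInfer() above

  // Hypothetical Paddle segmentation model files plus its deploy config.
  auto model = fastdeploy::vision::segmentation::PaddleSegModel(
      "model.pdmodel", "model.pdiparams", "deploy.yaml", option);
  if (!model.Initialized()) {
    std::cerr << "Failed to initialize." << std::endl;
    return -1;
  }

  cv::Mat im = cv::imread("input.jpg");
  fastdeploy::vision::SegmentationResult res;
  if (!model.Predict(&im, &res)) {
    std::cerr << "Prediction failed." << std::endl;
    return -1;
  }
  std::cout << res.Str() << std::endl;
  return 0;
}

Presumably the OCR, matting, keypoint and adaface examples added alongside differ only in the model class they construct; the option handling stays the same.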