FastDeploy/examples/vision/classification/paddleclas/c/infer.c

// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
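
// Demo: image classification with the FastDeploy C API. Loads a PaddleClas
// inference model (inference.pdmodel, inference.pdiparams, inference_cls.yaml)
// from model_dir and runs prediction on CPU or GPU.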
#include <stdio.h>
#include <stdlib.h>

#include "fastdeploy_capi/vision.h"

#ifdef _WIN32
const char sep = '\\';
#else
const char sep = '/';
#endif

void CpuInfer(const char* model_dir, const char* image_file) {
  char model_file[100];
  char params_file[100];
  char config_file[100];
  snprintf(model_file, sizeof(model_file), "%s%c%s", model_dir, sep,
           "inference.pdmodel");
  snprintf(params_file, sizeof(params_file), "%s%c%s", model_dir, sep,
           "inference.pdiparams");
  snprintf(config_file, sizeof(config_file), "%s%c%s", model_dir, sep,
           "inference_cls.yaml");

  FD_C_RuntimeOptionWrapper* option = FD_C_CreateRuntimeOptionWrapper();
  FD_C_RuntimeOptionWrapperUseCpu(option);
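  // The same option object can select other backends/devices; this file also
  // uses FD_C_RuntimeOptionWrapperUseGpu(option, 0) in GpuInfer below.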

  FD_C_PaddleClasModelWrapper* model = FD_C_CreatePaddleClasModelWrapper(
      model_file, params_file, config_file, option, PADDLE);
  if (!FD_C_PaddleClasModelWrapperInitialized(model)) {
    printf("Failed to initialize.\n");
    FD_C_DestroyRuntimeOptionWrapper(option);
    FD_C_DestroyPaddleClasModelWrapper(model);
    return;
  }
  FD_C_Mat im = FD_C_Imread(image_file);
  FD_C_ClassifyResult* result =
      (FD_C_ClassifyResult*)malloc(sizeof(FD_C_ClassifyResult));
  if (!FD_C_PaddleClasModelWrapperPredict(model, im, result)) {
    printf("Failed to predict.\n");
    FD_C_DestroyRuntimeOptionWrapper(option);
    FD_C_DestroyPaddleClasModelWrapper(model);
    FD_C_DestroyMat(im);
    free(result);
    return;
  }
  // Print the result. You can read the fields of FD_C_ClassifyResult directly
  // (see the ClassifyResult API doc), or wrap it in an
  // FD_C_ClassifyResultWrapper, which holds the underlying C++ structure
  // fastdeploy::vision::ClassifyResult, and call FD_C_ClassifyResultWrapperStr
  // to invoke fastdeploy::vision::ClassifyResult::Str(). For convenience, this
  // demo uses the wrapper; a direct-access sketch (commented out) follows.
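  // The sketch below assumes FD_C_ClassifyResult exposes label_ids and scores
  // as one-dimensional arrays with size/data members, as declared in the
  // fastdeploy_capi result header; verify the field names there before use.
  //
  // for (size_t i = 0; i < result->label_ids.size; ++i) {
  //   printf("label: %d, score: %f\n", result->label_ids.data[i],
  //          result->scores.data[i]);
  // }
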
  FD_C_ClassifyResultWrapper* result_wrapper =
      FD_C_CreateClassifyResultWrapperFromData(result);
  printf("%s", FD_C_ClassifyResultWrapperStr(result_wrapper));

  FD_C_DestroyRuntimeOptionWrapper(option);
  FD_C_DestroyPaddleClasModelWrapper(model);
  FD_C_DestroyClassifyResultWrapper(result_wrapper);
  FD_C_DestroyClassifyResult(result);
  FD_C_DestroyMat(im);
}

void GpuInfer(const char* model_dir, const char* image_file) {
  char model_file[100];
  char params_file[100];
  char config_file[100];
  snprintf(model_file, sizeof(model_file), "%s%c%s", model_dir, sep,
           "inference.pdmodel");
  snprintf(params_file, sizeof(params_file), "%s%c%s", model_dir, sep,
           "inference.pdiparams");
  snprintf(config_file, sizeof(config_file), "%s%c%s", model_dir, sep,
           "inference_cls.yaml");

  FD_C_RuntimeOptionWrapper* option = FD_C_CreateRuntimeOptionWrapper();
  // Run on GPU device 0; everything else mirrors CpuInfer.
  FD_C_RuntimeOptionWrapperUseGpu(option, 0);

  FD_C_PaddleClasModelWrapper* model = FD_C_CreatePaddleClasModelWrapper(
      model_file, params_file, config_file, option, PADDLE);
  if (!FD_C_PaddleClasModelWrapperInitialized(model)) {
    printf("Failed to initialize.\n");
    FD_C_DestroyRuntimeOptionWrapper(option);
    FD_C_DestroyPaddleClasModelWrapper(model);
    return;
  }
  FD_C_Mat im = FD_C_Imread(image_file);
  FD_C_ClassifyResult* result =
      (FD_C_ClassifyResult*)malloc(sizeof(FD_C_ClassifyResult));
  if (!FD_C_PaddleClasModelWrapperPredict(model, im, result)) {
    printf("Failed to predict.\n");
    FD_C_DestroyRuntimeOptionWrapper(option);
    FD_C_DestroyPaddleClasModelWrapper(model);
    FD_C_DestroyMat(im);
    free(result);
    return;
  }
  // Print the result via FD_C_ClassifyResultWrapper; see the comment in
  // CpuInfer above for the direct field-access alternative.
  FD_C_ClassifyResultWrapper* result_wrapper =
      FD_C_CreateClassifyResultWrapperFromData(result);
  printf("%s", FD_C_ClassifyResultWrapperStr(result_wrapper));

  FD_C_DestroyRuntimeOptionWrapper(option);
  FD_C_DestroyPaddleClasModelWrapper(model);
  FD_C_DestroyClassifyResultWrapper(result_wrapper);
  FD_C_DestroyClassifyResult(result);
  FD_C_DestroyMat(im);
}

int main(int argc, char* argv[]) {
  if (argc < 4) {
    printf(
        "Usage: infer_demo path/to/model_dir path/to/image run_option, "
        "e.g. ./infer_demo ./model_dir ./test.jpeg 0"
        "\n");
    printf(
        "The data type of run_option is int, 0: run with cpu; 1: run with gpu"
        "\n");
    return -1;
  }
  if (atoi(argv[3]) == 0) {
    CpuInfer(argv[1], argv[2]);
  } else if (atoi(argv[3]) == 1) {
    GpuInfer(argv[1], argv[2]);
  } else {
    printf("Invalid run_option %s, expected 0 (cpu) or 1 (gpu).\n", argv[3]);
    return -1;
  }
  return 0;
}
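
// Build/run sketch. The exact include and library paths depend on your
// FastDeploy C-API installation (the official examples build with CMake);
// the flags and the model directory name below are illustrative assumptions:
//
//   gcc infer.c -I${FASTDEPLOY_INSTALL_DIR}/include \
//       -L${FASTDEPLOY_INSTALL_DIR}/lib -lfastdeploy -o infer_demo
//   ./infer_demo ./ResNet50_vd_infer ./test.jpeg 0   # CPU
//   ./infer_demo ./ResNet50_vd_infer ./test.jpeg 1   # GPU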