[Other] Optimize backend selection strategy (#574)
* Optimize backend selection strategy
* remove debug code
* Update runtime.cc
* Update picodet.cc
* Update yolox.cc
* fix ci
@@ -34,7 +34,6 @@ void CpuInfer(const std::string& model_dir, const std::string& image_file) {
   }
 
   auto im = cv::imread(image_file);
-  auto im_bak = im.clone();
 
   fastdeploy::vision::DetectionResult res;
   if (!model.Predict(&im, &res)) {
@@ -43,7 +42,7 @@ void CpuInfer(const std::string& model_dir, const std::string& image_file) {
   }
 
   std::cout << res.Str() << std::endl;
-  auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res, 0.5);
+  auto vis_im = fastdeploy::vision::VisDetection(im, res, 0.5);
   cv::imwrite("vis_result.jpg", vis_im);
   std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
 }
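
The same pair of changes is applied to GpuInfer and TrtInfer in the hunks below. For reference, a minimal sketch of the updated example flow (the model type and file paths here are placeholders; fastdeploy::vision::VisDetection is the free function the examples now call in place of the removed Visualize::VisDetection wrapper, drawing on im directly so the im_bak backup clone is no longer needed):

    #include <iostream>
    #include "fastdeploy/vision.h"

    void InferAndVisualize(fastdeploy::vision::detection::PicoDet& model,
                           const std::string& image_file) {
      auto im = cv::imread(image_file);
      fastdeploy::vision::DetectionResult res;
      if (!model.Predict(&im, &res)) {
        std::cerr << "Failed to predict." << std::endl;
        return;
      }
      std::cout << res.Str() << std::endl;
      // Visualize on im itself; the examples no longer keep a backup clone.
      auto vis_im = fastdeploy::vision::VisDetection(im, res, 0.5);
      cv::imwrite("vis_result.jpg", vis_im);
    }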
@@ -63,7 +62,6 @@ void GpuInfer(const std::string& model_dir, const std::string& image_file) {
   }
 
   auto im = cv::imread(image_file);
-  auto im_bak = im.clone();
 
   fastdeploy::vision::DetectionResult res;
   if (!model.Predict(&im, &res)) {
@@ -72,7 +70,7 @@ void GpuInfer(const std::string& model_dir, const std::string& image_file) {
   }
 
   std::cout << res.Str() << std::endl;
-  auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res, 0.5);
+  auto vis_im = fastdeploy::vision::VisDetection(im, res, 0.5);
   cv::imwrite("vis_result.jpg", vis_im);
   std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
 }
@@ -93,7 +91,6 @@ void TrtInfer(const std::string& model_dir, const std::string& image_file) {
   }
 
   auto im = cv::imread(image_file);
-  auto im_bak = im.clone();
 
   fastdeploy::vision::DetectionResult res;
   if (!model.Predict(&im, &res)) {
@@ -102,7 +99,7 @@ void TrtInfer(const std::string& model_dir, const std::string& image_file) {
   }
 
   std::cout << res.Str() << std::endl;
-  auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res, 0.5);
+  auto vis_im = fastdeploy::vision::VisDetection(im, res, 0.5);
   cv::imwrite("vis_result.jpg", vis_im);
   std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
 }
@@ -16,18 +16,32 @@
 
 namespace fastdeploy {
 
-bool FastDeployModel::InitRuntime() {
-  FDASSERT(
-      CheckModelFormat(runtime_option.model_file, runtime_option.model_format),
-      "ModelFormatCheck Failed.");
-  if (runtime_initialized_) {
-    FDERROR << "The model is already initialized, cannot be initliazed again."
-            << std::endl;
-    return false;
+std::string Str(const std::vector<Backend>& backends) {
+  std::ostringstream oss;
+  if (backends.size() == 0) {
+    oss << "[]";
+    return oss.str();
   }
-  if (runtime_option.backend != Backend::UNKNOWN) {
+  oss << "[ " << backends[0];
+  for (int i = 1; i < backends.size(); ++i) {
+    oss << " ," << backends[i];
+  }
+  oss << " ]";
+  return oss.str();
+}
+
+bool IsSupported(const std::vector<Backend>& backends, Backend backend) {
+  for (size_t i = 0; i < backends.size(); ++i) {
+    if (backends[i] == backend) {
+      return true;
+    }
+  }
+  return false;
+}
+
+bool FastDeployModel::InitRuntimeWithSpecifiedBackend() {
   if (!IsBackendAvailable(runtime_option.backend)) {
-    FDERROR << Str(runtime_option.backend)
+    FDERROR << runtime_option.backend
             << " is not compiled with current FastDeploy library."
             << std::endl;
     return false;
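
A quick illustration of the two helpers just added (a sketch inside namespace fastdeploy; it relies on the operator<<(std::ostream&, const Backend&) overload shown further down, and the vector contents are arbitrary):

    std::vector<Backend> valid = {Backend::ORT, Backend::OPENVINO};
    IsSupported(valid, Backend::ORT);  // true: the linear scan finds a match
    IsSupported(valid, Backend::TRT);  // false
    Str(valid);                        // "[ Backend::ORT ,Backend::OPENVINO ]"
    Str(std::vector<Backend>());       // "[]"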
@@ -35,78 +49,45 @@ bool FastDeployModel::InitRuntime() {
 
   bool use_gpu = (runtime_option.device == Device::GPU);
   bool use_ipu = (runtime_option.device == Device::IPU);
-#ifndef WITH_GPU
-  use_gpu = false;
-#endif
-#ifndef WITH_IPU
-  use_ipu = false;
-#endif
   bool use_rknpu = (runtime_option.device == Device::RKNPU);
   bool use_timvx = (runtime_option.device == Device::TIMVX);
 
-  // whether the model is supported by the setted backend
-  bool is_supported = false;
   if (use_gpu) {
-    for (auto& item : valid_gpu_backends) {
-      if (item == runtime_option.backend) {
-        is_supported = true;
-        break;
-      }
+    if (!IsSupported(valid_gpu_backends, runtime_option.backend)) {
+      FDERROR << "The valid gpu backends of model " << ModelName() << " are " << Str(valid_gpu_backends) << ", " << runtime_option.backend << " is not supported." << std::endl;
+      return false;
     }
   } else if (use_rknpu) {
-    for (auto& item : valid_rknpu_backends) {
-      if (item == runtime_option.backend) {
-        is_supported = true;
-        break;
-      }
+    if (!IsSupported(valid_rknpu_backends, runtime_option.backend)) {
+      FDERROR << "The valid rknpu backends of model " << ModelName() << " are " << Str(valid_rknpu_backends) << ", " << runtime_option.backend << " is not supported." << std::endl;
+      return false;
     }
   } else if (use_timvx) {
-    for (auto& item : valid_timvx_backends) {
-      if (item == runtime_option.backend) {
-        is_supported = true;
-        break;
-      }
-    }
-  }else if(use_ipu) {
-    for (auto& item : valid_ipu_backends) {
-      if (item == runtime_option.backend) {
-        is_supported = true;
-        break;
+    if (!IsSupported(valid_timvx_backends, runtime_option.backend)) {
+      FDERROR << "The valid timvx backends of model " << ModelName() << " are " << Str(valid_timvx_backends) << ", " << runtime_option.backend << " is not supported." << std::endl;
+      return false;
     }
+  } else if(use_ipu) {
+    if (!IsSupported(valid_ipu_backends, runtime_option.backend)) {
+      FDERROR << "The valid ipu backends of model " << ModelName() << " are " << Str(valid_ipu_backends) << ", " << runtime_option.backend << " is not supported." << std::endl;
+      return false;
     }
   } else {
-    for (auto& item : valid_cpu_backends) {
-      if (item == runtime_option.backend) {
-        is_supported = true;
-        break;
-      }
+    if (!IsSupported(valid_cpu_backends, runtime_option.backend)) {
+      FDERROR << "The valid cpu backends of model " << ModelName() << " are " << Str(valid_cpu_backends) << ", " << runtime_option.backend << " is not supported." << std::endl;
+      return false;
     }
   }
 
-  if (is_supported) {
-    runtime_ = std::shared_ptr<Runtime>(new Runtime());
-    if (!runtime_->Init(runtime_option)) {
-      return false;
-    }
-    runtime_initialized_ = true;
-    return true;
-  } else {
-    FDWARNING << ModelName() << " is not supported with backend "
-              << Str(runtime_option.backend) << "." << std::endl;
-    if (use_gpu) {
-      FDASSERT(valid_gpu_backends.size() > 0,
-               "There's no valid gpu backend for %s.", ModelName().c_str());
-      FDWARNING << "FastDeploy will choose " << Str(valid_gpu_backends[0])
-                << " for model inference." << std::endl;
-    } else {
-      FDASSERT(valid_cpu_backends.size() > 0,
-               "There's no valid cpu backend for %s.", ModelName().c_str());
-      FDWARNING << "FastDeploy will choose " << Str(valid_cpu_backends[0])
-                << " for model inference." << std::endl;
-    }
-  }
-}
+  runtime_ = std::shared_ptr<Runtime>(new Runtime());
+  if (!runtime_->Init(runtime_option)) {
+    return false;
+  }
+  runtime_initialized_ = true;
+  return true;
+}
 
+bool FastDeployModel::InitRuntimeWithSpecifiedDevice() {
   if (runtime_option.device == Device::CPU) {
     return CreateCpuBackend();
   } else if (runtime_option.device == Device::GPU) {
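
The practical effect of the hunk above: when a backend is pinned explicitly, an unsupported choice now fails with a precise error instead of silently falling back to the first valid backend. A usage sketch (model paths hypothetical; the RuntimeOption calls are existing FastDeploy API):

    fastdeploy::RuntimeOption option;
    option.UseGpu();
    option.UseTrtBackend();  // pin Backend::TRT explicitly
    auto model = fastdeploy::vision::detection::PicoDet(
        "model.pdmodel", "model.pdiparams", "infer_cfg.yml", option);
    if (!model.Initialized()) {
      // With this change, initialization fails fast and FDERROR lists the
      // model's valid gpu backends instead of switching backends silently.
    }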
@@ -130,10 +111,26 @@ bool FastDeployModel::InitRuntime() {
     return false;
 #endif
   }
-  FDERROR << "Only support CPU/GPU/NPU now." << std::endl;
+  FDERROR << "Only support CPU/GPU/IPU/RKNPU/TIMVX now." << std::endl;
   return false;
 }
 
+bool FastDeployModel::InitRuntime() {
+  FDASSERT(
+      CheckModelFormat(runtime_option.model_file, runtime_option.model_format),
+      "ModelFormatCheck Failed.");
+  if (runtime_initialized_) {
+    FDERROR << "The model is already initialized, cannot be initliazed again."
+            << std::endl;
+    return false;
+  }
+  if (runtime_option.backend != Backend::UNKNOWN) {
+    return InitRuntimeWithSpecifiedBackend();
+  }
+
+  return InitRuntimeWithSpecifiedDevice();
+}
+
 bool FastDeployModel::CreateCpuBackend() {
   if (valid_cpu_backends.size() == 0) {
     FDERROR << "There's no valid cpu backends for model: " << ModelName()
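
InitRuntime() is now a thin dispatcher: it validates the model format once, then branches on whether the caller pinned a backend. A sketch of the two entry paths from the user's side (the RuntimeOption methods are existing FastDeploy API; the comments describe the dispatch above):

    fastdeploy::RuntimeOption by_device;
    by_device.UseCpu();          // backend stays Backend::UNKNOWN,
                                 // so InitRuntimeWithSpecifiedDevice() runs

    fastdeploy::RuntimeOption by_backend;
    by_backend.UseCpu();
    by_backend.UseOrtBackend();  // backend set to Backend::ORT,
                                 // so InitRuntimeWithSpecifiedBackend() runs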
@@ -41,12 +41,10 @@ class FASTDEPLOY_DECL FastDeployModel {
   std::vector<Backend> valid_gpu_backends = {Backend::ORT};
   /** Model's valid ipu backends. This member defined all the ipu backends have successfully tested for the model
   */
-  std::vector<Backend> valid_ipu_backends = {Backend::PDINFER};
+  std::vector<Backend> valid_ipu_backends = {};
   /** Model's valid timvx backends. This member defined all the timvx backends have successfully tested for the model
   */
   std::vector<Backend> valid_timvx_backends = {};
-
-
   /** Model's valid hardware backends. This member defined all the gpu backends have successfully tested for the model
   */
   std::vector<Backend> valid_rknpu_backends = {};
@@ -116,20 +114,22 @@ class FASTDEPLOY_DECL FastDeployModel {
 
  protected:
   virtual bool InitRuntime();
-  virtual bool CreateCpuBackend();
-  virtual bool CreateGpuBackend();
-  virtual bool CreateIpuBackend();
-  virtual bool CreateRKNPUBackend();
-  virtual bool CreateTimVXBackend();
 
   bool initialized = false;
-  std::vector<Backend> valid_external_backends_;
   // Reused input tensors
   std::vector<FDTensor> reused_input_tensors_;
   // Reused output tensors
   std::vector<FDTensor> reused_output_tensors_;
 
  private:
+  bool InitRuntimeWithSpecifiedBackend();
+  bool InitRuntimeWithSpecifiedDevice();
+  bool CreateCpuBackend();
+  bool CreateGpuBackend();
+  bool CreateIpuBackend();
+  bool CreateRKNPUBackend();
+  bool CreateTimVXBackend();
+
   std::shared_ptr<Runtime> runtime_;
   bool runtime_initialized_ = false;
   // whether to record inference time
@@ -97,7 +97,7 @@ std::string Str(const Backend& b) {
   }else if (b == Backend::OPENVINO) {
     return "Backend::OPENVINO";
   } else if (b == Backend::LITE) {
-    return "Backend::LITE";
+    return "Backend::PDLITE";
   }
   return "UNKNOWN-Backend";
 }
@@ -116,9 +116,10 @@ std::ostream& operator<<(std::ostream& out, const Backend& backend) {
   }else if (backend == Backend::POROS) {
     out << "Backend::POROS";
   } else if (backend == Backend::LITE) {
-    out << "Backend::LITE";
-  }
-  out << "UNKNOWN-Backend";
+    out << "Backend::PDLITE";
+  } else {
+    out << "UNKNOWN-Backend";
+  }
   return out;
 }
 
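Besides the rename, this fixes a subtle bug: without an else branch, "UNKNOWN-Backend" was streamed after every known backend name as well. A minimal check (a sketch, assuming the operator above is in scope):

    #include <cassert>
    #include <sstream>

    std::ostringstream oss;
    oss << fastdeploy::Backend::LITE;
    assert(oss.str() == "Backend::PDLITE");  // no trailing "UNKNOWN-Backend"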
@@ -23,10 +23,16 @@ PaddleClasModel::PaddleClasModel(const std::string& model_file,
                                  const std::string& config_file,
                                  const RuntimeOption& custom_option,
                                  const ModelFormat& model_format) : preprocessor_(config_file) {
+  if (model_format == ModelFormat::PADDLE) {
     valid_cpu_backends = {Backend::ORT, Backend::OPENVINO, Backend::PDINFER,
                           Backend::LITE};
     valid_gpu_backends = {Backend::ORT, Backend::PDINFER, Backend::TRT};
     valid_timvx_backends = {Backend::LITE};
+    valid_ipu_backends = {Backend::PDINFER};
+  } else if (model_format == ModelFormat::ONNX) {
+    valid_cpu_backends = {Backend::ORT, Backend::OPENVINO};
+    valid_gpu_backends = {Backend::ORT, Backend::TRT};
+  }
 
   runtime_option = custom_option;
   runtime_option.model_format = model_format;
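
With this change the candidate backend lists depend on the incoming model format. For example, loading an ONNX classifier (a sketch; file names are placeholders, and the empty params_file argument reflects that ONNX models ship as a single file):

    fastdeploy::RuntimeOption option;
    option.UseCpu();
    auto model = fastdeploy::vision::classification::PaddleClasModel(
        "model.onnx", "", "inference_cls.yaml", option,
        fastdeploy::ModelFormat::ONNX);
    // CPU candidates are now {ORT, OPENVINO}; PDINFER and LITE only apply
    // to the PADDLE format branch.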
@@ -24,7 +24,7 @@ PicoDet::PicoDet(const std::string& model_file, const std::string& params_file,
                  const RuntimeOption& custom_option,
                  const ModelFormat& model_format) {
   config_file_ = config_file;
-  valid_cpu_backends = {Backend::PDINFER, Backend::ORT, Backend::LITE};
+  valid_cpu_backends = {Backend::OPENVINO, Backend::PDINFER, Backend::ORT, Backend::LITE};
   valid_gpu_backends = {Backend::ORT, Backend::PDINFER, Backend::TRT};
   runtime_option = custom_option;
   runtime_option.model_format = model_format;
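
With OPENVINO now heading PicoDet's CPU candidate list, a plain CPU RuntimeOption will select it first when it is compiled in; it can also be requested explicitly and will pass the specified-backend check above (a sketch; paths are placeholders):

    fastdeploy::RuntimeOption option;
    option.UseCpu();
    option.UseOpenVINOBackend();  // now in valid_cpu_backends for PicoDet
    auto model = fastdeploy::vision::detection::PicoDet(
        "model.pdmodel", "model.pdiparams", "infer_cfg.yml", option);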
@@ -103,16 +103,11 @@ bool PaddleSegModel::BuildPreprocessPipelineFromConfig() {
     int input_height = input_shape[2].as<int>();
     int input_width = input_shape[3].as<int>();
     if (input_height == -1 || input_width == -1) {
-      FDWARNING << "The exported PaddleSeg model is with dynamic shape input, "
-                << "which is not supported by ONNX Runtime and Tensorrt. "
-                << "Only OpenVINO and Paddle Inference are available now. "
-                << "For using ONNX Runtime or Tensorrt, "
-                << "Please refer to "
-                   "https://github.com/PaddlePaddle/PaddleSeg/blob/develop/"
-                   "docs/model_export.md"
-                << " to export model with fixed input shape." << std::endl;
-      valid_cpu_backends = {Backend::OPENVINO, Backend::PDINFER, Backend::LITE};
-      valid_gpu_backends = {Backend::PDINFER};
+      FDWARNING << "Some exportd PaddleSeg models with dynamic shape may "
+                   "not be able inference with ONNX Runtime/TensorRT, if error "
+                   "happend, please try to change to use Paddle "
+                   "Inference/OpenVINO backends instead, or export model with "
+                   "fixed input shape." << std::endl;
     }
     if (input_height != -1 && input_width != -1 && !yml_contain_resize_op) {
       processors_.push_back(
@@ -45,9 +45,15 @@ import multiprocessing
 with open(os.path.join(TOP_DIR, "python", "requirements.txt")) as fin:
     REQUIRED_PACKAGES = fin.read()
 
+if os.getenv("BUILD_ON_CPU", "OFF") == "ON":
+    os.environ["ENABLE_PADDLE_BACKEND"] = "ON"
+    os.environ["ENABLE_ORT_BACKEND"] = "ON"
+    os.environ["ENABLE_OPENVINO_BACKEND"] = "ON"
+    os.environ["ENABLE_VISION"] = "ON"
+    os.environ["ENABLE_TEXT"] = "ON"
+    os.environ["WITH_GPU"] = "OFF"
+
 setup_configs = dict()
-setup_configs["ENABLE_PADDLE_FRONTEND"] = os.getenv("ENABLE_PADDLE_FRONTEND",
-                                                    "ON")
 setup_configs["ENABLE_RKNPU2_BACKEND"] = os.getenv("ENABLE_RKNPU2_BACKEND",
                                                    "OFF")
 setup_configs["ENABLE_ORT_BACKEND"] = os.getenv("ENABLE_ORT_BACKEND", "OFF")