Merge branch 'develop' into add_batch_size_for_uie

This commit is contained in:
Jack Zhou
2022-12-28 10:51:40 +08:00
committed by GitHub
253 changed files with 6554 additions and 2573 deletions

python/__init__.py Normal file

@@ -37,3 +37,4 @@ from . import vision
from . import pipeline
from . import text
from .download import download, download_and_decompress, download_model
from . import serving


@@ -263,18 +263,18 @@ class RuntimeOption:
return
return self._option.use_gpu(device_id)
def use_xpu(self,
device_id=0,
l3_workspace_size=16 * 1024 * 1024,
locked=False,
autotune=True,
autotune_file="",
precision="int16",
adaptive_seqlen=False,
enable_multi_stream=False):
"""Inference with XPU
def use_kunlunxin(self,
device_id=0,
l3_workspace_size=16 * 1024 * 1024,
locked=False,
autotune=True,
autotune_file="",
precision="int16",
adaptive_seqlen=False,
enable_multi_stream=False):
"""Inference with KunlunXin XPU
:param device_id: (int)The index of XPU to be used for inference, default 0
:param device_id: (int)The index of KunlunXin XPU to be used for inference, default 0
:param l3_workspace_size: (int)The size of the video memory allocated by the l3 cache, the maximum is 16M, default 16M
:param locked: (bool)Whether the allocated L3 cache can be locked. If false, it means that the L3 cache is not locked,
and the allocated L3 cache can be shared by multiple models, and multiple models
@@ -285,11 +285,11 @@ class RuntimeOption:
the algorithm specified in the file will be used and autotune will not be performed again.
:param precision: (str)Calculation accuracy of multi_encoder
:param adaptive_seqlen: (bool)adaptive_seqlen Is the input of multi_encoder variable length
:param enable_multi_stream: (bool)Whether to enable the multi stream of xpu.
:param enable_multi_stream: (bool)Whether to enable the multi stream of KunlunXin XPU.
"""
return self._option.use_xpu(device_id, l3_workspace_size, locked,
autotune, autotune_file, precision,
adaptive_seqlen, enable_multi_stream)
return self._option.use_kunlunxin(device_id, l3_workspace_size, locked,
autotune, autotune_file, precision,
adaptive_seqlen, enable_multi_stream)
def use_cpu(self):
"""Inference with CPU

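For reference, a minimal sketch of how the renamed option is meant to be used from Python; the ONNX file name is a placeholder, and the FastestDet wrapper referenced here is the one added later in this change set:

import fastdeploy as fd

option = fd.RuntimeOption()
# Previously: option.use_xpu(device_id=0)
option.use_kunlunxin(device_id=0, l3_workspace_size=16 * 1024 * 1024)

# Any FastDeploy model accepts the configured RuntimeOption.
model = fd.vision.detection.FastestDet("FastestDet.onnx", runtime_option=option)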

@@ -0,0 +1,14 @@
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import


@@ -0,0 +1,16 @@
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from .base_handler import BaseModelHandler
from .vision_model_handler import VisionModelHandler


@@ -0,0 +1,28 @@
# coding:utf-8
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
from abc import ABCMeta, abstractmethod
class BaseModelHandler(metaclass=ABCMeta):
def __init__(self):
super().__init__()
@classmethod
@abstractmethod
def process(cls, predictor, data, parameters):
pass

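To make the contract of this abstract base concrete, a custom handler only needs to implement the process classmethod. The EchoModelHandler below is a hypothetical sketch; its name, behaviour and the import path are assumptions based on the file layout in this diff:

from fastdeploy.serving.handler import BaseModelHandler

class EchoModelHandler(BaseModelHandler):
    @classmethod
    def process(cls, predictor, data, parameters):
        # `data` and `parameters` come from the HTTP request body; a real
        # handler would call predictor.predict(...) here and post-process it.
        return {"echo": data, "parameters": parameters}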

@@ -0,0 +1,30 @@
# coding:utf-8
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .base_handler import BaseModelHandler
from ..utils import base64_to_cv2
from ...vision.utils import fd_result_to_json
class VisionModelHandler(BaseModelHandler):
def __init__(self):
super().__init__()
@classmethod
def process(cls, predictor, data, parameters):
# TODO: support batch predict
im = base64_to_cv2(data['image'])
result = predictor.predict(im)
r_str = fd_result_to_json(result)
return r_str


@@ -0,0 +1,57 @@
# coding:utf-8
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import json
import logging
import threading
# from .predictor import Predictor
from .handler import BaseModelHandler
from .utils import lock_predictor
class ModelManager:
def __init__(self, model_handler, predictor):
self._model_handler = model_handler
self._predictors = []
self._predictor_locks = []
self._register(predictor)
def _register(self, predictor):
# Get the model handler
if not issubclass(self._model_handler, BaseModelHandler):
raise TypeError(
"The model_handler must be subclass of BaseModelHandler, please check the type."
)
# TODO: Create multiple predictors to run on different GPUs or different CPU threads
self._predictors.append(predictor)
self._predictor_locks.append(threading.Lock())
def _get_predict_id(self):
t = time.time()
t = int(round(t * 1000))
predictor_id = t % len(self._predictors)
logging.info("The predictor id: {} is selected by running the model.".
format(predictor_id))
return predictor_id
def predict(self, data, parameters):
predictor_id = self._get_predict_id()
with lock_predictor(self._predictor_locks[predictor_id]):
model_output = self._model_handler.process(
self._predictors[predictor_id], data, parameters)
return model_output

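Roughly, this is how the server layer drives the manager. The direct call below is only an illustration and reuses the hypothetical EchoModelHandler sketched above; ModelManager's import path is assumed from the file layout in this diff:

from fastdeploy.serving.model_manager import ModelManager

# One predictor is registered; predict() picks its id, takes the matching
# lock, and delegates to EchoModelHandler.process(predictor, data, parameters).
manager = ModelManager(EchoModelHandler, predictor=None)
output = manager.predict({"ping": 1}, parameters={})
# output == {"echo": {"ping": 1}, "parameters": {}}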

@@ -0,0 +1,16 @@
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from .base_router import BaseRouterManager
from .http_router import HttpRouterManager


@@ -0,0 +1,28 @@
# coding:utf-8
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
class BaseRouterManager(abc.ABC):
_app = None
def __init__(self, app):
super().__init__()
self._app = app
@abc.abstractmethod
def register_models_router(self):
return NotImplemented


@@ -0,0 +1,80 @@
# coding:utf-8
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import typing
import logging
from typing import Optional
from fastapi import APIRouter, Request, HTTPException
from pydantic import BaseModel, Extra, create_model
from .base_router import BaseRouterManager
class ResponseBase(BaseModel):
text: Optional[str] = None
class RequestBase(BaseModel, extra=Extra.forbid):
parameters: Optional[dict] = {}
class HttpRouterManager(BaseRouterManager):
def register_models_router(self, task_name):
# Url path to register the model
paths = [f"/{task_name}"]
for path in paths:
logging.info("FastDeploy Model request [path]={} is genereated.".
format(path))
# Unique name to create the pydantic model
unique_name = hashlib.md5(task_name.encode()).hexdigest()
# Create request model
req_model = create_model(
"RequestModel" + unique_name,
data=(typing.Any, ...),
__base__=RequestBase, )
# Create response model
resp_model = create_model(
"ResponseModel" + unique_name,
result=(typing.Any, ...),
__base__=ResponseBase, )
# Template predict endpoint function to dynamically serve different models
def predict(request: Request, inference_request: req_model):
try:
result = self._app._model_manager.predict(
inference_request.data, inference_request.parameters)
except Exception as e:
raise HTTPException(
status_code=400,
detail=f"Error occurred while running predict: {str(e)}")
return {"result": result}
# Register the route and add to the app
router = APIRouter()
for path in paths:
router.add_api_route(
path,
predict,
methods=["post"],
summary=f"{task_name.title()}",
response_model=resp_model,
response_model_exclude_unset=True,
response_model_exclude_none=True, )
self._app.include_router(router)


@@ -0,0 +1,46 @@
# coding:utf-8
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from fastapi import FastAPI
from .router import HttpRouterManager
from .model_manager import ModelManager
class SimpleServer(FastAPI):
def __init__(self, **kwargs):
"""
Initialize the FastDeploy SimpleServer.
"""
super().__init__(**kwargs)
self._router_manager = HttpRouterManager(self)
self._model_manager = None
self._service_name = "FastDeploy SimpleServer"
self._service_type = None
def register(self, task_name, model_handler, predictor):
"""
Register a model with the SimpleServer. The main arguments are as follows:
Args:
task_name(str): API URL path.
model_handler: Processes the request data and runs the predictor; custom
post processing can be added on top of the predictor result.
predictor: Runs the model prediction.
"""
self._service_type = "models"
model_manager = ModelManager(model_handler, predictor)
self._model_manager = model_manager
# Register model server router
self._router_manager.register_models_router(task_name)

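Putting the serving pieces together, a launch script would look roughly like the sketch below. The file name, model choice, URL path and uvicorn command are illustrative assumptions, and the import paths follow the file layout shown in this diff:

# server_app.py (hypothetical)
import fastdeploy as fd
from fastdeploy.serving.server import SimpleServer
from fastdeploy.serving.handler import VisionModelHandler

model = fd.vision.detection.FastestDet("FastestDet.onnx")

app = SimpleServer()
app.register(
    task_name="fd/fastestdet",
    model_handler=VisionModelHandler,
    predictor=model)

# Run with: uvicorn server_app:app --host 0.0.0.0 --port 8000
# The generated endpoint is then POST /fd/fastestdet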

@@ -0,0 +1,40 @@
# coding:utf-8
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import base64
import numpy as np
import cv2
@contextlib.contextmanager
def lock_predictor(lock):
lock.acquire()
try:
yield
finally:
lock.release()
def cv2_to_base64(image):
data = cv2.imencode('.jpg', image)[1]
return base64.b64encode(data.tobytes()).decode('utf8')
def base64_to_cv2(b64str):
data = base64.b64decode(b64str.encode('utf8'))
data = np.frombuffer(data, np.uint8)
data = cv2.imdecode(data, cv2.IMREAD_COLOR)
return data

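A matching client call, using cv2_to_base64 to build the body expected by VisionModelHandler; the host, port and path are assumptions tied to the hypothetical launch script above:

import json
import cv2
import requests
from fastdeploy.serving.utils import cv2_to_base64

im = cv2.imread("test.jpg")
payload = {"data": {"image": cv2_to_base64(im)}, "parameters": {}}
resp = requests.post("http://127.0.0.1:8000/fd/fastestdet", json=payload)
# The server wraps fd_result_to_json output as {"result": "<json string>"}.
detection = json.loads(resp.json()["result"])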

@@ -19,6 +19,7 @@ from .contrib.scaled_yolov4 import ScaledYOLOv4
from .contrib.nanodet_plus import NanoDetPlus
from .contrib.yolox import YOLOX
from .contrib.yolov5 import *
from .contrib.fastestdet import *
from .contrib.yolov5lite import YOLOv5Lite
from .contrib.yolov6 import YOLOv6
from .contrib.yolov7end2end_trt import YOLOv7End2EndTRT


@@ -0,0 +1,149 @@
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import logging
from .... import FastDeployModel, ModelFormat
from .... import c_lib_wrap as C
class FastestDetPreprocessor:
def __init__(self):
"""Create a preprocessor for FastestDet
"""
self._preprocessor = C.vision.detection.FastestDetPreprocessor()
def run(self, input_ims):
"""Preprocess input images for FastestDet
:param: input_ims: (list of numpy.ndarray)The input image
:return: list of FDTensor
"""
return self._preprocessor.run(input_ims)
@property
def size(self):
"""
Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default size = [352, 352]
"""
return self._preprocessor.size
@size.setter
def size(self, wh):
assert isinstance(wh, (list, tuple)),\
"The value to set `size` must be type of tuple or list."
assert len(wh) == 2,\
"The value to set `size` must contatins 2 elements means [width, height], but now it contains {} elements.".format(
len(wh))
self._preprocessor.size = wh
class FastestDetPostprocessor:
def __init__(self):
"""Create a postprocessor for FastestDet
"""
self._postprocessor = C.vision.detection.FastestDetPostprocessor()
def run(self, runtime_results, ims_info):
"""Postprocess the runtime results for FastestDet
:param: runtime_results: (list of FDTensor)The output FDTensor results from runtime
:param: ims_info: (list of dict)Record input_shape and output_shape
:return: list of DetectionResult(If the runtime_results is predict by batched samples, the length of this list equals to the batch size)
"""
return self._postprocessor.run(runtime_results, ims_info)
@property
def conf_threshold(self):
"""
confidence threshold for postprocessing, default is 0.65
"""
return self._postprocessor.conf_threshold
@property
def nms_threshold(self):
"""
nms threshold for postprocessing, default is 0.45
"""
return self._postprocessor.nms_threshold
@conf_threshold.setter
def conf_threshold(self, conf_threshold):
assert isinstance(conf_threshold, float),\
"The value to set `conf_threshold` must be type of float."
self._postprocessor.conf_threshold = conf_threshold
@nms_threshold.setter
def nms_threshold(self, nms_threshold):
assert isinstance(nms_threshold, float),\
"The value to set `nms_threshold` must be type of float."
self._postprocessor.nms_threshold = nms_threshold
class FastestDet(FastDeployModel):
def __init__(self,
model_file,
params_file="",
runtime_option=None,
model_format=ModelFormat.ONNX):
"""Load a FastestDet model exported by FastestDet.
:param model_file: (str)Path of model file, e.g ./FastestDet.onnx
:param params_file: (str)Path of parameters file, e.g yolox/model.pdiparams, if the model_format is ModelFormat.ONNX, this param will be ignored, can be set as empty string
:param runtime_option: (fastdeploy.RuntimeOption)RuntimeOption for inference this model, if it's None, will use the default backend on CPU
:param model_format: (fastdeploy.ModelFormat)Model format of the loaded model
"""
super(FastestDet, self).__init__(runtime_option)
assert model_format == ModelFormat.ONNX, "FastestDet only supports model format of ModelFormat.ONNX now."
self._model = C.vision.detection.FastestDet(
model_file, params_file, self._runtime_option, model_format)
assert self.initialized, "FastestDet initialize failed."
def predict(self, input_image):
"""Detect an input image
:param input_image: (numpy.ndarray)The input image data, 3-D array with layout HWC, BGR format
:return: DetectionResult
"""
assert input_image is not None, "Input image is None."
return self._model.predict(input_image)
def batch_predict(self, images):
"""Detect a batch of input images
:param images: (list of numpy.ndarray) The input image list, each element is a 3-D array with layout HWC, BGR format
:return list of DetectionResult
"""
assert len(images) == 1, "FastestDet only supports 1 image in batch_predict."
return self._model.batch_predict(images)
@property
def preprocessor(self):
"""Get FastestDetPreprocessor object of the loaded model
:return FastestDetPreprocessor
"""
return self._model.preprocessor
@property
def postprocessor(self):
"""Get FastestDetPostprocessor object of the loaded model
:return FastestDetPostprocessor
"""
return self._model.postprocessor

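A short usage sketch for the new FastestDet wrapper; the weight file and test image are placeholders:

import cv2
import fastdeploy as fd

model = fd.vision.detection.FastestDet("FastestDet.onnx")
# Pre/post-processing parameters are exposed through member objects.
model.preprocessor.size = [352, 352]
model.postprocessor.conf_threshold = 0.65

im = cv2.imread("test.jpg")
result = model.predict(im)  # DetectionResult
print(result)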

@@ -13,9 +13,4 @@
# limitations under the License.
from __future__ import absolute_import
from .contrib.adaface import AdaFace
from .contrib.arcface import ArcFace
from .contrib.cosface import CosFace
from .contrib.insightface_rec import InsightFaceRecognitionModel
from .contrib.partial_fc import PartialFC
from .contrib.vpl import VPL
from .contrib import *


@@ -13,3 +13,5 @@
# limitations under the License.
from __future__ import absolute_import
from .insightface import *
from .adaface import *


@@ -1,126 +0,0 @@
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from .... import FastDeployModel, ModelFormat
from .... import c_lib_wrap as C
class AdaFace(FastDeployModel):
def __init__(self,
model_file,
params_file="",
runtime_option=None,
model_format=ModelFormat.PADDLE):
"""Load a AdaFace model exported by InsigtFace.
:param model_file: (str)Path of model file, e.g ./adaface.onnx
:param params_file: (str)Path of parameters file, e.g yolox/model.pdiparams, if the model_fomat is ModelFormat.ONNX, this param will be ignored, can be set as empty string
:param runtime_option: (fastdeploy.RuntimeOption)RuntimeOption for inference this model, if it's None, will use the default backend on CPU
:param model_format: (fastdeploy.ModelForamt)Model format of the loaded model
"""
# Call the base class to initialize the backend_option
# The initialized option is stored in self._runtime_option
super(AdaFace, self).__init__(runtime_option)
self._model = C.vision.faceid.AdaFace(
model_file, params_file, self._runtime_option, model_format)
# Use self.initialized to check whether the whole model was initialized successfully
assert self.initialized, "AdaFace initialize failed."
def predict(self, input_image):
""" Predict the face recognition result for an input image
:param input_image: (numpy.ndarray)The input image data, 3-D array with layout HWC, BGR format
:return: FaceRecognitionResult
"""
return self._model.predict(input_image)
# Wrappers for some model-related attributes
# Most of them are preprocessing related, e.g. setting model.size = [112, 112] changes the resize size used during preprocessing, provided the model supports it
@property
def size(self):
"""
Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default (112, 112)
"""
return self._model.size
@property
def alpha(self):
"""
Argument for image preprocessing step, alpha value for normalization, default alpha = [1.f / 127.5f, 1.f / 127.5f, 1.f / 127.5f]
"""
return self._model.alpha
@property
def beta(self):
"""
Argument for image preprocessing step, beta values for normalization, default beta = {-1.f, -1.f, -1.f}
"""
return self._model.beta
@property
def swap_rb(self):
"""
Argument for image preprocessing step, whether to swap the B and R channel, such as BGR->RGB, default True.
"""
return self._model.swap_rb
@property
def l2_normalize(self):
"""
Argument for image preprocessing step, whether to apply l2 normalize to embedding values, default False;
"""
return self._model.l2_normalize
@size.setter
def size(self, wh):
assert isinstance(wh, (list, tuple)), \
"The value to set `size` must be type of tuple or list."
assert len(wh) == 2, \
"The value to set `size` must contatins 2 elements means [width, height], but now it contains {} elements.".format(
len(wh))
self._model.size = wh
@alpha.setter
def alpha(self, value):
assert isinstance(value, (list, tuple)), \
"The value to set `alpha` must be type of tuple or list."
assert len(value) == 3, \
"The value to set `alpha` must contatins 3 elements for each channels, but now it contains {} elements.".format(
len(value))
self._model.alpha = value
@beta.setter
def beta(self, value):
assert isinstance(value, (list, tuple)), \
"The value to set `beta` must be type of tuple or list."
assert len(value) == 3, \
"The value to set `beta` must contatins 3 elements for each channels, but now it contains {} elements.".format(
len(value))
self._model.beta = value
@swap_rb.setter
def swap_rb(self, value):
assert isinstance(
value, bool), "The value to set `swap_rb` must be type of bool."
self._model.swap_rb = value
@l2_normalize.setter
def l2_normalize(self, value):
assert isinstance(
value,
bool), "The value to set `l2_normalize` must be type of bool."
self._model.l2_normalize = value


@@ -0,0 +1,109 @@
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from ..... import FastDeployModel, ModelFormat
from ..... import c_lib_wrap as C
class AdaFacePreprocessor:
def __init__(self):
"""Create a preprocessor for AdaFace Model
"""
self._preprocessor = C.vision.faceid.AdaFacePreprocessor()
def run(self, input_ims):
"""Preprocess input images for AdaFace Model
:param: input_ims: (list of numpy.ndarray)The input image
:return: list of FDTensor, include image, scale_factor, im_shape
"""
return self._preprocessor.run(input_ims)
class AdaFacePostprocessor:
def __init__(self):
"""Create a postprocessor for AdaFace Model
"""
self._postprocessor = C.vision.faceid.AdaFacePostprocessor()
def run(self, runtime_results):
"""Postprocess the runtime results for PaddleClas Model
:param: runtime_results: (list of FDTensor)The output FDTensor results from runtime
:return: list of FaceRecognitionResult(If the runtime_results is predict by batched samples, the length of this list equals to the batch size)
"""
return self._postprocessor.run(runtime_results)
@property
def l2_normalize(self):
"""
Whether to apply L2 normalization to the embedding values in postprocessing
"""
return self._postprocessor.l2_normalize
class AdaFace(FastDeployModel):
def __init__(self,
model_file,
params_file="",
runtime_option=None,
model_format=ModelFormat.ONNX):
"""Load a AdaFace model exported by PaddleClas.
:param model_file: (str)Path of model file, e.g adaface/model.pdmodel
:param params_file: (str)Path of parameters file, e.g adaface/model.pdiparams, if the model_fomat is ModelFormat.ONNX, this param will be ignored, can be set as empty string
:param runtime_option: (fastdeploy.RuntimeOption)RuntimeOption for inference this model, if it's None, will use the default backend on CPU
:param model_format: (fastdeploy.ModelForamt)Model format of the loaded model
"""
super(AdaFace, self).__init__(runtime_option)
self._model = C.vision.faceid.AdaFace(
model_file, params_file, self._runtime_option, model_format)
assert self.initialized, "AdaFace model initialize failed."
def predict(self, im):
"""Detect an input image
:param im: (numpy.ndarray)The input image data, 3-D array with layout HWC, BGR format
:return: DetectionResult
"""
assert im is not None, "The input image data is None."
return self._model.predict(im)
def batch_predict(self, images):
"""Detect a batch of input image list
:param im: (list of numpy.ndarray) The input image list, each element is a 3-D array with layout HWC, BGR format
:return list of DetectionResult
"""
return self._model.batch_predict(images)
@property
def preprocessor(self):
"""Get AdaFacePreprocessor object of the loaded model
:return AdaFacePreprocessor
"""
return self._model.preprocessor
@property
def postprocessor(self):
"""Get AdaFacePostprocessor object of the loaded model
:return AdaFacePostprocessor
"""
return self._model.postprocessor

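A minimal sketch of extracting an embedding with the refactored AdaFace wrapper; the model path and the aligned face crop are placeholders, and pass model_format=fd.ModelFormat.PADDLE for a Paddle-format model:

import cv2
import fastdeploy as fd

model = fd.vision.faceid.AdaFace("adaface.onnx")
face = cv2.imread("face_0.jpg")      # an aligned face crop
result = model.predict(face)         # FaceRecognitionResult
embedding = result.embedding         # feature vector used for face matching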

@@ -1,127 +0,0 @@
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import logging
from .... import FastDeployModel, ModelFormat
from .... import c_lib_wrap as C
from ..contrib.insightface_rec import InsightFaceRecognitionModel
class ArcFace(FastDeployModel):
def __init__(self,
model_file,
params_file="",
runtime_option=None,
model_format=ModelFormat.ONNX):
"""Load a ArcFace model exported by InsigtFace.
:param model_file: (str)Path of model file, e.g ./arcface.onnx
:param params_file: (str)Path of parameters file, e.g yolox/model.pdiparams, if the model_fomat is ModelFormat.ONNX, this param will be ignored, can be set as empty string
:param runtime_option: (fastdeploy.RuntimeOption)RuntimeOption for inference this model, if it's None, will use the default backend on CPU
:param model_format: (fastdeploy.ModelForamt)Model format of the loaded model
"""
# Call the base class to initialize the backend_option
# The initialized option is stored in self._runtime_option
super(ArcFace, self).__init__(runtime_option)
self._model = C.vision.faceid.ArcFace(
model_file, params_file, self._runtime_option, model_format)
# Use self.initialized to check whether the whole model was initialized successfully
assert self.initialized, "ArcFace initialize failed."
def predict(self, input_image):
""" Predict the face recognition result for an input image
:param input_image: (numpy.ndarray)The input image data, 3-D array with layout HWC, BGR format
:return: FaceRecognitionResult
"""
return self._model.predict(input_image)
# Wrappers for some model-related attributes
# Most of them are preprocessing related, e.g. setting model.size = [112, 112] changes the resize size used during preprocessing, provided the model supports it
@property
def size(self):
"""
Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default (112, 112)
"""
return self._model.size
@property
def alpha(self):
"""
Argument for image preprocessing step, alpha value for normalization, default alpha = [1.f / 127.5f, 1.f / 127.5f, 1.f / 127.5f]
"""
return self._model.alpha
@property
def beta(self):
"""
Argument for image preprocessing step, beta values for normalization, default beta = {-1.f, -1.f, -1.f}
"""
return self._model.beta
@property
def swap_rb(self):
"""
Argument for image preprocessing step, whether to swap the B and R channel, such as BGR->RGB, default True.
"""
return self._model.swap_rb
@property
def l2_normalize(self):
"""
Argument for image preprocessing step, whether to apply l2 normalize to embedding values, default False;
"""
return self._model.l2_normalize
@size.setter
def size(self, wh):
assert isinstance(wh, (list, tuple)),\
"The value to set `size` must be type of tuple or list."
assert len(wh) == 2,\
"The value to set `size` must contatins 2 elements means [width, height], but now it contains {} elements.".format(
len(wh))
self._model.size = wh
@alpha.setter
def alpha(self, value):
assert isinstance(value, (list, tuple)),\
"The value to set `alpha` must be type of tuple or list."
assert len(value) == 3,\
"The value to set `alpha` must contatins 3 elements for each channels, but now it contains {} elements.".format(
len(value))
self._model.alpha = value
@beta.setter
def beta(self, value):
assert isinstance(value, (list, tuple)),\
"The value to set `beta` must be type of tuple or list."
assert len(value) == 3,\
"The value to set `beta` must contatins 3 elements for each channels, but now it contains {} elements.".format(
len(value))
self._model.beta = value
@swap_rb.setter
def swap_rb(self, value):
assert isinstance(
value, bool), "The value to set `swap_rb` must be type of bool."
self._model.swap_rb = value
@l2_normalize.setter
def l2_normalize(self, value):
assert isinstance(
value,
bool), "The value to set `l2_normalize` must be type of bool."
self._model.l2_normalize = value


@@ -1,126 +0,0 @@
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import logging
from .... import FastDeployModel, ModelFormat
from .... import c_lib_wrap as C
class CosFace(FastDeployModel):
def __init__(self,
model_file,
params_file="",
runtime_option=None,
model_format=ModelFormat.ONNX):
"""Load a CosFace model exported by InsigtFace.
:param model_file: (str)Path of model file, e.g ./cosface.onnx
:param params_file: (str)Path of parameters file, e.g yolox/model.pdiparams, if the model_fomat is ModelFormat.ONNX, this param will be ignored, can be set as empty string
:param runtime_option: (fastdeploy.RuntimeOption)RuntimeOption for inference this model, if it's None, will use the default backend on CPU
:param model_format: (fastdeploy.ModelForamt)Model format of the loaded model
"""
# Call the base class to initialize the backend_option
# The initialized option is stored in self._runtime_option
super(CosFace, self).__init__(runtime_option)
self._model = C.vision.faceid.CosFace(
model_file, params_file, self._runtime_option, model_format)
# Use self.initialized to check whether the whole model was initialized successfully
assert self.initialized, "CosFace initialize failed."
def predict(self, input_image):
""" Predict the face recognition result for an input image
:param input_image: (numpy.ndarray)The input image data, 3-D array with layout HWC, BGR format
:return: FaceRecognitionResult
"""
return self._model.predict(input_image)
# Wrappers for some model-related attributes
# Most of them are preprocessing related, e.g. setting model.size = [112, 112] changes the resize size used during preprocessing, provided the model supports it
@property
def size(self):
"""
Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default (112, 112)
"""
return self._model.size
@property
def alpha(self):
"""
Argument for image preprocessing step, alpha value for normalization, default alpha = [1.f / 127.5f, 1.f / 127.5f, 1.f / 127.5f]
"""
return self._model.alpha
@property
def beta(self):
"""
Argument for image preprocessing step, beta values for normalization, default beta = {-1.f, -1.f, -1.f}
"""
return self._model.beta
@property
def swap_rb(self):
"""
Argument for image preprocessing step, whether to swap the B and R channel, such as BGR->RGB, default True.
"""
return self._model.swap_rb
@property
def l2_normalize(self):
"""
Argument for image preprocessing step, whether to apply l2 normalize to embedding values, default False;
"""
return self._model.l2_normalize
@size.setter
def size(self, wh):
assert isinstance(wh, (list, tuple)),\
"The value to set `size` must be type of tuple or list."
assert len(wh) == 2,\
"The value to set `size` must contatins 2 elements means [width, height], but now it contains {} elements.".format(
len(wh))
self._model.size = wh
@alpha.setter
def alpha(self, value):
assert isinstance(value, (list, tuple)),\
"The value to set `alpha` must be type of tuple or list."
assert len(value) == 3,\
"The value to set `alpha` must contatins 3 elements for each channels, but now it contains {} elements.".format(
len(value))
self._model.alpha = value
@beta.setter
def beta(self, value):
assert isinstance(value, (list, tuple)),\
"The value to set `beta` must be type of tuple or list."
assert len(value) == 3,\
"The value to set `beta` must contatins 3 elements for each channels, but now it contains {} elements.".format(
len(value))
self._model.beta = value
@swap_rb.setter
def swap_rb(self, value):
assert isinstance(
value, bool), "The value to set `swap_rb` must be type of bool."
self._model.swap_rb = value
@l2_normalize.setter
def l2_normalize(self, value):
assert isinstance(
value,
bool), "The value to set `l2_normalize` must be type of bool."
self._model.l2_normalize = value


@@ -0,0 +1,222 @@
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from ..... import FastDeployModel, ModelFormat
from ..... import c_lib_wrap as C
class InsightFaceRecognitionPreprocessor:
def __init__(self):
"""Create a preprocessor for InsightFaceRecognition Model
"""
self._preprocessor = C.vision.faceid.InsightFaceRecognitionPreprocessor(
)
def run(self, input_ims):
"""Preprocess input images for InsightFaceRecognition Model
:param: input_ims: (list of numpy.ndarray)The input image
:return: list of FDTensor, include image, scale_factor, im_shape
"""
return self._preprocessor.run(input_ims)
@property
def size(self):
"""
Argument for image preprocessing step, tuple of (width, height),
decide the target size after resize, default (112, 112)
"""
return self._preprocessor.size
@property
def alpha(self):
"""
Argument for image preprocessing step, alpha values for normalization,
default alpha = {1.f / 127.5f, 1.f / 127.5f, 1.f / 127.5f};
"""
return self._preprocessor.alpha
@property
def beta(self):
"""
Argument for image preprocessing step, beta values for normalization,
default beta = {-1.f, -1.f, -1.f}
"""
return self._preprocessor.beta
@property
def permute(self):
"""
Argument for image preprocessing step, whether to swap the B and R channel,
such as BGR->RGB, default true.
"""
return self._preprocessor.permute
class InsightFaceRecognitionPostprocessor:
def __init__(self):
"""Create a postprocessor for InsightFaceRecognition Model
"""
self._postprocessor = C.vision.faceid.InsightFaceRecognitionPostprocessor(
)
def run(self, runtime_results):
"""Postprocess the runtime results for PaddleClas Model
:param: runtime_results: (list of FDTensor)The output FDTensor results from runtime
:return: list of FaceRecognitionResult(If the runtime_results is predict by batched samples, the length of this list equals to the batch size)
"""
return self._postprocessor.run(runtime_results)
@property
def l2_normalize(self):
"""
Whether to apply L2 normalization to the embedding values in postprocessing
"""
return self._postprocessor.l2_normalize
class InsightFaceRecognitionBase(FastDeployModel):
def __init__(self,
model_file,
params_file="",
runtime_option=None,
model_format=ModelFormat.ONNX):
"""Load a InsightFaceRecognitionBase model exported by PaddleClas.
:param model_file: (str)Path of model file, e.g InsightFaceRecognitionBase/model.pdmodel
:param params_file: (str)Path of parameters file, e.g InsightFaceRecognitionBase/model.pdiparams, if the model_fomat is ModelFormat.ONNX, this param will be ignored, can be set as empty string
:param runtime_option: (fastdeploy.RuntimeOption)RuntimeOption for inference this model, if it's None, will use the default backend on CPU
:param model_format: (fastdeploy.ModelForamt)Model format of the loaded model
"""
super(InsightFaceRecognitionBase, self).__init__(runtime_option)
self._model = C.vision.faceid.InsightFaceRecognitionBase(
model_file, params_file, self._runtime_option, model_format)
assert self.initialized, "InsightFaceRecognitionBase model initialize failed."
def predict(self, im):
"""Detect an input image
:param im: (numpy.ndarray)The input image data, 3-D array with layout HWC, BGR format
:return: DetectionResult
"""
assert im is not None, "The input image data is None."
return self._model.predict(im)
def batch_predict(self, images):
"""Detect a batch of input image list
:param im: (list of numpy.ndarray) The input image list, each element is a 3-D array with layout HWC, BGR format
:return list of DetectionResult
"""
return self._model.batch_predict(images)
@property
def preprocessor(self):
"""Get InsightFaceRecognitionPreprocessor object of the loaded model
:return InsightFaceRecognitionPreprocessor
"""
return self._model.preprocessor
@property
def postprocessor(self):
"""Get InsightFaceRecognitionPostprocessor object of the loaded model
:return InsightFaceRecognitionPostprocessor
"""
return self._model.postprocessor
class ArcFace(InsightFaceRecognitionBase):
def __init__(self,
model_file,
params_file="",
runtime_option=None,
model_format=ModelFormat.ONNX):
"""Load a ArcFace model exported by PaddleClas.
:param model_file: (str)Path of model file, e.g ArcFace/model.pdmodel
:param params_file: (str)Path of parameters file, e.g ArcFace/model.pdiparams, if the model_fomat is ModelFormat.ONNX, this param will be ignored, can be set as empty string
:param runtime_option: (fastdeploy.RuntimeOption)RuntimeOption for inference this model, if it's None, will use the default backend on CPU
:param model_format: (fastdeploy.ModelForamt)Model format of the loaded model
"""
super(InsightFaceRecognitionBase, self).__init__(runtime_option)
self._model = C.vision.faceid.ArcFace(
model_file, params_file, self._runtime_option, model_format)
assert self.initialized, "ArcFace model initialize failed."
class CosFace(InsightFaceRecognitionBase):
def __init__(self,
model_file,
params_file="",
runtime_option=None,
model_format=ModelFormat.ONNX):
"""Load a CosFace model exported by PaddleClas.
:param model_file: (str)Path of model file, e.g CosFace/model.pdmodel
:param params_file: (str)Path of parameters file, e.g CosFace/model.pdiparams, if the model_fomat is ModelFormat.ONNX, this param will be ignored, can be set as empty string
:param runtime_option: (fastdeploy.RuntimeOption)RuntimeOption for inference this model, if it's None, will use the default backend on CPU
:param model_format: (fastdeploy.ModelForamt)Model format of the loaded model
"""
super(InsightFaceRecognitionBase, self).__init__(runtime_option)
self._model = C.vision.faceid.CosFace(
model_file, params_file, self._runtime_option, model_format)
assert self.initialized, "CosFace model initialize failed."
class PartialFC(InsightFaceRecognitionBase):
def __init__(self,
model_file,
params_file="",
runtime_option=None,
model_format=ModelFormat.ONNX):
"""Load a PartialFC model exported by PaddleClas.
:param model_file: (str)Path of model file, e.g PartialFC/model.pdmodel
:param params_file: (str)Path of parameters file, e.g PartialFC/model.pdiparams, if the model_fomat is ModelFormat.ONNX, this param will be ignored, can be set as empty string
:param runtime_option: (fastdeploy.RuntimeOption)RuntimeOption for inference this model, if it's None, will use the default backend on CPU
:param model_format: (fastdeploy.ModelForamt)Model format of the loaded model
"""
super(InsightFaceRecognitionBase, self).__init__(runtime_option)
self._model = C.vision.faceid.PartialFC(
model_file, params_file, self._runtime_option, model_format)
assert self.initialized, "PartialFC model initialize failed."
class VPL(InsightFaceRecognitionBase):
def __init__(self,
model_file,
params_file="",
runtime_option=None,
model_format=ModelFormat.ONNX):
"""Load a VPL model exported by PaddleClas.
:param model_file: (str)Path of model file, e.g VPL/model.pdmodel
:param params_file: (str)Path of parameters file, e.g VPL/model.pdiparams, if the model_fomat is ModelFormat.ONNX, this param will be ignored, can be set as empty string
:param runtime_option: (fastdeploy.RuntimeOption)RuntimeOption for inference this model, if it's None, will use the default backend on CPU
:param model_format: (fastdeploy.ModelForamt)Model format of the loaded model
"""
super(InsightFaceRecognitionBase, self).__init__(runtime_option)
self._model = C.vision.faceid.VPL(model_file, params_file,
self._runtime_option, model_format)
assert self.initialized, "VPL model initialize failed."

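For the consolidated InsightFace wrappers, a sketch of comparing two aligned face crops with ArcFace; the weight file is a placeholder and the inline cosine similarity is an assumption (FastDeploy also provides its own face-matching utilities):

import cv2
import numpy as np
import fastdeploy as fd

model = fd.vision.faceid.ArcFace("arcface_r100.onnx")

def embed(path):
    result = model.predict(cv2.imread(path))
    return np.array(result.embedding)

a, b = embed("face_0.jpg"), embed("face_1.jpg")
# Cosine similarity of the two embeddings; higher means more likely the same identity.
score = float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))
print(score)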

@@ -1,126 +0,0 @@
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import logging
from .... import FastDeployModel, ModelFormat
from .... import c_lib_wrap as C
class InsightFaceRecognitionModel(FastDeployModel):
def __init__(self,
model_file,
params_file="",
runtime_option=None,
model_format=ModelFormat.ONNX):
"""Load a InsightFace model exported by InsigtFace.
:param model_file: (str)Path of model file, e.g ./arcface.onnx
:param params_file: (str)Path of parameters file, e.g yolox/model.pdiparams, if the model_fomat is ModelFormat.ONNX, this param will be ignored, can be set as empty string
:param runtime_option: (fastdeploy.RuntimeOption)RuntimeOption for inference this model, if it's None, will use the default backend on CPU
:param model_format: (fastdeploy.ModelForamt)Model format of the loaded model
"""
# Call the base class to initialize the backend_option
# The initialized option is stored in self._runtime_option
super(InsightFaceRecognitionModel, self).__init__(runtime_option)
self._model = C.vision.faceid.InsightFaceRecognitionModel(
model_file, params_file, self._runtime_option, model_format)
# Use self.initialized to check whether the whole model was initialized successfully
assert self.initialized, "InsightFaceRecognitionModel initialize failed."
def predict(self, input_image):
""" Predict the face recognition result for an input image
:param input_image: (numpy.ndarray)The input image data, 3-D array with layout HWC, BGR format
:return: FaceRecognitionResult
"""
return self._model.predict(input_image)
# Wrappers for some attributes related to the InsightFaceRecognitionModel
# Most of them are preprocessing related, e.g. setting model.size = [112, 112] changes the resize size used during preprocessing, provided the model supports it
@property
def size(self):
"""
Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default (112, 112)
"""
return self._model.size
@property
def alpha(self):
"""
Argument for image preprocessing step, alpha value for normalization, default alpha = [1.f / 127.5f, 1.f / 127.5f, 1.f / 127.5f]
"""
return self._model.alpha
@property
def beta(self):
"""
Argument for image preprocessing step, beta values for normalization, default beta = {-1.f, -1.f, -1.f}
"""
return self._model.beta
@property
def swap_rb(self):
"""
Argument for image preprocessing step, whether to swap the B and R channel, such as BGR->RGB, default True.
"""
return self._model.swap_rb
@property
def l2_normalize(self):
"""
Argument for image preprocessing step, whether to apply l2 normalize to embedding values, default False;
"""
return self._model.l2_normalize
@size.setter
def size(self, wh):
assert isinstance(wh, (list, tuple)),\
"The value to set `size` must be type of tuple or list."
assert len(wh) == 2,\
"The value to set `size` must contatins 2 elements means [width, height], but now it contains {} elements.".format(
len(wh))
self._model.size = wh
@alpha.setter
def alpha(self, value):
assert isinstance(value, (list, tuple)),\
"The value to set `alpha` must be type of tuple or list."
assert len(value) == 3,\
"The value to set `alpha` must contatins 3 elements for each channels, but now it contains {} elements.".format(
len(value))
self._model.alpha = value
@beta.setter
def beta(self, value):
assert isinstance(value, (list, tuple)),\
"The value to set `beta` must be type of tuple or list."
assert len(value) == 3,\
"The value to set `beta` must contatins 3 elements for each channels, but now it contains {} elements.".format(
len(value))
self._model.beta = value
@swap_rb.setter
def swap_rb(self, value):
assert isinstance(
value, bool), "The value to set `swap_rb` must be type of bool."
self._model.swap_rb = value
@l2_normalize.setter
def l2_normalize(self, value):
assert isinstance(
value,
bool), "The value to set `l2_normalize` must be type of bool."
self._model.l2_normalize = value


@@ -1,126 +0,0 @@
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import logging
from .... import FastDeployModel, ModelFormat
from .... import c_lib_wrap as C
class PartialFC(FastDeployModel):
def __init__(self,
model_file,
params_file="",
runtime_option=None,
model_format=ModelFormat.ONNX):
"""Load a PartialFC model exported by InsigtFace.
:param model_file: (str)Path of model file, e.g ./partial_fc.onnx
:param params_file: (str)Path of parameters file, e.g yolox/model.pdiparams, if the model_fomat is ModelFormat.ONNX, this param will be ignored, can be set as empty string
:param runtime_option: (fastdeploy.RuntimeOption)RuntimeOption for inference this model, if it's None, will use the default backend on CPU
:param model_format: (fastdeploy.ModelForamt)Model format of the loaded model
"""
# Call the base class to initialize the backend_option
# The initialized option is stored in self._runtime_option
super(PartialFC, self).__init__(runtime_option)
self._model = C.vision.faceid.PartialFC(
model_file, params_file, self._runtime_option, model_format)
# Use self.initialized to check whether the whole model was initialized successfully
assert self.initialized, "PartialFC initialize failed."
def predict(self, input_image):
""" Predict the face recognition result for an input image
:param input_image: (numpy.ndarray)The input image data, 3-D array with layout HWC, BGR format
:return: FaceRecognitionResult
"""
return self._model.predict(input_image)
# Wrappers for some model-related attributes
# Most of them are preprocessing related, e.g. setting model.size = [112, 112] changes the resize size used during preprocessing, provided the model supports it
@property
def size(self):
"""
Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default (112, 112)
"""
return self._model.size
@property
def alpha(self):
"""
Argument for image preprocessing step, alpha value for normalization, default alpha = [1.f / 127.5f, 1.f / 127.5f, 1.f / 127.5f]
"""
return self._model.alpha
@property
def beta(self):
"""
Argument for image preprocessing step, beta values for normalization, default beta = {-1.f, -1.f, -1.f}
"""
return self._model.beta
@property
def swap_rb(self):
"""
Argument for image preprocessing step, whether to swap the B and R channel, such as BGR->RGB, default True.
"""
return self._model.swap_rb
@property
def l2_normalize(self):
"""
Argument for image preprocessing step, whether to apply l2 normalize to embedding values, default False;
"""
return self._model.l2_normalize
@size.setter
def size(self, wh):
assert isinstance(wh, (list, tuple)),\
"The value to set `size` must be type of tuple or list."
assert len(wh) == 2,\
"The value to set `size` must contatins 2 elements means [width, height], but now it contains {} elements.".format(
len(wh))
self._model.size = wh
@alpha.setter
def alpha(self, value):
assert isinstance(value, (list, tuple)),\
"The value to set `alpha` must be type of tuple or list."
assert len(value) == 3,\
"The value to set `alpha` must contatins 3 elements for each channels, but now it contains {} elements.".format(
len(value))
self._model.alpha = value
@beta.setter
def beta(self, value):
assert isinstance(value, (list, tuple)),\
"The value to set `beta` must be type of tuple or list."
assert len(value) == 3,\
"The value to set `beta` must contatins 3 elements for each channels, but now it contains {} elements.".format(
len(value))
self._model.beta = value
@swap_rb.setter
def swap_rb(self, value):
assert isinstance(
value, bool), "The value to set `swap_rb` must be type of bool."
self._model.swap_rb = value
@l2_normalize.setter
def l2_normalize(self, value):
assert isinstance(
value,
bool), "The value to set `l2_normalize` must be type of bool."
self._model.l2_normalize = value


@@ -1,126 +0,0 @@
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import logging
from .... import FastDeployModel, ModelFormat
from .... import c_lib_wrap as C
class VPL(FastDeployModel):
def __init__(self,
model_file,
params_file="",
runtime_option=None,
model_format=ModelFormat.ONNX):
"""Load a VPL model exported by InsigtFace.
:param model_file: (str)Path of model file, e.g ./vpl.onnx
:param params_file: (str)Path of parameters file, e.g yolox/model.pdiparams, if the model_fomat is ModelFormat.ONNX, this param will be ignored, can be set as empty string
:param runtime_option: (fastdeploy.RuntimeOption)RuntimeOption for inference this model, if it's None, will use the default backend on CPU
:param model_format: (fastdeploy.ModelForamt)Model format of the loaded model
"""
# Call the base class to initialize the backend_option
# The initialized option is stored in self._runtime_option
super(VPL, self).__init__(runtime_option)
self._model = C.vision.faceid.VPL(model_file, params_file,
self._runtime_option, model_format)
# Use self.initialized to check whether the whole model was initialized successfully
assert self.initialized, "VPL initialize failed."
def predict(self, input_image):
""" Predict the face recognition result for an input image
:param input_image: (numpy.ndarray)The input image data, 3-D array with layout HWC, BGR format
:return: FaceRecognitionResult
"""
return self._model.predict(input_image)
# Wrappers for some model-related attributes
# Most of them are preprocessing related, e.g. setting model.size = [112, 112] changes the resize size used during preprocessing, provided the model supports it
@property
def size(self):
"""
Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default (112, 112)
"""
return self._model.size
@property
def alpha(self):
"""
Argument for image preprocessing step, alpha value for normalization, default alpha = [1.f / 127.5f, 1.f / 127.5f, 1.f / 127.5f]
"""
return self._model.alpha
@property
def beta(self):
"""
Argument for image preprocessing step, beta values for normalization, default beta = {-1.f, -1.f, -1.f}
"""
return self._model.beta
@property
def swap_rb(self):
"""
Argument for image preprocessing step, whether to swap the B and R channel, such as BGR->RGB, default True.
"""
return self._model.swap_rb
@property
def l2_normalize(self):
"""
Argument for image preprocessing step, whether to apply l2 normalize to embedding values, default False;
"""
return self._model.l2_normalize
@size.setter
def size(self, wh):
assert isinstance(wh, (list, tuple)),\
"The value to set `size` must be type of tuple or list."
assert len(wh) == 2,\
"The value to set `size` must contatins 2 elements means [width, height], but now it contains {} elements.".format(
len(wh))
self._model.size = wh
@alpha.setter
def alpha(self, value):
assert isinstance(value, (list, tuple)),\
"The value to set `alpha` must be type of tuple or list."
assert len(value) == 3,\
"The value to set `alpha` must contatins 3 elements for each channels, but now it contains {} elements.".format(
len(value))
self._model.alpha = value
@beta.setter
def beta(self, value):
assert isinstance(value, (list, tuple)),\
"The value to set `beta` must be type of tuple or list."
assert len(value) == 3,\
"The value to set `beta` must contatins 3 elements for each channels, but now it contains {} elements.".format(
len(value))
self._model.beta = value
@swap_rb.setter
def swap_rb(self, value):
assert isinstance(
value, bool), "The value to set `swap_rb` must be type of bool."
self._model.swap_rb = value
@l2_normalize.setter
def l2_normalize(self, value):
assert isinstance(
value,
bool), "The value to set `l2_normalize` must be type of bool."
self._model.l2_normalize = value


@@ -46,6 +46,81 @@ def classify_to_json(result):
return json.dumps(r_json)
def keypoint_to_json(result):
r_json = {
"keypoints": result.keypoints,
"scores": result.scores,
"num_joints": result.num_joints,
}
return json.dumps(r_json)
def ocr_to_json(result):
r_json = {
"boxes": result.boxes,
"text": result.text,
"rec_scores": result.rec_scores,
"cls_scores": result.cls_scores,
"cls_labels": result.cls_labels,
}
return json.dumps(r_json)
def mot_to_json(result):
r_json = {
"boxes": result.boxes,
"ids": result.ids,
"scores": result.scores,
"class_ids": result.class_ids,
}
return json.dumps(r_json)
def face_detection_to_json(result):
r_json = {
"boxes": result.boxes,
"landmarks": result.landmarks,
"scores": result.scores,
"landmarks_per_face": result.landmarks_per_face,
}
return json.dumps(r_json)
def face_alignment_to_json(result):
r_json = {"landmarks": result.landmarks, }
return json.dumps(r_json)
def face_recognition_to_json(result):
r_json = {"embedding": result.embedding, }
return json.dumps(r_json)
def segmentation_to_json(result):
r_json = {
"label_map": result.label_map,
"score_map": result.score_map,
"shape": result.shape,
"contain_score_map": result.contain_score_map,
}
return json.dumps(r_json)
def matting_to_json(result):
r_json = {
"alpha": result.alpha,
"foreground": result.foreground,
"shape": result.shape,
"contain_foreground": result.contain_foreground,
}
return json.dumps(r_json)
def head_pose_to_json(result):
r_json = {"euler_angles": result.euler_angles, }
return json.dumps(r_json)
def fd_result_to_json(result):
if isinstance(result, list):
r_list = []
@@ -58,7 +133,124 @@ def fd_result_to_json(result):
return mask_to_json(result)
elif isinstance(result, C.vision.ClassifyResult):
return classify_to_json(result)
elif isinstance(result, C.vision.KeyPointDetectionResult):
return keypoint_to_json(result)
elif isinstance(result, C.vision.OCRResult):
return ocr_to_json(result)
elif isinstance(result, C.vision.MOTResult):
return mot_to_json(result)
elif isinstance(result, C.vision.FaceDetectionResult):
return face_detection_to_json(result)
elif isinstance(result, C.vision.FaceAlignmentResult):
return face_alignment_to_json(result)
elif isinstance(result, C.vision.FaceRecognitionResult):
return face_recognition_to_json(result)
elif isinstance(result, C.vision.SegmentationResult):
return segmentation_to_json(result)
elif isinstance(result, C.vision.MattingResult):
return matting_to_json(result)
elif isinstance(result, C.vision.HeadPoseResult):
return head_pose_to_json(result)
else:
assert False, "Conversion of {} to JSON format is not supported".format(
type(result))
return {}
def json_to_mask(result):
mask = C.vision.Mask()
mask.data = result['data']
mask.shape = result['shape']
return mask
def json_to_detection(result):
masks = []
for mask in result['masks']:
masks.append(json_to_mask(json.loads(mask)))
det_result = C.vision.DetectionResult()
det_result.boxes = result['boxes']
det_result.scores = result['scores']
det_result.label_ids = result['label_ids']
det_result.masks = masks
det_result.contain_masks = result['contain_masks']
return det_result
def json_to_classify(result):
cls_result = C.vision.ClassifyResult()
cls_result.label_ids = result['label_ids']
cls_result.scores = result['scores']
return cls_result
def json_to_keypoint(result):
kp_result = C.vision.KeyPointDetectionResult()
kp_result.keypoints = result['keypoints']
kp_result.scores = result['scores']
kp_result.num_joints = result['num_joints']
return kp_result
def json_to_ocr(result):
ocr_result = C.vision.OCRResult()
ocr_result.boxes = result['boxes']
ocr_result.text = result['text']
ocr_result.rec_scores = result['rec_scores']
ocr_result.cls_scores = result['cls_scores']
ocr_result.cls_labels = result['cls_labels']
return ocr_result
def json_to_mot(result):
mot_result = C.vision.MOTResult()
mot_result.boxes = result['boxes']
mot_result.ids = result['ids']
mot_result.scores = result['scores']
mot_result.class_ids = result['class_ids']
return mot_result
def json_to_face_detection(result):
face_result = C.vision.FaceDetectionResult()
face_result.boxes = result['boxes']
face_result.landmarks = result['landmarks']
face_result.scores = result['scores']
face_result.landmarks_per_face = result['landmarks_per_face']
return face_result
def json_to_face_alignment(result):
face_result = C.vision.FaceAlignmentResult()
face_result.landmarks = result['landmarks']
return face_result
def json_to_face_recognition(result):
face_result = C.vision.FaceRecognitionResult()
face_result.embedding = result['embedding']
return face_result
def json_to_segmentation(result):
seg_result = C.vision.SegmentationResult()
seg_result.label_map = result['label_map']
seg_result.score_map = result['score_map']
seg_result.shape = result['shape']
seg_result.contain_score_map = result['contain_score_map']
return seg_result
def json_to_matting(result):
matting_result = C.vision.MattingResult()
matting_result.alpha = result['alpha']
matting_result.foreground = result['foreground']
matting_result.shape = result['shape']
matting_result.contain_foreground = result['contain_foreground']
return matting_result
def json_to_head_pose(result):
hp_result = C.vision.HeadPoseResult()
hp_result.euler_angles = result['euler_angles']
return hp_result

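Because the to-JSON and from-JSON helpers are symmetric, a result can be round-tripped through the HTTP layer above. A sketch follows; the model path and image are placeholders, and the import path is assumed from the file layout in this diff:

import json
import cv2
import fastdeploy as fd
from fastdeploy.vision.utils import fd_result_to_json, json_to_detection

model = fd.vision.detection.FastestDet("FastestDet.onnx")
result = model.predict(cv2.imread("test.jpg"))

payload = fd_result_to_json(result)                 # JSON string, safe to send over HTTP
restored = json_to_detection(json.loads(payload))   # rebuilt DetectionResult on the receiving side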

@@ -3,5 +3,6 @@ requests
tqdm
numpy
opencv-python
fastdeploy-tools==0.0.1
fastdeploy-tools>=0.0.1
pyyaml
fastapi


@@ -72,7 +72,7 @@ setup_configs["ENABLE_FLYCV"] = os.getenv("ENABLE_FLYCV", "OFF")
setup_configs["ENABLE_TEXT"] = os.getenv("ENABLE_TEXT", "OFF")
setup_configs["WITH_GPU"] = os.getenv("WITH_GPU", "OFF")
setup_configs["WITH_IPU"] = os.getenv("WITH_IPU", "OFF")
setup_configs["WITH_XPU"] = os.getenv("WITH_XPU", "OFF")
setup_configs["WITH_KUNLUNXIN"] = os.getenv("WITH_KUNLUNXIN", "OFF")
setup_configs["BUILD_ON_JETSON"] = os.getenv("BUILD_ON_JETSON", "OFF")
setup_configs["TRT_DIRECTORY"] = os.getenv("TRT_DIRECTORY", "UNDEFINED")
setup_configs["CUDA_DIRECTORY"] = os.getenv("CUDA_DIRECTORY",