mirror of https://github.com/blakeblackshear/frigate.git
Nvidia Jetson ffmpeg + TensorRT support (#6458)
* Non-Jetson changes

  Required for later commits:
  - Allow base image to be overridden (and don't assume its WORKDIR)
  - Ensure python3.9
  - Map hwaccel decode presets as strings instead of lists

  Not required:
  - Fix existing documentation
  - Simplify hwaccel scale logic

* Prepare for multi-arch tensorrt build
* Add tensorrt images for Jetson boards
* Add Jetson ffmpeg hwaccel
* Update docs
* Add CODEOWNERS
* CI
* Change default model from yolov7-tiny-416 to yolov7-320

  In my experience the tiny models perform markedly worse without being much faster.

* fixup! Update docs
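Note: as the model-generation hunk further below spells out, on Jetson the container must be started with the nvidia runtime so the Tegra driver libraries are available inside it. A minimal launch sketch (the image name and host path are illustrative assumptions, not taken from this commit):

# Illustrative only: image name and host paths are assumptions.
# The nvidia runtime requirement comes from the model-generation script below.
docker run -d \
  --runtime=nvidia \
  -v /path/to/config:/config \
  frigate-tensorrt-jetson   # placeholder image name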
docker/tensorrt/detector/build_python_tensorrt.sh (new executable file, 28 lines added)
@@ -0,0 +1,28 @@
#!/bin/bash

set -euxo pipefail

mkdir -p /trt-wheels

if [[ "${TARGETARCH}" == "arm64" ]]; then

    # NVIDIA supplies python-tensorrt for python3.8, but frigate uses python3.9,
    # so we must build python-tensorrt ourselves.

    # Get python-tensorrt source
    mkdir /workspace
    cd /workspace
    git clone -b ${TENSORRT_VER} https://github.com/NVIDIA/TensorRT.git --depth=1

    # Collect dependencies
    EXT_PATH=/workspace/external && mkdir -p $EXT_PATH
    pip3 install pybind11 && ln -s /usr/local/lib/python3.9/dist-packages/pybind11 $EXT_PATH/pybind11
    ln -s /usr/include/python3.9 $EXT_PATH/python3.9
    ln -s /usr/include/aarch64-linux-gnu/NvOnnxParser.h /workspace/TensorRT/parsers/onnx/

    # Build wheel
    cd /workspace/TensorRT/python
    EXT_PATH=$EXT_PATH PYTHON_MAJOR_VERSION=3 PYTHON_MINOR_VERSION=9 TARGET_ARCHITECTURE=aarch64 /bin/bash ./build.sh
    mv build/dist/*.whl /trt-wheels/

fi
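The wheels staged in /trt-wheels are presumably installed by a later build stage; a quick sanity check of the result might look like this (a sketch, not part of the commit):

# Sketch only, not part of this script: install the built wheel and
# verify the python binding imports with the expected version.
pip3 install /trt-wheels/tensorrt-*.whl
python3 -c "import tensorrt; print(tensorrt.__version__)"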
@@ -2,27 +2,35 @@
 # shellcheck shell=bash
 # Generate models for the TensorRT detector
 
+# One or more comma-separated models may be specified via the YOLO_MODELS env.
+# Append "-dla" to the model name to generate a DLA model with GPU fallback;
+# otherwise a GPU-only model will be generated.
+
 set -o errexit -o nounset -o pipefail
 
 MODEL_CACHE_DIR=${MODEL_CACHE_DIR:-"/config/model_cache/tensorrt"}
 TRT_VER=${TRT_VER:-$(cat /etc/TENSORRT_VER)}
 OUTPUT_FOLDER="${MODEL_CACHE_DIR}/${TRT_VER}"
 
 # Create output folder
 mkdir -p ${OUTPUT_FOLDER}
 
 FIRST_MODEL=true
+MODEL_DOWNLOAD=""
 MODEL_CONVERT=""
 
 for model in ${YOLO_MODELS//,/ }
 do
     # Remove old link in case path/version changed
     rm -f ${MODEL_CACHE_DIR}/${model}.trt
 
     if [[ ! -f ${OUTPUT_FOLDER}/${model}.trt ]]; then
         if [[ ${FIRST_MODEL} = true ]]; then
+            MODEL_DOWNLOAD="${model%-dla}";
             MODEL_CONVERT="${model}"
             FIRST_MODEL=false;
         else
+            MODEL_DOWNLOAD+=",${model%-dla}";
             MODEL_CONVERT+=",${model}";
         fi
     else
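The download/convert split above relies on bash's "${model%-dla}" suffix-stripping expansion: weights and ONNX files are handled under the base model name, while the -dla suffix only changes how the engine is built. A standalone illustration:

# Standalone illustration of the naming convention used above.
model="yolov7-320-dla"
echo "${model%-dla}"   # prints "yolov7-320": weights are downloaded under the base name
model="yolov7-320"
echo "${model%-dla}"   # prints "yolov7-320": unchanged when the suffix is absent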
@@ -35,19 +43,49 @@ if [[ -z ${MODEL_CONVERT} ]]; then
     exit 0
 fi
 
+# On Jetpack 4.6, the nvidia container runtime will mount several host nvidia libraries into the
+# container which should not be present in the image - if they are, TRT model generation will
+# fail or produce invalid models. Thus we must request the user to install them on the host in
+# order to run libyolo here.
+# On Jetpack 5.0, these libraries are not mounted by the runtime and are supplied by the image.
+if [[ "$(arch)" == "aarch64" ]]; then
+    if [[ ! -e /usr/lib/aarch64-linux-gnu/tegra ]]; then
+        echo "ERROR: Container must be launched with nvidia runtime"
+        exit 1
+    elif [[ ! -e /usr/lib/aarch64-linux-gnu/libnvinfer.so.8 ||
+            ! -e /usr/lib/aarch64-linux-gnu/libnvinfer_plugin.so.8 ||
+            ! -e /usr/lib/aarch64-linux-gnu/libnvparsers.so.8 ||
+            ! -e /usr/lib/aarch64-linux-gnu/libnvonnxparser.so.8 ]]; then
+        echo "ERROR: Please run the following on the HOST:"
+        echo "  sudo apt install libnvinfer8 libnvinfer-plugin8 libnvparsers8 libnvonnxparsers8 nvidia-container"
+        exit 1
+    fi
+fi
+
 echo "Generating the following TRT Models: ${MODEL_CONVERT}"
 
 # Build trt engine
 cd /usr/local/src/tensorrt_demos/yolo
 
 # Download yolo weights
-./download_yolo.sh $MODEL_CONVERT > /dev/null
+echo "Downloading yolo weights"
+./download_yolo.sh $MODEL_DOWNLOAD 2> /dev/null
 
 for model in ${MODEL_CONVERT//,/ }
 do
-    echo "Converting ${model} model"
-    python3 yolo_to_onnx.py -m ${model} > /dev/null
-    python3 onnx_to_tensorrt.py -m ${model} > /dev/null
-    cp ${model}.trt ${OUTPUT_FOLDER}/${model}.trt
+    python3 yolo_to_onnx.py -m ${model%-dla} > /dev/null
+
+    echo -e "\nGenerating ${model}.trt. This may take a few minutes.\n"; start=$(date +%s)
+    if [[ $model == *-dla ]]; then
+        cmd="python3 onnx_to_tensorrt.py -m ${model%-dla} --dla_core 0"
+    else
+        cmd="python3 onnx_to_tensorrt.py -m ${model}"
+    fi
+    $cmd > /tmp/onnx_to_tensorrt.log || { cat /tmp/onnx_to_tensorrt.log && continue; }
+
+    mv ${model%-dla}.trt ${OUTPUT_FOLDER}/${model}.trt;
+    ln -s ${OUTPUT_FOLDER}/${model}.trt ${MODEL_CACHE_DIR}/${model}.trt
+    echo "Generated ${model}.trt in $(($(date +%s)-start)) seconds"
 done
 
 echo "Available tensorrt models:"
 cd ${OUTPUT_FOLDER} && ls *.trt;
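Taken together, a user requests engines via the YOLO_MODELS env, optionally suffixing -dla per model, and the script leaves versioned engines plus stable symlinks in the cache. A hypothetical example (model names chosen for illustration):

# Hypothetical request: one GPU engine plus one DLA engine with GPU fallback.
YOLO_MODELS="yolov7-320,yolov4-tiny-416-dla"
# After the script runs, engines land in a TRT-versioned folder with
# stable symlinks one level up:
ls -l ${MODEL_CACHE_DIR:-/config/model_cache/tensorrt}/*.trt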
@@ -8,7 +8,10 @@ SCRIPT_DIR="/usr/local/src/tensorrt_demos"
 git clone --depth 1 https://github.com/NateMeyer/tensorrt_demos.git -b conditional_download
 
 # Build libyolo
-cd ./tensorrt_demos/plugins && make all
+if [ ! -e /usr/local/cuda ]; then
+    ln -s /usr/local/cuda-* /usr/local/cuda
+fi
+cd ./tensorrt_demos/plugins && make all -j$(nproc)
 cp libyolo_layer.so /usr/local/lib/libyolo_layer.so
 
 # Store yolo scripts for later conversion
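One illustrative way to confirm the freshly built plugin resolves its CUDA dependencies through the /usr/local/cuda symlink created above (not part of the commit):

# Illustrative sanity check: list the plugin's CUDA-related dynamic
# dependencies and flag any that fail to resolve.
ldd /usr/local/lib/libyolo_layer.so | grep -Ei 'cuda|not found'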