From 1a0555897ea47ad3b961aa765296a1d67cb19b7a Mon Sep 17 00:00:00 2001
From: Bronyale
Date: Fri, 12 Aug 2022 13:48:48 +0800
Subject: [PATCH] PointPillars

---
 PointPillars/README.md                   | 172 +++++++++
 PointPillars/eval.sh                     |  20 ++
 PointPillars/image/operations.jpg        | Bin 0 -> 36426 bytes
 PointPillars/infer.sh                    |  33 ++
 PointPillars/pipeline/pfe.pipeline       |  77 +++++
 PointPillars/pipeline/rpn.pipeline       |  28 ++
 PointPillars/requirments/requiements.txt |   7 +
 PointPillars/src/eval.py                 |  38 ++
 PointPillars/src/get_preds.py            | 319 +++++++++++++++++
 PointPillars/src/infer.py                | 422 +++++++++++++++++++++++
 PointPillars/src/point_to_pillars.py     | 197 +++++++++++
 PointPillars/view.sh                     |  21 ++
 12 files changed, 1334 insertions(+)
 create mode 100644 PointPillars/README.md
 create mode 100644 PointPillars/eval.sh
 create mode 100644 PointPillars/image/operations.jpg
 create mode 100644 PointPillars/infer.sh
 create mode 100644 PointPillars/pipeline/pfe.pipeline
 create mode 100644 PointPillars/pipeline/rpn.pipeline
 create mode 100644 PointPillars/requirments/requiements.txt
 create mode 100644 PointPillars/src/eval.py
 create mode 100644 PointPillars/src/get_preds.py
 create mode 100644 PointPillars/src/infer.py
 create mode 100644 PointPillars/src/point_to_pillars.py
 create mode 100644 PointPillars/view.sh

diff --git a/PointPillars/README.md b/PointPillars/README.md
new file mode 100644
index 0000000..1218d4f
--- /dev/null
+++ b/PointPillars/README.md
@@ -0,0 +1,172 @@
+# PointPillars-Based 3D Point-Cloud Object Detection
+
+## 1 Introduction
+
+This project runs the PointPillars 3D point-cloud object detection model on Ascend chips: it takes 3D point-cloud data as input, detects the objects in it, and outputs the detection results.
+
+The main workflow is as follows: read the 4-D point data from the KITTI dataset and convert it into pillar data; feed the pillars to the PFE model for inference to obtain pillar-feature data; scatter the features into a 2D pseudo image via Pillar Scatter; feed the pseudo image to the RPN network for inference to obtain candidate-box predictions; finally, decode the predictions against the anchors, apply NMS, and obtain the final predicted boxes. The third-party library Mayavi is used to visualize the 3-D detection results.
+
+### 1.1 Supported Products
+
+Ascend 310 (inference), Atlas 200DK
+
+### 1.2 Supported Versions
+
+This sample targets CANN version [5.0.5](https://www.hiascend.com/software/cann/commercial) and SDK version [2.0.4](https://www.hiascend.com/software/Mindx-sdk).
+
+### 1.3 Software Architecture
+
+Table 1.1 Subsystems of the solution and their functions:
+
+| No. | Subsystem | Function |
+| ---- | -------------- | :----------------------------------------------- |
+| 1 | Data input | Reads the bin file and extracts the 4-D point data |
+| 2 | Pillar extraction | Converts the 4-D point data into 8-D pillar data |
+| 3 | PFE inference | Runs inference on the pillar data to obtain 64-D feature data |
+| 4 | Scatter | Scatters the PFE output into a 64-channel pseudo image |
+| 5 | RPN inference | Runs inference on the 64-channel pseudo image to obtain the detection outputs |
+| 6 | Box post-processing | Extracts the final predicted boxes from the candidates |
+
+### 1.4 Directory Layout
+
+The project is named PointPillars and is organized as follows:
+
+```
+PointPillars/
+├── image
+│   └── operations.jpg
+├── README.md
+├── pipeline
+│ ├── pfe.pipeline
+│ └── rpn.pipeline
+├── requirments
+│   └── requiements.txt
+├── src
+│ ├── eval.py
+│ ├── get_preds.py
+│ ├── infer.py
+│ └── point_to_pillars.py
+├── eval.sh
+├── infer.sh
+└── view.sh
+```
+
+### 1.5 Flow Chart
+
+![](./image/operations.jpg)
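+
+The Pillar Scatter step is the hinge of the whole pipeline, since it turns the sparse per-pillar PFE output into the dense BEV input the RPN expects. Below is a minimal NumPy sketch of that step; the shapes ((1, 64, 12000, 1) features, a 496x432 grid) are taken from `get_pseudo_image` in `src/infer.py`, while the function name itself is only illustrative:
+
+```python
+import numpy as np
+
+def scatter_to_pseudo_image(pillar_feature, coors):
+    # pillar_feature: (1, 64, N, 1) PFE output, one 64-D vector per pillar
+    # coors: (N, 3) integer pillar coordinates (x-index, y-index, z-index)
+    image = np.zeros((64, 496, 432), dtype=pillar_feature.dtype)
+    xs = coors[:, 0].astype(np.int64)  # column on the BEV grid (0..431)
+    ys = coors[:, 1].astype(np.int64)  # row on the BEV grid (0..495)
+    image[:, ys, xs] = pillar_feature[0, :, :, 0]  # scatter the 64-D features
+    return image[None]  # (1, 64, 496, 432) pseudo image consumed by rpn.om
+```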
+
+## 2 Environment Dependencies
+
+| Software | Version |
+| ------------------- | ------------- |
+| MindX SDK | 2.0.4 |
+| Ubuntu | 18.04.5 LTS |
+| Ascend-CANN-toolkit | 5.0.5alpha001 |
+| Python | 3.7.10 |
+
+For environment setup, see [Atlas 200DK development board environment setup](https://gitee.com/ascend/docs-openmind/blob/master/guide/mindx/ascend_community_projects/tutorials/200dk%E5%BC%80%E5%8F%91%E6%9D%BF%E7%8E%AF%E5%A2%83%E6%90%AD%E5%BB%BA.md).
+
+Before building and running the project, set the environment variables:
+
+1. Source the set_env.sh scripts of your SDK and ascend-toolkit installations.
+
+```
+. ${SDK-path}/set_env.sh
+. ${ascend-toolkit-path}/set_env.sh
+```
+
+2. Set the global log level to error.
+
+```
+export ASCEND_GLOBAL_LOG_LEVEL=3
+```
+
+Variables used above:
+
+```
+SDK-path: installation path of the SDK mxVision
+ascend-toolkit-path: installation path of CANN
+```
+
+## 3 Software Dependencies
+
+The third-party dependencies used for inference are listed in PointPillars/requirments/requiements.txt.
+
+Among them, PyQt5, traits, and VTK are prerequisites of mayavi. When installing, you can download the wheel packages in advance from the [mirror site](https://www.lfd.uci.edu/~gohlke/pythonlibs/) and install them ***in that order***; mayavi can then be installed successfully.
+
+Note: once the software and dependencies are installed, the whole project can run on the Atlas 200DK. If installing on the board is inconvenient, you can set up the environment locally and run the data preprocessing, accuracy validation, and visualization parts on the local machine.
+
+## 4 Model Preparation
+
+This project uses the [PointPillars](https://arxiv.org/abs/1812.05784) model; its source code can be downloaded from the [gitee repository](https://gitee.com/shy718/nutonomy_pointpillars?_from=gitee_search).
+After downloading, follow the [instructions](https://gitee.com/shy718/nutonomy_pointpillars/blob/master/README.md) to download and configure the KITTI dataset, set up the training environment, train the model, convert the tckpt model to an onnx model, and store the onnx models under `PointPillars/models/model_onnx/`.
+Then use the ATC tool to convert the onnx models to om models:
+
+```
+atc --input_shape="pillar_x:1,1,12000,100;pillar_y:1,1,12000,100;pillar_z:1,1,12000,100;pillar_i:1,1,12000,100;num_points_per_pillar:1,12000;x_sub_shaped:1,1,12000,100;y_sub_shaped:1,1,12000,100;mask:1,1,12000,100" --input_fp16_nodes="pillar_x;pillar_y;pillar_z;pillar_i;num_points_per_pillar;x_sub_shaped;y_sub_shaped;mask" --check_report=/home/bronyale/modelzoo/pfe/Ascend310/network_analysis.report --input_format=NCHW --output="/home/bronyale/modelzoo/pfe/Ascend310/pfe" --soc_version=Ascend310 --framework=5 --model="PointPillars/models/model_onnx/pfe.onnx"
+```
+
+```
+atc --input_shape="input.1:1,64,496,432" --input_fp16_nodes="input.1" --check_report=/home/bronyale/modelzoo/rpn/Ascend310/network_analysis.report --input_format=NCHW --output="/home/bronyale/modelzoo/rpn/Ascend310/rpn" --soc_version=Ascend310 --framework=5 --model="PointPillars/models/model_onnx/rpn.onnx"
+```
+
+Note: these steps apply when you train the model yourself and convert it manually. Alternatively, the trained onnx models and converted om models can be downloaded directly as described in `Step 3` of `Build and Run` below.
+
+## 5 Build and Run
+
+**Step 1** Set the environment variables as described in Section 2 **Environment Dependencies**.
+
+**Step 2** Obtain the model files as described in Section 4 **Model Preparation** and place them under `./models/model_om/`.
+
+**Step 3** Run model inference.
+First download the inference input data from the [data link](https://mindx.sdk.obs.cn-north-4.myhuaweicloud.com/ascend_community_projects/pointpillar/data.zip), the required models from the [model link](https://mindx.sdk.obs.cn-north-4.myhuaweicloud.com/ascend_community_projects/pointpillar/models.zip), and the benchmark inference results used for accuracy validation from the [benchmark link](https://mindx.sdk.obs.cn-north-4.myhuaweicloud.com/ascend_community_projects/pointpillar/benchmark.rar). Place the downloaded directories under the PointPillars directory, which should then look like this:
+
+```
+PointPillars/
+├── benchmark
+│ └── test
+├── data
+│ └── test
+├── image
+├── models
+│ ├── model_om
+│ └── model_onnx
+├── README.md
+├── pipeline
+├── requirments
+├── src
+├── eval.sh
+├── infer.sh
+└── view.sh
+```
+
+Then run the following command in the `PointPillars` directory:
+
+```
+bash infer.sh
+```
+
+The candidate-box information is saved under `PointPillars/result/test/`.
+
+**Step 4** Post-process the candidate boxes and visualize them. In the `PointPillars` directory, run:
+
+```
+bash view.sh
+```
+
+The detection results are printed to the console, and the visualization is displayed through mayavi.
+
+Note: since file paths are written differently on different operating systems, adjust the paths in view.sh and get_preds.py as needed for your platform.
+
+## 6 Accuracy Test
+
+In the `PointPillars` directory, run:
+
+```
+bash eval.sh
+```
+
+The accuracy loss of the om model relative to the original model is printed to the console; as implemented in src/eval.py, it is the maximum over all predicted boxes of the relative deviation of the box center (the sum of absolute x/y/z differences divided by the benchmark's sum of absolute x/y/z values).
+
+## 7 Application Scenarios
+
+The PointPillars network detects objects in 3-D point-cloud data while balancing detection speed and accuracy, which makes it suitable for autonomous driving, where obstacles in the current scene must be detected in real time.
diff --git a/PointPillars/eval.sh b/PointPillars/eval.sh
new file mode 100644
index 0000000..dfde828
--- /dev/null
+++ b/PointPillars/eval.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+# Copyright(C) 2022. Huawei Technologies Co.,Ltd. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+set -e
+
+cd src/
+python eval.py evaluate --file_dir="../result/test/" --benchmark_dir="../benchmark/test/"
+
+exit 0
diff --git a/PointPillars/image/operations.jpg b/PointPillars/image/operations.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..d55ca5123e153b4b4ed582a915640b5003adf2b7
GIT binary patch
literal 36426
[base85-encoded binary data for PointPillars/image/operations.jpg (the flow chart of Section 1.5) omitted]

diff --git a/PointPillars/infer.sh b/PointPillars/infer.sh
new file mode 100644
index 0000000..f6d95e5
--- /dev/null
+++ b/PointPillars/infer.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+# Copyright(C) 2022. Huawei Technologies Co.,Ltd. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+set -e
+
+out_path="result/"
+if [ -d "$out_path" ]; then
+    rm -rf "$out_path"
+else
+    echo "directory $out_path does not exist."
+fi
+
+mkdir -p "$out_path"
+cd result
+mkdir test
+cd ..
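+
+# The two steps below first serialize the raw KITTI points into the pillar
+# tensors expected by pfe.om (point_to_pillars.py), then drive the PFE and
+# RPN streams end to end (infer.py).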
+ +cd src/ +python point_to_pillars.py generate --file_dir="../data/test/" +python infer.py infer --file_dir="../data/test/" + +exit 0 diff --git a/PointPillars/pipeline/pfe.pipeline b/PointPillars/pipeline/pfe.pipeline new file mode 100644 index 0000000..3b174ec --- /dev/null +++ b/PointPillars/pipeline/pfe.pipeline @@ -0,0 +1,77 @@ +{ + "pfe": { + "stream_config": { + "deviceId": "0" + }, + "appsrc0": { + "props": { + "blocksize": "1200000" + }, + "factory": "appsrc", + "next": "mxpi_tensorinfer0:0" + }, + "appsrc1": { + "props": { + "blocksize": "1200000" + }, + "factory": "appsrc", + "next": "mxpi_tensorinfer0:1" + }, + "appsrc2": { + "props": { + "blocksize": "1200000" + }, + "factory": "appsrc", + "next": "mxpi_tensorinfer0:2" + }, + "appsrc3": { + "props": { + "blocksize": "1200000" + }, + "factory": "appsrc", + "next": "mxpi_tensorinfer0:3" + }, + "appsrc4": { + "props": { + "blocksize": "12000" + }, + "factory": "appsrc", + "next": "mxpi_tensorinfer0:4" + }, + "appsrc5": { + "props": { + "blocksize": "1200000" + }, + "factory": "appsrc", + "next": "mxpi_tensorinfer0:5" + }, + "appsrc6": { + "props": { + "blocksize": "1200000" + }, + "factory": "appsrc", + "next": "mxpi_tensorinfer0:6" + }, + "appsrc7": { + "props": { + "blocksize": "1200000" + }, + "factory": "appsrc", + "next": "mxpi_tensorinfer0:7" + }, + "mxpi_tensorinfer0": { + "props": { + "dataSource":"appsrc0,appsrc1,appsrc2,appsrc3,appsrc4,appsrc5,appsrc6,appsrc7", + "modelPath": "../models/model_om/pfe.om" + }, + "factory": "mxpi_tensorinfer", + "next": "appsink0" + }, + "appsink0": { + "props": { + "blocksize": "10000000000" + }, + "factory": "appsink" + } + } +} diff --git a/PointPillars/pipeline/rpn.pipeline b/PointPillars/pipeline/rpn.pipeline new file mode 100644 index 0000000..74c28a5 --- /dev/null +++ b/PointPillars/pipeline/rpn.pipeline @@ -0,0 +1,28 @@ +{ + "rpn": { + "stream_config": { + "deviceId": "0" + }, + "appsrc0": { + "props": { + "blocksize": "13713408" + }, + "factory": "appsrc", + "next": "mxpi_tensorinfer0" + }, + "mxpi_tensorinfer0": { + "props": { + "dataSource":"appsrc0", + "modelPath": "../models/model_om/rpn.om" + }, + "factory": "mxpi_tensorinfer", + "next": "appsink0" + }, + "appsink0": { + "props": { + "blocksize": "10000000000" + }, + "factory": "appsink" + } + } +} diff --git a/PointPillars/requirments/requiements.txt b/PointPillars/requirments/requiements.txt new file mode 100644 index 0000000..8ffb919 --- /dev/null +++ b/PointPillars/requirments/requiements.txt @@ -0,0 +1,7 @@ +fire +numpy +torch +PyQt5 +traits +VTK +mayavi diff --git a/PointPillars/src/eval.py b/PointPillars/src/eval.py new file mode 100644 index 0000000..1671980 --- /dev/null +++ b/PointPillars/src/eval.py @@ -0,0 +1,38 @@ +""" +# Copyright(C) 2022. Huawei Technologies Co.,Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
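+#
+# eval.py compares the boxes predicted by the om pipeline (result.bin) against
+# the benchmark predictions and prints the maximum relative error of the
+# predicted box centers.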
+""" +import numpy as np +import fire + + +def evaluate(file_dir="../result/test/", benchmark_dir="../benchmark/test/"): + om = np.fromfile(f"{file_dir}/result.bin", dtype=np.float32).reshape(-1, 7) + benchmark = np.fromfile(f"{benchmark_dir}/result.bin", dtype=np.float32).reshape(-1, 7) + cnt = om.shape + error = 0 + for i in range(cnt[0]): + miss = 0 + benchmark_sum = 0 + for j in range(0, 3): + miss += abs(benchmark[i][j] - om[i][j]) + benchmark_sum += abs(benchmark[i][j]) + + error = max(error, miss / benchmark_sum) + + print('the error of the model is :', error) + + +if __name__ == '__main__': + fire.Fire() diff --git a/PointPillars/src/get_preds.py b/PointPillars/src/get_preds.py new file mode 100644 index 0000000..8fd52c5 --- /dev/null +++ b/PointPillars/src/get_preds.py @@ -0,0 +1,319 @@ +""" +# Copyright(C) 2022. Huawei Technologies Co.,Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +import os +from math import cos, sin +from mayavi import mlab +import numpy as np +import torch +import fire + + +def box_decode(box_encodings, anchors, encode_angle_to_vector=False, smooth_dim=False): + # need to convert box_encodings to z-bottom format + xa, ya, za, wa, la, ha, ra = np.split(anchors, 7, axis=-1) + if encode_angle_to_vector: + xt, yt, zt, wt, lt, ht, rtx, rty = np.split(box_encodings, 8, axis=-1) + else: + xt, yt, zt, wt, lt, ht, rt = np.split(box_encodings, 7, axis=-1) + za = za + ha / 2 + diagonal = np.sqrt(la**2 + wa**2) + xg = xt * diagonal + xa + yg = yt * diagonal + ya + + zg = zt * ha + za + if smooth_dim: + lg = (lt + 1) * la + wg = (wt + 1) * wa + hg = (ht + 1) * ha + else: + lg = np.exp(lt) * la + wg = np.exp(wt) * wa + hg = np.exp(ht) * ha + if encode_angle_to_vector: + rax = np.cos(ra) + ray = np.sin(ra) + rgx = rtx + rax + rgy = rty + ray + rg = np.arctan2(rgy, rgx) + else: + rg = rt + ra + zg = zg - hg / 2 + return torch.Tensor(np.concatenate([xg, yg, zg, wg, lg, hg, rg], axis=-1)) + + +def nms_op_kernel(dets, thresh=0.01, eps=0.0): + x1 = dets[:, 0] + y1 = dets[:, 1] + x2 = dets[:, 2] + y2 = dets[:, 3] + scores = dets[:, 4].numpy() + areas = (x2 - x1 + eps) * (y2 - y1 + eps) + nms_order = scores.argsort()[::-1].astype(np.int32) + ndets = dets.shape[0] + suppressed = np.zeros((ndets), dtype=np.int32) + index_to_keep = [] + for _i in range(ndets): + i = nms_order[_i] + if suppressed[ + i] == 1: + continue + index_to_keep.append(i) + for _j in range(_i + 1, ndets): + j = nms_order[_j] + if suppressed[j] == 1: + continue + w = max(min(x2[i], x2[j]) - max(x1[i], x1[j]) + eps, 0.0) + h = max(min(y2[i], y2[j]) - max(y1[i], y1[j]) + eps, 0.0) + inter = w * h + ovr = inter / (areas[i] + areas[j] - inter) + if ovr >= thresh: + suppressed[j] = 1 + return index_to_keep + + +def nms_op(boxes, scores, pre_maxsize=None): + nms_order = scores.sort(0, descending=True)[1] + + if pre_maxsize is not None: + nms_order = nms_order[:pre_maxsize] + boxes = boxes[nms_order].contiguous() + index_to_keep = nms_op_kernel(boxes) + return index_to_keep + + +def 
limit_period(val, offset=0.5, period=np.pi): + limited_val = val - np.floor(val / period + offset) * period + return limited_val + + +def generate_anchors(feature_size, + anchor_range, + sizes, + rotations, + dtype): + anchor_range = np.array(anchor_range, dtype) + z_centers = np.linspace( + (anchor_range[2] + anchor_range[5]) / 2, anchor_range[5], feature_size[0], dtype=dtype) + y_centers = np.linspace( + anchor_range[1], anchor_range[4], feature_size[1], dtype=dtype) + x_centers = np.linspace( + anchor_range[0], anchor_range[3], feature_size[2], dtype=dtype) + sizes = np.reshape(np.array(sizes, dtype=dtype), [-1, 3]) + rotations = np.array(rotations, dtype=dtype) + rets = np.meshgrid( + x_centers, y_centers, z_centers, rotations, indexing='ij') + tile_shape = [1] * 5 + tile_shape[-2] = int(sizes.shape[0]) + length = len(rets) + for i in range(length): + rets[i] = np.tile(rets[i][..., np.newaxis, :], tile_shape) + rets[i] = rets[i][..., np.newaxis] + sizes = np.reshape(sizes, [1, 1, 1, -1, 1, 3]) + tile_size_shape = list(rets[0].shape) + tile_size_shape[3] = 1 + sizes = np.tile(sizes, tile_size_shape) + rets.insert(3, sizes) + ret = np.concatenate(rets, axis=-1) + return np.transpose(ret, [2, 1, 0, 3, 4, 5]) + + +def get_predict_result(bbox_cls_pred, bbox_pred, bbox_dir_cls_pred, anchors): + bbox_cls_pred = bbox_cls_pred.reshape(-1, 1) + bbox_pred = bbox_pred.reshape(-1, 7) + bbox_dir_cls_pred = bbox_dir_cls_pred.reshape(-1, 2) + anchors = anchors.reshape(-1, 7) + bbox_cls_pred = torch.sigmoid(bbox_cls_pred) + bbox_dir_cls_pred = torch.max(bbox_dir_cls_pred, dim=1)[1] + + inds = bbox_cls_pred.max(1)[0].topk(100)[1] + bbox_cls_pred = bbox_cls_pred[inds] + bbox_pred = bbox_pred[inds] + bbox_dir_cls_pred = bbox_dir_cls_pred[inds] + anchors = anchors[inds] + + bbox_pred = box_decode(bbox_pred, anchors) + + bbox_2d_xy = bbox_pred[:, [0, 1]] + bbox_2d_wl = bbox_pred[:, [3, 4]] + bbox_pred2d = torch.cat([bbox_2d_xy - bbox_2d_wl / 2, + bbox_2d_xy + bbox_2d_wl / 2, + bbox_cls_pred], dim=-1) + ret_bboxes, ret_labels, ret_scores = [], [], [] + for i in range(1): + cur_bbox_cls_pred = bbox_cls_pred[:, i] + score_inds = cur_bbox_cls_pred > 0.1 + if score_inds.sum() == 0: + continue + + cur_bbox_cls_pred = cur_bbox_cls_pred[score_inds] + cur_bbox_pred2d = bbox_pred2d[score_inds] + cur_bbox_pred = bbox_pred[score_inds] + cur_bbox_dir_cls_pred = bbox_dir_cls_pred[score_inds] + + keep_inds = nms_op(boxes=cur_bbox_pred2d, + scores=cur_bbox_cls_pred, + pre_maxsize=None) + + cur_bbox_cls_pred = cur_bbox_cls_pred[keep_inds] + cur_bbox_pred = cur_bbox_pred[keep_inds] + cur_bbox_dir_cls_pred = cur_bbox_dir_cls_pred[keep_inds] + cur_bbox_pred[:, -1] = limit_period(cur_bbox_pred[:, -1].detach().cpu(), 1, np.pi).to(cur_bbox_pred) + cur_bbox_pred[:, -1] += (1 - cur_bbox_dir_cls_pred) * np.pi + + ret_bboxes.append(cur_bbox_pred) + ret_labels.append(torch.zeros_like(cur_bbox_pred[:, 0], dtype=torch.long) + i) + ret_scores.append(cur_bbox_cls_pred) + + if len(ret_bboxes) == 0: + return [], [], [] + ret_bboxes = torch.cat(ret_bboxes, 0) + ret_labels = torch.cat(ret_labels, 0) + ret_scores = torch.cat(ret_scores, 0) + cnt = 0 + for i in range(50): + cnt += 1 + if ret_scores[i] < 0.6: + cnt -= 1 + break + final_inds = ret_scores.topk(cnt)[1] + ret_bboxes = ret_bboxes[final_inds] + ret_labels = ret_labels[final_inds] + ret_scores = ret_scores[final_inds] + result = { + 'lidar_bboxes': ret_bboxes.detach().cpu().numpy(), + 'labels': ret_labels.detach().cpu().numpy(), + 'scores': ret_scores.detach().cpu().numpy() + } + 
return result + + +def get_box_points(box_list): + box_points_list = [] + length = len(box_list) + for cnt in range(length): + box = box_list[cnt] + box_points = np.zeros((8, 3)) + x = float(box[0]) + y = float(box[1]) + z = float(box[2]) + width = float(box[3]) + long = float(box[4]) + depth = float(box[5]) + theta = np.pi / 2 - box[6] + box_points[0] = [x + long / 2 * cos(theta) + width / 2 * sin(theta), + y + long / 2 * sin(theta) - width / 2 * cos(theta), + z + depth / 2] + box_points[3] = [x + long / 2 * cos(theta) + width / 2 * sin(theta), + y + long / 2 * sin(theta) - width / 2 * cos(theta), + z - depth / 2] + + box_points[1] = [x + long / 2 * cos(theta) - width / 2 * sin(theta), + y + width / 2 * cos(theta) + long / 2 * sin(theta), + z + depth / 2] + box_points[2] = [x + long / 2 * cos(theta) - width / 2 * sin(theta), + y + width / 2 * cos(theta) + long / 2 * sin(theta), + z - depth / 2] + + box_points[5] = [2 * x - (x + long / 2 * cos(theta) + width / 2 * sin(theta)), + 2 * y - (y + long / 2 * sin(theta) - width / 2 * cos(theta)), + z + depth / 2] + box_points[6] = [2 * x - (x + long / 2 * cos(theta) + width / 2 * sin(theta)), + 2 * y - (y + long / 2 * sin(theta) - width / 2 * cos(theta)), + z - depth / 2] + + box_points[4] = [2 * x - (x + long / 2 * cos(theta) - width / 2 * sin(theta)), + 2 * y - (y + width / 2 * cos(theta) + long / 2 * sin(theta)), + z + depth / 2] + box_points[7] = [2 * x - (x + long / 2 * cos(theta) - width / 2 * sin(theta)), + 2 * y - (y + width / 2 * cos(theta) + long / 2 * sin(theta)), + z - depth / 2] + + box_points_list.append(box_points) + + return np.array(box_points_list) + + +def draw_box(box_point_list): + length = len(box_point_list) + for cnt in range(length): + for k in range(0, 4): + box_point = box_point_list[cnt] + i, j = k, (k + 1) % 4 + mlab.plot3d([box_point[i, 0], box_point[j, 0]], + [box_point[i, 1], box_point[j, 1]], + [box_point[i, 2], box_point[j, 2]]) + i, j = k + 4, (k + 3) % 4 + 4 + mlab.plot3d([box_point[i, 0], box_point[j, 0]], + [box_point[i, 1], box_point[j, 1]], + [box_point[i, 2], box_point[j, 2]]) + i , j = k, k + 4 + mlab.plot3d([box_point[i, 0], box_point[j, 0]], + [box_point[i, 1], box_point[j, 1]], + [box_point[i, 2], box_point[j, 2]]) + + +def point_show(points, box): + x = points[:, 0] + y = points[:, 1] + z = points[:, 2] + fig = mlab.figure(bgcolor=(0, 0, 0), size=(640, 360)) + mlab.points3d(x, y, z, + z, # Values used for Color + mode="point", + colormap='spectral', + figure=fig, + ) + box_point = get_box_points(box) + draw_box(box_point) + + mlab.show() + + +def get_result(file_dir='../result/test/'): + anchors = torch.as_tensor(generate_anchors(feature_size=[1, 248, 216], + anchor_range = [0, -39.68, -3, 69.12, 39.68, 1], + sizes=[1.6, 3.9, 1.56], + rotations=[0, np.pi / 2], + dtype=np.float32).reshape((248, 216, 1, 2, 7))) + bbox_cls_pred = torch.as_tensor(np.fromfile(f"{file_dir}/cls.bin", dtype=np.float32) + .reshape((1, 248, 216, 2))) + bbox_pred = torch.as_tensor(np.fromfile(f"{file_dir}/box.bin", dtype=np.float32) + .reshape((1, 248, 216, 14))) + bbox_dir_cls_pred = torch.as_tensor(np.fromfile(f"{file_dir}/dir.bin", dtype=np.float32) + .reshape((1, 248, 216, 4))) + result = get_predict_result(bbox_cls_pred, bbox_pred, bbox_dir_cls_pred, anchors) + return result + + +def viewer(file_dir='../result/test/'): + if os.path.exists(file_dir): + file = file_dir + "point.bin" + if os.path.exists(file): + points = np.fromfile(file, dtype=np.float32).reshape([-1, 4]) + result = get_result(file_dir) + print(result) + box 
= result['lidar_bboxes'] + boxes = np.array(box) + boxes.tofile(f"{file_dir}/result.bin") + point_show(points, box) + else: + print(f"file : {file} does not exist") + else: + print(f"path : {file_dir} does not exist") + + +if __name__ == '__main__': + fire.Fire() diff --git a/PointPillars/src/infer.py b/PointPillars/src/infer.py new file mode 100644 index 0000000..85a8281 --- /dev/null +++ b/PointPillars/src/infer.py @@ -0,0 +1,422 @@ +""" +# Copyright(C) 2022. Huawei Technologies Co.,Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +import math +import datetime +import fire +import numpy as np +import MxpiDataType_pb2 as MxpiDataType +from StreamManagerApi import StreamManagerApi, StringVector, MxDataInput, InProtobufVector, MxProtobufIn + + +def get_pseudo_image(pillar_feature, coors): + pseudo_image = np.zeros((1, 64, 496, 432)) + for i in range(0, 12000): + x = math.ceil(coors[i, 0]) + y = math.ceil(coors[i, 1]) + for j in range(0, 64): + pseudo_image[0, j, y, x] = pillar_feature[0, j, i, 0] + return pseudo_image + + +def infer(file_dir = "../data/test/"): + stream_manager_api = StreamManagerApi() + ret = stream_manager_api.InitManager() + if ret != 0: + print("Failed to init Stream manager, ret=%s" % str(ret)) + exit() + path = b"../pipeline/pfe.pipeline" + ret = stream_manager_api.CreateMultipleStreamsFromFile(path) + if ret != 0: + print("Failed to create Stream, ret=%s" % str(ret)) + exit() + stream_name = b'pfe' + + stream_manager_api_rpn = StreamManagerApi() + ret = stream_manager_api_rpn.InitManager() + if ret != 0: + print("Failed to init Stream manager, ret=%s" % str(ret)) + exit() + path_rpn = b"../pipeline/rpn.pipeline" + ret = stream_manager_api_rpn.CreateMultipleStreamsFromFile(path_rpn) + if ret != 0: + print("Failed to create Stream, ret=%s" % str(ret)) + exit() + stream_name_rpn = b'rpn' + + # Get the pillar_x + pillar_x = np.fromfile(f"{file_dir}/pillar_x.bin", dtype=np.float16) + pillar_x = pillar_x.astype(np.float16).reshape((1, 12000, 100)) + pillar_x_tensor = pillar_x[None] + print("---------------PILLAR_X INFO--------------") + print(pillar_x_tensor.size) + print(pillar_x_tensor.shape) + pillar_x_panckage_list = MxpiDataType.MxpiTensorPackageList() + pillar_x_panckage = pillar_x_panckage_list.tensorPackageVec.add() + pillar_x_vec = pillar_x_panckage.tensorVec.add() + + pillar_x_byte = pillar_x_tensor.tobytes() + pillar_x_input = MxDataInput() + pillar_x_input.data = pillar_x_byte + + pillar_x_vec.deviceId = 0 + pillar_x_vec.memType = 0 + for i in pillar_x_tensor.shape: + pillar_x_vec.tensorShape.append(i) + pillar_x_vec.dataStr = pillar_x_input.data + pillar_x_vec.tensorDataSize = len(pillar_x_byte) + + plugin_id_x = 0 + key = "appsrc{}".format(plugin_id_x).encode('utf-8') + buffer_vec_x = InProtobufVector() + xbuf = MxProtobufIn() + xbuf.key = key + xbuf.type = b'MxTools.MxpiTensorPackageList' + xbuf.protobuf = pillar_x_panckage_list.SerializeToString() + buffer_vec_x.push_back(xbuf) + + # Get the pillar_y + pillar_y = 
np.fromfile(f"{file_dir}/pillar_y.bin", dtype=np.float16) + pillar_y = pillar_y.astype(np.float16).reshape((1, 12000, 100)) + pillar_y_tensor = pillar_y[None] + print("---------------PILLAR_Y INFO--------------") + print(pillar_y_tensor.size) + print(pillar_y_tensor.shape) + pillar_y_panckage_list = MxpiDataType.MxpiTensorPackageList() + pillar_y_panckage = pillar_y_panckage_list.tensorPackageVec.add() + pillar_y_vec = pillar_y_panckage.tensorVec.add() + + pillar_y_byte = pillar_y_tensor.tobytes() + pillar_y_input = MxDataInput() + pillar_y_input.data = pillar_y_byte + + pillar_y_vec.deviceId = 0 + pillar_y_vec.memType = 0 + for i in pillar_y_tensor.shape: + pillar_y_vec.tensorShape.append(i) + pillar_y_vec.dataStr = pillar_y_input.data + pillar_y_vec.tensorDataSize = len(pillar_y_byte) + + plugin_id_y = 1 + key = "appsrc{}".format(plugin_id_y).encode('utf-8') + buffer_vec_y = InProtobufVector() + ybuf = MxProtobufIn() + ybuf.key = key + ybuf.type = b'MxTools.MxpiTensorPackageList' + ybuf.protobuf = pillar_y_panckage_list.SerializeToString() + buffer_vec_y.push_back(ybuf) + + # Get the pillar_z + pillar_z = np.fromfile(f"{file_dir}/pillar_z.bin", dtype=np.float16) + pillar_z = pillar_z.astype(np.float16).reshape((1, 12000, 100)) + pillar_z_tensor = pillar_z[None] + print("---------------PILLAR_Z INFO--------------") + print(pillar_z_tensor.size) + print(pillar_z_tensor.shape) + pillar_z_panckage_list = MxpiDataType.MxpiTensorPackageList() + pillar_z_panckage = pillar_z_panckage_list.tensorPackageVec.add() + pillar_z_vec = pillar_z_panckage.tensorVec.add() + + pillar_z_byte = pillar_z_tensor.tobytes() + pillar_z_input = MxDataInput() + pillar_z_input.data = pillar_z_byte + + pillar_z_vec.deviceId = 0 + pillar_z_vec.memType = 0 + for i in pillar_z_tensor.shape: + pillar_z_vec.tensorShape.append(i) + pillar_z_vec.dataStr = pillar_z_input.data + pillar_z_vec.tensorDataSize = len(pillar_z_byte) + + plugin_id_z = 2 + key = "appsrc{}".format(plugin_id_z).encode('utf-8') + buffer_vec_z = InProtobufVector() + zbuf = MxProtobufIn() + zbuf.key = key + zbuf.type = b'MxTools.MxpiTensorPackageList' + zbuf.protobuf = pillar_z_panckage_list.SerializeToString() + buffer_vec_z.push_back(zbuf) + + # Get the pillar_i + pillar_i = np.fromfile(f"{file_dir}/pillar_i.bin", dtype=np.float16) + pillar_i = pillar_i.astype(np.float16).reshape((1, 12000, 100)) + pillar_i_tensor = pillar_i[None] + print("---------------PILLAR_I INFO--------------") + print(pillar_i_tensor.size) + print(pillar_i_tensor.shape) + pillar_i_panckage_list = MxpiDataType.MxpiTensorPackageList() + pillar_i_panckage = pillar_i_panckage_list.tensorPackageVec.add() + pillar_i_vec = pillar_i_panckage.tensorVec.add() + + pillar_i_byte = pillar_i_tensor.tobytes() + pillar_i_input = MxDataInput() + pillar_i_input.data = pillar_i_byte + + pillar_i_vec.deviceId = 0 + pillar_i_vec.memType = 0 + for i in pillar_i_tensor.shape: + pillar_i_vec.tensorShape.append(i) + pillar_i_vec.dataStr = pillar_i_input.data + pillar_i_vec.tensorDataSize = len(pillar_i_byte) + + plugin_id_i = 3 + key = "appsrc{}".format(plugin_id_i).encode('utf-8') + buffer_vec_i = InProtobufVector() + ibuf = MxProtobufIn() + ibuf.key = key + ibuf.type = b'MxTools.MxpiTensorPackageList' + ibuf.protobuf = pillar_i_panckage_list.SerializeToString() + buffer_vec_i.push_back(ibuf) + + # Get the num_points_per_pillar + num_points_per_pillar = np.fromfile(f"{file_dir}/num_points_per_pillar.bin", dtype=np.float16) + num_points_per_pillar = 
num_points_per_pillar.astype(np.float16).reshape((12000,)) + num_points_per_pillar_tensor = num_points_per_pillar[None] + print("---------------NUM INFO--------------") + print(num_points_per_pillar_tensor.size) + print(num_points_per_pillar_tensor.shape) + num_points_per_pillar_panckage_list = MxpiDataType.MxpiTensorPackageList() + num_points_per_pillar_panckage = num_points_per_pillar_panckage_list.tensorPackageVec.add() + num_points_per_pillar_vec = num_points_per_pillar_panckage.tensorVec.add() + + num_points_per_pillar_byte = num_points_per_pillar_tensor.tobytes() + num_points_per_pillar_input = MxDataInput() + num_points_per_pillar_input.data = num_points_per_pillar_byte + + num_points_per_pillar_vec.deviceId = 0 + num_points_per_pillar_vec.memType = 0 + for i in num_points_per_pillar_tensor.shape: + num_points_per_pillar_vec.tensorShape.append(i) + num_points_per_pillar_vec.dataStr = num_points_per_pillar_input.data + num_points_per_pillar_vec.tensorDataSize = len(num_points_per_pillar_byte) + + plugin_id_num = 4 + key = "appsrc{}".format(plugin_id_num).encode('utf-8') + buffer_vec_num = InProtobufVector() + numbuf = MxProtobufIn() + numbuf.key = key + numbuf.type = b'MxTools.MxpiTensorPackageList' + numbuf.protobuf = num_points_per_pillar_panckage_list.SerializeToString() + buffer_vec_num.push_back(numbuf) + + # Get the x_sub + x_sub = np.fromfile(f"{file_dir}/x_sub_shaped.bin", dtype=np.float16) + x_sub = x_sub.astype(np.float16).reshape((1, 12000, 100)) + x_sub_tensor = x_sub[None] + print("---------------X_SUB INFO--------------") + print(x_sub_tensor.size) + print(x_sub_tensor.shape) + x_sub_panckage_list = MxpiDataType.MxpiTensorPackageList() + x_sub_panckage = x_sub_panckage_list.tensorPackageVec.add() + x_sub_vec = x_sub_panckage.tensorVec.add() + + x_sub_byte = x_sub_tensor.tobytes() + x_sub_input = MxDataInput() + x_sub_input.data = x_sub_byte + + x_sub_vec.deviceId = 0 + x_sub_vec.memType = 0 + for i in x_sub_tensor.shape: + x_sub_vec.tensorShape.append(i) + x_sub_vec.dataStr = x_sub_input.data + x_sub_vec.tensorDataSize = len(x_sub_byte) + + plugin_id_x_sub = 5 + key = "appsrc{}".format(plugin_id_x_sub).encode('utf-8') + buffer_vec_x_sub = InProtobufVector() + x_sub_buf = MxProtobufIn() + x_sub_buf.key = key + x_sub_buf.type = b'MxTools.MxpiTensorPackageList' + x_sub_buf.protobuf = x_sub_panckage_list.SerializeToString() + buffer_vec_x_sub.push_back(x_sub_buf) + + # Get the y_sub + y_sub = np.fromfile(f"{file_dir}/y_sub_shaped.bin", dtype=np.float16) + y_sub = y_sub.astype(np.float16).reshape((1, 12000, 100)) + y_sub_tensor = y_sub[None] + print("---------------Y_SUB INFO--------------") + print(y_sub_tensor.size) + print(y_sub_tensor.shape) + y_sub_panckage_list = MxpiDataType.MxpiTensorPackageList() + y_sub_panckage = y_sub_panckage_list.tensorPackageVec.add() + y_sub_vec = y_sub_panckage.tensorVec.add() + + y_sub_byte = y_sub_tensor.tobytes() + y_sub_input = MxDataInput() + y_sub_input.data = y_sub_byte + + y_sub_vec.deviceId = 0 + y_sub_vec.memType = 0 + for i in y_sub_tensor.shape: + y_sub_vec.tensorShape.append(i) + y_sub_vec.dataStr = y_sub_input.data + y_sub_vec.tensorDataSize = len(y_sub_byte) + + plugin_id_y_sub = 6 + key = "appsrc{}".format(plugin_id_y_sub).encode('utf-8') + buffer_vec_y_sub = InProtobufVector() + y_sub_buf = MxProtobufIn() + y_sub_buf.key = key + y_sub_buf.type = b'MxTools.MxpiTensorPackageList' + y_sub_buf.protobuf = y_sub_panckage_list.SerializeToString() + buffer_vec_y_sub.push_back(y_sub_buf) + + # Get the mask + mask = 
np.fromfile(f"{file_dir}/mask.bin", dtype=np.float16) + mask = mask.astype(np.float16).reshape((1, 12000, 100)) + mask_tensor = mask[None] + print("---------------MASK INFO--------------") + print(mask_tensor.size) + print(mask_tensor.shape) + mask_panckage_list = MxpiDataType.MxpiTensorPackageList() + mask_panckage = mask_panckage_list.tensorPackageVec.add() + mask_vec = mask_panckage.tensorVec.add() + + mask_byte = mask_tensor.tobytes() + mask_input = MxDataInput() + mask_input.data = mask_byte + + mask_vec.deviceId = 0 + mask_vec.memType = 0 + for i in mask_tensor.shape: + mask_vec.tensorShape.append(i) + mask_vec.dataStr = mask_input.data + mask_vec.tensorDataSize = len(mask_byte) + + plugin_id_mask = 7 + key = "appsrc{}".format(plugin_id_mask).encode('utf-8') + buffer_vec_mask = InProtobufVector() + mask_buf = MxProtobufIn() + mask_buf.key = key + mask_buf.type = b'MxTools.MxpiTensorPackageList' + mask_buf.protobuf = mask_panckage_list.SerializeToString() + buffer_vec_mask.push_back(mask_buf) + + # Send data to the stream + unique_id_x = stream_manager_api.SendProtobuf(stream_name, plugin_id_x, buffer_vec_x) + unique_id_y = stream_manager_api.SendProtobuf(stream_name, plugin_id_y, buffer_vec_y) + unique_id_z = stream_manager_api.SendProtobuf(stream_name, plugin_id_z, buffer_vec_z) + unique_id_i = stream_manager_api.SendProtobuf(stream_name, plugin_id_i, buffer_vec_i) + unique_id_num = stream_manager_api.SendProtobuf(stream_name, plugin_id_num, buffer_vec_num) + unique_id_x_sub = stream_manager_api.SendProtobuf(stream_name, plugin_id_x_sub, buffer_vec_x_sub) + unique_id_y_sub = stream_manager_api.SendProtobuf(stream_name, plugin_id_y_sub, buffer_vec_y_sub) + unique_id_mask = stream_manager_api.SendProtobuf(stream_name, plugin_id_mask, buffer_vec_mask) + begin_time = datetime.datetime.now() + if unique_id_x < 0 or unique_id_y < 0 or unique_id_z < 0 or unique_id_i < 0 \ + or unique_id_num < 0 or unique_id_x_sub < 0 or unique_id_y_sub < 0 or unique_id_mask < 0: + print("Failed to send data to stream.") + exit() + + key_vec = StringVector() + key_vec.push_back(b'mxpi_tensorinfer0') + # get inference result + get_result = stream_manager_api.GetResult(stream_name, b'appsink0', key_vec) + spend_time = (datetime.datetime.now() - begin_time).total_seconds() + if get_result.errorCode != 0: + print("ERROR") + exit() + print("-----------Result---------------") + print(get_result) + + infer_result = get_result.metadataVec[0] + + result = MxpiDataType.MxpiTensorPackageList() + result.ParseFromString(infer_result.serializedMetadata) + result.tensorPackageVec[0].tensorVec[0].dataStr + result_np = np.frombuffer(result.tensorPackageVec[0].tensorVec[0].dataStr, dtype = np.float32) + result_np.tofile(f"{file_dir}/feature.bin") + + # Pillar Scatter + pillar_feature = np.fromfile(f"{file_dir}/feature.bin", dtype=np.float32) + pillar_feature = pillar_feature.astype(np.float16).reshape((1, 64, 12000, 1)) + print(pillar_feature.shape) + coors = np.load(f"{file_dir}/coor.npy") + print(coors.shape) + pseudo_image = get_pseudo_image(pillar_feature, coors).astype(np.float16) + print(pseudo_image.shape) + pseudo_image.tofile(f"{file_dir}/pseudo_image.bin") + + # Get the pseudo image + pseudo_image = np.fromfile(f"{file_dir}/pseudo_image.bin", dtype=np.float16) + pseudo_image = pseudo_image.astype(np.float32).reshape((64, 496, 432)) + pseudo_image_tensor = pseudo_image[None] + print("---------------PSEUDO IMAGE INFO--------------") + print(pseudo_image_tensor.size) + print(pseudo_image_tensor.shape) + 
pseudo_image_panckage_list = MxpiDataType.MxpiTensorPackageList() + pseudo_image_panckage = pseudo_image_panckage_list.tensorPackageVec.add() + pseudo_image_vec = pseudo_image_panckage.tensorVec.add() + + pseudo_image_byte = pseudo_image_tensor.tobytes() + pseudo_image_input = MxDataInput() + pseudo_image_input.data = pseudo_image_byte + + pseudo_image_vec.deviceId = 0 + pseudo_image_vec.memType = 0 + for i in pseudo_image_tensor.shape: + pseudo_image_vec.tensorShape.append(i) + pseudo_image_vec.dataStr = pseudo_image_input.data + pseudo_image_vec.tensorDataSize = len(pseudo_image_byte) + + plugin_id_pseudo_image = 0 + key = "appsrc{}".format(plugin_id_pseudo_image).encode('utf-8') + buffer_vec_pseudo_image = InProtobufVector() + pseudo_image_buf = MxProtobufIn() + pseudo_image_buf.key = key + pseudo_image_buf.type = b'MxTools.MxpiTensorPackageList' + pseudo_image_buf.protobuf = pseudo_image_panckage_list.SerializeToString() + buffer_vec_pseudo_image.push_back(pseudo_image_buf) + + # Send data to the stream + unique_id_pseudo_image = stream_manager_api_rpn.\ + SendProtobuf(stream_name_rpn, plugin_id_pseudo_image, buffer_vec_pseudo_image) + begin_time = datetime.datetime.now() + if unique_id_pseudo_image < 0: + print("Failed to send data to stream.") + exit() + + + key_vec = StringVector() + key_vec.push_back(b'mxpi_tensorinfer0') + # get inference result + get_result = stream_manager_api_rpn.GetResult(stream_name_rpn, b'appsink0', key_vec) + spend_time += (datetime.datetime.now() - begin_time).total_seconds() + if get_result.errorCode != 0: + print("ERROR") + exit() + print("-----------Result---------------") + infer_result = get_result.metadataVec[0] + result = MxpiDataType.MxpiTensorPackageList() + result.ParseFromString(infer_result.serializedMetadata) + result_box = result.tensorPackageVec[0].tensorVec[0].dataStr + result_cls = result.tensorPackageVec[0].tensorVec[1].dataStr + result_dir = result.tensorPackageVec[0].tensorVec[2].dataStr + result_shape0 = result.tensorPackageVec[0].tensorVec[0].tensorShape + result_shape1 = result.tensorPackageVec[0].tensorVec[1].tensorShape + result_shape2 = result.tensorPackageVec[0].tensorVec[2].tensorShape + print(result_shape0) + print(result_shape1) + print(result_shape2) + result_box_np = np.frombuffer(result_box, dtype = np.float32) + result_cls_np = np.frombuffer(result_cls, dtype = np.float32) + result_dir_np = np.frombuffer(result_dir, dtype = np.float32) + result_dir = "../result/test/" + result_box_np.tofile(f"{result_dir}/box.bin") + result_cls_np.tofile(f"{result_dir}/cls.bin") + result_dir_np.tofile(f"{result_dir}/dir.bin") + print("The total time consumed for model inference is : ", spend_time, "s") + +if __name__ == '__main__': + fire.Fire() diff --git a/PointPillars/src/point_to_pillars.py b/PointPillars/src/point_to_pillars.py new file mode 100644 index 0000000..9056da1 --- /dev/null +++ b/PointPillars/src/point_to_pillars.py @@ -0,0 +1,197 @@ +""" +# Copyright(C) 2022. Huawei Technologies Co.,Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+import os
+import numpy as np
+import torch
+import fire
+
+
+def read_lidar_info(file_path):
+    # Each KITTI lidar point is four float32 values: x, y, z, intensity.
+    size = os.path.getsize(file_path)
+    point_num = int(size / 16)
+    assert point_num * 16 == size
+    lidar_pt_list = np.fromfile(file_path, dtype=np.float32)
+    lidar_pt_list = lidar_pt_list.astype(np.float64).reshape(point_num, 4)
+    return lidar_pt_list
+
+
+def points_to_voxel_kernel(points,
+                           voxel_size,
+                           coors_range,
+                           num_points_per_voxel,
+                           coor_to_voxelidx,
+                           voxels,
+                           coors,
+                           max_points=100,
+                           max_voxels=12000):
+    point_cnt = points.shape[0]
+    ndim = 3
+    grid_size = (coors_range[3:] - coors_range[:3]) / voxel_size
+    grid_size = np.round(grid_size).astype(np.int32)
+
+    coor = np.zeros(shape=(3, ), dtype=np.int32)
+    voxel_num = 0
+    failed = False
+    for i in range(point_cnt):
+        failed = False
+        for j in range(ndim):
+            c = np.floor((points[i, j] - coors_range[j]) / voxel_size[j])
+            if c < 0 or c >= grid_size[j]:
+                failed = True
+                break
+            coor[j] = c
+        if failed:
+            continue
+        voxelidx = coor_to_voxelidx[coor[0], coor[1], coor[2]]
+        if voxelidx == -1:
+            voxelidx = voxel_num
+            if voxel_num >= max_voxels:
+                break
+            voxel_num += 1
+            coor_to_voxelidx[coor[0], coor[1], coor[2]] = voxelidx
+            coors[voxelidx] = coor
+        num = num_points_per_voxel[voxelidx]
+        if num < max_points:
+            voxels[voxelidx, num] = points[i]
+            num_points_per_voxel[voxelidx] += 1
+    return voxel_num
+
+
+def points_to_voxel(points,
+                    voxel_size,  # (0.16, 0.16, 4.0)
+                    coors_range,  # (0.0, -39.68, -3.0, 69.12, 39.68, 1.0)
+                    max_points=100,
+                    max_voxels=12000):
+    if not isinstance(voxel_size, np.ndarray):
+        voxel_size = np.array(voxel_size, dtype=points.dtype)
+    if not isinstance(coors_range, np.ndarray):
+        coors_range = np.array(coors_range, dtype=points.dtype)
+    voxelmap_shape = (coors_range[3:] - coors_range[:3]) / voxel_size
+    voxelmap_shape = tuple(np.round(voxelmap_shape).astype(np.int32).tolist())
+    num_points_per_voxel = np.zeros(shape=(max_voxels, ), dtype=np.int32)
+    coor_to_voxelidx = -np.ones(shape=voxelmap_shape, dtype=np.int32)
+    voxels = np.zeros(
+        shape=(max_voxels, max_points, points.shape[-1]), dtype=points.dtype)
+    coors = np.zeros(shape=(max_voxels, 3), dtype=np.int32)
+    voxel_num = points_to_voxel_kernel(
+        points, voxel_size, coors_range, num_points_per_voxel,
+        coor_to_voxelidx, voxels, coors, max_points, max_voxels)
+
+    coors = coors[:voxel_num]
+    voxels = voxels[:voxel_num]
+    num_points_per_voxel = num_points_per_voxel[:voxel_num]
+    return voxels, coors, num_points_per_voxel
+
+
+def get_sub_shaped(coors):
+    # 0.08 is half the 0.16 m voxel size; -39.6 = -39.68 + 0.08 (grid origin offset)
+    x_sub = coors[:, 0] * 0.16 + 0.08
+    y_sub = coors[:, 1] * 0.16 - 39.6
+    x_sub_shaped = np.zeros((12000, 100))
+    y_sub_shaped = np.zeros((12000, 100))
+    for i in range(0, 100):
+        x_sub_shaped[:12000, i] = x_sub
+        y_sub_shaped[:12000, i] = y_sub
+    x_sub_shaped = torch.as_tensor(x_sub_shaped).unsqueeze(0).unsqueeze(0).numpy()
+    y_sub_shaped = torch.as_tensor(y_sub_shaped).unsqueeze(0).unsqueeze(0).numpy()
+    return x_sub_shaped, y_sub_shaped
+
+
+def pillar_expand(voxel):
+    pillar = np.zeros((12000, 100))
+    pillar_len = voxel.shape[0]
+    for i in range(0, 100):
+        pillar[:pillar_len, i] = voxel[:, i]
+    return pillar
+
+
+def cnt_expand(num_points_per_voxels):
+    cnt = np.zeros(12000)
+    cnt_len = num_points_per_voxels.shape[0]
+    cnt[:cnt_len] = num_points_per_voxels
+    return cnt
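+
+
+# Editorial note (a minimal sketch, not part of the original sources):
+# pillar_expand / cnt_expand above and coors_expand below pad the
+# variable-length voxelization output to the fixed shapes the om models were
+# exported with (max_voxels=12000, max_points=100). Assuming the shapes
+# returned by points_to_voxel:
+#
+#     voxels, coor, counts = points_to_voxel(points, voxel_size, coors_range)
+#     pillar = pillar_expand(voxels[:, :, 0])   # (12000, 100)
+#     cnt = cnt_expand(counts)                  # (12000,)
+#     padded = coors_expand(coor)               # (12000, 3)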
+
+
+def coors_expand(coor):
+    coors = np.zeros((12000, 3))
+    coors_len = coor.shape[0]
+    coors[:coors_len, :] = coor[:, :]
+    return coors
+
+
+def get_mask(actual_num_numpy, max_num, axis=0):
+    actual_num = torch.as_tensor(actual_num_numpy)
+    actual_num = torch.unsqueeze(actual_num, axis + 1)
+    max_num_shape = [1] * len(actual_num.shape)
+    max_num_shape[axis + 1] = -1
+    max_num = torch.arange(max_num, dtype=torch.int, device=actual_num.device).view(max_num_shape)
+    paddings_indicator = actual_num.int() > max_num
+    paddings_indicator = paddings_indicator.permute(0, 2, 1)
+    paddings_indicator = paddings_indicator.unsqueeze(1)
+    return paddings_indicator
+
+
+def generate(file_dir="../data/test/"):
+    point = read_lidar_info(f"{file_dir}/point.bin")
+    voxel_size = [0.16, 0.16, 4.0]
+    coors_range = [0.0, -39.68, -3.0, 69.12, 39.68, 1.0]
+    voxels, coor, num_points_per_voxels = points_to_voxel(point, voxel_size, coors_range)
+    coors = coors_expand(coor)
+    print(point.shape)
+    print(voxels.shape)
+    print(coors.shape)
+    print(num_points_per_voxels.shape)
+    pillar_x = torch.as_tensor(pillar_expand(voxels[:, :, 0])).unsqueeze(0).unsqueeze(0).numpy().astype(np.float16)
+    pillar_y = torch.as_tensor(pillar_expand(voxels[:, :, 1])).unsqueeze(0).unsqueeze(0).numpy().astype(np.float16)
+    pillar_z = torch.as_tensor(pillar_expand(voxels[:, :, 2])).unsqueeze(0).unsqueeze(0).numpy().astype(np.float16)
+    pillar_i = torch.as_tensor(pillar_expand(voxels[:, :, 3])).unsqueeze(0).unsqueeze(0).numpy().astype(np.float16)
+    x_sub_shaped, y_sub_shaped = get_sub_shaped(coors)
+    x_sub_shaped = x_sub_shaped.astype(np.float16)
+    y_sub_shaped = y_sub_shaped.astype(np.float16)
+    num_points_per_pillar = torch.as_tensor(cnt_expand(num_points_per_voxels)).unsqueeze(0).numpy().astype(np.float16)
+    num_points_for_pillar = torch.as_tensor(pillar_x).size()[3]
+    mask = get_mask(num_points_per_pillar, num_points_for_pillar, axis=0).numpy().astype(np.float16)
+    print(pillar_x.shape)
+    print(pillar_y.shape)
+    print(pillar_z.shape)
+    print(pillar_i.shape)
+    print(x_sub_shaped.shape)
+    print(y_sub_shaped.shape)
+    print(num_points_per_pillar.shape)
+    print(mask.shape)
+
+    pillar_x.tofile(f"{file_dir}/pillar_x.bin")
+    pillar_y.tofile(f"{file_dir}/pillar_y.bin")
+    pillar_z.tofile(f"{file_dir}/pillar_z.bin")
+    pillar_i.tofile(f"{file_dir}/pillar_i.bin")
+    x_sub_shaped.tofile(f"{file_dir}/x_sub_shaped.bin")
+    y_sub_shaped.tofile(f"{file_dir}/y_sub_shaped.bin")
+    num_points_per_pillar.tofile(f"{file_dir}/num_points_per_pillar.bin")
+    mask.tofile(f"{file_dir}/mask.bin")
+
+    np.save(f"{file_dir}/coor.npy", coors)
+
+
+if __name__ == '__main__':
+    fire.Fire()
diff --git a/PointPillars/view.sh b/PointPillars/view.sh
new file mode 100644
index 0000000..13d3196
--- /dev/null
+++ b/PointPillars/view.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+# Copyright(C) 2022. Huawei Technologies Co.,Ltd. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+set -e + +cp data/test/point.bin result/test/ +cd src/ +python get_preds.py viewer --file_dir="../result/test/" + +exit 0
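+
+# Editorial usage sketch (not part of the original script; paths assumed from
+# the repository layout above):
+#   bash infer.sh   # runs PFE + RPN inference, writing box.bin / cls.bin /
+#                   # dir.bin under result/test/
+#   bash view.sh    # copies point.bin alongside them, then decodes the boxes
+#                   # and opens the mayavi viewer via get_preds.py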