commit e6038aa0bd3198087e0188e21ca19d6db364c430
Author: we0091234 <447587096@qq.com>
Date:   Fri Aug 11 17:13:31 2023 +0800

    first commit

diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..5f88239
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,32 @@
+# .gitignore
+# First, ignore all files
+*
+# But do not ignore directories
+!*/
+# Ignore some specific directory names
+ut/
+runs/
+.vscode/
+build/
+result/
+*.pyc
+pretrained_model/
+# Do not ignore the following file types
+!*.cpp
+!*.h
+!*.hpp
+!*.c
+!.gitignore
+!*.py
+!*.sh
+!*.npy
+!*.jpg
+!*.pt
+!*.npy
+!*.pth
+!*.png
+!*.md
+!*.txt
+!*.yaml
+!*.ttf
+!*.cu
\ No newline at end of file
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 0000000..dd05788
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,115 @@
+## Contributing to YOLOv8 🚀
+
+We love your input! We want to make contributing to YOLOv8 as easy and transparent as possible, whether it's:
+
+- Reporting a bug
+- Discussing the current state of the code
+- Submitting a fix
+- Proposing a new feature
+- Becoming a maintainer
+
+YOLOv8 works so well due to our combined community effort, and for every small improvement you contribute you will be
+helping push the frontiers of what's possible in AI 😃!
+
+## Submitting a Pull Request (PR) 🛠️
+
+Submitting a PR is easy! This example shows how to submit a PR for updating `requirements.txt` in 4 steps:
+
+### 1. Select File to Update
+
+Select `requirements.txt` to update by clicking on it in GitHub.

+<p align="center">[screenshot: PR_step1]</p>

+### 2. Click 'Edit this file'
+
+The button is in the top-right corner.

+<p align="center">[screenshot: PR_step2]</p>

+### 3. Make Changes
+
+Change `matplotlib` version from `3.2.2` to `3.3`.

+<p align="center">[screenshot: PR_step3]</p>

+### 4. Preview Changes and Submit PR
+
+Click on the **Preview changes** tab to verify your updates. At the bottom of the screen select 'Create a **new branch**
+for this commit', assign your branch a descriptive name such as `fix/matplotlib_version` and click the green **Propose
+changes** button. All done, your PR is now submitted to YOLOv8 for review and approval 😃!

+<p align="center">[screenshot: PR_step4]</p>
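
The four steps above use the GitHub web editor; the same `requirements.txt` change can also be proposed from a local clone. A minimal sketch of that route (the fork URL, branch name, and commit message below are placeholders, not part of the official guide):

```
# fork ultralytics/ultralytics on GitHub first, then:
git clone https://github.com/<your-username>/ultralytics.git
cd ultralytics
git checkout -b fix/matplotlib_version

# edit requirements.txt (matplotlib 3.2.2 -> 3.3), then:
git add requirements.txt
git commit -m "Update matplotlib version to 3.3"
git push -u origin fix/matplotlib_version
# finally, open the Pull Request from this branch on GitHub
```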

+### PR recommendations
+
+To allow your work to be integrated as seamlessly as possible, we advise you to:
+
+- ✅ Verify your PR is **up-to-date** with `ultralytics/ultralytics` `main` branch. If your PR is behind you can update
+  your code by clicking the 'Update branch' button or by running `git pull` and `git merge main` locally.

+<p align="center">[screenshot: Screenshot 2022-08-29 at 22 47 15]</p>

+- ✅ Verify all YOLOv8 Continuous Integration (CI) **checks are passing**.

+<p align="center">[screenshot: Screenshot 2022-08-29 at 22 47 03]</p>

+- ✅ Reduce changes to the absolute **minimum** required for your bug fix or feature addition. _"It is not daily increase
+  but daily decrease, hack away the unessential. The closer to the source, the less wastage there is."_ — Bruce Lee
+
+### Docstrings
+
+Not all functions or classes require docstrings, but when they do, we
+follow the [Google-style docstring format](https://google.github.io/styleguide/pyguide.html#38-comments-and-docstrings).
+Here is an example:
+
+```python
+"""
+    What the function does. Performs NMS on given detection predictions.
+
+    Args:
+        arg1: The description of the 1st argument
+        arg2: The description of the 2nd argument
+
+    Returns:
+        What the function returns. Empty if nothing is returned.
+
+    Raises:
+        Exception Class: When and why this exception can be raised by the function.
+"""
+```
+
+## Submitting a Bug Report 🐛
+
+If you spot a problem with YOLOv8, please submit a Bug Report!
+
+For us to start investigating a possible problem we need to be able to reproduce it ourselves first. We've created a few
+short guidelines below to help users provide what we need in order to get started.
+
+When asking a question, people will be better able to provide help if you provide **code** that they can easily
+understand and use to **reproduce** the problem. This is referred to by community members as creating
+a [minimal reproducible example](https://stackoverflow.com/help/minimal-reproducible-example). Your code that reproduces
+the problem should be (a short sketch follows at the end of this section):
+
+- ✅ **Minimal** – Use as little code as possible that still produces the same problem
+- ✅ **Complete** – Provide **all** parts someone else needs to reproduce your problem in the question itself
+- ✅ **Reproducible** – Test the code you're about to provide to make sure it reproduces the problem
+
+In addition to the above requirements, for [Ultralytics](https://ultralytics.com/) to provide assistance, your code
+should be:
+
+- ✅ **Current** – Verify that your code is up-to-date with the current
+  GitHub [main](https://github.com/ultralytics/ultralytics/tree/main) branch, and if necessary `git pull` or `git clone`
+  a new copy to ensure your problem has not already been resolved by previous commits.
+- ✅ **Unmodified** – Your problem must be reproducible without any modifications to the codebase in this
+  repository. [Ultralytics](https://ultralytics.com/) does not provide support for custom code ⚠️.
+
+If you believe your problem meets all of the above criteria, please raise a new issue using the 🐛
+**Bug Report** [template](https://github.com/ultralytics/ultralytics/issues/new/choose) and provide
+a [minimal reproducible example](https://stackoverflow.com/help/minimal-reproducible-example) to help us better
+understand and diagnose your problem.
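
To make the **Minimal / Complete / Reproducible** points concrete, a bug-report script for this repository could look like the sketch below. Everything in it is either a placeholder or copied from the README's example command in this commit; replace it with the exact code, weights, and image that trigger your issue.

```python
"""Sketch of a minimal reproducible example for a bug report (placeholder content)."""
import sys

import torch

# 1. Environment information reviewers will ask for
print(sys.version)
print("torch:", torch.__version__, "cuda available:", torch.cuda.is_available())

# 2. The single, unmodified call that misbehaves, e.g. the README command:
#    python detect_rec_plate.py --detect_model weights/yolov8-lite-t-plate.pt \
#        --rec_model weights/plate_rec_color.pth --image_path imgs --output result
# 3. Below this line, paste the full traceback or the incorrect output you observe.
```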
+ +## License + +By contributing, you agree that your contributions will be licensed under +the [GPL-3.0 license](https://choosealicense.com/licenses/gpl-3.0/) diff --git a/README.md b/README.md new file mode 100644 index 0000000..6004c53 --- /dev/null +++ b/README.md @@ -0,0 +1,49 @@ +## **yolov8车牌识别算法,支持12种中文车牌类型** + +**环境要求: python >=3.6 pytorch >=1.7 pip install requirements.txt** + +#### **图片测试demo:** + +直接运行detect_plate.py 或者运行如下命令行: + +``` +python detect_rec_plate.py --detect_model weights/yolov8-lite-t-plate.pt --rec_model weights/plate_rec_color.pth --image_path imgs --output result +``` + +测试文件夹imgs,结果保存再 result 文件夹中 + +## **车牌检测训练** + +车牌检测训练链接如下: + +[车牌检测训练](https://github.com/we0091234/Chinese_license_plate_detection_recognition/tree/main/readme) + +## **车牌识别训练** + +车牌识别训练链接如下: + +[车牌识别训练](https://github.com/we0091234/crnn_plate_recognition) + +#### **支持如下:** + +- [X] 1.单行蓝牌 +- [X] 2.单行黄牌 +- [X] 3.新能源车牌 +- [X] 4.白色警用车牌 +- [X] 5.教练车牌 +- [X] 6.武警车牌 +- [X] 7.双层黄牌 +- [X] 8.双层白牌 +- [X] 9.使馆车牌 +- [X] 10.港澳粤Z牌 +- [X] 11.双层绿牌 +- [X] 12.民航车牌 + +## References + +* [https://github.com/derronqi/yolov8-face](https://github.com/derronqi/yolov8-face) +* [https://github.com/ultralytics/ultralytics](https://github.com/ultralytics/ultralytics) + +## 联系 + +**有问题可以提issues 或者加qq群:871797331 询问** diff --git a/data/test.jpg b/data/test.jpg new file mode 100644 index 0000000..4a29e94 Binary files /dev/null and b/data/test.jpg differ diff --git a/data/widerface/val/label.txt b/data/widerface/val/label.txt new file mode 100644 index 0000000..c7c583e --- /dev/null +++ b/data/widerface/val/label.txt @@ -0,0 +1,42934 @@ +# 0--Parade/0_Parade_marchingband_1_465.jpg +345 211 4 4 +331 126 3 3 +250 126 3 4 +221 128 4 5 +427 116 3 4 +393 79 3 4 +373 119 3 4 +90 225 6 5 +128 237 5 8 +170 230 5 6 +114 285 5 7 +81 304 9 9 +44 303 10 8 +7 264 6 8 +31 231 5 6 +26 192 6 8 +66 196 4 6 +74 175 5 5 +113 168 4 5 +129 165 4 5 +158 162 4 4 +174 156 5 6 +197 159 4 4 +192 191 3 5 +242 179 5 6 +161 269 5 6 +0 317 3 8 +346 68 3 4 +418 62 2 3 +376 59 3 3 +120 599 12 17 +1008 256 6 10 +853 131 4 5 +878 131 5 5 +900 138 4 6 +922 142 5 5 +936 139 5 6 +959 134 5 7 +984 141 5 5 +1010 141 4 6 +877 168 7 9 +947 160 5 7 +963 190 5 7 +982 194 5 7 +798 133 4 4 +815 170 4 5 +794 166 4 6 +845 167 4 7 +839 145 3 5 +1003 164 5 7 +921 227 7 8 +881 215 5 6 +836 209 6 8 +801 204 6 8 +875 257 6 8 +932 270 6 7 +1002 295 8 8 +984 249 5 8 +918 173 5 6 +973 163 5 6 +913 160 5 6 +700 237 6 7 +664 239 6 8 +669 260 7 8 +618 264 6 8 +598 239 4 6 +627 235 8 9 +560 237 4 6 +524 246 4 4 +522 267 5 6 +607 198 6 7 +600 157 4 5 +628 154 4 5 +528 157 4 6 +505 142 4 5 +488 143 4 5 +503 165 5 7 +554 165 4 5 +769 160 6 7 +750 160 5 7 +682 155 5 6 +704 156 4 6 +658 153 5 6 +604 135 4 6 +687 127 4 6 +701 127 4 6 +732 130 5 6 +532 136 5 4 +570 137 3 4 +618 80 3 4 +538 78 3 4 +571 154 5 6 +744 104 4 5 +711 105 4 4 +660 106 4 5 +616 110 4 4 +579 109 4 4 +548 106 4 4 +510 104 4 3 +333 155 3 4 +350 168 4 6 +390 167 4 6 +366 250 5 7 +313 177 6 6 +231 251 5 5 +236 157 4 5 +224 152 4 5 +210 151 4 5 +262 157 3 4 +431 167 4 5 +427 205 4 6 +467 168 4 5 +466 257 8 9 +293 252 4 5 +568 273 4 5 +489 242 3 5 +603 61 2 2 +463 94 3 3 +382 147 4 5 +433 145 3 6 +465 68 3 5 +709 267 7 9 +761 268 7 8 +735 241 6 8 +75 247 10 10 +707 199 6 8 +# 0--Parade/0_Parade_Parade_0_628.jpg +26 299 10 16 +25 329 7 11 +84 341 6 8 +80 329 5 6 +110 335 4 9 +138 337 4 8 +151 351 3 5 +157 342 4 8 +165 346 3 6 +208 349 2 4 +96 338 4 7 +309 238 21 30 +451 197 24 42 +360 290 12 18 +578 234 19 31 +624 219 20 
33 +693 263 17 26 +708 296 9 15 +757 293 9 11 +786 290 11 13 +787 340 12 13 +885 315 9 13 +904 278 15 20 +939 296 12 14 +979 261 23 17 +944 214 5 7 +980 209 5 7 +1005 204 5 7 +733 325 7 9 +# 0--Parade/0_Parade_marchingband_1_765.jpg +311 131 8 9 +299 143 10 11 +284 150 9 13 +273 163 10 11 +260 173 10 13 +345 151 7 10 +370 161 9 12 +408 150 9 12 +398 173 7 11 +422 192 9 10 +384 205 9 12 +414 214 10 13 +351 183 10 14 +329 171 8 13 +309 203 10 12 +333 217 10 15 +362 230 13 16 +398 244 13 15 +339 265 14 15 +312 249 13 15 +284 234 12 14 +246 183 11 13 +233 198 12 16 +202 220 14 16 +172 256 14 18 +252 267 14 18 +288 289 12 19 +723 214 11 12 +705 230 10 16 +674 247 12 16 +764 230 12 14 +780 221 11 15 +746 251 12 16 +577 268 12 16 +647 265 12 18 +615 295 14 17 +584 314 15 18 +650 316 14 19 +688 293 15 21 +555 376 17 24 +612 382 16 20 +662 379 19 21 +692 352 16 19 +715 270 12 19 +785 273 14 17 +769 293 14 18 +727 311 16 19 +833 299 14 16 +805 322 15 19 +766 350 15 18 +727 381 18 23 +784 380 18 22 +845 371 19 24 +855 354 17 22 +731 181 9 11 +773 126 9 13 +780 138 10 13 +789 149 11 12 +804 156 9 14 +824 178 11 16 +833 202 14 16 +802 252 13 15 +848 225 13 15 +872 246 14 15 +893 266 12 17 +912 290 14 19 +905 384 17 21 +623 347 15 18 +317 305 15 18 +352 343 14 19 +307 370 17 24 +257 346 15 15 +226 317 13 19 +248 370 19 20 +196 374 16 19 +152 382 18 20 +139 275 15 17 +114 299 16 21 +90 322 18 27 +436 164 8 13 +463 147 9 12 +490 163 8 11 +522 146 8 11 +512 172 9 11 +486 190 10 13 +457 174 8 12 +449 203 10 11 +476 215 10 11 +505 202 11 13 +542 187 10 13 +541 208 10 14 +505 233 10 14 +442 233 13 18 +466 250 12 16 +501 264 12 16 +538 252 12 17 +423 268 12 16 +370 290 11 18 +405 309 15 19 +370 369 16 21 +452 341 16 20 +428 373 19 23 +494 366 16 24 +571 151 9 10 +550 167 8 10 +569 173 11 10 +599 166 9 10 +629 154 9 10 +659 165 8 10 +628 178 9 12 +654 189 9 12 +630 199 11 14 +599 189 10 12 +567 200 10 14 +569 234 11 13 +606 210 10 13 +606 242 13 17 +641 235 11 13 +665 210 10 15 +458 290 14 16 +502 313 13 21 +541 296 14 17 +539 346 14 20 +678 155 8 10 +717 154 10 13 +739 161 9 13 +705 167 9 11 +683 182 8 11 +713 192 10 12 +690 200 11 14 +758 189 9 14 +745 204 10 11 +# 0--Parade/0_Parade_Parade_0_194.jpg +111 425 122 127 +209 347 70 103 +368 252 89 133 +555 282 89 100 +707 252 92 133 +# 0--Parade/0_Parade_marchingband_1_379.jpg +281 303 20 36 +260 324 16 21 +896 312 14 20 +695 289 23 32 +809 320 17 25 +846 320 15 16 +953 316 14 16 +985 329 13 13 +783 319 11 17 +765 323 11 12 +658 324 16 23 +864 329 8 8 +877 328 7 9 +928 325 7 8 +1010 322 7 8 +746 303 5 9 +883 317 5 6 +598 323 11 15 +621 316 5 11 +581 299 6 9 +506 306 8 15 +537 316 19 31 +429 295 29 42 +383 309 15 25 +86 320 16 24 +188 322 19 28 +# 0--Parade/0_Parade_Parade_0_814.jpg +74 417 8 9 +54 394 4 6 +96 423 9 10 +102 395 8 10 +112 408 7 11 +125 404 8 11 +133 417 9 8 +151 407 7 9 +165 421 8 11 +166 401 8 11 +258 407 5 9 +290 394 6 8 +288 429 7 9 +321 407 8 10 +310 399 9 10 +306 419 6 8 +337 402 10 13 +407 455 16 14 +472 373 12 15 +495 378 12 17 +244 367 11 18 +407 394 3 4 +407 403 5 5 +526 411 6 9 +604 534 17 23 +647 404 9 11 +684 395 9 14 +705 437 8 11 +723 385 12 15 +753 378 13 15 +776 386 12 14 +764 514 22 24 +893 556 18 18 +924 527 19 21 +975 425 9 11 +947 409 6 8 +# 0--Parade/0_Parade_Parade_0_470.jpg +3 152 34 51 +36 166 85 104 +145 171 79 99 +223 171 61 87 +325 160 58 79 +418 200 64 90 +462 146 59 69 +532 182 66 93 +621 139 56 76 +596 243 79 100 +469 330 75 96 +312 365 84 113 +118 471 89 110 +295 240 66 83 +260 173 41 52 +702 220 65 95 +775 225 64 88 
+839 226 53 75 +847 175 52 69 +899 173 61 72 +931 266 61 77 +760 170 20 26 +993 192 9 11 +1008 205 3 6 +814 177 11 31 +# 0--Parade/0_Parade_marchingband_1_1045.jpg +1 314 12 24 +90 273 33 33 +180 314 20 27 +151 317 12 18 +213 306 12 19 +204 320 11 21 +287 302 14 25 +305 305 13 16 +357 296 21 25 +384 299 15 20 +440 307 15 19 +507 308 14 21 +487 312 9 14 +590 257 25 41 +662 272 27 37 +694 256 21 33 +755 305 16 24 +778 299 13 14 +791 303 13 14 +946 270 39 44 +421 312 6 8 +229 259 9 11 +# 0--Parade/0_Parade_marchingband_1_556.jpg +82 262 23 28 +147 287 35 43 +280 265 21 27 +347 255 29 37 +448 275 23 34 +495 281 30 39 +555 254 24 25 +565 298 40 60 +719 236 23 27 +746 237 32 37 +967 255 31 41 +639 254 29 40 +573 419 52 35 +# 0--Parade/0_Parade_Parade_0_829.jpg +501 160 285 443 +# 0--Parade/0_Parade_marchingband_1_593.jpg +318 324 7 8 +345 318 9 8 +311 356 8 10 +352 361 8 8 +360 384 9 10 +393 375 7 12 +430 376 8 11 +360 346 7 7 +372 324 8 11 +360 300 7 9 +397 294 9 9 +403 323 8 8 +436 290 9 8 +456 306 6 8 +471 314 7 11 +437 323 7 8 +447 348 8 10 +476 344 8 8 +469 380 8 12 +507 377 7 12 +542 374 8 12 +503 346 8 8 +487 310 8 9 +504 291 7 9 +533 294 7 9 +562 293 7 9 +520 309 8 9 +541 324 7 9 +531 345 8 7 +565 345 8 10 +501 321 8 8 +593 289 8 11 +580 307 8 10 +570 327 6 7 +613 304 9 11 +630 312 7 10 +647 304 9 13 +653 340 8 10 +624 342 8 9 +599 321 9 9 +599 348 9 9 +584 379 10 10 +623 378 9 11 +659 378 8 9 +722 294 6 9 +680 319 6 8 +695 313 8 9 +676 333 8 13 +700 328 8 10 +711 326 6 6 +734 323 8 8 +725 333 7 11 +698 347 8 12 +725 353 10 12 +752 328 8 13 +762 315 7 11 +772 304 7 9 +801 301 7 10 +791 320 8 9 +779 325 8 11 +800 327 9 11 +799 340 9 12 +760 344 9 14 +692 398 9 9 +862 335 10 13 +909 348 9 13 +137 307 9 11 +108 309 10 10 +122 319 7 9 +105 331 10 11 +78 344 9 12 +154 336 9 11 +155 352 9 11 +183 332 9 12 +176 305 8 9 +208 309 7 9 +211 333 7 11 +225 324 6 9 +188 355 6 11 +237 309 11 9 +254 323 9 10 +227 358 10 11 +272 305 8 10 +289 322 7 8 +306 306 9 9 +270 358 8 13 +# 0--Parade/0_Parade_Parade_0_29.jpg +539 162 63 76 +799 164 43 51 +486 232 17 22 +239 232 23 32 +193 238 21 25 +162 246 15 23 +139 248 14 16 +315 231 34 39 +421 247 21 23 +294 242 14 16 +119 244 12 14 +101 252 8 10 +219 250 10 14 +356 238 15 21 +409 240 13 16 +860 233 19 24 +976 184 38 45 +744 238 24 30 +704 245 17 21 +692 246 14 17 +669 241 7 9 +679 240 8 10 +648 241 7 10 +584 239 27 30 +618 253 11 12 +510 244 14 16 +459 249 8 8 +477 251 5 6 +391 256 5 5 +# 0--Parade/0_Parade_Parade_0_72.jpg +135 334 30 40 +10 474 11 22 +66 556 24 23 +206 363 23 31 +256 384 15 15 +348 464 22 32 +352 371 15 19 +411 379 15 21 +454 381 28 33 +496 335 18 25 +546 376 16 21 +414 450 15 21 +549 351 7 9 +583 374 7 7 +625 343 20 21 +628 363 18 24 +662 343 30 44 +739 365 18 22 +775 343 14 18 +# 0--Parade/0_Parade_Parade_0_205.jpg +260 303 78 96 +666 325 40 52 +810 308 31 39 +588 329 28 33 +712 336 32 44 +683 3 13 23 +# 0--Parade/0_Parade_marchingband_1_881.jpg +79 217 9 10 +98 241 8 13 +143 259 11 12 +188 216 8 9 +228 304 6 8 +242 264 8 11 +226 219 9 8 +281 209 7 12 +343 160 13 32 +540 287 8 10 +570 208 6 10 +729 181 9 33 +1001 262 7 10 +7 217 9 15 +24 220 7 6 +52 234 10 14 +799 218 6 10 +966 264 7 7 +201 299 7 10 +643 158 14 35 +# 0--Parade/0_Parade_marchingband_1_490.jpg +787 392 15 19 +838 390 13 19 +878 389 14 19 +930 388 15 19 +974 397 13 16 +931 368 11 16 +745 391 14 16 +698 392 13 17 +652 389 15 19 +602 389 13 17 +670 454 16 20 +574 360 13 17 +617 358 13 16 +663 359 13 18 +706 358 14 16 +752 359 16 17 +794 360 13 16 +835 358 14 19 +883 360 13 17 
+923 338 12 18 +881 333 13 15 +839 330 13 16 +793 331 12 15 +751 335 12 15 +715 330 13 16 +675 333 12 15 +631 333 12 14 +589 331 10 16 +602 305 10 15 +640 308 13 15 +680 307 12 16 +718 308 13 16 +759 309 12 16 +799 310 12 15 +838 313 13 16 +877 316 11 16 +576 284 9 13 +613 284 12 13 +652 287 10 13 +686 287 13 15 +725 285 12 16 +760 287 12 16 +798 285 13 16 +838 286 13 16 +872 298 11 13 +846 264 10 15 +800 269 12 13 +767 271 10 12 +735 267 10 15 +699 268 11 14 +662 267 12 16 +629 270 11 15 +593 267 13 16 +569 249 12 12 +606 248 11 14 +636 250 11 13 +669 250 12 15 +808 249 10 13 +839 249 10 13 +772 250 12 13 +706 249 9 13 +739 249 12 12 +687 235 9 13 +718 238 10 13 +627 220 9 12 +618 232 12 13 +584 234 9 13 +576 203 9 12 +593 221 10 12 +606 203 9 12 +590 188 10 12 +620 185 9 12 +653 188 9 14 +684 186 9 13 +667 201 10 16 +636 204 9 12 +660 219 10 13 +651 238 10 10 +691 220 9 13 +718 215 9 13 +746 229 9 16 +779 230 9 12 +811 230 9 13 +783 216 9 10 +791 209 8 10 +756 200 9 10 +752 217 10 12 +728 201 9 13 +701 202 10 12 +437 359 16 19 +455 392 15 16 +505 388 12 19 +552 391 14 17 +527 357 13 18 +484 357 12 17 +456 331 13 16 +500 330 12 14 +545 332 12 17 +563 305 12 16 +483 306 11 14 +494 285 12 15 +537 285 11 16 +521 307 14 13 +389 359 15 18 +407 387 13 17 +356 391 16 19 +309 391 12 14 +302 357 13 17 +330 328 10 16 +412 331 14 19 +288 329 12 16 +369 332 12 15 +442 302 11 15 +461 285 11 14 +482 271 11 13 +443 268 11 13 +423 284 11 16 +386 284 12 15 +361 305 11 16 +320 303 13 15 +283 305 13 15 +276 286 11 15 +305 267 10 14 +312 286 11 13 +350 284 11 15 +406 264 12 15 +432 246 11 14 +453 237 10 10 +467 248 10 13 +504 250 10 12 +537 251 10 12 +550 234 8 13 +563 218 9 12 +419 233 12 13 +328 247 10 13 +342 267 10 13 +372 265 11 16 +362 249 8 12 +397 249 10 12 +382 231 10 13 +347 230 11 15 +310 233 11 13 +293 248 10 13 +282 233 11 12 +275 214 8 10 +281 186 7 11 +295 200 10 12 +303 216 10 13 +327 199 9 11 +334 215 10 13 +310 185 9 11 +359 205 9 13 +339 187 10 11 +371 216 9 11 +402 217 10 12 +390 205 10 12 +374 191 10 11 +406 190 8 11 +420 202 8 12 +436 217 10 12 +466 216 10 10 +452 202 9 12 +436 193 9 12 +497 213 10 12 +517 235 10 10 +511 203 9 10 +484 202 9 12 +469 186 7 11 +497 185 8 11 +525 191 9 11 +546 205 8 12 +556 192 10 11 +555 266 12 15 +519 266 12 16 +401 307 12 15 +529 215 11 14 +32 392 14 17 +80 400 13 14 +124 401 12 17 +168 398 14 16 +217 396 15 17 +262 392 14 18 +260 354 13 17 +218 360 13 17 +169 356 16 16 +123 353 15 16 +79 361 14 16 +88 334 11 15 +125 329 12 14 +167 325 12 16 +206 326 12 15 +244 326 13 15 +245 304 14 17 +207 306 12 16 +164 306 12 13 +123 309 12 14 +125 289 10 13 +167 286 11 14 +195 265 11 14 +231 264 12 15 +154 259 10 14 +149 242 11 13 +183 242 11 13 +223 246 10 12 +258 246 10 14 +188 225 10 13 +219 225 10 13 +252 228 10 13 +245 213 10 13 +216 212 9 12 +186 210 9 11 +233 197 10 10 +265 197 10 14 +236 284 11 14 +201 283 10 13 +265 263 10 15 +483 233 10 11 +346 360 12 15 +538 437 14 22 +410 452 15 19 +292 455 14 18 +# 0--Parade/0_Parade_Parade_0_125.jpg +519 337 26 38 +583 277 18 25 +592 251 13 20 +475 270 20 27 +374 282 12 24 +1006 341 18 34 +56 432 9 14 +42 426 7 10 +12 412 13 14 +16 482 13 13 +874 444 6 7 +951 430 4 6 +898 428 6 7 +# 0--Parade/0_Parade_Parade_0_120.jpg +107 304 45 50 +23 368 29 39 +42 323 28 33 +327 314 17 20 +392 331 16 20 +295 344 11 17 +525 328 34 38 +492 315 16 23 +442 323 18 23 +554 325 12 18 +564 320 11 14 +627 318 15 19 +665 325 14 21 +366 333 10 12 +696 321 15 17 +743 311 16 18 +759 316 13 17 +905 305 13 22 +950 302 41 53 +977 321 28 48 +795 320 
9 12 +# 0--Parade/0_Parade_marchingband_1_476.jpg +742 174 53 69 +610 198 40 56 +418 279 42 50 +337 269 29 44 +281 254 23 39 +240 259 24 32 +204 249 22 33 +71 255 19 24 +60 232 14 19 +54 238 13 19 +453 220 30 35 +385 222 21 31 +352 224 21 28 +313 218 17 26 +218 186 15 21 +673 161 35 39 +590 168 25 35 +518 174 25 31 +798 130 43 52 +856 103 37 40 +575 163 17 23 +430 192 19 24 +399 201 16 22 +174 186 10 17 +198 178 15 19 +155 202 9 15 +326 145 13 16 +248 181 7 10 +262 180 9 13 +303 167 8 12 +311 167 12 15 +353 154 7 10 +405 141 12 14 +474 116 14 20 +639 56 16 19 +706 54 17 21 +784 36 19 20 +583 92 13 18 +812 41 16 16 +871 39 22 20 +497 191 17 22 +193 193 12 16 +740 37 14 20 +# 0--Parade/0_Parade_marchingband_1_620.jpg +81 191 18 16 +105 186 17 20 +118 274 20 24 +171 191 13 13 +194 179 16 18 +255 201 18 19 +306 187 18 18 +287 239 19 18 +321 242 23 23 +407 174 9 12 +395 194 12 13 +441 244 18 21 +468 194 14 15 +513 170 8 11 +585 179 14 16 +618 161 13 14 +720 162 11 13 +763 209 14 17 +607 241 19 20 +659 341 31 30 +764 340 30 30 +834 234 20 17 +819 254 24 23 +855 203 22 20 +900 217 21 20 +974 217 16 18 +952 235 20 20 +911 326 31 36 +841 353 30 30 +800 413 31 39 +1009 334 15 25 +# 0--Parade/0_Parade_Parade_0_960.jpg +48 210 112 158 +232 24 68 100 +712 240 66 100 +878 192 80 108 +# 0--Parade/0_Parade_marchingband_1_488.jpg +730 360 5 12 +769 371 6 9 +763 369 4 8 +809 383 5 8 +911 384 5 8 +943 374 5 11 +974 390 6 8 +1019 306 5 13 +644 349 7 14 +612 321 11 20 +687 330 9 16 +273 334 10 15 +324 313 12 21 +373 344 8 13 +446 268 19 29 +498 341 8 13 +517 354 5 10 +41 267 20 26 +95 362 5 7 +203 378 6 7 +# 0--Parade/0_Parade_Parade_0_901.jpg +7 399 201 342 +283 680 164 208 +712 351 292 374 +# 0--Parade/0_Parade_marchingband_1_822.jpg +179 96 67 106 +307 155 37 40 +448 131 50 69 +642 144 52 64 +751 96 34 44 +879 77 65 69 +# 0--Parade/0_Parade_marchingband_1_353.jpg +263 381 113 169 +635 271 134 169 +# 0--Parade/0_Parade_marchingband_1_74.jpg +190 362 34 45 +294 321 13 14 +265 315 13 14 +248 310 15 16 +275 299 15 16 +210 315 17 16 +176 301 16 16 +160 294 14 13 +260 267 12 13 +213 283 16 16 +432 364 15 18 +465 372 17 20 +407 357 15 18 +385 341 15 17 +462 339 15 18 +483 335 12 18 +457 316 13 15 +437 311 14 16 +421 313 14 18 +403 331 13 15 +401 295 13 14 +349 346 14 16 +313 329 15 19 +347 313 13 15 +374 323 15 17 +367 312 12 14 +346 297 12 14 +466 297 13 16 +631 268 13 13 +628 305 13 13 +616 324 15 16 +606 314 14 15 +589 315 14 15 +582 294 15 19 +562 282 13 13 +554 298 12 17 +572 332 14 17 +552 325 14 15 +536 325 16 17 +531 312 12 14 +545 279 12 14 +602 352 14 14 +617 361 14 21 +627 374 19 21 +574 359 14 15 +563 372 18 19 +546 344 15 20 +525 361 16 19 +493 303 15 17 +505 316 15 18 +510 345 15 17 +495 359 16 16 +632 328 12 14 +501 386 15 18 +528 406 14 18 +581 391 16 20 +592 383 16 17 +604 408 16 21 +755 273 14 15 +772 291 12 14 +764 302 14 17 +781 299 15 17 +793 306 15 18 +774 314 15 18 +781 335 16 22 +766 341 18 22 +751 324 16 19 +735 336 15 16 +732 304 14 17 +723 287 12 16 +705 302 15 15 +705 337 14 14 +741 371 16 18 +706 358 16 19 +683 347 16 18 +643 279 14 15 +660 282 14 16 +653 301 16 19 +671 321 15 16 +682 313 14 16 +781 383 16 19 +649 344 14 15 +640 345 12 16 +657 366 15 20 +698 383 16 20 +669 394 17 15 +810 271 12 10 +817 278 13 13 +854 292 14 15 +835 290 14 15 +888 309 13 14 +900 297 10 17 +887 326 14 17 +834 323 16 18 +820 336 15 16 +826 302 12 13 +867 339 13 13 +1004 273 14 16 +1003 298 13 14 +963 314 12 13 +950 330 15 16 +916 324 14 16 +901 343 16 16 +990 329 14 14 +972 352 13 15 +958 361 15 16 +938 
356 12 14 +928 365 15 16 +906 375 18 19 +866 383 16 18 +873 355 16 18 +888 350 14 17 +851 350 15 19 +838 375 18 21 +821 395 17 20 +759 393 17 21 +715 400 18 22 +658 412 19 24 +802 360 16 20 +812 349 14 18 +989 194 9 10 +614 298 11 18 +646 295 11 14 +688 300 11 14 +934 299 11 13 +884 291 15 16 +131 277 17 18 +192 290 11 15 +376 286 14 18 +10 111 9 10 +# 0--Parade/0_Parade_marchingband_1_234.jpg +188 328 16 15 +216 294 17 19 +244 335 15 16 +285 296 15 20 +306 314 16 19 +343 281 16 16 +255 258 16 16 +228 223 15 18 +292 232 13 16 +319 262 15 15 +354 233 12 13 +374 267 16 13 +401 220 15 14 +401 290 17 19 +367 309 15 20 +434 313 16 21 +438 263 13 15 +482 180 13 12 +554 168 16 16 +529 226 16 15 +468 236 14 15 +602 207 15 18 +670 212 16 16 +643 251 16 17 +570 270 15 17 +508 257 15 18 +490 311 14 19 +470 311 14 18 +540 295 16 16 +555 313 17 20 +613 322 14 21 +606 305 16 18 +677 285 15 18 +701 255 17 18 +728 320 16 16 +668 322 15 21 +775 258 16 16 +747 275 14 17 +737 233 14 15 +843 252 18 22 +808 284 16 16 +784 320 17 20 +835 327 18 16 +864 302 14 18 +208 423 15 17 +281 421 16 21 +335 414 16 20 +404 412 16 21 +471 404 16 22 +526 418 14 19 +582 416 15 20 +646 412 15 21 +703 421 15 18 +760 422 15 20 +# 0--Parade/0_Parade_marchingband_1_359.jpg +287 210 12 13 +909 120 22 28 +865 152 24 26 +1007 119 13 30 +608 190 18 22 +568 195 8 12 +665 193 6 6 +49 216 19 19 +0 216 15 21 +145 204 12 18 +272 202 10 12 +734 118 54 59 +167 173 34 40 +310 194 13 20 +412 195 17 19 +481 193 15 21 +506 194 12 14 +251 197 6 14 +364 200 9 13 +539 197 7 9 +# 0--Parade/0_Parade_Parade_0_266.jpg +867 573 15 38 +956 336 26 32 +914 452 12 14 +905 449 11 16 +911 490 15 19 +366 354 22 27 +26 419 15 16 +165 337 10 14 +207 326 10 15 +152 399 9 13 +74 370 11 12 +96 344 10 15 +276 326 8 9 +141 342 9 9 +5 328 11 15 +89 324 10 13 +989 333 22 30 +1008 312 16 23 +953 314 15 15 +# 0--Parade/0_Parade_Parade_0_275.jpg +0 0 0 0 +# 0--Parade/0_Parade_Parade_0_478.jpg +25 353 15 19 +44 373 13 13 +95 374 13 20 +193 375 11 19 +215 375 16 19 +656 450 11 12 +572 429 10 11 +611 447 7 11 +725 454 6 10 +843 474 16 22 +909 478 13 14 +1009 506 9 21 +752 482 11 18 +# 0--Parade/0_Parade_Parade_0_913.jpg +238 146 212 246 +612 192 206 234 +28 100 76 114 +# 0--Parade/0_Parade_marchingband_1_172.jpg +68 82 22 25 +50 72 15 18 +67 33 16 17 +111 83 26 31 +171 82 21 26 +162 67 22 24 +192 98 24 21 +230 87 20 25 +253 96 21 25 +279 96 22 25 +48 9 11 14 +17 8 12 13 +89 18 8 9 +73 5 9 9 +112 31 14 13 +128 26 9 9 +200 42 12 12 +216 14 9 9 +196 1 9 12 +207 65 16 20 +249 49 18 20 +283 43 18 16 +284 67 19 18 +304 79 19 20 +267 12 10 11 +284 13 9 9 +274 4 10 9 +245 169 31 40 +330 126 17 17 +372 110 26 31 +333 18 10 12 +337 5 11 12 +369 86 20 22 +404 68 16 17 +396 104 17 26 +438 106 24 29 +492 115 22 22 +440 74 20 22 +488 71 18 22 +487 31 11 12 +409 32 10 10 +399 12 10 12 +371 12 13 13 +469 42 12 14 +427 16 11 15 +486 4 11 13 +521 10 10 11 +570 244 39 45 +671 183 28 30 +508 143 26 33 +561 135 23 25 +590 113 25 26 +649 102 22 29 +696 140 25 25 +709 102 21 24 +523 95 20 26 +585 79 20 23 +643 78 21 20 +694 75 19 22 +675 54 15 18 +649 43 15 16 +608 50 14 13 +559 51 17 20 +540 70 15 19 +552 18 10 10 +568 24 9 12 +717 27 10 12 +674 18 14 17 +785 48 12 12 +744 52 11 12 +777 104 23 29 +804 79 20 22 +745 124 27 35 +840 100 23 27 +870 104 20 25 +813 36 9 11 +840 26 11 12 +796 22 10 12 +877 25 11 15 +855 32 11 13 +695 35 10 13 +701 20 8 10 +649 14 8 11 +823 22 11 14 +778 26 11 13 +748 27 10 14 +927 39 12 13 +999 32 12 16 +1016 25 7 13 +921 14 12 14 +900 33 14 18 +872 76 13 13 +956 89 13 
14 +929 92 16 20 +951 54 15 17 +850 183 32 35 +878 169 24 28 +885 143 22 21 +906 99 21 23 +941 127 26 29 +962 118 21 28 +983 132 23 27 +1006 76 15 18 +0 34 12 16 +444 24 14 14 +606 29 13 12 +627 13 11 13 +635 25 9 12 +735 17 9 14 +715 12 11 12 +728 36 12 15 +916 76 11 14 +# 0--Parade/0_Parade_marchingband_1_309.jpg +47 90 24 24 +127 119 26 24 +274 99 22 21 +300 97 27 25 +388 96 28 29 +442 95 27 28 +500 74 32 33 +617 109 18 21 +731 93 24 26 +849 99 24 27 +822 117 23 25 +894 102 16 18 +928 112 20 21 +959 91 18 18 +195 107 20 22 +# 0--Parade/0_Parade_marchingband_1_360.jpg +474 112 52 53 +860 133 46 58 +92 170 44 37 +764 320 11 13 +935 300 14 15 +716 333 9 10 +676 313 10 9 +808 321 9 10 +240 262 17 22 +326 322 10 9 +398 247 15 18 +946 265 3 3 +195 346 6 7 +9 338 7 8 +296 355 5 5 +578 229 23 24 +# 0--Parade/0_Parade_Parade_0_472.jpg +56 139 35 36 +213 238 17 20 +237 280 11 16 +318 117 44 52 +247 176 11 15 +229 219 7 11 +251 156 8 14 +401 133 34 30 +509 131 37 41 +562 134 17 29 +599 218 14 26 +600 264 49 57 +629 163 19 18 +655 169 20 23 +643 119 15 27 +676 126 16 23 +706 112 23 28 +728 116 20 26 +748 120 12 25 +784 164 18 29 +811 117 20 29 +834 116 14 30 +840 174 17 28 +801 225 16 20 +846 245 10 21 +932 229 16 25 +947 151 17 22 +974 145 17 22 +989 138 19 23 +691 138 10 22 +347 284 50 47 +1018 197 6 19 +# 0--Parade/0_Parade_marchingband_1_517.jpg +708 224 39 41 +915 273 35 41 +1008 297 16 35 +805 311 11 13 +588 271 18 22 +454 218 44 56 +204 171 47 55 +51 141 49 50 +398 237 29 35 +337 258 15 18 +867 297 16 21 +# 0--Parade/0_Parade_marchingband_1_188.jpg +252 503 11 14 +210 417 10 10 +49 427 11 10 +155 370 11 11 +117 336 10 7 +62 323 10 11 +140 210 6 9 +217 176 6 7 +249 265 8 8 +295 249 8 9 +333 270 8 8 +359 255 8 8 +341 323 9 9 +241 329 9 8 +299 378 11 9 +492 178 5 7 +511 214 5 8 +439 275 7 8 +503 283 8 8 +554 265 7 8 +451 322 10 10 +610 281 8 9 +693 284 9 9 +721 305 9 9 +681 308 8 9 +587 326 10 11 +767 228 4 8 +780 185 6 8 +994 244 5 8 +520 367 9 10 +497 402 9 9 +466 432 11 13 +614 436 10 10 +601 409 11 11 +614 372 10 8 +665 392 9 10 +685 352 10 10 +709 371 12 9 +707 409 12 10 +796 360 10 9 +783 386 10 11 +853 429 12 11 +774 447 10 11 +919 485 12 16 +441 549 12 16 +549 519 12 13 +575 570 13 14 +# 0--Parade/0_Parade_Parade_0_917.jpg +650 122 28 37 +134 251 20 27 +151 238 7 10 +# 0--Parade/0_Parade_Parade_0_353.jpg +835 266 22 23 +852 250 18 19 +883 250 17 16 +937 252 7 9 +924 248 5 7 +992 259 3 5 +812 261 5 9 +797 256 10 11 +718 251 15 18 +692 250 15 17 +664 248 10 12 +673 264 14 18 +595 238 50 53 +493 243 26 32 +454 285 11 13 +443 285 6 11 +430 283 8 9 +388 263 20 25 +378 289 9 17 +322 295 28 36 +301 307 11 17 +290 289 12 16 +30 341 17 22 +77 309 18 23 +172 296 13 16 +187 298 10 12 +196 302 18 23 +159 370 9 12 +252 288 17 19 +# 0--Parade/0_Parade_marchingband_1_20.jpg +29 401 29 36 +255 369 32 39 +335 376 19 22 +127 375 13 17 +83 391 21 26 +233 374 21 25 +301 271 10 10 +542 358 36 42 +465 354 27 34 +447 346 24 28 +385 367 16 20 +628 391 20 27 +587 369 16 21 +493 373 15 17 +679 360 12 15 +618 369 11 13 +729 345 18 21 +802 357 16 17 +776 360 12 14 +852 271 44 53 +657 385 17 18 +965 339 46 44 +# 0--Parade/0_Parade_marchingband_1_818.jpg +643 336 324 553 +# 0--Parade/0_Parade_Parade_0_854.jpg +527 264 39 51 +618 485 22 37 +792 439 41 48 +984 456 33 44 +1007 472 17 36 +121 726 27 34 +143 746 15 22 +160 729 19 24 +10 407 8 12 +500 2 7 8 +522 2 9 8 +701 23 8 10 +# 0--Parade/0_Parade_marchingband_1_355.jpg +355 104 22 28 +456 114 26 33 +562 90 31 39 +689 107 42 50 +816 177 29 39 +888 107 25 36 +905 87 
21 27 +# 0--Parade/0_Parade_marchingband_1_869.jpg +70 194 61 80 +306 212 53 66 +520 205 53 69 +704 324 45 60 +883 332 46 59 +# 0--Parade/0_Parade_Parade_0_611.jpg +71 194 24 34 +151 163 35 53 +178 222 14 20 +204 235 11 15 +262 196 24 39 +301 171 40 57 +416 139 35 54 +498 196 23 46 +528 207 20 41 +584 152 40 52 +614 186 22 33 +656 199 15 27 +685 212 14 30 +723 233 8 11 +760 207 11 23 +783 251 8 10 +916 224 17 17 +952 238 10 9 +# 0--Parade/0_Parade_Parade_0_443.jpg +684 312 72 73 +521 211 71 85 +370 234 52 70 +302 177 37 54 +790 212 19 41 +220 275 23 42 +213 243 24 24 +37 279 25 37 +1 256 10 14 +62 255 13 19 +87 263 6 8 +92 265 5 8 +104 261 11 11 +117 259 6 11 +128 256 8 14 +108 329 19 23 +149 261 7 9 +# 0--Parade/0_Parade_Parade_0_102.jpg +806 442 24 32 +936 435 21 28 +57 542 20 22 +83 539 17 21 +183 535 22 24 +268 529 16 25 +331 542 15 22 +424 538 20 22 +222 527 13 16 +294 526 11 13 +288 527 8 12 +364 539 11 13 +380 544 11 15 +376 524 10 11 +13 531 8 13 +135 528 7 11 +126 534 6 7 +524 540 21 26 +496 521 17 23 +549 541 19 19 +595 524 17 20 +615 529 11 15 +588 530 10 14 +517 527 10 12 +640 543 9 11 +661 530 22 23 +742 539 16 18 +699 532 9 11 +865 543 18 20 +407 508 7 10 +48 531 11 13 +304 534 11 10 +362 489 6 7 +401 517 7 9 +8 524 12 11 +40 524 8 9 +31 528 8 11 +390 495 6 7 +79 524 6 5 +# 0--Parade/0_Parade_Parade_0_218.jpg +739 381 34 44 +928 622 10 12 +967 545 9 13 +957 554 9 11 +992 557 9 13 +1014 536 10 13 +853 528 15 22 +942 543 8 11 +871 496 6 10 +142 561 11 14 +100 564 9 10 +115 569 9 10 +# 0--Parade/0_Parade_Parade_0_639.jpg +270 187 45 73 +338 206 35 48 +435 272 32 35 +420 287 30 33 +511 162 34 40 +591 98 30 36 +715 83 25 28 +461 497 16 23 +641 457 11 15 +809 441 13 16 +822 435 12 16 +883 427 14 18 +905 414 7 10 +913 412 6 12 +748 448 13 20 +737 454 10 18 +685 450 14 20 +703 448 10 13 +822 466 15 22 +840 426 10 15 +339 476 16 25 +297 515 15 16 +641 574 43 71 +909 545 42 90 +975 522 49 118 +929 437 6 10 +940 432 5 8 +937 410 12 11 +950 423 15 20 +966 404 11 13 +986 410 9 8 +1002 401 7 9 +992 394 6 6 +1016 404 5 8 +969 472 35 72 +693 0 20 13 +832 437 11 16 +# 0--Parade/0_Parade_marchingband_1_78.jpg +49 440 13 15 +29 471 14 18 +32 129 6 7 +92 104 6 9 +181 76 6 8 +256 92 7 9 +239 145 7 9 +170 119 6 8 +57 191 6 8 +264 137 7 9 +290 129 8 9 +264 169 7 8 +292 169 6 9 +308 160 8 10 +235 187 7 10 +198 159 8 9 +217 157 6 8 +267 203 8 10 +284 197 8 10 +189 191 7 9 +163 203 8 10 +273 60 6 8 +434 56 6 7 +471 80 6 9 +485 110 6 8 +464 108 6 9 +442 96 6 8 +373 34 6 9 +354 79 5 7 +411 104 7 8 +386 106 7 9 +445 127 6 8 +423 126 7 10 +403 134 7 10 +359 117 6 8 +452 143 6 9 +488 133 6 9 +466 143 7 8 +433 147 9 9 +366 140 6 8 +385 142 6 8 +852 401 7 10 +326 116 7 9 +329 151 6 8 +393 163 7 7 +372 170 7 9 +344 174 7 9 +417 155 7 8 +426 175 8 9 +401 194 7 8 +375 230 7 10 +495 162 8 9 +461 170 6 8 +512 186 7 10 +474 197 6 7 +514 212 7 9 +485 218 7 7 +463 223 6 9 +445 209 7 8 +499 15 5 6 +586 23 5 7 +508 96 6 9 +563 94 7 8 +527 38 4 7 +703 28 5 8 +749 16 5 7 +721 11 6 8 +666 13 5 7 +677 25 6 7 +655 24 5 8 +641 9 7 9 +628 16 6 8 +606 9 6 8 +618 56 6 7 +623 96 5 8 +602 115 7 8 +668 112 6 7 +746 114 6 8 +706 117 6 9 +683 93 7 8 +648 139 7 8 +778 126 7 10 +767 139 7 10 +726 134 8 10 +687 138 7 9 +740 96 8 9 +711 54 7 7 +969 88 6 10 +909 55 6 8 +853 55 6 8 +879 159 7 10 +842 152 7 10 +810 150 7 10 +941 104 7 10 +957 119 6 9 +911 85 7 9 +865 98 7 9 +852 78 7 9 +778 50 6 9 +785 69 6 8 +789 93 7 8 +910 128 7 8 +880 130 7 9 +905 99 7 9 +847 120 7 9 +814 120 7 9 +947 150 7 10 +943 170 7 9 +916 170 7 10 +845 186 8 10 
+812 183 8 9 +778 174 7 10 +597 147 6 9 +575 120 7 8 +554 132 7 9 +533 128 6 8 +510 128 7 9 +545 153 7 9 +575 173 8 10 +548 205 8 10 +582 200 7 10 +544 182 7 8 +655 191 8 10 +637 155 8 9 +730 202 7 10 +706 170 6 8 +675 168 6 9 +698 192 6 8 +680 254 9 11 +705 265 10 10 +738 175 6 8 +758 212 7 9 +788 208 8 11 +814 220 8 11 +831 231 7 12 +841 245 7 11 +843 261 8 11 +963 189 7 10 +878 196 7 9 +586 342 8 12 +516 325 8 11 +460 305 8 11 +723 284 8 10 +567 253 8 11 +1014 110 6 8 +1018 95 6 9 +976 110 6 9 +1012 171 7 9 +1005 143 7 10 +974 154 7 9 +1003 126 6 8 +994 193 7 9 +1015 207 7 10 +896 204 6 8 +919 215 7 10 +936 228 7 9 +963 269 7 10 +944 247 8 10 +991 312 8 11 +1008 288 8 10 +765 383 9 10 +655 359 9 11 +980 633 10 13 +997 573 4 11 +1006 625 8 14 +329 298 9 10 +604 176 7 8 +982 362 2 11 +1015 528 7 12 +825 107 7 10 +910 122 6 10 +510 74 7 9 +560 70 7 7 +323 183 7 9 +122 162 4 6 +692 10 7 6 +# 0--Parade/0_Parade_Parade_0_247.jpg +313 417 11 16 +389 421 7 11 +254 451 6 8 +271 442 6 9 +183 442 7 8 +68 430 8 9 +522 452 5 6 +463 452 5 6 +491 454 3 6 +616 453 6 7 +644 423 9 13 +890 453 11 10 +879 489 13 14 +942 461 14 17 +991 448 16 22 +730 502 7 9 +763 504 7 9 +733 483 10 11 +862 484 13 19 +# 0--Parade/0_Parade_marchingband_1_404.jpg +855 155 25 29 +633 131 29 41 +467 150 35 44 +190 83 81 145 +254 116 44 70 +# 0--Parade/0_Parade_marchingband_1_606.jpg +8 213 22 24 +46 157 30 40 +132 212 21 27 +192 244 13 21 +258 141 29 36 +424 179 27 32 +486 137 34 41 +616 192 24 32 +705 177 22 31 +783 152 40 54 +842 155 27 37 +# 0--Parade/0_Parade_Parade_0_664.jpg +0 283 5 9 +14 288 8 8 +39 288 9 9 +60 280 4 8 +68 287 8 9 +86 274 7 8 +90 289 9 11 +127 280 11 14 +16 338 35 34 +26 301 12 15 +33 307 20 20 +60 313 16 17 +107 304 15 16 +123 325 21 24 +135 304 10 14 +137 320 15 20 +158 306 12 15 +171 351 21 21 +178 290 10 12 +196 295 13 14 +209 299 13 14 +203 319 11 16 +212 316 21 25 +257 356 36 16 +246 309 21 27 +289 302 13 13 +340 356 35 16 +361 324 13 16 +303 285 6 9 +271 279 9 14 +343 305 11 12 +357 296 10 10 +369 303 8 11 +355 306 13 13 +370 326 10 12 +408 292 9 10 +412 334 19 26 +423 310 11 16 +438 305 10 14 +463 301 14 17 +466 342 23 27 +488 304 10 13 +488 326 18 25 +502 297 10 11 +516 314 12 19 +529 301 9 11 +577 309 12 15 +580 328 19 21 +542 354 25 18 +555 294 7 9 +619 342 19 26 +659 317 14 25 +633 311 9 11 +670 312 14 20 +700 308 10 13 +687 302 6 6 +723 354 39 18 +764 340 17 24 +784 329 15 16 +762 326 9 11 +804 350 21 22 +845 325 17 24 +796 293 5 6 +843 300 5 6 +861 307 6 6 +889 323 7 9 +899 323 5 8 +895 313 7 9 +920 310 9 11 +956 315 8 11 +938 337 28 30 +976 346 18 19 +977 321 11 11 +980 310 11 8 +748 303 6 8 +999 305 7 8 +1011 311 10 11 +1011 323 11 12 +# 0--Parade/0_Parade_marchingband_1_382.jpg +804 127 37 37 +931 75 19 25 +955 67 16 30 +977 94 19 30 +1000 96 11 30 +996 22 11 17 +912 27 14 21 +906 66 13 23 +767 0 26 18 +623 85 31 38 +930 0 17 19 +150 171 30 44 +340 104 30 36 +204 68 30 39 +449 40 28 38 +# 0--Parade/0_Parade_Parade_0_616.jpg +114 162 70 106 +254 118 84 112 +472 42 84 112 +666 136 70 96 +764 112 92 110 +# 0--Parade/0_Parade_marchingband_1_149.jpg +213 233 21 32 +278 228 27 33 +351 235 26 34 +405 221 22 33 +473 231 24 32 +463 304 25 32 +389 305 25 36 +309 312 25 34 +224 312 25 35 +442 371 28 43 +372 376 26 43 +280 383 27 40 +808 236 25 37 +783 313 26 34 +712 385 28 42 +729 235 25 34 +671 316 26 34 +630 376 28 38 +527 382 26 38 +602 319 23 35 +536 300 25 35 +662 233 26 36 +598 237 24 34 +534 219 23 35 +# 0--Parade/0_Parade_Parade_0_873.jpg +30 432 29 44 +24 298 13 17 +2 328 9 14 +34 337 14 
17 +59 336 15 19 +12 362 28 35 +0 394 19 30 +21 399 38 39 +0 460 13 64 +62 367 10 17 +74 371 14 21 +78 370 26 27 +127 342 12 13 +155 328 8 8 +174 333 16 22 +163 346 14 19 +159 366 16 19 +128 381 31 38 +149 409 34 39 +82 458 57 52 +67 481 25 28 +147 481 53 59 +0 554 64 52 +16 520 58 59 +95 588 75 99 +250 579 52 57 +225 415 24 33 +276 374 24 27 +186 360 18 21 +211 312 16 18 +219 356 12 15 +248 325 12 14 +234 331 12 14 +246 344 11 16 +259 332 9 11 +213 375 17 16 +240 367 20 24 +217 416 22 31 +258 353 13 21 +249 308 10 7 +278 319 13 15 +319 323 13 17 +289 336 11 12 +291 357 14 18 +299 353 23 26 +317 380 20 26 +313 403 37 48 +293 432 23 31 +318 355 14 20 +336 369 25 27 +351 352 13 17 +371 323 17 23 +400 324 28 35 +428 307 12 16 +320 459 48 65 +368 448 45 70 +445 369 33 35 +451 396 31 44 +451 523 65 85 +534 435 20 30 +551 424 36 47 +563 428 38 51 +608 488 48 68 +663 605 67 82 +443 349 15 14 +467 349 17 23 +462 333 10 12 +460 318 11 12 +475 301 11 15 +479 311 19 24 +502 322 13 20 +502 345 16 20 +501 360 16 20 +510 372 23 29 +522 338 17 20 +523 351 14 18 +531 362 25 29 +532 304 12 11 +535 321 12 12 +574 309 11 15 +583 317 13 16 +567 330 15 20 +575 338 17 21 +584 345 23 30 +628 363 23 31 +658 392 32 37 +671 361 26 33 +714 409 34 42 +745 355 23 30 +624 334 24 29 +631 311 16 18 +661 314 15 16 +703 290 12 18 +709 305 13 21 +743 315 21 27 +725 479 56 71 +819 506 59 82 +869 581 113 104 +779 417 35 47 +982 465 42 71 +977 422 42 56 +904 377 27 37 +933 408 43 49 +918 400 33 46 +735 300 16 18 +743 273 13 18 +808 265 13 16 +786 305 12 15 +793 316 15 22 +805 319 13 20 +818 324 22 23 +771 359 25 27 +785 345 22 24 +807 356 20 24 +860 378 26 41 +854 327 22 29 +867 341 21 27 +895 350 24 25 +959 348 28 30 +1000 364 24 37 +908 310 14 17 +913 329 16 20 +911 271 8 11 +920 277 11 14 +933 271 16 17 +930 291 13 17 +932 298 23 29 +966 296 13 20 +1011 320 13 27 +993 312 20 23 +977 295 16 19 +984 284 15 15 +873 271 8 9 +855 283 8 9 +1016 272 8 13 +991 277 13 11 +201 326 13 17 +559 309 12 18 +664 305 14 21 +708 272 10 13 +863 284 16 18 +377 310 14 16 +# 0--Parade/0_Parade_marchingband_1_156.jpg +164 248 68 74 +568 250 64 68 +872 186 82 106 +# 0--Parade/0_Parade_Parade_0_545.jpg +152 188 306 466 +614 362 264 320 +# 0--Parade/0_Parade_Parade_0_850.jpg +32 319 16 15 +49 327 11 12 +72 318 18 21 +125 321 14 16 +153 314 18 23 +188 318 16 17 +208 329 6 7 +249 316 19 24 +266 318 11 13 +274 327 7 9 +365 314 21 24 +408 328 5 6 +426 331 7 7 +483 297 25 31 +517 329 5 6 +543 335 12 14 +606 291 23 29 +623 288 29 38 +680 336 7 8 +704 333 7 10 +746 305 24 27 +826 291 37 43 +860 336 14 17 +941 284 28 36 +988 329 15 18 +# 0--Parade/0_Parade_marchingband_1_445.jpg +664 115 38 44 +822 120 35 36 +854 131 20 26 +155 97 29 42 +268 113 25 37 +335 90 29 37 +479 142 42 53 +414 115 24 33 +0 131 12 30 +935 116 7 8 +989 157 8 9 +923 160 7 9 +903 114 7 7 +968 117 12 11 +951 74 6 9 +985 80 7 9 +975 93 7 7 +876 104 6 9 +877 72 6 7 +917 71 7 9 +961 137 6 7 +992 137 5 6 +896 137 6 9 +757 135 5 8 +722 131 6 8 +736 110 4 7 +774 111 8 9 +744 68 8 11 +737 85 8 9 +780 89 5 9 +784 66 8 9 +702 91 7 9 +710 71 7 7 +717 12 7 7 +749 13 7 8 +787 13 7 8 +924 35 7 9 +966 38 5 7 +933 19 7 7 +549 28 8 7 +575 56 8 11 +582 2 7 7 +522 5 7 9 +577 30 6 7 +367 26 8 9 +500 13 7 8 +620 8 6 8 +101 46 6 6 +113 57 5 6 +99 80 6 7 +131 75 7 8 +134 46 5 7 +54 94 6 8 +60 71 6 8 +73 98 7 8 +161 73 6 9 +150 57 7 8 +165 34 9 11 +92 21 5 6 +147 16 7 8 +171 12 8 9 +250 18 6 8 +275 6 6 8 +25 19 8 9 +96 2 8 7 +47 3 6 7 +890 34 7 9 +819 17 6 8 +# 0--Parade/0_Parade_marchingband_1_710.jpg +0 0 37 
61 +468 125 57 87 +114 298 67 98 +829 25 44 56 +# 0--Parade/0_Parade_Parade_0_137.jpg +999 515 14 16 +963 488 19 21 +953 462 13 21 +751 452 13 18 +716 447 14 21 +709 355 16 20 +944 306 15 17 +932 366 14 22 +658 450 12 18 +810 337 14 23 +905 338 12 16 +382 453 11 16 +378 480 13 19 +254 449 13 17 +238 471 12 14 +67 336 32 36 +# 0--Parade/0_Parade_marchingband_1_410.jpg +876 447 30 48 +795 486 16 26 +697 484 25 44 +998 445 9 16 +977 436 12 18 +988 416 11 14 +806 400 9 13 +671 472 10 14 +649 416 12 16 +653 399 11 15 +754 400 11 16 +791 384 10 12 +868 449 12 15 +657 448 11 14 +969 336 11 13 +901 338 11 13 +914 322 12 14 +966 303 10 12 +966 288 10 11 +998 279 13 17 +825 341 11 16 +778 335 10 15 +727 352 14 14 +757 388 10 12 +665 333 11 15 +685 324 12 16 +750 281 12 14 +791 301 12 12 +803 319 9 13 +829 301 10 15 +848 281 11 15 +810 276 11 15 +862 213 12 13 +897 212 11 14 +940 217 12 15 +981 214 12 16 +1009 178 11 12 +983 137 11 13 +833 177 12 14 +783 225 11 15 +677 264 10 13 +705 190 12 16 +744 196 9 14 +723 165 10 11 +679 155 12 15 +879 136 8 15 +957 161 9 11 +931 249 7 11 +966 249 9 13 +743 266 12 14 +717 248 11 15 +892 70 10 13 +931 72 10 12 +916 48 11 13 +938 29 10 16 +983 0 10 10 +869 31 9 13 +770 122 12 15 +838 132 10 13 +808 133 10 12 +822 107 10 14 +777 90 11 14 +942 134 10 15 +932 120 12 16 +968 121 11 11 +785 184 11 15 +660 175 9 12 +608 174 11 10 +658 221 13 13 +624 226 11 14 +632 290 9 11 +646 289 9 16 +663 122 10 11 +694 122 10 14 +735 123 11 14 +840 86 8 12 +808 88 12 11 +655 67 11 13 +685 66 10 15 +715 66 11 17 +617 119 8 12 +638 101 9 13 +637 65 10 13 +647 13 10 12 +659 9 10 12 +834 0 10 10 +874 0 8 11 +908 0 9 10 +921 283 12 13 +642 152 9 13 +636 134 11 13 +454 38 11 14 +507 26 11 10 +472 26 11 13 +540 67 10 14 +546 41 11 13 +577 26 10 14 +616 9 7 10 +503 6 10 12 +543 26 9 11 +596 62 10 14 +577 114 11 12 +575 132 10 13 +587 157 11 12 +568 171 11 11 +555 177 12 13 +548 153 10 12 +501 137 9 13 +494 113 10 14 +469 131 11 13 +386 98 11 14 +384 78 9 13 +420 43 10 14 +401 5 8 15 +515 171 12 14 +434 11 7 15 +452 9 9 12 +355 25 10 14 +327 28 9 13 +361 84 7 13 +285 114 10 13 +287 152 7 9 +312 167 7 14 +380 255 8 14 +440 259 7 15 +609 77 9 14 +355 0 10 13 +589 0 8 5 +140 149 10 13 +177 151 6 12 +177 151 10 14 +227 139 11 16 +244 124 9 12 +263 146 9 13 +263 146 10 12 +242 219 11 13 +241 241 12 14 +142 179 9 13 +183 181 11 12 +47 134 12 13 +99 146 10 12 +99 146 10 15 +117 214 10 13 +155 218 12 13 +168 230 11 15 +217 258 13 12 +200 218 10 14 +211 240 11 14 +263 253 10 15 +261 79 11 15 +222 77 10 12 +183 76 10 11 +188 87 6 5 +147 76 9 13 +128 37 10 14 +141 49 10 15 +179 52 10 13 +209 41 11 14 +249 36 11 15 +237 17 11 15 +195 16 10 15 +188 12 9 13 +167 32 11 13 +157 15 10 11 +130 12 11 11 +76 14 9 10 +111 3 9 10 +94 53 12 12 +73 83 12 15 +58 56 10 11 +66 66 10 15 +27 72 10 14 +20 55 9 14 +48 32 10 14 +43 9 11 15 +40 112 10 15 +0 106 9 14 +16 140 11 14 +88 215 11 13 +94 179 10 15 +125 232 10 15 +38 228 11 12 +23 219 12 14 +77 241 7 10 +78 246 10 13 +0 22 8 11 +31 0 9 8 +0 164 7 12 +561 468 25 34 +608 396 12 14 +568 336 11 15 +638 337 10 13 +650 319 12 16 +602 323 8 12 +616 378 14 18 +568 380 13 16 +464 486 25 33 +383 480 23 33 +311 477 21 32 +219 479 21 31 +177 492 19 30 +390 415 12 14 +414 432 12 19 +437 420 9 14 +495 317 10 14 +349 352 10 14 +361 319 11 13 +417 323 9 12 +332 452 12 15 +340 468 12 16 +267 480 12 17 +277 436 12 16 +244 447 12 15 +169 464 14 16 +141 488 14 17 +160 380 10 14 +139 387 10 16 +203 378 12 15 +237 339 13 17 +266 332 12 14 +253 381 12 16 +274 408 13 16 +230 424 13 16 
+312 416 12 17 +349 414 12 14 +195 417 9 16 +220 327 12 16 +214 311 11 16 +254 315 12 14 +237 298 11 16 +166 312 12 16 +283 286 13 17 +272 269 12 16 +177 260 10 16 +144 269 12 14 +150 288 12 16 +138 367 11 14 +516 320 10 16 +600 295 10 12 +611 341 10 13 +327 322 10 13 +311 336 11 17 +108 315 12 13 +73 313 11 15 +29 312 12 13 +25 325 13 20 +42 347 12 18 +121 337 10 13 +90 337 9 12 +55 403 13 15 +125 460 11 19 +101 417 11 13 +61 460 12 18 +49 490 12 14 +16 502 17 23 +0 264 5 12 +1010 340 12 16 +1018 142 6 13 +1019 64 5 12 +417 98 10 13 +472 79 10 14 +509 78 7 12 +556 61 8 10 +338 432 13 16 +838 320 11 13 +886 317 9 11 +934 304 7 15 +323 14 9 12 +392 42 9 10 +618 68 9 9 +595 175 11 13 +219 470 11 16 +91 505 17 23 +89 34 7 12 +21 85 9 15 +994 182 9 11 +1007 154 9 14 +895 139 11 11 +410 349 11 14 +456 321 12 16 +608 488 18 25 +880 279 12 17 +880 47 12 16 +# 0--Parade/0_Parade_Parade_0_906.jpg +240 264 96 118 +352 284 66 98 +440 260 104 134 +562 256 72 108 +644 226 126 170 +# 0--Parade/0_Parade_marchingband_1_1004.jpg +119 210 38 46 +155 406 49 55 +330 406 46 43 +429 229 37 47 +648 188 42 52 +688 204 38 43 +771 205 35 47 +811 229 35 34 +# 0--Parade/0_Parade_Parade_0_377.jpg +256 508 71 97 +856 368 72 74 +975 339 21 25 +946 322 14 21 +933 321 10 14 +897 310 15 14 +845 312 12 12 +853 326 10 13 +831 319 10 13 +801 328 8 11 +808 310 15 17 +820 304 12 22 +764 318 12 18 +730 309 22 26 +712 316 13 18 +573 311 33 32 +758 282 12 15 +795 308 10 17 +520 289 21 30 +21 264 36 58 +69 382 45 55 +341 334 23 37 +157 220 8 11 +427 309 19 26 +319 329 25 33 +249 412 37 30 +97 369 56 61 +158 319 35 35 +# 0--Parade/0_Parade_Parade_0_559.jpg +2 332 28 39 +177 372 51 48 +303 280 59 30 +436 358 61 60 +519 353 47 51 +568 322 58 51 +635 369 52 36 +748 375 38 30 +763 317 62 52 +991 438 33 40 +472 358 31 45 +# 0--Parade/0_Parade_Parade_0_246.jpg +162 552 590 380 +# 0--Parade/0_Parade_marchingband_1_552.jpg +182 375 13 19 +345 368 15 18 +421 376 8 14 +537 372 13 17 +855 392 11 15 +968 385 15 17 +737 384 12 13 +# 0--Parade/0_Parade_Parade_0_12.jpg +127 201 18 20 +153 202 19 22 +187 208 18 26 +252 170 22 29 +277 192 25 31 +326 156 32 41 +395 173 33 45 +485 166 40 56 +372 202 13 13 +424 182 12 20 +538 207 8 13 +574 183 26 26 +599 196 11 15 +640 149 30 28 +726 131 49 66 +831 119 32 43 +912 148 25 32 +1004 132 20 34 +34 220 4 7 +43 221 6 8 +# 0--Parade/0_Parade_Parade_0_429.jpg +154 262 106 122 +390 222 116 144 +610 230 94 132 +# 0--Parade/0_Parade_Parade_0_286.jpg +588 331 7 9 +513 331 7 9 +499 334 6 8 +899 527 20 23 +953 556 71 81 +961 496 10 11 +943 503 13 15 +613 515 7 11 +583 527 11 14 +533 543 18 25 +584 560 22 27 +507 418 10 11 +564 516 8 10 +550 520 7 10 +445 525 20 26 +477 551 21 28 +511 536 17 22 +72 552 20 29 +87 525 14 15 +113 529 9 9 +88 504 8 10 +231 554 21 30 +352 510 12 15 +412 523 10 14 +618 609 106 90 +636 525 10 12 +602 528 9 10 +# 0--Parade/0_Parade_Parade_0_317.jpg +13 89 15 17 +34 97 15 14 +50 94 13 16 +75 75 16 18 +101 69 15 17 +177 105 16 16 +161 90 14 16 +96 88 12 16 +181 67 18 18 +224 100 13 17 +203 107 15 16 +184 86 12 13 +199 86 12 13 +126 75 17 18 +123 92 20 15 +249 90 11 16 +275 102 11 17 +301 100 14 18 +310 78 14 21 +389 122 13 18 +408 90 12 11 +429 88 16 15 +2 99 15 14 +260 99 13 21 +381 82 14 21 +327 74 17 19 +347 84 14 12 +429 68 16 17 +392 66 16 8 +486 67 14 11 +469 71 14 11 +489 92 17 16 +510 86 15 22 +383 105 14 15 +354 124 15 16 +521 100 16 19 +537 92 19 23 +541 76 17 17 +565 71 22 28 +565 123 15 22 +602 104 21 25 +623 89 18 16 +640 76 16 19 +654 77 20 19 +638 109 16 15 +636 147 20 15 +658 
161 16 20 +704 117 17 17 +686 75 14 22 +696 85 20 17 +706 94 0 1 +696 95 14 20 +721 73 14 22 +744 82 12 10 +768 91 13 20 +742 97 13 15 +782 103 12 13 +790 65 15 16 +790 79 13 10 +833 70 17 16 +854 72 11 13 +867 90 16 18 +821 102 11 13 +930 75 13 13 +921 101 17 21 +959 108 15 19 +1004 103 14 17 +1003 83 16 21 +978 105 15 12 +1001 129 15 18 +883 213 15 16 +863 210 16 15 +903 93 15 18 +171 206 24 25 +635 240 15 18 +# 0--Parade/0_Parade_Parade_0_239.jpg +902 281 34 39 +879 325 36 43 +831 310 10 15 +746 288 35 46 +707 294 24 26 +603 294 32 39 +506 294 29 33 +500 375 34 39 +393 284 33 41 +353 293 31 43 +275 306 25 28 +227 316 35 41 +36 295 38 44 +# 0--Parade/0_Parade_Parade_0_459.jpg +0 438 55 161 +121 656 121 150 +291 636 104 169 +404 740 84 139 +444 780 97 167 +381 908 52 94 +188 815 78 155 +# 0--Parade/0_Parade_Parade_0_376.jpg +142 270 34 44 +340 257 31 34 +394 220 37 50 +536 243 21 35 +632 205 30 41 +766 156 49 53 +917 155 34 43 +210 586 38 87 +758 202 25 32 +413 145 8 7 +428 143 8 9 +446 139 7 8 +469 136 7 8 +486 138 7 8 +479 155 11 9 +460 158 9 10 +435 161 10 11 +422 167 9 9 +# 0--Parade/0_Parade_marchingband_1_695.jpg +82 275 19 25 +74 269 19 29 +17 292 20 27 +10 284 13 15 +5 265 4 9 +48 273 4 7 +55 283 5 6 +139 248 31 36 +113 250 6 8 +180 213 40 45 +127 252 21 29 +260 177 17 24 +278 165 17 25 +313 171 47 56 +388 170 8 20 +436 171 8 19 +464 183 7 13 +482 181 8 13 +504 167 10 13 +473 178 6 9 +515 223 14 14 +516 245 23 25 +554 147 10 11 +524 175 7 8 +568 148 10 15 +572 93 55 65 +661 151 7 7 +643 156 6 7 +684 158 5 7 +691 153 5 9 +698 153 4 7 +705 153 7 10 +744 151 9 9 +753 141 5 8 +762 140 7 10 +769 136 4 6 +752 270 24 25 +786 132 5 6 +843 195 43 44 +790 131 4 7 +799 128 8 10 +835 133 6 7 +826 146 8 10 +842 127 4 7 +849 125 5 8 +879 113 5 8 +893 107 7 11 +907 109 8 14 +928 99 5 10 +937 92 14 16 +968 88 8 11 +960 97 6 6 +993 71 10 15 +1014 67 9 11 +1000 81 6 7 +1010 133 8 10 +1001 140 8 9 +525 189 7 9 +402 190 9 13 +1009 98 7 13 +770 148 6 8 +719 146 7 7 +823 126 8 9 +975 270 22 20 +# 0--Parade/0_Parade_marchingband_1_910.jpg +38 232 36 33 +74 247 40 32 +138 229 34 34 +205 213 47 42 +282 263 38 30 +339 237 37 41 +370 265 52 44 +438 267 52 54 +482 196 38 48 +529 237 43 44 +571 245 52 44 +779 327 66 105 +959 262 43 96 +116 238 20 25 +# 0--Parade/0_Parade_marchingband_1_356.jpg +314 552 114 86 +# 0--Parade/0_Parade_Parade_0_382.jpg +117 176 45 76 +448 241 48 49 +593 137 39 49 +791 213 28 43 +908 301 26 29 +83 95 24 42 +# 0--Parade/0_Parade_marchingband_1_439.jpg +917 496 25 45 +944 409 12 14 +140 430 13 13 +81 431 13 13 +133 457 12 21 +137 565 34 64 +249 468 12 28 +314 471 12 27 +420 476 9 24 +220 425 8 9 +244 430 8 9 +268 434 7 9 +278 416 5 7 +174 426 8 10 +299 428 7 8 +334 409 7 11 +377 414 6 7 +390 424 7 10 +398 423 6 9 +290 426 9 11 +11 442 8 13 +12 426 9 13 +49 440 8 14 +458 417 7 9 +447 419 7 7 +426 523 14 29 +483 424 6 8 +466 418 5 7 +531 420 5 6 +813 413 8 11 +0 430 8 10 +4 466 11 22 +405 423 5 9 +917 402 13 15 +94 435 11 12 +376 436 7 10 +# 0--Parade/0_Parade_marchingband_1_329.jpg +80 303 21 19 +136 315 22 26 +158 328 19 20 +192 317 19 18 +213 344 18 18 +179 394 22 21 +253 388 19 19 +291 320 18 19 +330 325 17 17 +329 391 18 17 +390 387 19 16 +373 328 15 18 +413 320 16 18 +478 316 18 20 +443 324 14 19 +448 415 20 29 +510 427 19 25 +572 424 20 25 +566 339 17 20 +525 319 14 18 +508 336 12 14 +542 324 16 16 +571 309 16 18 +591 317 15 18 +610 317 17 20 +640 312 15 18 +632 426 21 28 +656 375 11 11 +659 337 16 17 +686 334 12 14 +708 336 16 15 +715 377 19 20 +749 344 15 14 +769 337 14 13 
+786 353 16 17 +758 395 17 18 +797 405 18 15 +836 328 21 22 +896 337 22 24 +# 0--Parade/0_Parade_Parade_0_887.jpg +152 371 69 79 +226 260 56 77 +286 298 53 70 +405 300 45 66 +496 332 67 86 +525 242 52 63 +658 190 55 76 +710 320 74 86 +# 0--Parade/0_Parade_Parade_0_468.jpg +444 613 285 354 +# 0--Parade/0_Parade_marchingband_1_139.jpg +693 257 171 101 +839 81 82 90 +303 46 32 40 +224 84 38 45 +392 84 34 56 +456 93 33 52 +945 202 34 47 +72 80 40 48 +720 125 52 54 +# 0--Parade/0_Parade_marchingband_1_746.jpg +24 109 27 29 +142 82 32 42 +164 62 30 44 +963 184 15 16 +1000 135 12 15 +1005 161 12 16 +1001 179 12 16 +298 77 28 31 +371 87 31 42 +425 80 26 40 +452 98 22 31 +546 87 32 45 +576 109 19 27 +597 90 25 37 +627 108 26 38 +764 45 27 41 +746 86 26 42 +815 93 33 45 +881 182 11 13 +920 170 11 13 +927 187 13 13 +942 137 12 12 +958 161 11 14 +949 181 11 12 +# 0--Parade/0_Parade_Parade_0_164.jpg +17 438 17 13 +85 421 11 14 +60 427 11 14 +46 394 9 10 +144 390 9 10 +121 411 9 11 +133 372 12 11 +181 377 9 11 +197 376 8 11 +213 416 9 10 +248 388 7 10 +278 400 7 10 +298 379 9 9 +258 375 9 11 +223 358 9 11 +231 345 9 10 +239 323 9 9 +284 327 8 11 +297 332 6 9 +311 333 7 8 +336 320 7 9 +315 309 7 11 +285 304 8 11 +249 304 9 9 +287 258 7 8 +341 294 7 8 +345 286 7 8 +367 277 6 7 +411 307 7 8 +413 284 7 11 +427 280 7 9 +438 278 6 7 +401 266 5 7 +432 249 5 6 +449 249 6 6 +441 297 5 5 +451 284 6 9 +461 277 5 7 +474 260 6 8 +489 252 6 6 +481 255 5 7 +523 271 6 6 +510 243 6 7 +502 247 5 7 +526 249 6 8 +554 235 6 7 +587 252 5 5 +636 372 8 10 +609 398 8 10 +550 368 10 9 +515 328 8 8 +493 350 8 9 +611 318 7 9 +553 299 5 7 +675 348 6 9 +689 321 6 7 +716 293 6 7 +639 297 7 7 +681 265 6 6 +718 270 5 6 +645 258 5 6 +657 247 4 6 +658 276 4 6 +626 264 4 5 +593 291 6 6 +929 539 11 15 +897 502 17 15 +1006 559 10 15 +997 592 16 17 +994 648 12 16 +896 563 9 13 +859 470 9 12 +797 426 9 11 +862 437 8 8 +893 452 10 11 +998 436 9 13 +989 480 11 15 +863 398 7 11 +919 407 10 12 +970 360 6 9 +902 340 5 5 +910 327 6 8 +892 322 6 9 +878 331 6 8 +859 314 8 9 +865 376 8 10 +896 373 7 9 +909 365 8 10 +983 341 7 9 +962 321 9 10 +988 321 8 8 +948 301 6 7 +915 308 7 9 +931 290 8 9 +942 282 6 5 +895 298 7 7 +863 294 6 8 +868 285 5 6 +855 277 8 9 +963 382 12 13 +123 335 14 16 +584 350 8 10 +112 400 11 11 +89 397 10 11 +982 514 13 12 +1000 363 8 11 +827 588 12 9 +840 346 10 11 +1007 319 7 10 +999 302 6 10 +983 296 9 11 +979 262 8 9 +914 287 9 11 +990 427 7 11 +1010 337 9 12 +417 261 8 11 +246 357 10 12 +944 420 8 10 +925 473 10 16 +# 0--Parade/0_Parade_Parade_0_461.jpg +518 224 92 110 +# 0--Parade/0_Parade_Parade_0_757.jpg +238 52 204 310 +# 0--Parade/0_Parade_marchingband_1_759.jpg +342 127 44 57 +0 23 15 35 +8 32 36 47 +56 40 35 45 +37 31 17 37 +88 17 38 51 +136 47 50 59 +136 152 50 56 +173 4 46 61 +228 15 27 50 +306 19 42 59 +369 20 48 65 +407 131 38 53 +491 139 52 65 +566 136 60 66 +631 112 59 80 +204 103 34 59 +# 0--Parade/0_Parade_Parade_0_53.jpg +269 167 46 54 +467 218 12 15 +792 181 46 58 +1013 233 8 8 +1005 245 5 6 +991 245 5 5 +684 231 5 7 +745 233 5 7 +242 215 10 10 +363 222 5 6 +524 234 4 5 +423 217 5 6 +# 0--Parade/0_Parade_marchingband_1_653.jpg +44 45 12 13 +101 51 10 14 +150 57 10 13 +187 43 12 14 +21 99 12 11 +68 99 12 15 +113 88 11 14 +167 96 10 13 +27 145 13 16 +77 145 14 17 +124 151 13 16 +175 139 12 18 +44 199 14 14 +130 202 13 18 +230 48 12 15 +274 54 11 12 +320 44 12 13 +363 46 12 14 +210 97 11 13 +259 110 13 15 +304 98 11 13 +353 96 11 15 +227 143 11 15 +273 155 11 15 +325 138 12 19 +221 207 12 15 +313 209 12 18 +410 
34 11 13 +465 35 13 16 +508 32 12 15 +398 88 12 14 +442 95 11 15 +490 84 12 16 +533 97 12 15 +512 132 12 19 +461 149 12 16 +420 145 12 15 +375 148 12 15 +397 200 11 16 +481 206 11 18 +538 195 11 18 +601 188 13 19 +679 194 12 18 +555 35 10 11 +603 39 12 15 +653 22 13 16 +700 27 12 13 +742 43 10 13 +585 92 12 17 +629 90 12 16 +674 92 11 15 +716 83 13 16 +559 130 13 17 +604 140 12 17 +649 129 13 16 +693 126 13 13 +739 135 12 18 +782 34 13 13 +826 31 11 13 +864 39 12 13 +913 29 12 14 +761 89 11 14 +808 93 11 14 +851 93 11 15 +892 97 12 15 +934 95 10 14 +782 138 13 13 +825 128 13 15 +876 118 14 20 +929 119 13 13 +764 192 14 18 +872 194 13 18 +959 40 13 13 +979 79 13 14 +980 131 13 15 +962 184 13 16 +# 0--Parade/0_Parade_marchingband_1_227.jpg +38 97 13 16 +175 110 23 29 +151 109 17 19 +253 99 15 20 +289 143 22 29 +292 105 19 24 +417 120 20 21 +463 140 26 35 +493 101 12 18 +349 108 14 18 +379 100 12 14 +517 115 17 23 +562 89 15 21 +597 135 23 26 +920 143 32 50 +844 129 27 31 +789 94 16 22 +720 104 29 38 +167 80 12 15 +70 97 15 19 +213 77 11 14 +932 92 35 51 +0 84 13 25 +532 98 9 20 +152 92 11 15 +95 96 17 18 +41 121 19 20 +# 0--Parade/0_Parade_marchingband_1_649.jpg +6 129 26 33 +55 117 31 38 +103 134 25 34 +152 125 14 32 +149 156 14 23 +23 197 38 48 +131 237 34 45 +236 153 22 29 +228 170 16 21 +246 145 32 34 +341 117 32 41 +176 283 39 49 +294 287 42 52 +410 82 35 46 +489 121 34 45 +529 130 30 38 +556 82 34 47 +651 145 27 40 +701 172 23 33 +760 149 28 37 +795 164 21 36 +833 177 24 34 +860 152 30 39 +812 200 36 47 +797 282 38 54 +701 287 38 51 +906 180 24 29 +969 151 33 41 +938 207 42 47 +895 319 38 52 +244 194 42 46 +663 222 33 40 +# 0--Parade/0_Parade_Parade_0_43.jpg +31 510 19 25 +153 507 20 25 +266 514 19 22 +77 454 19 23 +135 426 19 23 +201 452 19 23 +18 482 13 22 +188 384 18 27 +255 377 19 26 +309 451 21 29 +306 355 20 25 +386 343 19 23 +398 514 21 25 +499 514 20 24 +604 514 19 23 +546 447 22 26 +444 448 20 25 +501 394 18 22 +677 450 18 24 +824 517 18 23 +715 517 18 24 +943 521 21 26 +884 464 20 25 +920 501 17 23 +810 436 19 23 +771 461 17 23 +755 404 18 21 +703 379 18 23 +655 349 18 25 +597 343 21 24 +544 292 18 23 +491 223 17 23 +440 289 18 22 +978 323 11 15 +1015 323 7 9 +998 310 10 9 +938 333 11 11 +983 295 7 10 +907 330 9 11 +903 372 9 11 +888 374 9 14 +863 372 11 12 +873 369 8 12 +839 375 11 12 +1016 267 7 9 +1001 265 8 9 +987 273 11 10 +995 258 11 10 +1003 238 7 9 +1011 233 5 8 +968 257 6 9 +956 263 8 7 +929 275 9 9 +1006 344 7 7 +927 318 6 9 +895 299 10 11 +908 277 9 9 +920 278 7 9 +850 327 8 12 +854 308 7 8 +893 324 9 11 +849 297 8 8 +887 273 8 11 +937 249 10 10 +956 233 7 10 +945 228 7 9 +936 215 8 10 +928 226 7 7 +985 212 9 10 +972 215 5 7 +908 257 9 12 +923 263 8 9 +889 255 7 7 +863 354 6 10 +966 202 8 9 +792 389 15 14 +772 383 11 15 +823 366 11 14 +819 332 10 13 +821 314 11 11 +789 327 10 12 +777 318 9 12 +750 323 12 14 +726 313 8 9 +709 317 9 11 +701 317 9 9 +686 323 7 9 +704 337 10 14 +674 313 10 13 +665 323 9 11 +655 317 8 12 +766 301 8 9 +754 293 10 11 +760 274 8 11 +809 262 9 12 +794 278 7 7 +778 276 8 8 +773 266 7 8 +792 266 8 9 +829 273 7 8 +840 258 8 12 +849 263 7 10 +872 264 8 10 +873 254 7 8 +733 282 9 9 +759 263 7 8 +787 247 7 11 +639 309 11 10 +625 304 9 9 +615 304 9 8 +588 291 9 10 +564 300 9 9 +643 290 9 9 +670 300 9 9 +658 301 8 10 +685 276 8 8 +703 261 9 11 +682 261 6 8 +675 255 8 9 +669 246 7 7 +680 239 8 8 +716 254 10 8 +746 264 12 12 +830 293 6 9 +600 284 9 9 +626 268 6 8 +642 254 7 9 +657 242 6 8 +671 237 5 8 +569 266 7 7 +579 260 8 8 +609 263 8 7 +604 259 5 
6 +610 245 7 9 +636 231 8 8 +585 241 7 9 +585 230 7 9 +576 235 7 8 +766 242 7 10 +761 230 7 9 +750 221 6 8 +731 230 6 8 +729 242 7 8 +742 252 8 8 +865 252 7 11 +862 219 9 9 +888 207 6 9 +874 195 7 7 +864 194 5 7 +904 187 6 9 +886 172 5 9 +848 159 6 6 +672 276 8 8 +557 239 7 8 +783 214 6 7 +815 238 7 8 +832 233 7 10 +807 231 7 9 +810 214 6 9 +757 183 7 8 +778 178 5 7 +788 186 6 8 +632 208 7 9 +572 206 6 8 +553 255 7 10 +614 283 6 10 +622 292 6 7 +634 271 9 9 +693 254 9 9 +697 307 7 10 +705 299 6 9 +768 313 6 8 +624 179 6 7 +835 188 7 8 +814 198 8 9 +704 233 6 7 +709 244 5 8 +716 239 6 8 +883 264 8 8 +799 300 8 8 +769 324 9 10 +725 283 8 8 +734 262 8 9 +773 196 7 7 +743 179 7 9 +735 159 5 7 +893 187 6 8 +970 381 11 10 +981 381 10 9 +930 171 6 7 +942 174 7 7 +993 176 6 6 +1006 178 5 6 +974 179 5 6 +952 197 5 7 +961 189 6 8 +912 158 6 6 +890 178 5 7 +882 190 6 8 +704 203 6 6 +716 200 4 7 +698 203 5 7 +832 164 5 9 +142 278 10 10 +193 283 9 11 +237 277 9 9 +310 304 11 13 +369 287 8 8 +328 281 8 9 +343 274 7 9 +354 271 7 7 +365 276 7 10 +114 272 9 12 +154 256 8 11 +221 277 7 8 +298 290 8 10 +40 257 8 10 +103 275 7 8 +69 317 9 11 +181 276 7 8 +173 300 9 10 +171 288 8 8 +159 273 6 8 +161 265 6 8 +239 263 9 9 +252 280 8 10 +262 287 7 7 +271 280 9 11 +217 276 6 9 +317 272 7 11 +324 265 7 8 +335 266 8 11 +323 306 6 8 +369 248 5 10 +331 239 9 9 +353 240 5 8 +314 240 6 8 +217 256 7 7 +136 215 7 8 +75 257 7 9 +78 274 7 10 +182 237 7 10 +151 236 8 9 +309 222 7 8 +318 223 7 8 +278 213 6 6 +285 211 6 7 +210 230 7 8 +191 232 6 7 +67 228 6 8 +74 227 6 7 +37 241 9 9 +20 226 8 10 +39 218 8 7 +125 221 6 7 +190 254 6 9 +330 304 9 11 +287 265 7 11 +194 193 6 9 +183 219 7 8 +227 233 7 6 +136 242 7 9 +115 254 7 9 +367 146 7 10 +149 191 6 8 +648 188 5 7 +697 166 6 8 +685 196 6 7 +634 195 7 7 +131 266 7 7 +105 225 8 9 +256 256 9 12 +279 236 8 10 +325 223 7 10 +122 252 8 8 +65 257 7 10 +43 192 6 6 +298 190 7 9 +189 199 5 6 +248 165 6 6 +353 174 6 9 +188 208 6 10 +238 214 5 10 +251 229 6 9 +275 221 6 8 +550 180 6 7 +545 181 5 6 +818 166 7 7 +722 209 7 9 +592 214 7 7 +588 205 7 10 +608 180 5 6 +599 177 5 6 +656 178 6 7 +658 169 7 7 +712 161 7 8 +703 165 4 7 +825 189 6 9 +813 187 6 7 +798 189 5 7 +792 208 5 7 +8 280 8 9 +23 280 8 8 +8 263 8 10 +373 208 7 9 +315 174 5 6 +303 149 6 9 +240 241 7 9 +582 194 6 8 +555 207 6 7 +25 202 7 8 +40 230 7 7 +137 233 4 7 +119 226 5 7 +64 278 7 7 +176 196 4 7 +930 294 7 9 +# 0--Parade/0_Parade_marchingband_1_561.jpg +162 649 159 144 +820 1027 72 90 +# 0--Parade/0_Parade_Parade_0_490.jpg +8 92 13 21 +91 61 12 19 +134 73 19 24 +61 110 17 21 +113 149 19 24 +73 162 35 32 +27 171 27 31 +5 138 24 29 +2 225 15 38 +15 298 45 46 +8 381 38 49 +10 496 70 74 +117 435 87 99 +216 466 68 45 +384 356 50 72 +472 328 50 73 +400 283 49 53 +472 281 47 60 +241 319 49 71 +178 199 49 68 +102 182 46 60 +177 132 27 35 +239 159 29 35 +276 156 27 30 +280 177 33 46 +234 94 18 21 +277 85 18 23 +246 61 13 9 +351 125 28 35 +337 121 21 24 +379 180 49 48 +377 247 35 46 +439 214 42 40 +524 223 43 45 +596 280 52 66 +592 169 41 49 +502 149 38 43 +547 115 30 36 +595 110 35 39 +669 132 41 60 +673 78 23 35 +632 78 32 33 +571 62 28 30 +536 73 27 35 +499 91 22 27 +474 67 21 27 +443 65 29 32 +434 112 24 31 +385 98 22 29 +403 67 16 26 +355 62 24 21 +439 41 22 16 +517 24 14 17 +644 31 12 11 +663 54 24 17 +723 43 19 28 +701 33 19 14 +824 34 15 19 +850 32 20 25 +877 48 22 36 +908 82 33 40 +824 151 50 48 +745 151 44 50 +758 81 39 47 +717 90 32 38 +611 34 10 13 +730 238 42 51 +771 245 34 66 +765 317 67 85 +901 361 71 109 +940 193 53 
41 +972 51 31 37 +957 20 30 38 +998 29 21 28 +946 40 26 28 +906 17 21 26 +931 17 26 18 +783 17 21 22 +540 20 20 21 +188 44 8 13 +206 17 8 15 +284 16 16 13 +916 298 69 68 +771 560 118 139 +670 640 32 58 +494 664 40 80 +496 617 37 65 +814 7 18 17 +674 7 21 24 +615 7 20 20 +602 5 12 17 +400 1 12 10 +886 0 16 17 +844 3 17 19 +789 47 18 29 +# 0--Parade/0_Parade_Parade_0_519.jpg +86 248 34 41 +123 267 42 61 +176 273 71 84 +415 267 115 124 +595 267 26 42 +667 308 46 59 +750 262 37 47 +722 273 29 42 +859 258 98 108 +# 0--Parade/0_Parade_Parade_0_465.jpg +109 81 18 45 +403 112 97 130 +512 115 108 136 +695 161 30 84 +715 138 35 83 +717 89 35 77 +846 107 8 16 +902 96 16 20 +908 126 10 17 +948 123 15 18 +1005 122 14 21 +1010 63 7 6 +# 0--Parade/0_Parade_marchingband_1_629.jpg +2 349 9 12 +53 334 11 12 +35 377 11 14 +109 373 13 15 +137 360 12 15 +164 389 7 9 +131 373 7 8 +235 364 11 15 +294 352 8 13 +320 357 6 9 +327 358 8 11 +290 383 9 14 +359 362 8 9 +393 361 9 12 +406 327 6 8 +429 321 7 10 +426 361 10 13 +463 357 10 15 +509 368 10 14 +515 393 5 11 +483 337 5 6 +496 332 8 10 +531 324 9 9 +542 320 7 9 +563 326 6 8 +578 322 9 12 +561 370 12 14 +610 371 12 14 +635 327 10 12 +652 376 7 14 +742 334 5 10 +777 339 6 8 +796 353 9 11 +792 355 5 8 +792 372 7 19 +826 342 6 8 +866 335 6 11 +862 354 8 12 +864 386 6 15 +956 393 15 22 +710 336 11 12 +# 0--Parade/0_Parade_marchingband_1_768.jpg +6 628 11 13 +280 210 36 34 +297 648 14 18 +377 619 10 14 +403 626 10 12 +462 613 20 26 +500 636 11 15 +532 614 21 26 +188 579 7 7 +609 631 11 13 +641 637 12 15 +675 639 34 39 +713 622 11 17 +784 621 14 17 +819 616 37 48 +905 652 42 29 +981 639 39 44 +200 579 6 8 +175 576 8 10 +130 543 6 8 +404 540 7 8 +451 558 5 7 +389 555 4 5 +391 571 3 5 +371 570 5 8 +311 556 6 10 +537 574 4 7 +454 575 5 8 +499 586 3 7 +462 588 5 5 +447 589 5 7 +517 554 5 7 +584 578 7 8 +587 608 8 8 +624 570 6 8 +734 669 15 13 +707 565 6 8 +696 535 7 11 +895 554 9 10 +876 557 8 11 +855 552 8 9 +843 565 7 7 +818 557 7 7 +799 557 5 7 +790 560 5 8 +45 570 8 12 +7 572 7 8 +102 629 8 10 +19 627 6 8 +696 567 6 8 +648 571 7 7 +667 572 7 7 +# 0--Parade/0_Parade_Parade_0_688.jpg +55 554 8 11 +1 568 7 10 +6 583 13 16 +206 554 17 22 +215 539 19 24 +277 525 23 26 +309 518 11 15 +454 534 24 28 +451 502 18 26 +538 596 12 22 +569 544 13 18 +534 464 17 19 +808 528 16 20 +842 570 14 19 +881 524 14 18 +947 529 7 10 +967 513 8 10 +840 538 5 7 +# 0--Parade/0_Parade_Parade_0_288.jpg +781 386 12 15 +748 330 15 19 +695 342 14 15 +659 356 12 14 +646 355 12 17 +595 342 11 12 +495 173 13 14 +507 211 11 15 +461 199 10 14 +423 217 10 14 +408 203 15 12 +298 390 11 14 +701 404 14 16 +# 0--Parade/0_Parade_Parade_0_502.jpg +0 330 34 38 +165 358 17 36 +90 490 26 32 +288 386 21 24 +260 388 13 25 +244 337 18 42 +351 396 7 11 +374 392 11 11 +409 361 22 31 +436 387 19 25 +464 399 35 48 +529 363 22 28 +564 365 24 24 +709 360 30 47 +858 366 19 29 +938 386 20 26 +1000 387 10 19 +959 351 8 13 +359 387 14 18 +# 0--Parade/0_Parade_Parade_0_364.jpg +206 130 142 180 +358 106 92 138 +566 134 90 114 +654 118 74 90 +800 172 60 86 +# 0--Parade/0_Parade_marchingband_1_267.jpg +0 220 28 47 +165 250 34 35 +315 222 24 35 +354 235 24 34 +424 204 25 31 +468 224 31 38 +511 236 26 29 +507 196 20 27 +550 180 19 24 +585 227 25 31 +584 180 18 24 +607 212 22 29 +653 195 19 27 +677 200 20 28 +708 187 16 26 +711 164 19 22 +646 157 16 18 +625 175 18 23 +563 138 17 18 +588 141 15 15 +680 144 14 14 +762 155 12 14 +805 143 14 13 +795 138 7 8 +784 139 9 11 +832 147 12 14 +898 124 6 7 +925 146 15 21 +936 107 6 6 +961 111 6 
8 +406 117 7 11 +437 113 6 8 +511 115 6 8 +634 149 9 11 +701 151 15 15 +658 150 16 16 +# 0--Parade/0_Parade_Parade_0_68.jpg +312 450 14 14 +410 424 14 12 +500 422 13 14 +800 391 10 11 +715 389 8 10 +640 389 9 8 +570 388 11 10 +524 370 10 11 +507 373 10 9 +654 378 9 10 +581 366 12 11 +731 370 8 9 +344 370 10 9 +323 363 9 11 +292 367 9 11 +253 355 9 9 +228 361 9 8 +180 352 10 10 +154 358 9 10 +120 343 9 11 +87 358 8 11 +871 395 10 8 +861 389 9 7 +312 416 10 11 +457 394 9 12 +404 365 10 10 +449 362 8 8 +382 362 8 9 +845 304 6 5 +798 301 5 6 +754 292 6 7 +744 300 5 5 +703 298 8 7 +716 290 6 5 +662 296 5 6 +576 299 8 6 +358 284 6 7 +415 284 5 5 +462 283 6 6 +510 286 5 6 +544 292 8 6 +424 280 5 6 +473 277 6 5 +514 281 5 6 +624 288 7 8 +# 0--Parade/0_Parade_marchingband_1_525.jpg +676 382 22 27 +539 397 18 23 +765 436 17 20 +900 424 20 21 +982 462 12 16 +860 483 13 18 +835 419 13 16 +797 435 10 12 +811 434 9 11 +782 430 10 11 +730 401 8 13 +623 415 10 12 +582 419 16 19 +511 418 7 8 +401 387 20 23 +290 395 21 24 +485 485 6 6 +195 376 22 25 +132 412 22 23 +37 424 17 18 +121 411 12 17 +258 397 5 8 +16 427 6 8 +6 425 4 6 +63 431 5 7 +78 424 4 6 +# 0--Parade/0_Parade_marchingband_1_147.jpg +506 89 80 90 +304 126 56 73 +400 170 66 71 +872 153 38 56 +216 154 48 59 +159 123 43 47 +90 146 32 39 +618 144 30 40 +17 168 22 25 +317 38 14 19 +# 0--Parade/0_Parade_marchingband_1_104.jpg +549 244 10 15 +574 249 17 27 +834 227 14 18 +738 232 17 20 +900 234 9 13 +967 233 9 12 +34 253 15 19 +125 252 19 23 +150 243 15 18 +68 247 10 12 +102 241 12 15 +177 242 10 13 +22 238 13 15 +54 257 11 15 +239 245 16 21 +250 235 10 13 +281 234 13 16 +340 226 11 16 +353 233 13 19 +450 263 28 32 +409 245 13 16 +314 247 35 45 +500 224 12 15 +601 228 11 12 +999 216 20 27 +# 0--Parade/0_Parade_marchingband_1_932.jpg +42 330 35 44 +437 332 27 41 +723 344 30 36 +848 354 32 42 +# 0--Parade/0_Parade_marchingband_1_311.jpg +204 296 13 11 +223 309 12 14 +266 300 12 16 +337 320 10 12 +394 338 6 8 +411 337 13 15 +414 313 10 10 +476 302 15 21 +545 336 10 15 +615 340 17 17 +665 329 8 10 +719 345 12 12 +709 321 6 6 +728 318 5 6 +771 335 9 14 +831 326 11 12 +586 318 6 6 +553 318 4 5 +193 310 9 9 +# 1--Handshaking/1_Handshaking_Handshaking_1_664.jpg +857 21 42 62 +651 107 15 22 +681 118 12 15 +720 115 12 15 +740 122 11 17 +# 1--Handshaking/1_Handshaking_Handshaking_1_762.jpg +235 211 315 417 +615 438 185 214 +# 1--Handshaking/1_Handshaking_Handshaking_1_766.jpg +390 90 260 374 +# 1--Handshaking/1_Handshaking_Handshaking_1_134.jpg +320 66 70 142 +698 58 70 156 +# 1--Handshaking/1_Handshaking_Handshaking_1_362.jpg +104 10 482 508 +# 1--Handshaking/1_Handshaking_Handshaking_1_522.jpg +702 32 96 150 +# 1--Handshaking/1_Handshaking_Handshaking_1_453.jpg +292 64 92 150 +676 202 70 138 +# 1--Handshaking/1_Handshaking_Handshaking_1_314.jpg +344 68 72 112 +# 1--Handshaking/1_Handshaking_Handshaking_1_343.jpg +96 76 70 86 +298 96 62 86 +494 72 62 90 +700 92 64 76 +886 74 60 82 +# 1--Handshaking/1_Handshaking_Handshaking_1_733.jpg +770 120 138 196 +482 192 54 104 +324 174 78 146 +192 12 114 282 +# 1--Handshaking/1_Handshaking_Handshaking_1_801.jpg +869 27 98 133 +645 44 54 70 +556 79 49 67 +410 23 58 69 +243 66 58 68 +69 60 62 69 +# 1--Handshaking/1_Handshaking_Handshaking_1_465.jpg +388 172 400 508 +# 1--Handshaking/1_Handshaking_Handshaking_1_94.jpg +381 207 237 354 +# 1--Handshaking/1_Handshaking_Handshaking_1_602.jpg +248 342 30 31 +290 286 18 18 +119 308 20 15 +405 333 26 16 +374 240 13 18 +469 267 17 10 +438 233 8 10 +476 218 9 11 +510 224 8 11 +609 
305 19 10 +674 316 20 11 +719 268 20 10 +753 246 9 10 +680 232 8 11 +687 228 9 11 +717 235 6 12 +644 222 11 12 +610 229 9 12 +840 237 11 14 +914 269 12 14 +922 252 33 23 +575 220 9 11 +545 225 9 12 +191 190 7 8 +138 198 6 10 +# 1--Handshaking/1_Handshaking_Handshaking_1_781.jpg +410 219 165 228 +713 362 180 228 +30 272 90 171 +# 1--Handshaking/1_Handshaking_Handshaking_1_380.jpg +674 4 142 124 +# 1--Handshaking/1_Handshaking_Handshaking_1_357.jpg +148 116 110 144 +590 192 112 152 +# 1--Handshaking/1_Handshaking_Handshaking_1_236.jpg +0 198 30 81 +194 148 60 74 +394 195 26 53 +401 229 23 46 +506 197 44 59 +620 291 26 33 +731 239 28 40 +860 249 23 33 +916 234 26 28 +893 522 26 44 +979 274 21 25 +# 1--Handshaking/1_Handshaking_Handshaking_1_107.jpg +391 177 174 258 +# 1--Handshaking/1_Handshaking_Handshaking_1_827.jpg +190 60 72 168 +412 170 72 124 +760 132 74 140 +# 1--Handshaking/1_Handshaking_Handshaking_1_579.jpg +838 501 17 44 +939 428 36 40 +878 469 54 42 +786 411 36 41 +777 354 31 42 +858 343 34 40 +677 360 34 50 +595 415 35 46 +580 319 36 41 +750 304 28 36 +650 278 30 40 +497 404 33 45 +471 346 26 41 +394 378 32 41 +413 337 36 35 +438 308 28 37 +384 278 24 31 +531 310 28 39 +491 283 26 34 +440 277 26 34 +324 351 33 34 +250 394 27 30 +277 309 23 31 +317 297 33 36 +307 260 22 26 +375 258 19 27 +230 257 25 29 +179 314 31 33 +145 311 34 42 +79 361 31 31 +7 385 33 35 +0 312 15 43 +5 296 17 27 +4 257 21 29 +26 252 20 33 +25 205 26 27 +87 214 23 25 +25 289 34 27 +75 324 33 34 +99 296 31 34 +172 287 23 25 +119 257 24 26 +176 243 22 27 +68 218 18 22 +150 211 22 19 +196 230 22 28 +235 198 31 31 +220 182 19 21 +271 216 26 32 +328 206 19 28 +73 190 22 26 +102 183 17 22 +33 163 20 23 +1 185 13 16 +21 131 12 17 +11 116 12 15 +30 107 13 16 +35 118 16 18 +74 153 13 17 +82 123 12 13 +128 131 14 19 +131 153 21 22 +135 109 12 16 +145 85 12 17 +164 109 10 14 +171 117 20 18 +189 151 20 21 +228 95 14 18 +243 147 18 25 +229 143 13 18 +246 124 13 16 +249 106 12 15 +250 91 12 16 +272 117 14 16 +265 146 16 22 +308 150 17 22 +304 101 11 15 +305 78 12 14 +333 98 9 12 +341 95 12 14 +359 101 11 14 +302 133 11 16 +323 160 14 17 +362 197 22 32 +378 178 20 26 +388 123 14 19 +376 93 11 16 +226 123 10 13 +363 137 18 23 +323 130 14 19 +339 116 13 15 +323 115 13 16 +395 99 14 14 +389 78 12 13 +402 153 18 21 +400 209 17 23 +202 82 12 15 +278 137 13 16 +358 127 13 14 +339 73 11 15 +349 72 13 15 +426 98 13 17 +408 84 6 17 +417 69 11 14 +445 132 15 19 +477 133 13 17 +445 92 9 15 +453 82 11 12 +475 80 11 13 +422 148 15 18 +456 166 16 22 +485 159 20 24 +509 116 12 18 +464 226 21 25 +499 200 19 28 +530 237 22 29 +585 252 29 36 +531 192 22 30 +567 187 20 28 +556 183 15 21 +587 206 21 24 +523 128 13 19 +528 144 18 20 +560 142 19 23 +501 88 12 17 +517 75 12 17 +593 98 11 16 +574 76 13 16 +614 97 13 19 +614 71 10 13 +622 120 15 17 +543 114 13 15 +537 60 9 13 +647 142 18 21 +643 171 18 22 +645 127 17 20 +638 103 14 15 +640 82 13 16 +662 66 10 15 +666 131 9 15 +625 199 16 28 +636 195 23 33 +625 235 25 36 +661 189 22 26 +669 212 19 27 +740 190 23 26 +719 157 22 25 +781 201 14 21 +801 201 20 25 +795 233 21 24 +828 242 24 30 +677 128 15 17 +679 94 11 17 +698 78 12 14 +733 143 19 26 +730 111 13 17 +760 136 17 17 +765 150 13 26 +796 137 15 20 +837 137 18 23 +828 178 17 24 +844 121 17 20 +827 113 15 17 +745 69 12 13 +859 256 25 35 +881 273 30 40 +872 237 28 31 +871 183 17 21 +891 172 26 36 +923 290 24 30 +790 108 10 13 +867 153 15 18 +885 145 18 22 +905 145 21 28 +973 190 25 30 +948 163 17 19 +966 163 16 21 +925 108 15 17 +901 98 13 16 
+959 112 13 15 +989 106 12 16 +1008 134 15 23 +1004 231 19 36 +789 82 13 16 +780 103 10 15 +867 83 10 14 +711 343 15 29 +706 127 16 18 +834 66 13 19 +851 65 11 14 +970 86 12 18 +1001 81 12 15 +912 78 12 16 +934 91 9 13 +477 110 16 21 +455 118 15 18 +# 1--Handshaking/1_Handshaking_Handshaking_1_275.jpg +432 210 222 312 +# 1--Handshaking/1_Handshaking_Handshaking_1_35.jpg +440 141 193 258 +# 1--Handshaking/1_Handshaking_Handshaking_1_411.jpg +22 62 112 150 +260 6 154 172 +670 64 80 198 +862 114 130 238 +# 1--Handshaking/1_Handshaking_Handshaking_1_158.jpg +210 146 112 154 +464 240 52 94 +700 40 126 210 +440 304 56 66 +# 1--Handshaking/1_Handshaking_Handshaking_1_209.jpg +196 138 94 142 +616 38 80 176 +# 1--Handshaking/1_Handshaking_Handshaking_1_313.jpg +266 24 86 158 +638 106 78 126 +# 1--Handshaking/1_Handshaking_Handshaking_1_457.jpg +248 66 514 664 +# 1--Handshaking/1_Handshaking_Handshaking_1_567.jpg +207 278 461 695 +# 1--Handshaking/1_Handshaking_Handshaking_1_356.jpg +433 285 12 22 +572 298 15 26 +611 290 9 21 +491 302 6 13 +672 226 117 175 +605 419 18 30 +586 420 19 31 +150 622 108 137 +310 391 31 32 +281 386 11 36 +658 358 15 21 +650 304 7 13 +159 276 8 9 +356 307 10 16 +450 310 10 13 +508 309 8 13 +497 300 10 11 +# 10--People_Marching/10_People_Marching_People_Marching_10_People_Marching_People_Marching_10_People_Marching_People_Marching_10_674.jpg +980 317 9 12 +963 315 11 14 +918 309 8 10 +895 308 8 12 +866 311 11 11 +868 289 7 9 +843 286 9 13 +973 374 7 9 +828 398 15 17 +801 333 7 9 +795 316 10 12 +778 300 6 8 +774 289 7 8 +786 293 9 14 +695 270 6 9 +672 274 6 10 +725 356 12 15 +748 366 13 15 +681 382 11 15 +938 379 9 12 +640 341 13 15 +621 364 13 16 +577 365 10 12 +599 337 10 13 +514 358 9 11 +528 338 9 14 +561 282 6 9 +593 275 6 8 +516 254 7 9 +634 271 8 9 +465 344 9 13 +451 330 11 12 +508 289 6 9 +488 305 12 14 +406 376 13 17 +392 341 11 13 +328 331 11 14 +261 302 8 13 +417 259 5 8 +399 258 8 9 +384 260 7 8 +318 268 6 8 +297 263 6 9 +208 313 8 12 +233 272 9 12 +99 295 11 11 +71 286 8 9 +9 280 9 13 +167 230 5 8 +15 369 13 16 +174 399 8 14 +197 348 12 13 +274 458 18 26 +142 408 12 17 +880 343 9 11 +821 275 9 12 +778 339 6 8 +734 286 7 11 +# 10--People_Marching/10_People_Marching_People_Marching_2_430.jpg +163 162 35 40 +63 130 22 18 +213 156 26 30 +88 144 20 23 +287 129 31 36 +416 171 30 37 +631 200 46 53 +768 196 41 50 +913 251 35 42 +# 10--People_Marching/10_People_Marching_People_Marching_2_822.jpg +666 222 91 101 +517 152 72 98 +317 76 84 106 +842 260 16 20 +882 264 10 14 +891 251 15 20 +907 244 12 22 +178 134 26 48 +107 150 15 19 +29 150 20 24 +# 10--People_Marching/10_People_Marching_People_Marching_2_944.jpg +609 458 45 54 +481 452 38 41 +244 510 43 45 +234 472 26 28 +133 478 34 40 +52 462 33 41 +640 446 30 42 +# 10--People_Marching/10_People_Marching_People_Marching_10_People_Marching_People_Marching_10_People_Marching_People_Marching_10_162.jpg +390 308 21 27 +93 292 23 24 +142 273 18 22 +175 256 18 24 +280 218 18 18 +272 289 20 22 +363 263 19 21 +389 228 14 17 +417 225 15 18 +519 272 15 18 +668 293 21 24 +697 280 15 21 +712 284 19 22 +778 92 20 17 +656 43 18 16 +334 271 20 20 +303 271 19 22 +# 10--People_Marching/10_People_Marching_People_Marching_2_269.jpg +65 220 14 15 +42 218 6 9 +246 208 12 15 +330 205 5 7 +343 197 6 6 +397 200 6 7 +389 208 5 5 +768 236 25 37 +474 210 7 10 +481 205 7 12 +489 201 8 13 +523 201 7 9 +571 197 5 7 +535 201 4 6 +558 198 5 5 +724 200 11 11 +656 193 7 8 +673 194 7 9 +694 186 6 6 +713 186 6 6 +709 191 5 7 +604 195 4 6 +749 194 9 13 +776 
187 5 6 +792 181 6 7 +807 187 6 8 +828 179 5 7 +887 174 9 12 +845 178 6 5 +834 178 7 7 +941 183 10 11 +969 182 8 9 +930 171 7 7 +961 173 7 6 +1003 168 5 7 +979 174 7 7 +609 201 6 9 +686 191 5 7 +# 10--People_Marching/10_People_Marching_People_Marching_2_307.jpg +514 114 94 162 +254 172 82 112 +78 182 68 92 +662 210 52 84 +# 10--People_Marching/10_People_Marching_People_Marching_2_40.jpg +58 43 10 11 +57 31 9 10 +63 65 12 17 +96 44 9 12 +114 27 6 8 +94 93 24 26 +47 136 40 40 +100 142 22 30 +136 79 22 28 +154 46 10 12 +187 41 10 10 +229 94 21 25 +253 87 19 14 +266 120 27 32 +315 147 29 32 +167 228 37 42 +222 198 27 38 +50 349 52 62 +94 452 71 115 +410 46 13 15 +381 92 18 19 +356 127 26 37 +402 161 28 32 +419 103 22 28 +473 107 22 28 +512 48 12 17 +520 43 13 17 +515 86 18 26 +494 150 30 29 +594 126 18 41 +564 65 15 16 +573 45 13 15 +654 57 11 15 +668 49 8 11 +698 46 14 15 +715 53 12 15 +742 65 11 15 +757 48 12 19 +661 98 18 28 +678 96 20 26 +712 83 13 22 +796 67 12 14 +812 54 9 14 +821 54 11 17 +853 52 11 18 +767 91 17 14 +789 121 20 27 +804 110 26 30 +817 141 17 42 +883 99 17 26 +902 115 18 24 +875 137 26 33 +668 205 36 56 +585 275 38 50 +684 247 39 57 +714 268 55 65 +767 207 37 33 +819 246 34 50 +839 239 46 76 +708 354 33 71 +940 253 44 48 +949 290 45 75 +988 340 36 66 +914 445 41 99 +514 519 101 100 +194 85 16 18 +255 440 78 94 +17 3 8 12 +100 11 7 7 +941 202 36 42 +936 153 22 36 +599 216 29 51 +# 10--People_Marching/10_People_Marching_People_Marching_10_People_Marching_People_Marching_10_People_Marching_People_Marching_10_938.jpg +483 261 108 156 +# 10--People_Marching/10_People_Marching_People_Marching_2_191.jpg +20 305 18 18 +82 294 16 21 +96 314 16 23 +137 304 15 23 +185 316 17 22 +241 312 18 21 +256 293 18 22 +329 239 14 18 +379 240 12 17 +318 300 17 23 +336 308 15 26 +359 310 18 22 +388 317 16 21 +453 322 16 23 +513 318 18 27 +660 323 19 23 +747 327 20 26 +827 323 18 28 +848 317 18 27 +971 345 19 27 +1009 471 15 38 +# 10--People_Marching/10_People_Marching_People_Marching_2_678.jpg +946 210 12 14 +901 208 11 15 +886 296 19 21 +842 306 24 27 +787 307 22 24 +723 275 26 27 +623 296 20 24 +512 304 22 25 +449 298 24 25 +350 278 27 28 +305 292 23 26 +254 287 27 33 +216 295 25 27 +205 298 9 12 +173 313 5 10 +116 338 15 15 +66 258 6 10 +36 318 15 19 +69 255 7 10 +23 321 10 15 +86 340 7 12 +564 332 10 10 +68 369 9 14 +387 301 12 17 +# 10--People_Marching/10_People_Marching_People_Marching_2_1046.jpg +46 204 24 21 +96 207 17 18 +165 249 30 29 +215 212 23 24 +280 258 18 22 +199 188 16 19 +366 236 16 21 +330 200 7 8 +422 199 22 22 +510 201 49 63 +576 241 22 25 +742 240 20 22 +680 200 15 18 +870 234 26 24 +1006 227 18 29 +586 196 10 10 +# 10--People_Marching/10_People_Marching_People_Marching_10_People_Marching_People_Marching_10_524.jpg +70 145 63 70 +280 205 44 49 +589 171 60 63 +843 141 48 57 +# 10--People_Marching/10_People_Marching_People_Marching_2_316.jpg +36 2 202 220 +# 10--People_Marching/10_People_Marching_People_Marching_2_277.jpg +47 197 48 52 +25 170 7 8 +47 166 7 7 +223 172 11 18 +228 173 18 23 +257 171 24 24 +298 181 6 6 +329 181 18 27 +365 186 30 32 +412 178 38 42 +465 180 17 25 +502 195 27 28 +497 176 10 16 +557 179 10 13 +571 155 6 10 +598 180 20 20 +619 188 31 38 +672 195 18 22 +706 190 7 10 +714 187 12 13 +735 192 11 18 +753 188 28 33 +789 196 17 17 +817 192 19 20 +837 187 25 31 +868 197 17 18 +909 191 10 18 +914 188 21 20 +939 188 14 18 +972 195 20 18 +1008 187 15 18 +999 178 6 6 +996 195 9 9 +963 176 9 7 +625 157 4 6 +640 164 5 7 +881 189 11 14 +# 
10--People_Marching/10_People_Marching_People_Marching_2_256.jpg +0 272 65 122 +140 223 67 85 +173 182 88 101 +326 228 66 62 +447 224 85 87 +572 255 73 61 +710 146 48 55 +813 150 65 68 +957 160 50 58 +# 10--People_Marching/10_People_Marching_People_Marching_10_People_Marching_People_Marching_10_368.jpg +267 186 54 59 +45 105 61 71 +186 100 65 85 +336 159 59 70 +398 158 42 57 +449 190 56 62 +599 143 60 69 +730 120 61 69 +859 176 72 80 +523 227 39 47 +661 187 45 52 +0 181 35 55 +922 217 40 56 +493 239 33 48 +# 10--People_Marching/10_People_Marching_People_Marching_2_373.jpg +54 540 39 37 +143 508 43 48 +172 497 30 50 +251 464 40 49 +188 430 26 48 +107 426 40 44 +71 467 35 50 +46 429 41 40 +291 469 31 43 +251 398 38 50 +273 361 40 51 +231 539 38 33 +320 540 42 36 +333 432 34 42 +353 433 46 60 +415 422 39 52 +510 415 41 48 +442 386 37 39 +551 392 37 36 +368 382 31 39 +398 429 32 37 +391 344 38 47 +460 326 43 44 +631 477 37 42 +693 477 45 57 +766 493 40 58 +733 552 39 25 +928 495 34 45 +821 507 35 44 +836 472 35 45 +868 397 40 49 +920 378 37 48 +955 504 38 48 +977 556 42 21 +989 479 35 41 +956 286 32 29 +983 242 32 37 +905 267 28 24 +925 288 31 36 +857 295 32 32 +876 275 31 28 +911 227 30 36 +944 208 28 29 +966 190 26 31 +993 205 27 28 +981 148 28 38 +913 148 27 36 +992 134 24 22 +924 125 25 24 +969 91 26 33 +994 80 25 28 +933 68 24 36 +897 101 19 24 +927 51 15 19 +944 7 16 19 +888 33 15 12 +846 17 14 17 +848 37 16 12 +878 45 18 15 +872 71 23 19 +899 72 15 18 +853 49 19 25 +825 56 23 24 +819 21 12 20 +794 19 18 24 +757 28 19 24 +776 28 9 14 +767 8 14 15 +839 75 26 29 +806 54 15 21 +770 51 20 19 +791 63 23 28 +773 86 24 25 +763 81 20 23 +876 97 25 33 +816 87 21 26 +765 115 26 28 +794 118 24 27 +806 143 30 30 +838 156 27 30 +863 133 25 31 +883 176 32 30 +815 185 24 27 +848 206 28 28 +846 239 31 35 +772 185 26 39 +757 237 27 29 +757 274 36 38 +793 276 35 26 +809 300 37 36 +795 375 32 31 +756 335 30 38 +711 172 30 37 +744 161 24 31 +1002 370 21 37 +975 437 33 39 +691 296 32 31 +696 337 33 40 +694 241 31 31 +694 203 33 33 +672 435 37 42 +624 411 29 44 +626 290 35 44 +568 301 30 43 +593 372 37 37 +518 257 33 39 +475 276 26 23 +666 188 26 25 +625 174 22 30 +654 137 29 32 +678 136 22 32 +706 139 25 26 +728 85 21 30 +746 89 11 25 +730 60 22 28 +656 51 22 27 +702 24 17 25 +611 75 24 29 +619 110 26 32 +640 27 20 21 +659 16 16 17 +616 34 21 20 +575 59 20 29 +570 85 18 26 +545 92 29 21 +569 27 19 23 +622 2 9 10 +658 0 19 13 +673 5 12 15 +684 7 15 17 +699 0 19 19 +728 4 18 26 +537 74 15 22 +524 61 17 21 +552 41 15 27 +537 8 19 15 +661 357 32 31 +401 303 28 25 +395 258 28 26 +330 281 33 35 +352 319 22 43 +282 292 35 46 +258 258 31 42 +222 302 33 50 +204 259 33 38 +206 226 25 33 +226 220 15 28 +238 217 15 25 +268 204 29 29 +309 200 33 41 +380 187 30 38 +335 168 29 30 +252 164 26 30 +500 93 24 29 +464 77 13 30 +477 37 23 30 +491 24 18 20 +496 50 17 21 +445 45 22 20 +421 47 20 29 +403 55 22 19 +416 95 21 31 +443 108 22 20 +400 78 22 19 +371 109 28 26 +365 133 28 29 +395 128 22 23 +355 75 20 28 +346 50 22 28 +335 98 18 29 +269 93 26 35 +264 135 27 36 +320 44 19 23 +366 39 17 28 +410 24 17 24 +346 21 19 21 +260 77 26 17 +285 16 18 22 +305 11 17 23 +381 4 20 22 +359 9 16 22 +354 0 16 11 +230 40 22 24 +242 6 16 19 +268 0 17 9 +186 13 17 26 +158 6 16 24 +200 49 17 17 +210 62 15 20 +231 85 16 17 +184 58 20 35 +233 23 14 17 +182 46 19 17 +163 69 25 33 +146 45 20 23 +119 34 20 22 +114 55 19 21 +121 9 19 20 +81 35 18 20 +81 11 17 18 +132 84 25 27 +147 99 25 29 +194 97 27 32 +218 93 20 31 +37 9 20 27 +33 0 21 8 +49 51 26 
30 +30 61 17 15 +21 81 18 29 +183 121 22 31 +201 200 31 34 +162 191 31 27 +139 135 20 25 +83 188 29 31 +54 222 25 29 +100 241 23 25 +70 239 29 44 +49 258 28 35 +102 300 28 29 +167 289 35 46 +135 313 33 40 +53 302 31 39 +168 388 34 37 +152 342 35 40 +125 376 36 44 +88 381 37 37 +60 351 33 40 +21 325 36 37 +16 371 21 41 +563 0 19 14 +507 8 18 18 +523 0 18 13 +774 149 23 26 +456 15 13 14 +364 261 32 33 +0 85 18 27 +5 8 18 20 +# 10--People_Marching/10_People_Marching_People_Marching_2_934.jpg +446 196 114 152 +# 10--People_Marching/10_People_Marching_People_Marching_2_668.jpg +412 306 44 53 +541 295 31 33 +605 304 21 28 +297 348 38 43 +194 300 38 43 +100 266 32 36 +372 330 19 24 +515 314 17 22 +461 318 26 29 +769 316 25 29 +857 315 21 26 +921 336 16 19 +956 330 15 16 +986 332 12 15 +1003 340 12 15 +900 345 14 18 +835 339 13 15 +# 10--People_Marching/10_People_Marching_People_Marching_2_404.jpg +194 226 24 28 +269 268 20 23 +334 280 14 18 +385 297 11 15 +154 298 15 16 +37 266 18 23 +603 237 28 25 +929 248 23 20 +840 288 17 16 +780 298 12 10 +1013 268 11 22 +822 269 10 13 +386 281 7 8 +437 312 8 11 +409 309 10 12 +# 10--People_Marching/10_People_Marching_People_Marching_2_34.jpg +82 733 24 32 +101 684 16 19 +39 687 14 16 +175 690 15 19 +176 751 26 18 +72 608 8 9 +166 637 12 15 +219 671 11 16 +229 680 17 20 +244 641 12 19 +261 673 13 14 +226 728 15 23 +304 716 21 25 +404 720 18 25 +525 709 29 46 +400 659 14 17 +343 643 11 19 +370 641 13 17 +305 627 11 15 +329 650 10 12 +382 674 12 13 +447 688 17 19 +456 637 12 17 +591 690 22 30 +623 716 34 51 +675 711 22 30 +524 585 9 10 +510 581 9 9 +603 624 12 15 +623 625 13 19 +247 565 7 10 +195 564 9 10 +189 579 8 10 +126 606 9 12 +248 584 9 11 +299 589 10 14 +341 611 10 10 +442 592 10 11 +296 572 8 11 +342 579 7 7 +426 552 7 8 +453 550 7 8 +440 532 6 7 +420 535 5 6 +458 582 6 9 +429 582 6 9 +532 599 11 12 +493 527 6 8 +459 530 7 11 +365 567 7 9 +380 561 8 9 +415 598 10 13 +668 716 19 24 +716 704 20 29 +761 746 25 23 +861 723 30 38 +837 671 18 23 +811 641 15 16 +613 668 13 14 +665 613 8 9 +589 588 9 11 +667 534 7 8 +657 575 9 10 +732 606 10 9 +734 580 11 15 +764 591 11 11 +698 570 7 9 +693 584 8 9 +746 553 7 10 +802 570 6 10 +811 587 8 11 +824 590 9 10 +840 586 8 13 +831 568 8 11 +912 551 8 10 +927 569 7 9 +901 627 11 15 +993 627 8 17 +748 515 6 6 +787 552 8 10 +807 519 5 8 +873 521 7 8 +914 526 7 9 +824 496 6 7 +829 519 6 6 +852 513 5 6 +933 519 6 9 +754 503 6 8 +32 623 8 9 +79 599 8 10 +228 599 7 9 +247 601 9 8 +228 550 6 10 +281 543 7 8 +219 540 6 9 +194 537 7 8 +207 554 7 8 +101 564 5 8 +121 575 8 9 +77 571 7 9 +68 567 7 8 +93 621 8 10 +206 584 8 9 +159 560 7 8 +255 564 6 6 +629 610 12 15 +565 570 8 8 +640 587 8 8 +622 590 9 8 +579 617 9 12 +516 508 4 6 +626 510 6 7 +681 489 5 6 +661 506 5 6 +676 524 6 7 +639 491 6 6 +787 474 5 5 +916 492 5 8 +845 484 5 6 +907 452 5 7 +863 469 5 5 +147 594 8 9 +388 538 4 7 +410 550 6 9 +376 581 8 11 +463 593 8 11 +461 570 9 11 +439 583 6 10 +435 631 11 15 +508 622 8 11 +517 692 23 31 +550 699 19 23 +659 561 6 11 +720 579 7 10 +750 574 9 13 +819 609 10 16 +702 686 14 15 +782 586 7 11 +993 722 31 47 +999 651 10 22 +1004 638 16 17 +981 591 11 14 +858 608 10 12 +448 608 7 11 +352 533 6 9 +318 543 6 9 +258 594 7 8 +# 10--People_Marching/10_People_Marching_People_Marching_2_60.jpg +140 0 33 20 +233 0 22 22 +310 0 28 20 +103 197 38 44 +222 160 39 50 +429 170 26 36 +353 208 36 41 +399 221 38 42 +506 184 43 42 +590 190 27 29 +662 194 38 42 +768 157 41 38 +843 187 32 40 +921 211 35 41 +# 
10--People_Marching/10_People_Marching_People_Marching_10_People_Marching_People_Marching_10_People_Marching_People_Marching_10_716.jpg +2 433 34 45 +75 435 38 50 +122 419 40 41 +196 407 45 50 +326 345 51 67 +507 290 62 77 +767 271 76 85 +653 524 19 24 +930 454 32 36 +# 10--People_Marching/10_People_Marching_People_Marching_10_People_Marching_People_Marching_10_69.jpg +92 641 24 30 +118 617 20 25 +135 662 22 21 +176 649 22 30 +215 659 19 24 +208 605 19 27 +162 557 21 26 +97 563 20 28 +115 598 20 17 +48 591 22 29 +3 611 24 28 +15 569 16 32 +52 562 17 17 +231 551 18 20 +222 519 17 19 +160 512 18 25 +84 510 22 24 +169 486 17 27 +224 484 18 22 +128 478 17 17 +181 445 17 23 +194 426 16 17 +268 414 15 21 +298 423 14 19 +294 456 19 24 +301 479 16 20 +307 500 19 21 +102 466 17 16 +62 501 19 18 +21 511 15 17 +56 451 18 20 +84 444 13 13 +154 414 17 20 +52 382 15 16 +251 404 13 22 +288 404 11 14 +236 354 16 21 +134 389 12 18 +47 347 13 15 +115 330 14 18 +104 319 14 14 +65 312 13 14 +107 251 12 17 +92 244 13 15 +75 270 13 14 +54 264 12 14 +58 248 11 12 +74 230 10 12 +144 230 12 14 +161 229 11 12 +185 232 12 13 +207 231 11 14 +228 226 13 13 +109 220 13 17 +167 210 8 13 +196 216 11 14 +213 221 11 8 +272 215 7 11 +271 196 12 13 +218 196 10 14 +199 190 10 13 +228 190 10 13 +163 176 13 18 +137 195 13 14 +119 203 9 13 +97 196 11 15 +111 190 12 15 +132 181 10 13 +73 191 9 13 +62 197 10 13 +105 177 9 6 +82 177 6 11 +130 155 10 13 +162 154 9 13 +181 163 10 13 +207 177 8 7 +225 177 8 8 +242 156 9 11 +267 183 8 8 +200 139 9 11 +190 131 9 13 +204 134 9 13 +176 137 9 12 +173 136 5 11 +476 98 8 9 +455 103 7 9 +461 117 10 10 +472 135 8 8 +474 144 8 8 +473 160 9 13 +497 158 9 12 +517 166 10 13 +530 158 8 13 +512 144 9 12 +523 146 7 8 +492 142 10 14 +500 137 6 11 +549 157 9 7 +594 155 9 13 +612 141 10 9 +645 134 8 8 +638 144 9 11 +672 157 9 10 +690 152 9 12 +704 145 7 8 +737 139 9 10 +669 146 7 10 +704 164 10 12 +594 170 8 12 +522 56 4 4 +767 82 4 9 +759 119 9 9 +780 106 7 11 +876 106 8 9 +760 105 7 9 +770 152 9 10 +786 143 9 10 +821 162 8 11 +817 144 6 7 +828 150 6 7 +854 148 8 12 +884 132 9 10 +894 131 8 10 +867 137 6 10 +915 171 11 15 +934 161 10 11 +954 204 13 9 +1001 204 11 12 +1010 184 11 14 +966 202 8 9 +951 144 8 6 +660 141 8 10 +690 179 8 12 +712 182 10 11 +660 185 3 10 +682 191 9 11 +648 184 7 10 +637 194 9 12 +644 189 5 9 +625 176 8 13 +616 165 7 8 +613 154 5 9 +602 195 9 8 +606 189 8 9 +610 212 12 12 +582 185 9 10 +572 180 8 10 +592 208 10 11 +624 228 10 9 +714 214 9 13 +681 220 9 7 +684 230 10 12 +713 234 12 12 +692 244 12 10 +679 249 11 17 +657 233 10 9 +536 195 9 14 +527 194 8 9 +540 184 9 6 +587 233 8 12 +596 230 7 8 +603 242 10 15 +632 246 9 13 +662 264 10 12 +690 271 13 19 +700 267 9 13 +709 272 10 10 +437 97 9 11 +444 117 7 9 +398 115 9 11 +416 123 7 7 +422 121 8 10 +436 128 9 12 +378 117 8 8 +395 107 7 9 +364 106 5 8 +357 114 6 7 +367 123 10 11 +348 126 7 12 +345 117 9 8 +335 129 10 10 +318 129 10 10 +300 124 8 9 +311 125 7 9 +293 135 8 11 +312 148 8 10 +295 159 10 10 +294 175 11 13 +306 169 11 14 +325 166 7 11 +329 159 7 7 +346 173 7 7 +330 145 7 7 +380 149 8 10 +365 170 7 9 +396 127 7 11 +414 148 8 7 +406 162 11 12 +421 144 10 9 +462 160 5 9 +427 170 8 11 +457 181 10 14 +466 190 10 14 +490 189 10 11 +502 188 7 14 +507 189 6 12 +408 192 9 13 +384 195 10 13 +149 147 10 12 +183 73 7 9 +199 77 8 8 +171 84 6 12 +145 96 9 11 +128 83 9 11 +89 91 9 8 +95 80 7 8 +95 68 6 10 +140 67 9 10 +151 58 7 10 +143 52 7 8 +199 47 6 10 +177 38 8 8 +210 43 6 8 +227 46 6 8 +239 53 7 11 +192 33 6 10 +178 50 6 6 +163 42 
6 7 +153 40 6 9 +127 37 5 10 +154 27 7 10 +167 31 6 8 +183 28 6 8 +199 32 5 5 +234 28 7 9 +214 29 6 7 +261 55 6 9 +245 70 5 7 +242 81 6 6 +271 81 4 6 +275 91 9 10 +275 61 6 9 +252 35 6 7 +261 40 6 8 +282 28 6 9 +285 46 6 7 +292 62 8 11 +311 60 7 8 +252 22 7 8 +214 18 6 8 +207 18 6 10 +182 10 6 10 +190 7 6 7 +197 2 6 8 +165 11 6 6 +170 21 5 6 +171 2 5 8 +218 6 5 8 +240 10 7 10 +246 5 5 6 +267 15 7 8 +292 23 6 6 +299 3 6 8 +305 7 6 9 +315 19 6 6 +316 34 7 9 +269 33 5 6 +315 0 6 5 +324 1 7 5 +319 5 4 7 +353 2 6 6 +362 8 5 8 +377 6 5 6 +394 7 5 6 +414 4 5 8 +419 0 6 6 +440 14 4 6 +429 19 3 4 +428 25 5 8 +441 23 6 8 +404 17 7 10 +393 17 7 9 +375 19 6 7 +383 14 4 5 +385 24 6 8 +354 26 6 8 +329 22 6 8 +304 21 4 5 +330 35 5 8 +340 35 6 8 +325 51 6 8 +319 62 8 10 +318 55 5 5 +347 54 7 9 +367 41 7 9 +387 50 8 8 +396 48 6 9 +380 45 5 7 +400 40 6 6 +405 32 6 10 +423 29 5 8 +434 31 5 7 +133 131 9 13 +119 143 12 12 +100 129 9 11 +96 153 8 14 +98 118 9 9 +111 107 12 12 +136 114 8 11 +147 131 8 11 +161 128 9 10 +160 115 9 11 +150 115 8 11 +118 95 8 8 +81 127 9 17 +185 101 9 9 +219 114 10 13 +202 114 5 10 +226 106 7 13 +217 110 6 10 +241 120 8 11 +247 115 7 8 +268 113 5 7 +268 132 8 8 +237 140 7 11 +283 155 8 9 +273 171 8 11 +283 183 10 15 +290 207 6 6 +204 91 9 11 +232 88 9 10 +264 88 7 9 +221 66 6 7 +581 254 10 14 +566 301 12 12 +580 290 12 9 +482 310 13 18 +466 298 14 16 +435 294 13 16 +428 318 14 12 +448 323 12 17 +560 320 12 16 +578 331 14 12 +580 348 12 14 +550 349 13 17 +616 267 10 11 +654 278 11 8 +659 292 11 11 +700 283 10 13 +708 304 14 16 +734 310 13 15 +695 302 11 13 +660 306 11 17 +640 313 10 11 +622 280 12 16 +592 301 12 13 +600 309 12 16 +629 328 12 16 +645 330 14 18 +687 328 15 15 +706 338 13 17 +738 328 12 16 +595 331 12 15 +609 361 15 13 +950 478 17 19 +995 526 20 23 +909 470 13 18 +885 483 18 20 +839 477 19 22 +918 544 22 25 +326 360 14 15 +382 352 13 16 +322 339 10 10 +448 372 15 18 +500 388 13 14 +385 383 15 19 +379 382 11 16 +339 375 13 14 +381 408 17 18 +407 414 14 20 +370 418 12 16 +358 456 17 22 +363 440 12 15 +425 451 15 19 +464 412 16 16 +502 434 17 22 +535 463 19 25 +537 449 10 10 +520 432 14 10 +456 466 14 22 +441 474 15 23 +421 490 20 22 +396 502 18 22 +367 495 16 23 +587 447 14 20 +563 461 16 17 +653 457 17 24 +723 446 10 17 +724 479 17 17 +695 480 18 27 +726 502 20 21 +630 493 17 23 +597 508 19 21 +576 495 16 19 +563 531 17 26 +541 539 18 23 +503 498 16 20 +470 503 18 20 +299 535 16 15 +359 534 17 23 +401 525 17 22 +319 547 17 24 +401 573 20 27 +389 595 21 24 +328 613 22 26 +358 628 21 26 +302 603 19 26 +321 598 20 28 +243 614 20 17 +271 569 19 20 +258 638 19 21 +285 659 20 24 +429 663 20 20 +437 617 20 21 +504 636 17 16 +547 631 18 22 +435 44 7 10 +408 58 8 10 +437 63 5 7 +425 66 6 8 +428 79 9 11 +403 72 7 10 +367 55 7 12 +360 61 6 6 +382 75 8 9 +406 98 8 11 +392 94 8 10 +383 88 7 10 +378 103 7 9 +370 96 8 10 +354 93 6 7 +348 68 6 7 +315 82 6 8 +289 81 6 8 +312 93 8 9 +324 97 7 5 +336 84 9 14 +332 102 8 10 +346 104 8 9 +291 108 10 12 +287 115 8 11 +447 6 6 7 +500 53 6 8 +448 55 7 7 +458 33 5 6 +500 77 8 10 +491 63 6 9 +461 79 7 7 +467 69 6 7 +455 94 8 7 +479 88 7 8 +513 83 7 10 +554 81 9 10 +602 83 8 11 +571 23 4 6 +469 49 7 7 +639 56 8 7 +689 51 7 8 +660 54 8 9 +653 67 6 9 +627 65 6 8 +646 38 8 10 +632 45 7 7 +668 33 5 8 +642 15 6 9 +647 106 9 12 +708 64 6 9 +714 51 8 10 +721 71 8 9 +623 11 6 5 +656 14 5 7 +734 58 6 7 +725 50 6 8 +508 11 6 7 +645 30 4 5 +680 30 4 6 +674 67 5 6 +375 180 10 12 +320 193 10 13 +328 185 8 13 +366 143 8 10 +451 194 9 12 +446 189 8 8 +457 202 11 
8 +461 214 11 13 +436 224 11 12 +392 217 11 13 +351 207 5 12 +299 200 10 13 +324 219 5 9 +498 219 11 14 +503 207 9 11 +528 205 11 14 +544 216 9 14 +511 224 9 8 +510 238 12 15 +524 254 10 10 +493 247 9 9 +455 249 10 11 +441 251 9 13 +470 266 12 14 +505 268 10 10 +485 267 12 17 +446 267 13 17 +462 275 12 16 +506 282 13 15 +519 285 11 13 +566 268 12 13 +684 62 7 7 +678 51 6 5 +708 82 7 10 +741 75 9 10 +750 94 9 13 +703 109 8 10 +699 122 7 12 +665 21 5 8 +481 0 4 6 +651 86 5 8 +587 96 8 10 +547 112 7 11 +581 112 7 11 +572 118 5 4 +544 104 4 9 +618 126 8 10 +610 120 9 10 +595 134 7 10 +583 138 9 13 +564 137 6 9 +567 126 6 8 +551 134 7 7 +540 138 9 8 +524 114 8 11 +514 111 7 7 +513 127 8 10 +500 123 7 10 +487 129 7 8 +475 117 7 10 +472 103 7 10 +572 257 11 14 +627 371 15 18 +560 372 15 18 +538 377 14 18 +595 376 12 17 +550 424 15 16 +773 360 14 13 +820 346 11 11 +885 327 11 11 +878 396 12 15 +821 369 15 17 +788 377 16 17 +808 398 13 17 +763 388 11 11 +784 405 17 19 +745 390 12 19 +717 357 13 19 +721 386 14 16 +684 390 15 15 +652 344 13 17 +631 419 11 20 +683 418 13 19 +732 415 15 21 +767 442 17 22 +849 424 15 15 +882 443 18 24 +979 337 13 16 +1018 324 6 21 +950 305 10 14 +1009 305 8 13 +800 320 7 11 +886 631 21 25 +876 646 20 31 +931 576 20 28 +958 629 22 29 +978 604 21 23 +996 593 17 26 +951 664 23 20 +999 569 17 14 +58 351 13 10 +932 446 17 22 +534 609 18 26 +476 561 16 22 +491 545 18 23 +511 563 18 25 +550 577 17 23 +570 595 20 21 +584 625 17 24 +601 659 20 24 +606 553 20 24 +645 583 19 21 +716 538 20 29 +729 535 13 24 +744 574 21 25 +795 542 19 20 +820 527 13 20 +809 534 10 19 +821 571 21 30 +806 570 17 23 +764 573 13 21 +839 612 21 28 +757 613 19 27 +758 648 23 24 +689 660 22 23 +694 639 18 15 +760 508 16 18 +779 502 16 17 +868 542 19 22 +879 532 19 27 +897 524 11 16 +880 575 20 28 +603 404 11 13 +313 673 21 11 +660 84 7 10 +498 109 7 7 +0 659 19 26 +# 10--People_Marching/10_People_Marching_People_Marching_10_People_Marching_People_Marching_10_People_Marching_People_Marching_10_933.jpg +110 198 38 51 +539 159 27 30 +521 182 25 27 +985 145 18 27 +771 189 14 26 +320 72 13 23 +238 51 16 20 +# 10--People_Marching/10_People_Marching_People_Marching_10_People_Marching_People_Marching_10_People_Marching_People_Marching_10_1024.jpg +577 1042 19 25 +460 1046 28 36 +282 1049 22 31 +207 1054 25 33 +847 1043 23 32 +213 1954 79 105 +364 1880 71 96 +4 1687 54 71 +80 1696 43 65 +124 1703 37 49 +177 1752 44 50 +221 1755 27 42 +264 1752 45 51 +343 1761 35 39 +401 1787 57 69 +491 1726 29 35 +465 1769 30 36 +449 1832 35 39 +518 1844 35 49 +546 1774 34 38 +579 1896 44 63 +611 1985 72 85 +610 1831 44 51 +594 1761 33 46 +632 1755 30 37 +671 1745 30 34 +665 1802 34 42 +731 1847 47 63 +797 1776 42 59 +761 1744 38 44 +859 1724 34 43 +881 1683 37 46 +858 1776 40 52 +915 1783 46 57 +952 1916 59 78 +990 1757 34 56 +906 1077 38 45 +994 1087 25 41 +710 1084 30 39 +326 1051 20 24 +# 10--People_Marching/10_People_Marching_People_Marching_10_People_Marching_People_Marching_10_499.jpg +558 288 23 25 +491 272 22 30 +444 275 20 25 +377 245 23 29 +737 82 25 28 +639 75 25 24 +600 73 17 20 +539 29 19 24 +847 0 18 10 +# 10--People_Marching/10_People_Marching_People_Marching_2_259.jpg +91 468 16 15 +209 207 8 9 +166 231 11 10 +172 251 8 11 +123 285 11 14 +116 323 11 13 +282 205 9 10 +329 214 9 10 +365 206 9 14 +401 175 10 11 +328 248 11 19 +268 278 14 20 +282 318 11 16 +322 284 12 16 +364 285 12 14 +343 313 13 13 +428 199 10 11 +55 368 10 14 +149 408 15 17 +178 372 12 14 +208 410 13 18 +169 464 17 15 +226 465 15 14 +282 475 16 14 
+262 409 13 12 +233 371 13 13 +297 363 14 15 +314 410 15 16 +347 365 13 14 +370 410 16 15 +397 371 14 15 +395 469 15 18 +420 413 15 16 +450 476 16 16 +474 411 15 15 +505 468 14 15 +533 416 13 16 +562 464 16 18 +589 425 13 17 +338 477 15 16 +432 267 10 12 +409 292 10 11 +414 319 13 15 +450 293 12 15 +470 332 12 14 +445 361 14 17 +496 369 15 15 +529 326 11 13 +555 356 15 13 +500 291 12 13 +533 258 10 11 +558 286 13 14 +591 326 12 14 +445 176 10 10 +486 156 10 12 +522 175 11 13 +470 193 10 14 +510 201 10 11 +553 203 10 13 +567 176 10 11 +590 201 10 13 +624 205 10 13 +627 260 13 17 +607 289 13 14 +640 324 14 15 +666 288 12 15 +707 324 14 17 +711 283 13 14 +666 208 10 11 +707 220 8 10 +613 370 14 14 +683 361 13 14 +639 426 15 15 +620 469 16 15 +679 465 15 20 +689 416 14 14 +734 465 16 16 +746 416 17 20 +786 467 17 16 +802 407 14 17 +744 360 12 11 +805 365 13 11 +763 197 8 10 +797 223 9 10 +845 283 12 11 +859 319 11 11 +920 363 12 13 +943 414 12 16 +854 408 15 15 +884 466 15 13 +835 470 17 15 +791 248 12 17 +# 10--People_Marching/10_People_Marching_People_Marching_2_591.jpg +894 159 53 52 +850 185 33 34 +755 196 30 29 +672 192 25 28 +598 185 24 27 +460 186 21 23 +415 181 19 19 +370 201 18 20 +335 189 16 21 +509 222 23 12 +279 190 17 21 +212 189 21 21 +153 188 20 19 +84 180 22 20 +41 176 24 25 +155 1380 20 26 +844 1245 20 25 +806 1306 15 23 +621 1229 18 20 +513 1184 20 23 +506 1124 17 17 +574 1254 19 22 +538 1226 20 21 +611 1282 21 21 +646 1288 16 16 +771 1271 19 20 +526 1277 21 23 +487 1268 16 19 +499 1228 16 19 +456 1202 18 20 +566 1303 20 23 +413 1133 17 18 +320 1138 17 16 +360 1143 14 19 +447 1138 17 23 +356 1218 14 18 +250 1211 12 18 +45 1216 14 16 +106 1172 15 16 +71 1161 18 17 +6 1114 14 15 +149 1199 17 20 +128 1153 16 16 +319 1291 21 22 +421 1269 11 16 +699 1329 16 23 +935 1049 11 15 +663 1241 18 25 +734 1267 15 19 +690 1272 16 23 +651 1208 16 23 +585 1192 20 22 +556 1357 21 22 +424 1221 22 23 +468 1314 14 18 +892 1310 17 22 +873 1344 17 24 +46 1107 16 18 +162 1059 14 15 +286 1093 13 12 +232 1090 12 14 +75 1125 16 17 +278 1170 14 19 +250 1135 13 14 +206 1143 11 16 +458 1277 9 15 +591 658 10 15 +# 10--People_Marching/10_People_Marching_People_Marching_2_433.jpg +614 346 157 222 +245 382 147 188 +353 222 108 168 +498 237 132 162 +# 10--People_Marching/10_People_Marching_People_Marching_2_2.jpg +38 384 11 21 +159 425 14 23 +142 386 7 8 +111 382 5 6 +181 393 5 10 +192 391 7 10 +291 394 14 17 +315 442 12 24 +339 461 22 35 +358 391 7 11 +455 460 11 30 +436 404 8 15 +519 411 14 22 +488 481 26 41 +510 523 18 54 +572 422 9 21 +542 374 7 10 +629 384 12 14 +656 395 16 29 +659 434 22 36 +663 477 35 56 +701 370 10 12 +620 356 7 11 +759 422 32 41 +747 407 24 31 +793 385 9 19 +828 346 8 16 +948 468 33 62 +914 494 26 48 +1001 420 23 36 +961 389 15 30 +902 404 16 29 +877 344 7 15 +911 356 7 11 +974 345 7 14 +992 354 12 16 +814 398 11 19 +243 420 8 16 +342 390 7 10 +696 354 6 9 +634 362 6 9 +310 394 10 11 +234 404 7 11 +220 395 7 9 +# 10--People_Marching/10_People_Marching_People_Marching_10_People_Marching_People_Marching_10_People_Marching_People_Marching_10_1020.jpg +697 101 27 49 +# 10--People_Marching/10_People_Marching_People_Marching_2_496.jpg +455 150 85 132 +# 10--People_Marching/10_People_Marching_People_Marching_2_638.jpg +372 166 328 458 +# 10--People_Marching/10_People_Marching_People_Marching_2_401.jpg +330 403 68 86 +190 350 55 69 +92 513 61 76 +104 390 32 38 +63 317 30 38 +100 281 31 40 +46 289 24 28 +350 333 34 38 +308 329 23 29 +283 294 24 30 +208 301 25 31 +189 266 24 30 +254 271 21 30 
+314 268 19 23 +327 291 18 22 +239 290 14 20 +352 275 22 25 +383 313 25 33 +423 239 22 29 +385 249 18 24 +357 253 11 13 +34 268 17 26 +416 257 12 15 +485 239 14 15 +462 238 13 15 +508 245 18 16 +531 244 8 7 +542 239 24 34 +567 262 28 39 +496 266 28 42 +479 262 23 36 +545 308 31 29 +615 238 14 21 +626 246 29 39 +662 264 33 45 +703 241 11 16 +748 230 13 15 +740 252 19 26 +757 261 23 35 +728 300 37 47 +780 258 35 50 +817 246 25 31 +796 278 48 51 +642 527 73 102 +853 499 85 131 +848 244 37 54 +922 265 26 30 +984 234 11 22 +975 250 7 10 +990 231 15 29 +996 272 20 36 +777 349 62 84 +671 418 40 51 +724 381 38 48 +552 505 55 55 +495 351 18 47 +424 363 30 33 +23 414 55 58 +0 353 22 56 +162 268 17 24 +# 10--People_Marching/10_People_Marching_People_Marching_2_173.jpg +68 481 6 7 +115 490 6 6 +123 486 5 5 +162 489 7 8 +172 493 6 8 +181 486 5 6 +187 490 8 11 +193 486 7 8 +210 484 7 10 +238 489 6 8 +249 494 9 11 +260 491 7 12 +250 486 8 8 +269 497 5 6 +276 496 5 9 +283 499 8 11 +312 494 7 9 +319 488 9 14 +344 482 7 9 +366 496 11 17 +353 495 6 8 +377 495 9 14 +420 499 8 10 +426 483 5 7 +446 497 7 9 +455 495 9 15 +463 505 5 8 +468 506 4 8 +480 503 10 10 +494 503 11 15 +507 503 9 11 +519 499 12 18 +536 497 10 12 +566 508 14 16 +581 504 10 14 +643 499 9 11 +612 506 11 12 +669 502 16 19 +688 500 9 9 +699 514 13 19 +761 498 9 14 +793 511 26 34 +810 497 11 15 +856 514 17 20 +952 512 14 16 +341 497 8 11 +376 486 7 8 +# 10--People_Marching/10_People_Marching_People_Marching_2_793.jpg +936 169 40 52 +898 18 48 58 +801 117 51 51 +649 181 50 50 +513 99 49 56 +404 85 54 51 +276 77 49 62 +415 10 49 51 +126 88 48 52 +61 135 50 57 +# 10--People_Marching/10_People_Marching_People_Marching_10_People_Marching_People_Marching_10_People_Marching_People_Marching_10_619.jpg +433 461 12 15 +351 456 10 9 +415 446 7 9 +436 435 6 9 +462 449 8 10 +503 443 7 8 +520 454 10 11 +541 457 10 13 +555 449 12 15 +586 440 13 15 +615 445 12 12 +668 445 11 13 +702 437 10 11 +# 10--People_Marching/10_People_Marching_People_Marching_10_People_Marching_People_Marching_10_848.jpg +131 187 33 43 +243 214 33 47 +363 204 39 43 +481 156 44 59 +584 200 24 39 +641 165 50 58 +728 177 33 47 +760 128 54 73 +989 188 33 58 +105 243 26 29 +# 10--People_Marching/10_People_Marching_People_Marching_2_36.jpg +599 306 19 26 +511 289 25 25 +595 276 14 18 +634 291 18 22 +673 272 14 18 +649 309 24 28 +699 309 26 31 +694 289 11 12 +719 283 13 12 +753 322 27 30 +813 293 15 17 +813 333 31 33 +832 295 13 17 +854 293 13 16 +902 304 14 16 +929 297 16 16 +962 280 13 15 +974 295 13 18 +1004 291 15 18 +950 342 33 36 +549 298 23 21 +12 315 16 15 +32 285 16 22 +58 239 15 16 +93 308 12 20 +99 312 20 24 +115 290 24 26 +134 296 22 29 +20 433 11 10 +52 411 11 12 +57 373 14 18 +155 309 22 29 +190 316 24 28 +223 323 26 29 +257 327 27 29 +305 340 31 32 +300 285 20 25 +324 298 18 21 +343 293 21 26 +385 302 24 24 +428 307 25 28 +258 236 11 12 +467 312 25 30 +503 322 25 34 +558 324 29 36 +# 10--People_Marching/10_People_Marching_People_Marching_2_236.jpg +462 167 26 28 +# 10--People_Marching/10_People_Marching_People_Marching_2_514.jpg +140 208 150 252 +394 164 156 234 +804 238 158 250 +534 28 154 216 +918 74 100 234 +# 10--People_Marching/10_People_Marching_People_Marching_2_498.jpg +35 619 25 21 +0 462 10 20 +164 430 23 19 +116 271 22 23 +69 146 22 26 +27 17 22 28 +191 32 20 20 +239 141 23 20 +289 290 24 20 +346 417 25 30 +412 582 25 27 +574 596 26 30 +363 31 21 23 +542 7 24 25 +609 134 21 20 +663 286 23 17 +722 421 22 22 +785 543 23 26 +955 545 23 30 +894 417 23 30 +835 274 22 31 
+772 137 23 24 +705 0 20 27 +877 16 19 17 +942 130 21 19 +538 442 22 26 +1002 271 22 29 +216 597 24 27 +# 10--People_Marching/10_People_Marching_People_Marching_2_131.jpg +143 302 37 52 +201 368 41 48 +283 311 35 45 +327 212 34 46 +447 237 37 43 +408 297 35 46 +471 369 43 49 +512 301 36 47 +563 264 34 48 +620 314 39 48 +612 374 41 51 +684 257 32 45 +730 322 34 44 +737 398 40 51 +839 327 36 43 +342 387 36 49 +# 10--People_Marching/10_People_Marching_People_Marching_2_823.jpg +363 200 179 256 +# 10--People_Marching/10_People_Marching_People_Marching_2_171.jpg +6 144 20 21 +114 125 22 30 +247 104 21 24 +346 113 18 24 +455 79 22 24 +572 71 20 27 +671 90 22 29 +769 75 19 25 +839 99 19 28 +192 303 17 25 +199 365 20 24 +308 360 21 24 +324 297 18 26 +64 360 22 28 +391 359 19 25 +441 293 19 25 +467 355 20 23 +553 353 20 27 +568 275 19 26 +720 347 21 26 +745 273 23 31 +907 337 18 29 +974 264 23 32 +608 386 15 27 +661 372 22 28 +32 434 22 27 +127 438 21 29 +215 433 22 30 +305 437 22 30 +264 487 22 36 +221 582 25 32 +341 619 24 31 +17 629 17 34 +97 656 20 27 +100 540 22 34 +452 531 19 32 +418 493 18 31 +559 579 18 30 +441 427 20 28 +564 426 22 27 +# 10--People_Marching/10_People_Marching_People_Marching_2_27.jpg +24 187 17 33 +36 206 27 35 +129 198 31 34 +158 177 28 40 +207 189 20 23 +211 206 25 26 +265 195 24 30 +302 186 39 37 +363 184 28 33 +433 184 25 39 +485 197 19 25 +521 195 21 31 +538 200 31 36 +574 188 36 47 +608 213 32 35 +646 181 27 34 +653 220 24 30 +703 203 36 36 +790 228 25 34 +831 224 32 39 +858 203 28 34 +916 218 29 33 +912 269 32 43 +989 207 25 39 +218 63 6 7 +979 234 12 19 +# 10--People_Marching/10_People_Marching_People_Marching_2_577.jpg +602 301 137 183 +333 446 118 145 +# 10--People_Marching/10_People_Marching_People_Marching_10_People_Marching_People_Marching_10_552.jpg +436 106 64 76 +# 10--People_Marching/10_People_Marching_People_Marching_2_898.jpg +673 590 56 61 +789 652 46 45 +508 711 49 60 +438 781 41 40 +253 811 53 50 +628 727 38 41 +# 10--People_Marching/10_People_Marching_People_Marching_2_395.jpg +220 161 34 42 +150 173 31 36 +94 175 22 29 +120 190 24 35 +10 230 17 23 +300 344 21 21 +579 188 31 35 +467 188 30 34 +392 212 22 28 +322 232 18 26 +404 218 24 28 +669 214 24 29 +743 215 28 32 +957 179 30 39 +830 212 26 33 +# 10--People_Marching/10_People_Marching_People_Marching_10_People_Marching_People_Marching_10_240.jpg +1 237 31 39 +130 209 26 38 +108 196 12 17 +161 191 13 14 +170 199 18 29 +216 227 27 30 +221 268 35 46 +198 200 16 20 +219 206 21 20 +263 213 15 22 +232 193 10 9 +298 193 15 17 +289 266 39 57 +239 302 67 86 +341 219 11 13 +330 229 28 41 +378 255 26 34 +374 213 11 17 +388 190 9 15 +396 189 8 14 +350 204 9 10 +479 190 11 11 +497 200 11 12 +492 215 16 24 +460 202 12 13 +536 210 16 20 +554 224 20 30 +455 243 26 38 +418 235 42 56 +409 327 109 137 +610 233 47 63 +690 261 24 35 +715 243 24 35 +737 237 19 23 +742 219 13 16 +680 233 12 15 +684 226 15 16 +790 208 8 10 +765 206 10 11 +847 211 17 21 +839 261 27 41 +889 215 18 20 +887 233 22 23 +777 230 13 14 +343 193 8 7 +945 283 26 34 +795 304 44 56 +825 276 24 33 +723 203 11 18 +997 214 16 20 +1009 223 14 27 +902 431 122 123 +975 338 49 53 +35 234 18 19 +598 234 14 18 +691 206 8 14 +# 10--People_Marching/10_People_Marching_People_Marching_2_239.jpg +61 404 45 57 +209 352 52 62 +347 219 69 79 +513 347 49 63 +703 81 98 73 +843 242 70 69 +929 358 42 60 +979 400 39 53 +1 438 46 33 +# 10--People_Marching/10_People_Marching_People_Marching_2_935.jpg +372 282 210 339 +# 
10--People_Marching/10_People_Marching_People_Marching_10_People_Marching_People_Marching_10_447.jpg +492 776 46 55 +682 758 50 59 +953 771 65 61 +848 787 58 63 +930 736 25 32 +960 701 24 32 +898 730 24 29 +994 700 17 32 +851 652 22 31 +872 624 21 32 +927 616 23 31 +790 702 24 30 +767 743 24 33 +569 755 24 30 +609 689 23 29 +638 673 24 29 +759 651 24 28 +807 634 22 31 +557 632 23 30 +695 644 22 27 +993 524 20 28 +970 478 21 29 +832 526 21 32 +1007 452 16 30 +906 437 22 29 +861 470 21 29 +775 497 22 31 +670 508 22 31 +719 453 21 29 +742 427 21 26 +836 445 20 25 +655 477 28 30 +662 439 24 31 +965 392 23 29 +884 409 23 23 +634 387 23 26 +681 365 21 28 +993 369 21 28 +873 383 20 24 +767 354 20 27 +737 335 19 25 +613 365 24 29 +565 333 18 26 +680 310 16 25 +810 309 21 27 +872 323 14 23 +889 352 14 25 +934 328 14 25 +976 353 16 22 +991 345 19 25 +825 355 18 27 +625 335 21 24 +646 271 17 25 +566 276 20 22 +606 267 17 27 +687 269 19 25 +720 295 20 28 +854 292 20 25 +793 277 18 28 +830 240 21 29 +894 238 19 25 +981 270 19 24 +1011 235 13 24 +522 431 20 30 +520 349 18 28 +487 326 21 29 +897 199 17 22 +961 199 19 21 +905 159 20 25 +958 234 17 20 +971 160 19 26 +993 105 21 28 +952 24 18 24 +908 63 17 26 +872 108 16 24 +838 197 19 26 +823 166 18 26 +777 200 18 27 +710 229 21 24 +749 166 20 26 +781 126 20 21 +739 135 22 26 +663 153 18 24 +637 238 21 24 +599 159 19 27 +615 131 19 27 +757 105 19 26 +807 122 19 25 +675 134 21 26 +695 85 21 26 +630 94 18 23 +666 47 15 24 +834 47 20 25 +702 21 18 26 +721 44 18 27 +787 20 19 26 +848 15 16 22 +879 5 18 23 +909 0 16 12 +212 884 22 31 +80 892 26 32 +175 830 23 30 +133 796 21 26 +252 811 23 25 +66 835 25 30 +44 894 23 29 +38 779 23 37 +61 747 20 29 +140 875 23 31 +408 798 23 31 +330 836 20 32 +383 645 21 25 +310 644 23 27 +230 635 24 32 +35 689 25 30 +169 715 24 25 +176 600 25 30 +73 647 18 28 +84 618 20 24 +55 581 21 27 +257 572 17 27 +275 548 20 25 +123 594 16 25 +187 578 17 26 +107 536 20 27 +169 538 18 25 +148 667 19 32 +463 478 28 35 +411 455 22 29 +364 541 21 28 +358 514 23 27 +250 485 18 27 +169 469 22 29 +110 510 19 23 +30 503 19 28 +68 446 22 29 +13 455 22 23 +123 452 20 21 +190 427 23 31 +258 438 21 30 +331 471 23 31 +328 426 19 27 +405 426 26 25 +300 397 21 29 +411 389 18 24 +227 392 22 27 +148 416 22 29 +282 366 21 29 +346 392 17 23 +353 407 15 25 +413 350 24 27 +296 334 21 28 +221 337 22 27 +144 346 20 26 +12 293 23 25 +270 280 20 26 +421 284 19 25 +338 250 21 25 +500 286 18 26 +358 329 18 31 +527 177 22 25 +493 154 20 25 +463 183 19 25 +411 166 18 22 +333 157 19 25 +295 229 17 24 +546 129 19 26 +530 263 22 27 +481 111 17 22 +463 103 17 24 +405 86 18 27 +562 107 20 24 +601 55 17 25 +622 5 18 21 +542 36 21 26 +504 35 19 25 +463 38 16 23 +425 25 19 24 +254 71 20 26 +193 119 18 27 +213 97 19 26 +192 91 20 27 +206 54 17 23 +320 3 19 25 +511 78 19 25 +574 225 20 27 +458 249 17 19 +533 0 22 19 +150 172 19 27 +84 190 21 29 +1 246 20 22 +8 180 16 26 +110 126 18 24 +110 58 16 22 +48 74 19 25 +56 8 17 23 +141 43 20 23 +192 1 18 22 +226 294 17 20 +494 411 23 27 +348 709 22 26 +784 411 18 24 +929 385 14 21 +929 263 15 21 +319 84 17 20 +193 39 21 22 +97 178 19 24 +250 754 21 24 +568 460 23 26 +0 734 11 25 +20 155 23 27 +# 11--Meeting/11_Meeting_Meeting_11_Meeting_Meeting_11_529.jpg +846 406 76 95 +827 317 51 65 +777 290 30 34 +734 296 31 29 +634 286 49 61 +663 459 74 88 +539 349 64 83 +519 474 29 58 +458 286 51 52 +353 398 44 64 +286 284 34 35 +174 308 41 53 +66 232 33 36 +12 251 45 51 +11 310 55 72 +422 127 36 45 +198 268 29 31 +# 
11--Meeting/11_Meeting_Meeting_11_Meeting_Meeting_11_441.jpg +823 343 29 43 +768 334 29 38 +737 331 29 40 +731 288 27 41 +648 293 28 33 +921 589 103 94 +599 228 11 12 +559 228 7 13 +555 281 25 30 +462 277 24 31 +402 278 25 35 +373 311 32 40 +250 304 31 42 +200 485 69 70 +132 365 41 60 +416 351 33 48 +775 11 12 12 +670 11 11 14 +# 11--Meeting/11_Meeting_Meeting_11_Meeting_Meeting_11_774.jpg +74 103 7 11 +91 101 7 9 +162 117 10 8 +181 112 9 10 +121 110 10 8 +112 111 9 10 +211 129 7 10 +243 131 8 8 +230 112 7 8 +256 122 5 9 +265 117 6 8 +290 129 6 9 +278 132 7 8 +313 109 8 9 +339 104 8 8 +350 99 5 9 +379 112 6 8 +386 110 7 7 +403 111 6 8 +442 98 9 7 +465 97 8 10 +416 108 9 9 +492 112 8 9 +510 82 11 12 +492 82 9 10 +531 70 9 14 +566 85 13 11 +591 77 10 12 +608 69 8 10 +12 96 9 13 +38 98 10 12 +64 106 8 10 +642 57 11 15 +671 53 13 16 +704 48 11 18 +758 46 10 13 +796 47 11 11 +942 327 15 16 +890 323 8 17 +849 362 10 18 +829 309 12 12 +782 300 11 12 +713 303 10 11 +790 352 13 15 +819 347 11 16 +818 377 14 15 +835 391 11 21 +799 375 16 17 +765 390 16 18 +741 359 14 12 +735 349 11 8 +716 370 17 18 +741 396 18 23 +708 417 17 20 +722 409 11 18 +661 396 16 16 +705 356 11 9 +667 307 11 14 +643 297 11 13 +635 346 11 14 +619 345 10 16 +610 373 9 12 +657 365 10 12 +627 396 11 13 +600 405 13 21 +602 445 21 24 +662 432 17 24 +649 454 14 19 +604 346 10 14 +569 474 18 27 +539 472 23 25 +567 379 12 12 +576 359 11 9 +567 344 7 10 +546 345 9 11 +523 306 7 7 +531 337 10 10 +519 351 13 11 +508 367 12 13 +505 392 17 16 +499 422 21 23 +450 437 18 24 +472 392 17 24 +480 372 13 14 +480 355 10 12 +504 349 11 12 +496 348 9 11 +476 339 7 13 +479 330 8 10 +461 333 8 11 +461 355 8 7 +452 380 13 16 +426 408 17 15 +424 389 12 11 +431 357 12 14 +441 337 8 10 +447 331 9 9 +420 340 9 11 +403 340 10 10 +412 361 11 17 +397 389 14 14 +393 411 18 17 +403 435 23 32 +349 419 16 19 +360 405 12 17 +347 370 16 16 +377 365 12 18 +378 347 11 15 +357 342 11 12 +391 328 9 12 +415 304 8 8 +370 294 7 7 +383 321 8 11 +372 338 8 10 +370 330 7 8 +350 335 9 8 +325 373 13 14 +329 353 11 11 +325 333 9 11 +348 320 8 8 +335 299 7 8 +314 323 8 11 +302 322 6 8 +302 333 9 8 +301 342 11 10 +309 363 11 13 +301 356 8 10 +302 391 10 15 +302 421 18 26 +273 403 18 21 +233 412 19 20 +249 382 14 18 +246 361 10 14 +255 343 10 16 +273 349 13 14 +281 343 8 11 +292 332 8 10 +263 325 11 11 +254 337 12 10 +242 327 9 10 +215 305 9 10 +223 331 8 11 +237 342 11 11 +231 365 12 11 +227 379 16 13 +194 373 14 19 +200 347 11 14 +210 328 6 11 +186 329 9 12 +182 348 10 11 +173 357 17 23 +184 392 16 20 +159 395 11 13 +146 367 10 15 +157 347 13 15 +158 334 11 10 +128 334 10 11 +130 353 15 15 +106 308 9 9 +111 336 7 12 +101 356 15 14 +108 378 12 13 +130 386 15 16 +105 393 8 17 +64 381 15 14 +68 367 12 13 +66 350 12 11 +59 362 12 13 +288 373 8 14 +229 354 10 11 +285 325 8 9 +336 398 14 21 +15 605 49 54 +758 377 13 12 +618 367 15 14 +275 334 10 15 +432 348 8 10 +140 114 9 12 +# 11--Meeting/11_Meeting_Meeting_11_Meeting_Meeting_11_865.jpg +355 352 322 422 +# 11--Meeting/11_Meeting_Meeting_11_Meeting_Meeting_11_102.jpg +446 170 114 168 +600 22 108 136 +132 78 116 128 +8 244 92 160 +756 204 108 140 +876 256 102 152 +# 11--Meeting/11_Meeting_Meeting_11_Meeting_Meeting_11_507.jpg +696 299 16 21 +685 284 14 20 +660 282 15 17 +649 277 11 16 +658 257 10 13 +630 262 10 13 +750 236 14 18 +730 275 14 16 +775 232 12 16 +763 224 10 11 +811 247 16 20 +821 358 27 35 +846 301 16 21 +877 289 13 15 +867 357 21 33 +584 251 8 12 +520 239 8 9 +552 231 10 10 +893 323 18 20 +888 305 22 25 +978 351 21 28 
+1011 248 13 20 +1005 228 11 11 +961 248 13 15 +962 223 10 13 +918 228 9 11 +899 230 12 12 +879 250 9 9 +765 164 18 23 +454 242 10 11 +301 231 8 13 +328 272 13 14 +309 283 13 17 +348 262 8 12 +352 259 9 14 +381 251 9 10 +734 318 11 24 +707 313 16 24 +241 236 12 16 +282 289 12 16 +216 273 12 13 +254 304 15 17 +205 356 12 27 +127 364 35 38 +78 406 33 43 +55 251 28 35 +193 242 17 19 +380 226 6 8 +623 219 8 9 +644 251 9 9 +903 411 42 44 +151 296 14 20 +401 248 7 9 +355 220 5 7 +# 11--Meeting/11_Meeting_Meeting_11_Meeting_Meeting_11_644.jpg +615 466 24 33 +602 457 13 18 +667 446 13 20 +450 453 22 41 +378 468 18 30 +304 434 29 44 +783 465 9 9 +850 464 8 10 +928 454 6 15 +823 470 6 8 +807 467 9 10 +324 466 7 9 +1010 492 12 14 +877 465 8 9 +901 445 15 14 +1001 453 7 11 +683 368 18 13 +# 11--Meeting/11_Meeting_Meeting_11_Meeting_Meeting_11_587.jpg +122 170 8 10 +114 158 7 9 +100 182 8 10 +93 172 8 8 +57 185 10 10 +46 171 10 10 +143 156 8 8 +215 154 7 9 +206 164 6 6 +206 157 7 5 +191 164 6 7 +240 131 7 9 +239 175 9 6 +194 190 11 9 +231 191 10 11 +261 190 10 10 +277 173 9 10 +257 173 7 9 +273 167 6 10 +282 156 5 9 +291 153 6 10 +304 150 6 8 +326 155 5 8 +189 243 14 12 +158 253 13 19 +260 241 13 14 +315 239 13 14 +222 315 16 23 +274 311 18 17 +256 278 11 15 +312 260 12 16 +332 255 12 13 +346 294 13 20 +322 201 9 12 +306 175 7 10 +315 192 6 9 +303 171 8 8 +360 175 5 8 +490 177 9 9 +518 182 6 10 +491 190 8 11 +473 193 11 10 +445 181 9 8 +418 178 9 11 +422 163 4 8 +382 180 9 9 +428 160 7 8 +461 165 6 6 +479 166 8 7 +475 171 8 10 +474 132 7 7 +496 129 7 8 +531 176 8 9 +558 172 7 7 +556 187 9 12 +545 220 8 13 +492 220 9 11 +420 235 8 15 +429 225 11 13 +438 218 9 14 +418 308 18 22 +433 324 13 19 +517 252 6 13 +571 235 10 15 +927 268 10 10 +559 300 16 18 +589 303 17 16 +565 325 12 18 +650 281 16 22 +641 217 9 11 +659 202 9 13 +611 184 7 9 +600 182 6 9 +566 138 6 9 +557 139 5 7 +645 141 7 9 +571 188 6 10 +629 164 8 8 +734 156 7 11 +762 150 7 6 +774 159 6 9 +760 169 7 10 +775 174 9 10 +789 160 5 9 +837 130 5 7 +819 138 4 6 +859 131 5 6 +862 159 6 9 +909 162 9 9 +876 227 12 14 +919 234 11 13 +822 260 10 11 +765 253 11 11 +727 205 9 11 +694 142 6 10 +735 250 13 20 +771 271 12 18 +721 295 16 23 +803 324 17 20 +850 287 13 20 +904 273 15 19 +913 258 14 16 +961 238 11 13 +1006 244 12 15 +1013 267 9 13 +998 160 9 9 +1015 159 7 8 +870 259 11 14 +803 351 18 24 +613 368 13 22 +426 355 13 21 +751 218 6 8 +999 395 24 51 +835 347 13 27 +8 186 10 12 +587 135 4 4 +646 207 11 12 +542 253 10 11 +501 169 5 7 +# 11--Meeting/11_Meeting_Meeting_11_Meeting_Meeting_11_189.jpg +65 367 19 21 +96 383 21 24 +160 370 14 19 +33 358 10 11 +46 358 9 10 +18 363 11 11 +43 386 12 13 +107 366 9 10 +138 365 8 9 +75 357 7 8 +0 369 13 15 +3 355 10 13 +213 358 8 9 +237 357 5 7 +253 364 10 12 +276 374 13 13 +194 387 30 34 +286 338 7 8 +396 391 16 22 +326 379 13 18 +337 367 8 12 +426 367 13 17 +375 357 5 8 +303 356 5 6 +500 365 8 9 +418 359 8 9 +439 366 7 10 +808 152 33 48 +226 354 6 7 +337 359 6 6 +# 11--Meeting/11_Meeting_Meeting_11_Meeting_Meeting_11_807.jpg +626 72 42 57 +555 28 41 54 +409 77 50 55 +892 239 66 104 +970 187 53 65 +275 177 65 36 +138 188 41 77 +687 414 79 118 +777 96 47 69 +# 11--Meeting/11_Meeting_Meeting_11_Meeting_Meeting_11_468.jpg +579 235 35 46 +295 233 44 58 +168 178 51 79 +848 125 46 91 +# 11--Meeting/11_Meeting_Meeting_11_Meeting_Meeting_11_639.jpg +34 118 112 118 +320 86 74 94 +548 76 66 76 +722 46 58 80 +908 114 72 88 +# 11--Meeting/11_Meeting_Meeting_11_Meeting_Meeting_11_176.jpg +793 89 45 66 +654 204 21 27 +695 246 33 
45 +775 250 26 31 +647 285 30 50 +570 212 22 24 +411 218 27 32 +498 214 23 28 +338 226 25 34 +356 223 18 22 +285 214 20 23 +216 270 28 32 +120 244 33 43 +188 211 22 29 +15 233 25 33 +167 77 18 27 +763 236 18 28 +# 11--Meeting/11_Meeting_Meeting_11_Meeting_Meeting_11_574.jpg +920 196 11 13 +926 221 16 16 +933 204 11 13 +926 209 14 12 +946 233 12 14 +955 256 13 17 +957 240 11 15 +865 242 18 19 +872 215 16 18 +876 203 14 13 +820 189 12 10 +824 209 13 19 +834 200 12 16 +792 197 9 9 +772 207 12 13 +802 237 19 19 +528 207 11 13 +592 232 12 14 +626 220 13 14 +674 205 10 13 +533 244 15 20 +551 234 16 20 +455 219 14 19 +435 216 15 20 +482 259 20 25 +469 246 15 17 +494 224 14 17 +514 208 12 17 +509 237 13 19 +352 236 20 21 +357 259 16 21 +398 255 15 19 +411 241 13 18 +399 276 20 24 +350 303 22 28 +303 259 16 22 +263 299 22 30 +129 54 17 33 +304 134 12 18 +377 153 9 10 +199 292 21 26 +119 295 23 26 +50 335 38 65 +801 265 22 28 +783 255 20 29 +785 207 10 15 +883 196 7 6 +971 290 28 31 +956 334 34 43 +852 297 26 34 +727 239 17 23 +751 220 16 19 +737 229 14 20 +736 289 28 32 +714 322 30 37 +688 262 18 23 +707 251 17 22 +639 287 23 28 +708 194 11 11 +693 195 10 11 +703 183 10 9 +668 196 9 9 +634 190 9 11 +643 194 11 15 +658 208 11 14 +613 201 12 13 +604 192 10 13 +587 195 12 13 +566 200 12 11 +569 214 12 13 +556 201 10 17 +545 224 12 14 +664 188 8 8 +# 11--Meeting/11_Meeting_Meeting_11_Meeting_Meeting_11_702.jpg +43 171 30 25 +119 214 28 24 +19 195 31 27 +0 214 19 30 +1 301 45 35 +330 133 18 17 +345 157 19 23 +359 168 21 26 +375 185 24 29 +408 201 31 35 +417 237 37 47 +442 295 54 63 +401 142 17 21 +374 128 16 19 +480 128 14 20 +509 134 14 20 +525 141 19 24 +543 159 21 26 +578 178 24 30 +571 125 16 19 +533 125 14 14 +623 191 29 39 +617 139 22 24 +630 119 13 16 +657 122 15 19 +685 125 18 19 +668 148 24 30 +679 256 41 51 +698 109 13 13 +713 131 19 22 +751 114 14 16 +752 70 6 8 +759 72 5 7 +772 75 4 7 +792 75 5 8 +801 77 9 11 +802 72 7 7 +782 110 12 16 +812 96 11 12 +879 109 14 16 +834 120 15 17 +859 128 21 23 +913 120 20 23 +897 102 13 11 +916 73 8 8 +968 69 6 8 +956 76 6 6 +972 65 5 6 +921 102 15 13 +943 112 19 20 +944 145 24 23 +888 165 39 33 +822 151 29 32 +957 180 49 40 +984 130 24 23 +998 103 15 16 +979 100 16 13 +805 122 17 22 +769 132 23 31 +717 163 28 32 +791 185 38 41 +887 245 51 48 +1013 113 11 16 +747 321 60 65 +# 11--Meeting/11_Meeting_Meeting_11_Meeting_Meeting_11_385.jpg +735 101 10 13 +702 111 10 13 +832 87 26 38 +790 139 14 16 +774 145 13 18 +759 146 13 16 +746 146 8 12 +734 138 13 14 +715 170 24 33 +664 160 17 32 +655 166 15 26 +644 156 13 19 +616 148 14 18 +600 146 12 15 +661 130 9 10 +767 215 43 48 +801 178 26 35 +786 171 15 14 +832 220 41 44 +662 145 9 10 +651 142 9 9 +959 167 20 21 +911 165 16 17 +1008 167 15 26 +433 102 8 11 +385 149 12 16 +452 148 14 15 +522 102 10 12 +575 109 11 11 +361 154 15 23 +297 167 18 29 +280 181 22 33 +159 208 31 46 +72 221 38 57 +31 269 50 58 +716 109 9 11 +940 250 58 75 +# 11--Meeting/11_Meeting_Meeting_11_Meeting_Meeting_11_375.jpg +766 136 84 174 +468 200 100 116 +404 132 68 120 +228 182 104 108 +74 208 72 86 +# 11--Meeting/11_Meeting_Meeting_11_Meeting_Meeting_11_560.jpg +671 414 25 31 +641 398 22 27 +626 395 17 23 +608 413 21 30 +560 402 25 29 +542 388 22 23 +526 414 25 35 +488 401 21 28 +496 383 21 28 +461 397 18 26 +411 387 21 24 +442 414 25 31 +399 407 22 32 +357 394 19 22 +359 420 19 31 +324 427 35 40 +757 429 39 53 +743 396 23 27 +701 416 18 21 +811 385 25 24 +830 439 43 52 +793 413 17 23 +835 295 19 26 +236 408 27 32 +201 419 26 35 +77 460 37 31 
+87 499 41 53 +0 462 35 48 +183 524 36 36 +183 574 58 73 +102 681 21 81 +# 11--Meeting/11_Meeting_Meeting_11_Meeting_Meeting_11_663.jpg +131 481 11 17 +100 478 14 17 +79 480 10 14 +81 510 12 13 +57 510 15 17 +37 506 13 18 +32 527 10 16 +2 535 13 18 +0 500 9 15 +29 479 8 11 +102 514 15 14 +241 215 50 68 +973 273 7 8 +999 269 6 8 +1010 269 7 9 +870 308 4 6 +# 11--Meeting/11_Meeting_Meeting_11_Meeting_Meeting_11_339.jpg +615 121 28 45 +456 119 29 37 +278 137 32 49 +165 149 41 53 +137 211 33 58 +675 146 34 59 +720 185 37 57 +894 253 90 119 +# 11--Meeting/11_Meeting_Meeting_11_Meeting_Meeting_11_206.jpg +51 192 16 19 +139 186 18 23 +91 216 12 21 +87 328 43 56 +185 370 50 80 +272 191 13 20 +313 187 18 19 +302 277 37 45 +477 255 33 43 +615 238 33 43 +802 219 35 41 +874 310 36 74 +# 11--Meeting/11_Meeting_Meeting_11_Meeting_Meeting_11_26.jpg +780 265 17 25 +182 233 9 10 +268 200 8 9 +526 221 8 11 +157 305 18 29 +167 233 10 14 +77 330 25 39 +806 277 17 23 +828 291 19 29 +761 261 8 16 +749 252 12 18 +727 249 12 18 +703 245 8 14 +691 245 10 13 +673 237 11 14 +654 237 10 13 +633 234 8 13 +612 229 8 10 +644 232 7 11 +973 434 8 9 +908 358 33 38 +863 318 15 27 +459 226 9 13 +444 231 9 12 +422 234 9 11 +403 238 8 11 +378 239 10 11 +348 242 11 14 +341 240 11 15 +309 251 13 18 +294 249 11 16 +271 253 13 20 +249 260 16 21 +219 272 18 25 +205 280 15 26 +178 281 20 30 +249 227 9 10 +# 11--Meeting/11_Meeting_Meeting_11_Meeting_Meeting_11_406.jpg +531 215 43 62 +363 219 58 77 +251 262 69 92 +116 252 80 99 +707 248 79 90 +870 250 90 126 +280 124 28 31 +# 11--Meeting/11_Meeting_Meeting_11_Meeting_Meeting_11_71.jpg +454 138 35 49 +348 142 40 50 +304 166 49 67 +604 142 45 58 +739 153 41 68 +780 164 42 57 +773 210 77 85 +839 323 71 154 +19 56 40 43 +232 205 36 74 +75 269 75 88 +# 11--Meeting/11_Meeting_Meeting_11_Meeting_Meeting_11_108.jpg +658 159 47 63 +884 354 40 54 +# 11--Meeting/11_Meeting_Meeting_11_Meeting_Meeting_11_349.jpg +190 177 74 117 +144 36 21 26 +631 60 17 25 +668 46 16 21 +696 63 21 21 +730 59 15 19 +607 53 17 15 +586 52 14 21 +525 50 18 24 +475 50 14 18 +428 39 15 21 +826 54 16 22 +856 56 15 17 +894 53 12 12 +986 53 11 14 +# 11--Meeting/11_Meeting_Meeting_11_Meeting_Meeting_11_573.jpg +782 168 62 126 +536 194 96 110 +186 244 92 110 +74 138 94 178 +# 11--Meeting/11_Meeting_Meeting_11_Meeting_Meeting_11_223.jpg +314 14 168 256 +612 516 60 140 +# 11--Meeting/11_Meeting_Meeting_11_Meeting_Meeting_11_633.jpg +464 240 186 214 +708 378 148 174 +178 494 162 132 +# 11--Meeting/11_Meeting_Meeting_11_Meeting_Meeting_11_250.jpg +347 168 35 50 +224 199 38 49 +101 118 42 61 +467 181 35 53 +597 142 39 53 +702 160 39 56 +859 130 52 64 +# 11--Meeting/11_Meeting_Meeting_11_Meeting_Meeting_11_320.jpg +434 80 182 230 +# 11--Meeting/11_Meeting_Meeting_11_Meeting_Meeting_11_287.jpg +565 227 47 61 +730 370 37 55 +314 400 43 70 +731 508 21 53 +3 573 42 67 +# 11--Meeting/11_Meeting_Meeting_11_Meeting_Meeting_11_295.jpg +505 235 11 19 +440 219 19 22 +607 373 52 61 +593 312 18 34 +491 333 22 41 +691 292 21 28 +901 312 20 29 +800 299 22 28 +789 322 23 32 +134 374 36 53 +233 340 26 32 +109 464 78 88 +51 332 40 57 +809 549 68 116 +# 12--Group/12_Group_Group_12_Group_Group_12_912.jpg +216 83 44 53 +302 113 40 51 +408 137 35 49 +532 139 40 48 +646 103 36 46 +698 174 36 52 +829 66 39 57 +617 235 40 56 +489 240 38 56 +381 239 39 59 +287 187 39 57 +# 12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_536.jpg +480 130 68 72 +612 164 86 80 +842 50 60 84 +# 12--Group/12_Group_Group_12_Group_Group_12_28.jpg +72 21 
37 49 +168 36 33 47 +108 68 36 48 +128 121 43 61 +211 100 34 54 +233 78 36 46 +178 195 35 50 +259 147 40 54 +244 256 45 54 +302 133 36 48 +385 194 41 58 +462 230 31 49 +520 254 36 50 +516 59 28 43 +550 109 39 50 +612 94 41 59 +617 50 39 34 +665 56 35 52 +704 81 41 60 +791 75 38 55 +801 128 45 63 +905 110 43 54 +919 69 40 47 +# 12--Group/12_Group_Large_Group_12_Group_Large_Group_12_103.jpg +104 150 43 51 +9 211 22 52 +311 188 42 53 +296 161 19 32 +321 122 13 28 +361 133 20 25 +399 152 23 27 +448 146 11 23 +406 205 24 42 +501 186 41 53 +509 128 19 25 +534 174 26 36 +638 143 27 29 +668 127 18 27 +739 130 21 29 +821 167 23 33 +923 181 17 30 +915 295 55 62 +646 209 32 71 +491 308 73 116 +773 187 34 46 +705 201 35 53 +# 12--Group/12_Group_Large_Group_12_Group_Large_Group_12_354.jpg +23 304 79 138 +211 362 73 94 +283 326 54 91 +353 331 56 79 +444 370 39 50 +656 367 44 84 +819 178 205 506 +795 394 82 157 +778 374 44 71 +# 12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_789.jpg +299 162 51 68 +677 463 45 53 +519 595 54 76 +# 12--Group/12_Group_Large_Group_12_Group_Large_Group_12_575.jpg +195 233 57 67 +277 184 61 70 +342 273 57 69 +430 206 58 64 +489 297 56 70 +529 137 56 57 +624 247 62 71 +694 196 52 61 +770 264 58 63 +# 12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_602.jpg +256 166 142 214 +398 259 151 190 +533 199 117 126 +632 202 166 145 +# 12--Group/12_Group_Large_Group_12_Group_Large_Group_12_213.jpg +176 411 32 29 +245 304 17 21 +282 319 17 19 +292 285 15 17 +403 295 12 11 +425 321 16 13 +354 362 29 28 +484 339 24 42 +472 299 17 20 +547 310 12 11 +580 314 18 22 +586 285 15 16 +669 278 11 17 +698 311 15 18 +739 277 10 14 +762 306 12 15 +841 355 26 25 +851 266 16 24 +883 279 14 17 +942 318 25 21 +729 383 20 16 +0 376 22 49 +# 12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_403.jpg +436 334 22 28 +359 410 22 28 +317 333 22 25 +232 396 22 30 +185 290 21 27 +603 263 20 25 +752 264 21 27 +881 212 22 27 +# 12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_101.jpg +259 386 52 47 +910 509 20 30 +849 510 24 28 +836 519 16 29 +791 523 23 26 +887 486 11 15 +578 502 21 24 +606 513 25 36 +671 496 19 25 +690 506 22 36 +763 509 16 26 +739 512 22 30 +779 522 11 15 +282 1139 43 34 +# 12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_244.jpg +146 134 138 116 +398 68 128 156 +614 104 130 210 +# 12--Group/12_Group_Group_12_Group_Group_12_367.jpg +694 354 27 31 +652 324 25 29 +609 294 25 29 +635 269 23 24 +560 284 22 23 +565 326 24 27 +546 370 27 32 +517 341 26 28 +457 358 28 28 +437 335 23 26 +422 286 26 27 +469 284 22 23 +518 287 27 28 +393 346 26 27 +383 273 22 25 +351 297 22 25 +291 265 22 25 +287 310 19 25 +213 299 26 29 +199 262 24 25 +202 343 26 27 +258 367 28 33 +322 366 28 32 +626 358 25 31 +# 12--Group/12_Group_Large_Group_12_Group_Large_Group_12_527.jpg +324 408 93 117 +712 408 78 129 +# 12--Group/12_Group_Group_12_Group_Group_12_101.jpg +166 436 51 74 +350 207 51 63 +321 140 52 62 +479 128 50 59 +520 186 49 68 +661 133 56 61 +730 227 51 59 +829 140 52 70 +# 12--Group/12_Group_Large_Group_12_Group_Large_Group_12_849.jpg +779 338 17 26 +650 355 11 18 +433 301 12 21 +399 324 14 19 +212 222 11 12 +# 12--Group/12_Group_Group_12_Group_Group_12_843.jpg +387 504 99 97 +80 453 120 180 +164 239 4 5 +157 241 4 5 +142 240 5 6 +204 235 5 6 +233 237 5 5 +236 236 4 5 +256 237 6 6 +68 238 6 6 +282 235 5 7 +583 190 10 14 +620 203 18 19 +603 210 12 12 +634 193 9 12 +673 267 14 15 +545 224 13 16 
+511 228 10 15 +421 239 14 16 +295 488 87 66 +222 494 74 39 +# 12--Group/12_Group_Group_12_Group_Group_12_198.jpg +174 422 68 88 +351 396 80 97 +491 414 66 86 +667 425 63 88 +847 411 68 86 +# 12--Group/12_Group_Group_12_Group_Group_12_84.jpg +119 129 49 63 +281 76 45 64 +331 111 49 71 +432 112 53 68 +524 75 48 65 +369 302 57 74 +178 307 55 57 +577 286 60 89 +606 126 50 71 +823 102 62 80 +795 316 78 95 +# 12--Group/12_Group_Group_12_Group_Group_12_519.jpg +567 199 40 47 +711 225 37 41 +823 204 29 39 +904 207 29 37 +166 348 20 24 +826 98 10 11 +803 69 8 8 +761 73 10 13 +721 83 11 11 +708 48 11 15 +823 45 10 11 +852 90 11 13 +450 209 39 49 +# 12--Group/12_Group_Group_12_Group_Group_12_10.jpg +83 107 67 84 +246 114 58 79 +377 148 56 59 +542 100 51 71 +682 119 56 73 +772 88 54 83 +861 207 43 41 +912 135 62 78 +78 303 73 84 +289 338 63 83 +553 398 63 70 +670 326 60 86 +853 332 81 106 +# 12--Group/12_Group_Large_Group_12_Group_Large_Group_12_448.jpg +298 152 260 244 +142 422 264 234 +590 110 188 238 +502 408 194 270 +728 272 210 232 +# 12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_889.jpg +592 50 78 110 +# 12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_64.jpg +858 317 70 100 +776 242 32 51 +654 273 56 76 +535 239 41 54 +244 268 41 54 +109 318 54 67 +# 12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_353.jpg +433 11 447 523 +# 12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_313.jpg +640 116 54 84 +# 12--Group/12_Group_Group_12_Group_Group_12_935.jpg +144 81 20 28 +210 92 15 25 +217 63 18 21 +92 85 19 25 +32 88 18 25 +19 107 20 26 +8 159 19 26 +53 192 19 25 +63 155 20 27 +122 154 19 26 +111 184 18 25 +183 177 18 21 +172 185 14 22 +222 160 17 23 +233 196 19 26 +300 155 19 24 +247 135 15 19 +265 110 16 21 +305 130 18 19 +357 148 20 28 +365 116 18 27 +367 90 19 24 +328 60 19 23 +396 58 17 29 +401 20 17 27 +339 25 19 26 +365 2 18 22 +285 32 19 21 +296 0 15 10 +215 2 16 19 +190 0 17 18 +135 0 18 13 +173 25 12 13 +125 14 16 27 +56 33 19 20 +1 48 16 24 +0 29 12 13 +9 3 14 12 +62 0 21 15 +300 192 16 23 +356 195 21 18 +412 166 16 22 +387 205 16 19 +407 198 13 18 +263 256 17 18 +136 118 18 26 +36 218 20 23 +13 228 14 18 +718 31 19 25 +789 31 19 16 +824 71 19 19 +804 5 17 13 +743 4 17 14 +682 5 20 25 +626 4 19 22 +507 4 25 15 +455 5 20 21 +441 59 18 24 +459 57 16 16 +501 65 18 22 +553 71 17 17 +614 60 19 29 +664 60 17 25 +822 91 19 28 +772 130 19 22 +719 131 17 24 +654 113 21 28 +667 91 19 24 +698 150 16 26 +650 161 17 21 +595 137 19 15 +593 158 17 24 +532 168 19 21 +535 126 20 21 +549 92 17 24 +487 95 18 22 +484 116 20 21 +427 116 19 22 +422 95 17 21 +470 157 18 28 +461 189 18 25 +515 184 19 27 +570 221 18 22 +618 218 18 25 +755 176 20 26 +819 178 20 26 +688 188 21 23 +636 192 17 24 +571 201 16 21 +687 218 21 25 +736 232 17 20 +741 256 20 27 +800 231 17 22 +1004 85 19 27 +1015 48 9 26 +1016 1 8 14 +967 28 17 28 +912 29 19 18 +927 1 16 17 +861 1 20 14 +853 22 18 26 +904 60 18 24 +890 88 17 26 +954 94 14 20 +957 67 17 20 +945 113 20 25 +881 117 21 24 +937 149 15 20 +981 151 16 25 +924 221 18 22 +847 222 20 25 +989 199 20 20 +988 248 20 23 +977 225 18 22 +925 243 20 26 +42 272 15 17 +77 294 18 18 +28 294 16 21 +136 279 18 25 +205 289 18 24 +180 316 19 26 +236 319 19 24 +225 347 22 24 +155 353 20 24 +117 317 16 22 +70 318 19 26 +100 354 18 28 +148 388 20 26 +37 350 20 27 +39 392 20 26 +43 423 21 26 +21 456 21 25 +78 494 17 21 +115 486 17 25 +169 537 20 25 +114 535 17 24 +48 528 21 23 +20 564 20 25 +85 573 18 25 
+153 565 21 24 +222 527 20 27 +234 493 20 22 +242 452 20 25 +262 410 21 30 +281 388 20 24 +305 284 18 26 +355 278 20 25 +431 292 19 22 +491 294 18 18 +556 285 20 25 +615 286 18 22 +655 311 14 25 +597 319 19 20 +542 319 22 24 +476 325 18 18 +422 333 17 17 +480 344 16 30 +535 354 20 26 +596 349 20 27 +631 380 19 27 +623 427 18 25 +574 392 20 28 +522 390 20 25 +562 423 20 24 +613 458 20 25 +296 325 20 20 +358 325 20 20 +356 349 21 29 +411 348 20 28 +398 381 21 26 +381 416 20 29 +444 408 18 24 +466 385 18 27 +514 429 18 18 +510 456 18 27 +562 461 18 23 +614 495 20 28 +543 489 20 27 +499 496 20 21 +435 491 20 25 +433 461 20 20 +361 483 20 28 +463 532 20 24 +532 526 20 26 +600 529 22 26 +412 527 18 22 +294 494 20 25 +317 456 18 25 +320 427 18 20 +344 383 18 28 +220 382 19 28 +182 440 13 13 +219 568 20 26 +279 563 20 24 +323 538 20 22 +340 560 19 26 +350 554 17 20 +404 558 21 27 +464 562 21 28 +530 568 19 25 +588 565 20 24 +580 608 18 26 +522 611 20 22 +327 597 18 24 +275 601 19 21 +203 602 21 24 +250 632 22 26 +325 620 22 25 +382 613 20 22 +434 594 22 25 +512 628 21 26 +569 642 19 27 +632 610 23 23 +76 602 18 25 +144 599 21 28 +194 640 18 23 +172 666 21 26 +122 632 20 28 +62 637 21 27 +242 665 20 26 +375 658 22 28 +388 640 18 22 +443 649 20 23 +554 672 20 20 +632 640 21 29 +625 664 20 30 +114 678 19 19 +53 679 21 22 +25 605 18 28 +285 348 21 25 +960 353 21 23 +941 377 22 25 +898 363 20 27 +890 321 23 23 +902 291 17 21 +834 317 18 24 +830 351 20 25 +778 351 17 21 +773 322 17 23 +783 289 19 21 +838 286 19 21 +793 267 22 13 +708 286 17 23 +718 351 20 23 +760 377 22 30 +832 385 18 25 +873 425 24 28 +943 422 22 31 +1010 389 14 26 +1005 462 19 33 +1014 330 10 19 +972 291 21 22 +951 318 19 25 +869 463 21 25 +936 469 23 26 +977 495 21 23 +975 524 22 28 +913 531 21 25 +919 499 22 30 +861 488 19 25 +809 456 20 26 +816 429 22 27 +761 418 22 30 +739 455 19 27 +687 456 18 23 +689 430 22 19 +700 390 21 25 +725 510 20 28 +746 497 21 23 +792 492 21 23 +678 491 22 27 +721 557 20 29 +780 532 21 20 +846 530 20 23 +904 557 23 30 +962 570 20 22 +950 615 21 25 +894 608 20 21 +895 636 20 25 +830 610 21 21 +843 556 19 26 +781 562 23 26 +770 609 20 20 +696 592 23 33 +763 641 20 23 +807 665 22 22 +828 646 21 18 +888 666 23 22 +751 664 23 25 +694 649 26 20 +685 671 19 24 +947 688 18 18 +1000 681 13 22 +660 542 21 20 +647 558 23 31 +555 258 18 21 +615 260 18 16 +489 260 20 23 +322 252 18 22 +667 358 20 22 +1008 123 16 22 +0 490 16 23 +306 94 18 24 +607 92 17 20 +507 659 20 24 +303 662 22 29 +0 637 18 26 +0 669 12 29 +0 187 10 21 +# 12--Group/12_Group_Large_Group_12_Group_Large_Group_12_650.jpg +282 238 88 110 +498 206 86 130 +810 172 78 108 +# 12--Group/12_Group_Large_Group_12_Group_Large_Group_12_1007.jpg +977 196 23 27 +680 246 51 50 +530 202 41 42 +635 210 41 47 +605 224 17 20 +765 241 35 51 +365 230 42 54 +447 201 35 39 +344 169 31 36 +260 151 31 38 +226 198 36 54 +108 143 31 37 +156 156 17 17 +4 160 30 32 +# 12--Group/12_Group_Group_12_Group_Group_12_59.jpg +112 87 27 32 +172 106 27 41 +275 86 19 24 +216 195 31 33 +240 237 31 65 +745 172 23 35 +806 145 26 40 +523 89 19 29 +592 121 25 35 +415 84 19 24 +# 12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_171.jpg +848 236 19 23 +934 208 7 8 +724 231 15 22 +644 227 15 23 +649 223 23 28 +606 245 16 23 +579 234 26 32 +611 224 6 6 +488 247 29 31 +411 255 27 32 +354 289 30 31 +96 181 48 67 +187 259 33 26 +283 246 29 31 +0 319 17 32 +# 12--Group/12_Group_Group_12_Group_Group_12_247.jpg +788 351 44 75 +805 208 50 62 +529 141 34 39 +403 119 33 38 +120 178 
49 65 +133 144 35 42 +# 12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_283.jpg +70 233 20 29 +96 260 19 22 +86 254 8 9 +108 236 20 22 +136 236 16 21 +148 251 16 22 +177 226 14 18 +176 259 21 26 +135 332 26 34 +63 318 27 33 +205 239 17 21 +215 227 18 24 +232 255 24 31 +249 233 14 18 +264 247 21 27 +282 230 17 23 +306 231 16 18 +317 220 13 19 +302 248 17 24 +306 264 20 27 +234 344 25 34 +296 347 22 29 +360 338 23 32 +441 327 23 31 +334 239 17 23 +331 258 17 27 +354 262 19 26 +362 232 16 20 +359 242 19 26 +386 227 18 25 +391 258 20 23 +413 244 17 22 +432 243 22 33 +479 213 20 25 +468 232 19 22 +460 249 19 27 +478 266 22 29 +521 259 16 25 +524 244 18 21 +522 222 18 20 +541 249 12 20 +559 238 21 30 +588 235 19 29 +614 232 19 28 +626 257 19 28 +519 348 20 26 +574 356 21 29 +634 243 14 21 +645 233 13 17 +661 243 11 12 +667 233 19 23 +654 258 19 25 +671 270 20 26 +701 230 14 19 +707 242 20 24 +736 229 18 25 +734 283 19 27 +952 264 19 23 +891 265 20 28 +845 246 21 28 +826 246 19 27 +801 241 18 24 +768 242 20 28 +672 355 21 28 +# 12--Group/12_Group_Group_12_Group_Group_12_268.jpg +217 212 36 38 +105 247 35 45 +305 244 29 36 +367 209 36 41 +809 398 41 44 +707 388 39 43 +913 323 38 44 +895 214 38 43 +853 231 35 41 +807 226 30 38 +757 214 34 41 +725 268 37 38 +702 194 32 39 +625 226 38 41 +570 228 35 39 +603 204 29 40 +486 197 33 41 +443 222 36 38 +458 330 37 46 +587 368 38 42 +341 360 43 51 +248 352 33 42 +154 351 43 45 +150 268 31 34 +240 94 27 23 +# 12--Group/12_Group_Large_Group_12_Group_Large_Group_12_107.jpg +78 213 46 55 +160 203 37 47 +231 174 35 48 +301 203 33 40 +345 213 30 40 +410 209 31 32 +431 240 33 42 +466 203 29 34 +510 204 34 42 +549 192 27 39 +590 206 34 43 +618 182 27 32 +645 215 29 39 +701 193 30 42 +759 199 31 44 +838 192 36 47 +717 372 39 50 +637 312 34 45 +545 295 34 46 +469 381 34 41 +385 317 37 45 +297 326 36 43 +187 378 42 54 +# 12--Group/12_Group_Group_12_Group_Group_12_759.jpg +68 114 58 120 +248 184 64 106 +378 114 62 96 +542 190 74 102 +832 130 70 120 +# 12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_644.jpg +148 152 210 274 +# 12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_186.jpg +68 157 31 39 +14 180 31 39 +113 173 27 37 +146 189 29 39 +163 147 27 33 +191 175 29 36 +223 203 28 38 +252 148 24 29 +282 171 24 28 +268 197 27 33 +326 155 25 36 +364 158 24 29 +401 143 25 38 +367 193 26 33 +296 270 27 35 +510 101 23 29 +575 111 21 29 +448 161 22 31 +496 166 23 30 +460 189 28 33 +515 194 25 32 +583 151 25 36 +559 196 26 34 +601 187 25 32 +624 205 26 30 +723 143 22 28 +762 156 25 29 +675 156 22 28 +719 177 23 30 +674 195 27 33 +696 219 28 34 +754 226 28 35 +979 310 11 16 +979 286 8 10 +971 339 30 43 +899 195 29 32 +824 199 30 36 +860 323 31 42 +716 354 29 35 +658 368 26 36 +598 361 28 37 +530 345 28 34 +470 309 28 35 +340 314 27 30 +396 370 27 34 +447 394 31 37 +200 361 33 37 +309 381 29 37 +# 12--Group/12_Group_Group_12_Group_Group_12_62.jpg +189 143 47 67 +257 129 45 63 +341 154 49 63 +399 121 42 55 +467 172 46 64 +556 141 42 53 +650 133 53 69 +744 105 45 48 +802 120 52 69 +# 12--Group/12_Group_Group_12_Group_Group_12_407.jpg +661 249 48 54 +589 373 47 51 +545 267 45 44 +427 246 47 54 +350 277 47 48 +250 272 45 55 +141 293 53 61 +# 12--Group/12_Group_Group_12_Group_Group_12_578.jpg +525 250 77 85 +746 372 85 65 +300 258 54 53 +# 12--Group/12_Group_Group_12_Group_Group_12_794.jpg +0 289 12 30 +384 157 28 47 +477 134 20 50 +582 42 56 54 +652 112 32 59 +# 
12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_551.jpg +484 66 92 124 +274 12 80 88 +# 12--Group/12_Group_Group_12_Group_Group_12_29.jpg +79 201 90 132 +262 211 70 96 +510 197 66 87 +801 206 86 101 +933 206 58 73 +# 12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_707.jpg +209 161 37 39 +300 167 30 33 +388 167 33 38 +280 262 30 40 +389 275 30 38 +491 286 34 39 +599 279 42 45 +720 307 35 40 +791 125 43 49 +684 154 32 40 +607 146 30 34 +502 144 31 40 +177 276 43 42 +# 12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_211.jpg +64 149 51 61 +208 162 47 59 +342 174 41 55 +446 194 44 59 +670 184 44 59 +773 183 45 59 +920 216 45 53 +291 108 27 45 +# 12--Group/12_Group_Large_Group_12_Group_Large_Group_12_771.jpg +183 234 366 417 +# 12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_855.jpg +484 445 14 15 +672 281 29 29 +721 249 29 31 +746 282 24 30 +802 386 29 28 +805 280 32 34 +888 243 31 31 +648 341 10 19 +662 348 13 16 +678 342 15 19 +789 355 15 20 +808 353 18 20 +828 346 20 25 +888 320 12 22 +902 317 15 24 +918 316 15 22 +# 12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_412.jpg +234 203 35 52 +142 224 38 49 +116 195 34 41 +4 208 41 44 +207 213 24 32 +387 222 38 49 +329 219 37 47 +462 251 32 42 +589 230 27 35 +660 227 34 36 +555 236 32 42 +496 232 34 46 +496 176 30 34 +603 341 36 43 +578 405 43 54 +966 139 34 49 +887 228 38 43 +761 185 31 36 +781 61 30 37 +705 203 36 41 +907 217 33 39 +810 230 35 32 +# 12--Group/12_Group_Group_12_Group_Group_12_522.jpg +885 295 47 49 +793 288 43 47 +722 298 41 45 +684 235 42 40 +820 248 38 46 +602 267 37 43 +492 254 43 47 +504 394 45 49 +358 405 46 49 +372 302 38 47 +398 230 34 44 +301 233 38 41 +186 229 43 50 +112 230 42 48 +263 297 41 45 +653 400 46 51 +# 12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_759.jpg +214 300 96 126 +458 288 86 122 +708 296 80 104 +# 12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_653.jpg +73 175 45 47 +200 185 37 48 +240 367 45 45 +328 312 36 42 +316 234 33 41 +386 295 34 41 +426 394 43 47 +526 328 32 36 +594 382 41 46 +653 336 38 42 +770 365 43 49 +800 197 32 40 +847 198 31 37 +949 383 45 45 +# 12--Group/12_Group_Large_Group_12_Group_Large_Group_12_162.jpg +146 98 64 92 +244 130 58 92 +310 140 60 86 +456 110 60 86 +514 140 64 84 +672 130 54 72 +768 120 56 76 +846 104 58 88 +# 12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_67.jpg +304 30 150 182 +616 204 138 200 +# 12--Group/12_Group_Group_12_Group_Group_12_379.jpg +799 244 32 39 +732 223 35 36 +729 134 27 31 +664 122 31 36 +631 234 34 36 +637 180 28 33 +580 138 29 30 +614 113 27 31 +522 113 29 35 +533 169 35 34 +535 237 33 37 +481 120 30 37 +429 157 34 37 +425 249 33 40 +331 247 33 38 +358 157 31 34 +382 118 28 33 +416 100 29 31 +284 112 28 34 +294 171 31 33 +225 171 28 36 +188 103 34 39 +156 191 30 31 +157 246 30 38 +79 236 34 36 +241 238 32 37 +895 229 31 42 +869 173 31 35 +845 119 31 34 +791 109 29 31 +763 154 30 37 +# 12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_504.jpg +418 108 98 104 +172 232 92 82 +710 160 84 100 +# 12--Group/12_Group_Large_Group_12_Group_Large_Group_12_228.jpg +111 411 15 19 +155 414 16 20 +202 409 16 21 +338 412 16 20 +294 405 15 23 +248 411 16 21 +394 412 15 20 +430 415 15 21 +483 396 17 22 +526 407 16 22 +565 406 15 19 +608 411 15 23 +655 418 16 22 +464 503 13 15 +700 416 16 21 +744 407 16 23 +782 410 16 22 +907 414 16 21 +870 423 
15 17 +828 408 17 21 +308 478 14 16 +# 12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_330.jpg +96 63 75 97 +286 98 65 90 +384 101 60 77 +465 78 56 75 +578 72 63 70 +851 76 67 89 +808 46 59 74 +694 127 59 73 +# 12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_685.jpg +156 1023 75 56 +258 1008 47 54 +421 988 64 64 +535 995 49 58 +649 988 61 68 +399 165 67 75 +904 572 21 26 +961 570 20 26 +947 620 21 26 +1009 683 14 24 +852 862 28 31 +754 866 29 29 +642 875 22 20 +803 773 21 26 +752 718 22 23 +706 666 20 29 +662 616 15 16 +592 614 21 21 +519 614 19 19 +622 659 22 27 +565 654 24 25 +483 658 23 20 +537 718 21 30 +602 718 25 28 +681 723 21 23 +727 759 28 29 +645 778 28 29 +565 771 26 27 +495 870 22 26 +395 766 23 28 +436 693 17 24 +360 648 20 25 +281 648 24 23 +327 765 21 25 +304 863 26 25 +164 768 26 28 +76 757 24 28 +89 636 24 28 +26 643 21 26 +130 531 18 21 +190 602 21 17 +161 480 19 24 +456 612 18 28 +560 901 20 21 +129 710 16 15 +55 742 16 26 +109 734 14 20 +98 689 22 24 +119 749 15 21 +71 714 10 16 +270 627 11 17 +213 693 13 14 +335 688 9 18 +# 12--Group/12_Group_Group_12_Group_Group_12_735.jpg +537 248 101 138 +764 228 41 48 +877 261 29 29 +816 387 94 104 +165 199 100 153 +213 300 92 113 +675 247 29 47 +# 12--Group/12_Group_Group_12_Group_Group_12_434.jpg +550 189 31 40 +813 214 44 61 +408 184 44 48 +158 214 35 68 +171 184 41 52 +# 12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_442.jpg +248 207 19 27 +200 189 21 27 +28 225 23 23 +526 186 19 30 +455 214 17 25 +379 209 19 29 +668 207 25 31 +760 187 19 25 +554 204 23 28 +972 218 24 26 +808 170 19 31 +# 12--Group/12_Group_Large_Group_12_Group_Large_Group_12_212.jpg +268 359 84 103 +685 375 81 110 +475 714 110 123 +# 12--Group/12_Group_Large_Group_12_Group_Large_Group_12_551.jpg +445 215 143 173 +# 12--Group/12_Group_Group_12_Group_Group_12_331.jpg +695 139 48 57 +771 444 48 56 +570 406 45 49 +423 353 49 53 +328 529 48 51 +208 384 54 60 +# 12--Group/12_Group_Group_12_Group_Group_12_823.jpg +214 335 45 64 +358 324 43 50 +483 305 39 53 +612 310 41 54 +713 296 37 61 +# 12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_778.jpg +62 285 23 27 +107 283 26 29 +170 287 25 27 +226 284 25 26 +287 296 24 24 +177 215 21 23 +235 223 20 25 +277 231 20 23 +330 230 19 23 +344 293 23 25 +405 312 23 25 +465 296 25 29 +518 297 24 28 +384 231 20 22 +440 228 22 26 +494 225 20 25 +555 231 22 23 +611 233 21 23 +653 230 22 27 +586 302 23 28 +646 301 24 29 +709 304 26 30 +781 306 26 28 +163 96 21 22 +211 54 18 22 +230 100 22 23 +252 80 19 21 +285 66 18 21 +295 104 20 23 +318 76 19 23 +346 57 18 23 +370 82 21 29 +389 70 18 24 +413 58 18 23 +435 87 20 24 +466 66 19 26 +499 93 22 23 +512 61 21 26 +539 69 21 24 +580 63 19 22 +581 100 21 27 +596 79 17 23 +622 94 21 22 +642 81 18 22 +659 75 17 21 +663 92 21 25 +689 79 19 22 +705 57 20 23 +716 92 21 21 +694 123 21 28 +718 122 24 28 +747 93 23 28 +742 71 21 24 +766 119 22 27 +789 95 20 24 +807 79 18 23 +805 117 23 25 +818 144 24 25 +831 101 21 24 +848 133 23 28 +877 137 23 23 +904 141 24 26 +931 153 31 33 +892 168 25 27 +849 193 27 27 +# 12--Group/12_Group_Large_Group_12_Group_Large_Group_12_550.jpg +64 60 42 60 +224 53 43 57 +398 57 44 55 +589 58 42 61 +753 60 39 58 +945 52 36 63 +904 514 36 56 +737 517 45 51 +579 518 42 63 +417 517 38 51 +231 516 40 57 +69 522 41 58 +# 12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_458.jpg +256 370 43 43 +134 226 43 43 +277 234 38 35 +306 205 30 34 +367 230 35 36 +473 
187 28 29 +456 220 33 35 +405 213 30 34 +697 355 36 38 +574 272 32 36 +798 221 38 39 +732 216 34 35 +670 224 26 33 +628 213 29 34 +545 207 28 33 +912 212 35 37 +# 12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_500.jpg +230 192 94 86 +420 136 116 158 +588 154 112 130 +896 170 100 148 +942 78 80 116 +# 12--Group/12_Group_Large_Group_12_Group_Large_Group_12_662.jpg +397 207 20 29 +265 220 18 22 +231 234 12 16 +552 300 30 19 +445 272 25 26 +802 302 44 39 +986 207 38 45 +28 206 13 35 +446 400 15 25 +356 247 10 11 +119 251 5 7 +# 12--Group/12_Group_Group_12_Group_Group_12_179.jpg +895 251 55 65 +785 261 55 63 +722 201 54 58 +636 250 56 63 +493 208 56 66 +446 145 50 56 +356 235 47 60 +308 205 44 49 +270 162 37 46 +225 240 55 62 +143 202 48 53 +84 229 63 58 +# 12--Group/12_Group_Large_Group_12_Group_Large_Group_12_196.jpg +875 86 49 61 +805 141 41 58 +615 253 35 46 +542 193 35 47 +475 317 32 42 +404 280 33 44 +309 194 37 47 +232 195 36 51 +164 192 41 51 +102 107 53 69 +716 134 45 57 +# 12--Group/12_Group_Group_12_Group_Group_12_218.jpg +850 139 51 94 +850 62 52 68 +709 66 49 57 +460 92 52 42 +221 69 56 54 +78 48 58 58 +111 231 65 50 +678 176 48 93 +# 12--Group/12_Group_Group_12_Group_Group_12_182.jpg +875 184 46 54 +797 192 53 63 +703 234 44 56 +569 216 47 56 +372 198 47 62 +500 231 47 63 +250 208 47 55 +173 242 44 56 +22 255 41 49 +# 12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_55.jpg +431 96 32 38 +288 166 37 37 +186 137 31 32 +75 108 24 36 +# 12--Group/12_Group_Group_12_Group_Group_12_38.jpg +104 77 48 59 +199 78 47 51 +272 69 46 59 +411 88 47 51 +491 89 42 51 +572 76 41 51 +723 68 34 45 +789 69 46 49 +# 12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_503.jpg +156 52 30 32 +267 106 31 37 +189 97 33 40 +227 44 28 30 +96 106 36 35 +137 175 36 42 +299 55 27 35 +27 182 42 44 +329 91 35 37 +430 191 36 42 +337 207 34 40 +242 197 34 42 +367 48 31 30 +409 77 33 37 +432 41 27 33 +490 74 34 40 +489 29 29 30 +557 44 29 29 +755 204 40 44 +724 103 33 38 +655 81 34 38 +645 207 34 40 +542 182 40 48 +574 81 36 38 +628 52 29 36 +867 201 38 43 +746 65 29 34 +687 68 30 29 +796 95 37 39 +827 66 30 32 +881 119 36 32 +884 73 29 30 +950 205 40 42 +# 12--Group/12_Group_Group_12_Group_Group_12_478.jpg +158 240 72 82 +322 274 88 112 +434 268 68 96 +556 218 70 94 +792 256 74 82 +# 12--Group/12_Group_Large_Group_12_Group_Large_Group_12_322.jpg +61 349 29 37 +252 352 35 50 +320 366 28 37 +516 373 36 51 +757 364 28 40 +817 396 32 48 +999 380 25 39 +# 12--Group/12_Group_Large_Group_12_Group_Large_Group_12_143.jpg +616 338 40 51 +803 350 32 46 +77 205 28 36 +38 226 19 35 +166 194 34 37 +255 214 34 49 +301 168 34 41 +357 251 27 49 +389 228 29 43 +464 222 34 47 +578 216 27 36 +# 12--Group/12_Group_Large_Group_12_Group_Large_Group_12_295.jpg +277 292 11 18 +425 314 45 63 +410 282 11 16 +433 277 9 11 +454 282 8 11 +503 303 16 21 +494 289 8 16 +485 270 9 12 +536 269 7 9 +538 296 14 17 +554 293 11 14 +578 276 10 13 +582 291 12 14 +613 293 14 18 +503 336 18 22 +473 264 6 8 +665 277 13 16 +504 269 10 11 +# 12--Group/12_Group_Group_12_Group_Group_12_144.jpg +220 62 116 162 +362 144 110 152 +512 110 126 170 +708 144 112 156 +# 12--Group/12_Group_Group_12_Group_Group_12_249.jpg +145 26 62 61 +492 573 68 69 +982 615 35 33 +939 558 32 32 +882 535 32 32 +883 484 32 30 +826 423 30 32 +767 413 25 28 +743 363 27 31 +719 336 27 27 +623 380 39 44 +588 310 39 41 +554 255 38 42 +515 208 44 39 +455 202 42 40 +467 139 36 37 +346 290 46 47 +406 394 45 58 +# 
12--Group/12_Group_Group_12_Group_Group_12_732.jpg +466 252 30 44 +588 230 27 38 +698 216 31 40 +822 198 26 37 +341 173 31 33 +222 144 33 41 +# 12--Group/12_Group_Group_12_Group_Group_12_411.jpg +318 611 107 186 +216 201 107 134 +348 165 96 130 +530 130 103 147 +739 157 119 147 +# 12--Group/12_Group_Large_Group_12_Group_Large_Group_12_853.jpg +435 264 413 564 +# 12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_583.jpg +816 301 58 67 +921 254 39 39 +991 157 28 28 +830 227 26 30 +829 177 26 29 +0 79 27 29 +142 49 33 30 +526 278 36 55 +435 220 24 26 +393 180 19 21 +256 231 54 52 +362 76 18 21 +260 64 18 21 +703 145 21 24 +609 151 35 34 +667 196 26 29 +535 137 20 25 +481 64 21 21 +551 230 14 17 +572 223 22 28 +818 12 10 10 +759 140 17 18 +694 278 27 34 +210 307 27 29 +# 12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_852.jpg +434 176 156 218 +# 12--Group/12_Group_Group_12_Group_Group_12_123.jpg +821 36 64 97 +544 89 40 56 +649 101 29 41 +444 106 25 38 +230 68 47 64 +# 12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_60.jpg +822 75 60 62 +927 0 76 53 +757 123 48 42 +659 117 62 61 +654 187 54 67 +521 238 55 71 +594 152 40 44 +525 157 40 56 +446 177 37 36 +416 260 52 69 +197 189 50 56 +297 155 40 42 +367 173 48 51 +110 154 49 51 +18 183 53 55 +182 150 42 43 +# 12--Group/12_Group_Large_Group_12_Group_Large_Group_12_495.jpg +24 233 81 102 +127 227 59 72 +173 261 40 67 +344 204 69 88 +625 170 53 62 +# 12--Group/12_Group_Large_Group_12_Group_Large_Group_12_15.jpg +137 322 40 52 +175 378 29 41 +75 412 34 44 +242 354 37 46 +360 282 33 46 +406 242 39 43 +306 197 42 49 +215 213 41 53 +409 364 44 61 +517 254 35 52 +613 198 39 46 +634 329 36 40 +737 346 32 34 +765 266 34 45 +855 203 40 53 +860 439 31 38 +808 427 31 41 +635 440 37 49 +520 393 33 36 +567 310 29 30 +# 12--Group/12_Group_Large_Group_12_Group_Large_Group_12_94.jpg +139 1 29 43 +173 76 42 65 +205 111 69 93 +313 0 20 29 +330 1 24 35 +359 7 43 54 +375 62 58 77 +404 134 71 88 +647 190 62 91 +654 111 54 71 +827 105 60 73 +869 51 57 68 +698 53 43 62 +712 0 39 45 +# 12--Group/12_Group_Large_Group_12_Group_Large_Group_12_379.jpg +438 174 159 246 +# 12--Group/12_Group_Large_Group_12_Group_Large_Group_12_120.jpg +84 225 9 11 +100 227 8 10 +117 227 9 12 +139 225 7 9 +155 225 8 11 +175 223 9 12 +193 222 9 11 +78 243 10 12 +101 244 9 11 +117 245 9 12 +135 246 10 10 +155 243 10 13 +173 252 9 12 +185 250 9 10 +86 284 12 14 +103 283 10 11 +121 280 10 11 +134 275 10 14 +152 280 10 14 +174 276 9 11 +190 274 10 14 +61 271 9 12 +51 299 8 13 +75 303 11 14 +101 304 10 12 +130 303 10 12 +41 329 10 12 +54 330 11 13 +69 329 12 14 +93 328 12 16 +122 331 10 10 +27 367 11 11 +46 364 10 11 +66 366 9 10 +95 363 10 12 +30 391 9 13 +43 400 11 15 +80 392 12 14 +56 423 12 14 +86 426 12 15 +78 456 11 11 +121 453 11 12 +147 463 12 11 +111 421 13 14 +122 399 10 11 +108 397 9 11 +147 392 9 13 +157 396 12 13 +140 423 11 12 +169 425 12 11 +210 226 9 10 +231 222 9 12 +249 221 9 12 +266 219 10 13 +282 222 10 10 +201 248 8 10 +220 246 10 13 +239 247 9 12 +249 250 9 11 +211 276 10 9 +231 271 9 12 +248 270 11 12 +276 250 9 12 +293 252 8 11 +271 272 11 14 +292 271 11 15 +155 303 8 11 +171 303 9 11 +201 303 9 11 +218 300 9 13 +235 300 9 12 +152 330 9 12 +181 331 9 12 +210 328 10 12 +242 325 11 15 +112 357 10 14 +142 361 10 12 +166 358 9 13 +183 363 8 8 +203 356 11 15 +188 390 9 12 +198 396 11 15 +194 423 10 10 +222 422 11 14 +253 301 10 11 +273 297 9 13 +291 298 8 12 +306 299 9 10 +324 294 10 13 +269 324 11 14 +295 326 
10 12 +335 323 11 14 +222 358 10 13 +239 358 10 12 +254 356 10 13 +267 357 11 12 +281 358 10 12 +304 357 13 10 +328 353 12 15 +226 387 12 13 +240 396 11 13 +259 387 9 10 +277 395 12 15 +249 421 11 12 +278 420 14 16 +313 399 10 14 +303 389 9 13 +338 386 10 11 +309 423 10 13 +337 428 10 12 +347 393 12 15 +368 389 12 11 +387 393 12 14 +395 419 12 14 +364 425 11 12 +337 456 10 11 +364 450 13 15 +406 449 12 16 +424 417 11 15 +412 387 9 12 +426 389 12 16 +450 384 10 13 +470 392 12 14 +451 424 10 12 +477 423 9 15 +442 452 11 12 +477 457 10 13 +187 461 10 12 +226 460 11 12 +259 456 9 11 +293 449 9 14 +301 225 10 11 +316 223 9 10 +336 223 9 10 +360 221 10 13 +377 221 9 12 +395 222 7 10 +308 250 10 11 +331 248 10 11 +356 247 10 11 +383 247 11 11 +308 272 11 14 +324 272 11 12 +345 271 11 12 +370 273 9 13 +390 272 9 13 +347 294 10 14 +362 293 11 15 +384 295 9 12 +355 326 11 13 +378 324 9 12 +395 324 10 11 +413 325 10 11 +433 324 9 13 +453 321 9 13 +351 354 10 13 +372 354 10 14 +392 354 9 11 +407 349 11 15 +428 360 10 13 +448 361 10 11 +467 356 11 12 +412 219 9 11 +429 218 7 11 +448 219 9 11 +466 218 9 12 +489 218 9 11 +507 216 8 12 +523 220 8 11 +540 219 9 11 +557 220 7 10 +571 221 9 11 +584 220 8 12 +410 248 11 11 +434 248 10 13 +456 240 10 14 +473 247 10 11 +493 240 9 10 +510 238 10 10 +532 240 8 10 +553 243 9 10 +574 244 8 10 +411 275 8 11 +427 272 10 11 +444 270 11 12 +465 271 8 9 +484 270 10 11 +501 267 9 10 +521 267 9 12 +538 267 9 13 +565 267 9 11 +410 294 9 11 +429 297 11 13 +450 293 11 13 +474 291 11 12 +496 302 12 13 +516 298 9 11 +532 297 9 11 +550 294 11 14 +573 299 9 12 +470 323 9 13 +488 319 11 13 +512 326 10 10 +528 324 10 12 +495 358 10 11 +530 349 9 9 +487 382 13 13 +507 394 12 12 +525 379 13 14 +549 328 9 10 +566 329 10 11 +553 349 10 11 +582 352 10 10 +563 384 12 12 +547 392 12 14 +509 414 12 12 +545 423 12 11 +512 454 11 14 +546 455 10 12 +580 451 11 14 +574 425 9 12 +584 393 13 15 +588 328 9 12 +609 325 10 13 +627 326 9 12 +632 347 11 15 +606 350 10 12 +608 377 11 12 +624 386 12 15 +640 379 10 12 +596 424 10 14 +621 420 9 13 +612 453 10 13 +649 422 9 12 +670 423 10 13 +693 420 11 16 +665 392 11 14 +678 387 9 13 +702 388 11 16 +652 350 10 13 +671 348 10 14 +692 350 12 12 +655 331 7 9 +670 328 11 11 +695 328 9 11 +597 297 9 12 +623 301 9 11 +638 299 9 13 +658 301 9 10 +686 303 10 13 +702 305 9 10 +654 456 12 13 +691 456 11 13 +721 453 11 11 +739 422 11 12 +764 426 12 14 +763 456 9 11 +784 421 10 11 +805 421 10 12 +795 452 10 11 +829 449 12 16 +855 455 11 15 +886 453 11 13 +825 417 10 13 +850 421 10 12 +876 418 12 14 +927 424 12 14 +958 447 12 16 +959 425 14 16 +918 453 9 12 +588 269 10 11 +593 241 10 12 +599 221 8 12 +619 220 8 10 +642 219 10 12 +619 243 9 13 +642 241 11 11 +664 244 9 11 +660 220 10 12 +679 222 10 12 +697 227 8 10 +679 246 9 11 +700 248 10 11 +695 269 9 11 +675 271 11 12 +608 271 9 11 +628 270 10 12 +647 271 11 13 +722 223 9 11 +745 224 8 11 +769 223 8 10 +786 225 8 10 +718 249 10 12 +734 245 9 11 +757 245 9 11 +774 245 9 11 +799 246 9 9 +804 230 7 10 +827 224 9 11 +852 227 9 10 +821 247 8 10 +839 252 8 11 +856 254 8 11 +874 255 9 10 +716 271 9 10 +736 275 10 10 +762 271 12 11 +790 275 7 9 +810 277 9 10 +827 276 10 13 +848 279 9 11 +868 280 9 10 +888 282 9 10 +886 300 10 12 +864 301 9 10 +843 300 9 9 +825 299 11 11 +803 300 10 10 +789 298 9 9 +766 297 10 11 +738 296 9 11 +719 306 9 11 +716 328 9 10 +730 328 9 10 +755 330 9 11 +771 329 9 12 +798 327 10 11 +822 328 9 11 +852 335 9 10 +880 334 9 10 +717 350 9 11 +743 353 11 13 +763 350 9 11 +779 354 10 12 +799 353 11 
11 +822 356 11 12 +842 355 11 14 +865 360 10 11 +880 359 11 14 +716 388 10 12 +736 392 11 13 +749 382 9 12 +776 391 11 12 +791 387 11 12 +813 392 10 13 +825 390 9 12 +851 390 11 12 +864 389 10 12 +716 418 11 13 +879 230 7 9 +902 254 8 10 +917 274 9 10 +913 298 8 12 +904 331 9 11 +901 350 11 13 +944 352 9 11 +886 397 10 13 +901 395 11 12 +927 388 13 16 +82 363 10 13 +# 12--Group/12_Group_Group_12_Group_Group_12_301.jpg +130 174 126 136 +346 190 84 118 +474 56 54 80 +594 70 62 92 +726 162 60 84 +# 12--Group/12_Group_Large_Group_12_Group_Large_Group_12_736.jpg +939 455 23 28 +631 359 17 25 +572 401 30 35 +433 395 12 18 +423 392 16 21 +398 391 17 21 +140 393 27 29 +151 337 21 29 +301 389 23 21 +69 312 11 12 +22 307 7 8 +# 12--Group/12_Group_Group_12_Group_Group_12_315.jpg +818 332 45 53 +720 321 45 55 +613 323 36 46 +520 312 38 42 +401 298 36 40 +331 320 31 40 +274 317 32 35 +196 318 33 35 +# 12--Group/12_Group_Large_Group_12_Group_Large_Group_12_43.jpg +404 200 22 27 +341 214 30 38 +328 199 17 21 +222 232 43 50 +186 209 35 39 +99 237 47 46 +384 279 34 43 +475 268 31 41 +451 223 28 37 +488 206 21 27 +546 208 20 23 +565 268 38 48 +544 246 26 35 +896 283 41 42 +777 294 33 35 +814 254 32 29 +742 253 32 33 +704 270 33 39 +683 215 25 30 +642 220 17 23 +615 233 27 36 +647 270 33 46 +# 12--Group/12_Group_Group_12_Group_Group_12_165.jpg +253 119 36 54 +185 232 44 58 +51 191 44 58 +316 210 41 59 +408 117 40 57 +443 205 43 56 +550 87 42 65 +509 200 48 71 +606 180 45 65 +681 111 42 54 +694 256 46 51 +788 230 41 55 +918 233 46 61 +# 12--Group/12_Group_Group_12_Group_Group_12_354.jpg +955 77 26 31 +866 72 30 31 +834 37 21 26 +773 97 27 30 +764 38 12 17 +740 36 12 17 +669 29 16 24 +556 26 20 26 +467 21 20 25 +427 68 19 21 +282 46 39 49 +193 98 43 54 +159 14 38 47 +58 48 44 52 +# 12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_235.jpg +424 566 62 83 +75 57 173 256 +207 393 155 217 +543 34 349 432 +833 494 124 165 +750 990 163 225 +91 988 178 246 +# 12--Group/12_Group_Large_Group_12_Group_Large_Group_12_315.jpg +117 346 23 27 +116 423 28 36 +180 406 26 35 +156 339 23 29 +257 409 24 34 +334 379 24 31 +291 358 22 27 +321 351 19 21 +320 324 18 23 +264 336 20 30 +227 334 22 27 +186 339 23 30 +130 327 23 27 +190 317 22 24 +209 339 20 26 +247 335 18 26 +287 314 16 23 +346 310 19 22 +372 317 17 29 +367 359 25 33 +429 346 21 26 +496 380 30 37 +535 331 21 29 +481 330 20 29 +447 329 19 27 +505 316 19 27 +579 409 33 44 +600 301 27 36 +647 303 31 39 +681 274 27 37 +753 267 31 40 +791 256 30 39 +691 407 36 49 +807 387 38 49 +864 411 21 27 +883 420 20 25 +949 448 21 30 +926 408 22 27 +954 385 17 22 +896 351 19 21 +311 278 16 22 +355 278 15 22 +386 288 17 23 +405 296 17 21 +453 282 16 20 +413 266 15 19 +382 268 15 18 +397 257 14 18 +446 260 13 19 +420 240 12 17 +436 225 13 18 +456 214 13 16 +456 232 13 18 +476 240 13 18 +474 261 16 22 +498 271 18 22 +495 241 15 18 +480 218 14 19 +506 229 13 17 +536 253 15 22 +554 234 15 22 +525 233 14 16 +497 213 13 16 +520 208 11 16 +536 209 10 13 +536 220 10 13 +547 213 9 16 +555 210 12 17 +571 236 13 19 +389 314 16 25 +430 317 16 19 +490 304 19 23 +566 200 10 12 +576 202 10 14 +587 210 9 12 +580 218 13 19 +596 215 11 15 +612 219 12 15 +608 203 10 14 +594 188 11 14 +602 182 9 11 +611 183 11 14 +611 168 10 12 +657 183 13 15 +656 161 10 14 +670 166 10 14 +681 163 10 13 +693 170 10 13 +665 145 11 15 +679 147 9 13 +695 142 10 14 +706 145 9 14 +723 136 10 13 +729 146 9 14 +749 142 11 14 +752 131 9 11 +772 137 10 13 +797 135 11 13 +811 129 9 12 +826 125 11 13 +834 114 
13 12 +834 94 11 14 +830 78 15 14 +836 60 12 15 +882 392 21 25 +# 12--Group/12_Group_Large_Group_12_Group_Large_Group_12_942.jpg +272 234 521 717 +# 12--Group/12_Group_Group_12_Group_Group_12_227.jpg +836 198 30 33 +808 263 35 34 +729 274 36 36 +730 187 30 36 +716 149 30 36 +634 153 34 35 +647 207 32 35 +693 219 32 36 +644 279 32 38 +590 281 41 52 +554 137 27 35 +564 186 29 34 +604 223 31 36 +549 283 32 39 +510 228 32 37 +484 203 33 34 +445 154 29 34 +472 126 28 33 +378 130 30 28 +396 184 29 36 +424 227 32 37 +388 245 37 40 +327 223 29 35 +299 183 28 37 +339 157 27 34 +480 275 30 41 +384 309 39 40 +297 260 34 38 +239 277 35 42 +239 183 30 34 +178 260 39 42 +# 12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_927.jpg +122 196 28 31 +166 182 36 41 +211 201 30 34 +290 170 24 33 +348 181 25 34 +362 170 22 23 +404 199 27 31 +402 171 24 28 +441 176 26 30 +478 158 24 26 +502 181 25 32 +523 188 23 30 +546 179 22 25 +554 212 25 28 +579 183 25 29 +512 290 29 33 +604 214 23 25 +632 159 27 30 +677 178 26 29 +713 157 22 25 +744 157 24 30 +582 280 30 32 +668 377 27 30 +668 296 26 29 +724 341 25 32 +795 165 24 22 +797 188 22 31 +844 168 26 33 +909 152 30 36 +739 428 21 24 +757 334 14 17 +784 320 23 25 +811 341 21 24 +862 340 24 31 +969 169 26 29 +1001 201 22 32 +# 12--Group/12_Group_Large_Group_12_Group_Large_Group_12_86.jpg +166 374 5 7 +461 357 9 19 +698 436 10 13 +675 399 7 11 +704 399 7 10 +620 394 9 13 +599 397 7 9 +633 400 6 9 +572 461 6 11 +823 395 8 12 +873 403 6 8 +905 408 7 10 +963 409 7 10 +957 396 9 11 +920 411 10 10 +# 12--Group/12_Group_Large_Group_12_Group_Large_Group_12_286.jpg +0 193 31 43 +86 160 46 64 +283 156 44 62 +261 208 42 83 +404 240 65 100 +487 214 20 30 +490 239 30 40 +401 210 27 33 +570 256 16 17 +596 246 22 30 +602 272 41 62 +723 244 20 26 +746 242 48 55 +801 228 67 100 +901 253 13 23 +918 260 13 20 +967 255 20 25 +1001 261 22 31 +176 241 27 34 +153 204 17 24 +# 12--Group/12_Group_Large_Group_12_Group_Large_Group_12_607.jpg +335 343 25 31 +372 335 22 29 +570 178 19 23 +536 338 21 25 +616 349 23 28 +# 12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_115.jpg +166 144 74 102 +326 246 56 80 +376 146 72 96 +576 168 66 86 +674 118 82 84 +780 82 96 116 +# 12--Group/12_Group_Group_12_Group_Group_12_80.jpg +182 288 40 53 +302 299 34 44 +71 140 33 42 +132 107 33 46 +220 125 35 43 +282 116 32 40 +348 125 33 43 +406 101 32 42 +470 126 32 37 +523 99 29 37 +594 112 32 39 +640 112 31 43 +703 126 29 33 +838 166 28 41 +803 117 31 39 +931 132 31 41 +872 279 34 43 +770 279 32 48 +652 213 34 54 +419 221 35 49 +538 222 36 46 +# 12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_868.jpg +448 90 82 114 +# 12--Group/12_Group_Large_Group_12_Group_Large_Group_12_461.jpg +130 170 58 73 +222 152 51 68 +377 94 56 83 +458 120 48 66 +524 185 52 66 +592 117 47 65 +668 142 47 61 +824 135 55 73 +# 12--Group/12_Group_Group_12_Group_Group_12_728.jpg +36 188 114 106 +234 110 92 118 +352 122 92 120 +510 130 88 110 +654 108 82 116 +820 118 96 122 +# 12--Group/12_Group_Group_12_Group_Group_12_253.jpg +142 28 98 132 +290 12 86 114 +472 172 88 122 +580 34 80 100 +774 40 100 124 +# 12--Group/12_Group_Group_12_Group_Group_12_293.jpg +905 143 43 49 +754 150 41 59 +820 154 41 56 +648 90 30 31 +519 144 57 62 +571 124 35 56 +592 133 28 50 +368 107 48 57 +5 84 32 36 +106 147 59 49 +# 12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_461.jpg +247 285 31 36 +40 301 36 35 +117 313 13 15 +174 319 22 22 +157 309 11 14 +401 286 24 29 +285 
302 10 12 +313 306 11 12 +326 328 19 24 +360 314 13 17 +342 411 28 37 +487 297 49 61 +633 297 24 33 +909 426 21 24 +906 328 12 15 +1002 292 7 11 +# 12--Group/12_Group_Group_12_Group_Group_12_417.jpg +859 214 38 41 +786 233 37 41 +721 221 31 36 +639 239 32 33 +581 230 27 34 +508 225 32 37 +372 244 32 37 +445 220 29 32 +273 225 34 34 +206 233 33 39 +113 215 38 42 +# 12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_162.jpg +371 216 141 171 +# 12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_270.jpg +444 16 62 80 +# 12--Group/12_Group_Large_Group_12_Group_Large_Group_12_276.jpg +136 11 52 52 +224 65 55 64 +296 8 45 55 +354 35 53 80 +439 69 51 67 +518 79 53 64 +216 239 60 73 +323 219 59 79 +437 229 60 82 +490 0 45 56 +583 26 38 53 +594 92 55 68 +651 68 43 49 +708 77 50 58 +848 88 61 69 +696 153 58 79 +569 201 60 77 +102 287 69 81 +666 267 68 75 +977 25 22 28 +0 69 32 68 +# 12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_418.jpg +302 373 25 27 +236 348 25 30 +168 358 22 26 +315 284 22 28 +393 405 22 28 +256 226 17 20 +305 206 19 24 +361 230 17 20 +149 217 19 24 +205 205 17 23 +93 226 19 24 +26 209 23 26 +90 188 18 20 +102 172 19 21 +121 141 17 20 +154 170 15 21 +134 194 17 20 +187 187 17 22 +205 166 16 21 +220 157 15 18 +657 309 20 24 +633 367 24 26 +558 369 25 28 +514 292 20 23 +470 376 24 34 +667 213 16 21 +404 260 23 32 +717 215 17 23 +619 219 18 20 +562 224 18 20 +509 214 18 22 +457 221 16 18 +407 222 18 22 +600 197 16 18 +620 172 13 16 +552 188 15 18 +576 174 15 18 +499 193 14 18 +521 178 15 15 +598 162 13 16 +576 144 13 13 +531 144 13 13 +550 160 13 16 +514 157 13 15 +494 169 13 16 +484 158 14 16 +503 147 13 13 +611 146 10 13 +375 79 12 16 +380 147 14 17 +359 139 14 16 +341 153 13 14 +326 143 12 14 +309 145 13 16 +305 163 16 18 +268 138 14 14 +270 160 13 15 +240 78 14 15 +283 90 12 14 +319 85 15 17 +267 68 11 15 +462 141 13 15 +449 160 15 16 +450 189 16 20 +403 191 16 19 +406 166 15 17 +355 192 18 19 +357 176 16 17 +427 145 12 14 +420 153 12 16 +311 185 14 17 +247 182 18 22 +253 167 16 21 +218 62 10 15 +191 72 13 17 +411 77 13 15 +295 35 14 18 +410 50 11 14 +388 47 12 15 +451 35 10 15 +651 109 10 17 +604 98 13 15 +558 96 11 15 +500 63 13 15 +514 91 12 14 +806 184 18 23 +697 190 15 17 +672 168 13 17 +648 190 16 16 +644 159 13 14 +176 151 14 18 +# 12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_319.jpg +448 202 100 136 +# 12--Group/12_Group_Large_Group_12_Group_Large_Group_12_13.jpg +118 333 23 29 +163 392 22 26 +230 356 21 25 +312 356 18 22 +374 357 18 20 +409 362 15 26 +478 367 16 22 +518 396 17 23 +356 404 18 24 +569 367 19 23 +637 345 17 23 +681 393 20 26 +729 353 20 24 +805 360 20 24 +867 407 23 30 +903 369 22 26 +# 12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_28.jpg +987 290 30 35 +965 253 23 32 +1001 248 18 29 +926 242 28 35 +871 277 28 27 +825 280 24 29 +681 54 45 56 +522 256 39 48 +599 273 29 28 +670 262 21 28 +431 265 31 34 +466 261 25 28 +331 262 27 27 +213 0 58 46 +138 271 27 34 +103 208 20 30 +9 220 32 34 +# 12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_274.jpg +635 191 55 74 +534 207 51 69 +442 185 58 70 +332 179 58 72 +256 197 63 76 +# 12--Group/12_Group_Group_12_Group_Group_12_112.jpg +145 178 33 33 +256 118 37 41 +366 153 32 38 +263 241 37 36 +148 232 34 40 +583 142 29 40 +701 158 33 34 +786 120 32 47 +861 152 33 40 +852 247 36 38 +923 317 35 42 +742 321 35 38 +727 250 35 38 +701 204 32 45 +788 202 41 33 +581 214 33 32 +611 
242 30 35 +581 251 35 46 +487 264 37 40 +526 218 35 36 +469 154 33 31 +673 282 33 27 +448 238 34 37 +398 252 31 39 +369 320 35 33 +364 260 34 39 +231 272 40 41 +258 312 37 45 +180 308 41 41 +86 301 33 40 +787 463 36 33 +721 457 35 33 +639 487 36 34 +542 484 38 39 +456 479 35 38 +318 459 36 39 +185 487 37 34 +# 12--Group/12_Group_Group_12_Group_Group_12_772.jpg +689 265 23 35 +# 12--Group/12_Group_Group_12_Group_Group_12_610.jpg +348 130 64 72 +498 232 68 76 +898 186 68 104 +# 12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_850.jpg +471 167 128 161 +269 234 137 142 +670 266 33 47 +0 93 50 209 +929 283 8 9 +966 293 10 10 +913 278 7 11 +1011 276 5 10 +# 12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_996.jpg +360 337 295 475 +610 645 315 443 +# 12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_364.jpg +843 119 39 51 +613 153 33 46 +307 127 35 52 +159 118 35 44 +# 12--Group/12_Group_Large_Group_12_Group_Large_Group_12_557.jpg +124 200 104 138 +286 228 112 128 +486 92 96 124 +638 188 114 132 +834 158 106 126 +# 12--Group/12_Group_Large_Group_12_Group_Large_Group_12_253.jpg +7 245 14 30 +28 203 25 33 +87 243 28 36 +118 200 22 32 +158 210 29 35 +206 196 25 31 +261 221 23 26 +211 269 31 33 +160 264 32 39 +41 263 31 43 +281 261 27 35 +295 227 27 35 +330 199 23 33 +355 228 28 35 +393 201 26 30 +409 235 27 36 +459 233 27 34 +458 206 22 32 +500 210 21 29 +243 401 31 42 +131 415 35 47 +20 435 34 45 +530 217 4 7 +559 206 23 28 +613 213 21 24 +637 227 23 29 +682 239 21 27 +689 219 21 26 +707 246 26 31 +730 233 23 31 +766 203 22 28 +781 239 19 24 +797 231 24 35 +764 261 22 26 +859 224 24 28 +860 249 28 36 +894 239 23 33 +944 218 4 7 +933 259 27 35 +971 389 33 39 +873 389 30 34 +759 373 26 34 +680 322 26 34 +615 286 29 34 +606 323 26 34 +659 390 29 31 +590 424 29 34 +544 334 31 41 +564 320 20 32 +485 364 33 43 +388 433 41 43 +351 421 31 38 +# 12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_340.jpg +931 46 40 47 +846 72 44 49 +608 31 54 71 +686 0 22 31 +525 26 54 81 +477 0 42 48 +525 0 47 28 +325 111 57 80 +386 2 44 56 +359 2 33 42 +287 1 52 70 +135 0 47 60 +21 79 32 53 +36 0 51 56 +0 51 35 81 +86 0 38 41 +229 0 45 22 +# 12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_617.jpg +732 598 56 66 +902 191 50 51 +982 160 12 17 +967 179 14 18 +848 135 15 23 +806 164 15 23 +756 171 18 19 +975 162 15 18 +743 157 9 22 +715 155 14 20 +694 178 8 10 +654 160 16 18 +636 167 8 12 +511 192 5 7 +500 200 6 6 +492 204 5 5 +440 204 10 10 +383 161 8 19 +348 180 12 14 +357 229 17 26 +276 179 15 17 +293 166 15 20 +271 229 19 28 +256 224 24 22 +194 185 51 68 +67 205 25 28 +136 205 23 31 +47 185 6 12 +563 164 40 47 +772 176 11 12 +# 12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_138.jpg +835 412 37 42 +767 430 39 41 +877 1014 61 65 +763 969 66 84 +572 399 44 63 +512 430 33 41 +421 411 48 52 +385 422 34 55 +235 530 54 78 +138 434 70 78 +221 943 89 115 +102 906 87 103 +# 12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_21.jpg +563 305 25 26 +356 299 25 31 +471 277 22 26 +521 313 17 25 +264 320 38 45 +422 314 9 13 +427 297 11 14 +413 307 6 9 +498 303 13 16 +314 323 7 8 +# 12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_50.jpg +92 212 200 220 +528 204 244 252 +342 168 136 166 +4 190 88 152 +# 12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_83.jpg +730 369 171 245 +811 219 60 76 +996 194 20 22 +960 196 14 17 +916 206 
14 13 +906 247 20 23 +958 248 13 15 +732 228 14 15 +759 213 13 11 +932 212 9 12 +562 276 35 28 +400 251 36 36 +302 245 33 44 +143 289 31 33 +209 272 20 22 +91 284 29 31 +107 240 25 30 +35 256 28 27 +7 262 23 25 +30 326 19 22 +11 298 23 19 +123 252 19 21 +144 264 15 17 +197 252 14 15 +200 296 11 21 +0 344 16 33 +895 215 13 17 +# 12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_126.jpg +800 395 52 56 +94 156 9 12 +# 12--Group/12_Group_Group_12_Group_Group_12_153.jpg +188 540 74 116 +400 322 236 300 +752 434 114 162 +# 12--Group/12_Group_Large_Group_12_Group_Large_Group_12_72.jpg +287 104 48 60 +216 259 57 72 +408 179 49 69 +454 350 44 52 +540 467 50 60 +355 466 56 73 +494 217 65 86 +616 233 47 59 +673 135 53 69 +711 373 57 75 +# 12--Group/12_Group_Large_Group_12_Group_Large_Group_12_613.jpg +83 204 18 18 +256 95 23 25 +461 122 23 28 +531 119 20 27 +622 117 24 26 +791 122 26 27 +843 131 22 26 +909 131 17 22 +860 101 23 26 +964 144 24 27 +537 345 28 33 +# 12--Group/12_Group_Large_Group_12_Group_Large_Group_12_112.jpg +214 42 76 102 +# 13--Interview/13_Interview_Interview_2_People_Visible_13_36.jpg +390 76 116 162 +# 13--Interview/13_Interview_Interview_Sequences_13_209.jpg +84 125 74 84 +426 63 54 68 +288 175 28 47 +426 196 8 9 +517 200 27 40 +685 169 25 53 +865 72 80 93 +# 13--Interview/13_Interview_Interview_On_Location_13_186.jpg +575 311 84 132 +371 356 78 120 +# 13--Interview/13_Interview_Interview_Sequences_13_103.jpg +456 20 204 242 +# 13--Interview/13_Interview_Interview_On_Location_13_940.jpg +390 126 219 282 +# 13--Interview/13_Interview_Interview_2_People_Visible_13_285.jpg +536 255 123 194 +# 13--Interview/13_Interview_Interview_Sequences_13_778.jpg +410 170 234 325 +# 13--Interview/13_Interview_Interview_2_People_Visible_13_420.jpg +339 406 215 349 +# 13--Interview/13_Interview_Interview_Sequences_13_92.jpg +336 247 14 22 +436 521 17 19 +# 13--Interview/13_Interview_Interview_On_Location_13_537.jpg +112 325 531 784 +# 13--Interview/13_Interview_Interview_Sequences_13_609.jpg +486 92 168 236 +# 13--Interview/13_Interview_Interview_Sequences_13_929.jpg +82 172 74 96 +184 158 62 98 +490 158 52 80 +796 168 72 98 +# 13--Interview/13_Interview_Interview_Sequences_13_31.jpg +666 248 146 200 +198 134 138 186 +# 13--Interview/13_Interview_Interview_Sequences_13_108.jpg +186 94 98 150 +746 38 108 162 +# 13--Interview/13_Interview_Interview_On_Location_13_334.jpg +211 267 87 116 +513 255 78 118 +746 260 76 109 +# 13--Interview/13_Interview_Interview_2_People_Visible_13_241.jpg +428 96 122 180 +# 13--Interview/13_Interview_Interview_On_Location_13_3.jpg +930 282 60 84 +# 13--Interview/13_Interview_Interview_On_Location_13_512.jpg +345 133 201 269 +# 13--Interview/13_Interview_Interview_Sequences_13_884.jpg +256 178 54 82 +# 13--Interview/13_Interview_Interview_On_Location_13_287.jpg +136 144 102 128 +414 56 104 144 +764 94 126 160 +# 13--Interview/13_Interview_Interview_Sequences_13_636.jpg +724 90 80 124 +# 13--Interview/13_Interview_Interview_On_Location_13_56.jpg +464 194 116 146 +# 13--Interview/13_Interview_Interview_On_Location_13_569.jpg +283 369 339 469 +# 13--Interview/13_Interview_Interview_Sequences_13_973.jpg +308 124 66 100 +826 166 72 98 +# 13--Interview/13_Interview_Interview_Sequences_13_187.jpg +200 6 64 76 +816 130 88 84 +# 13--Interview/13_Interview_Interview_On_Location_13_791.jpg +390 376 359 453 +# 13--Interview/13_Interview_Interview_Sequences_13_477.jpg +378 114 228 362 +# 13--Interview/13_Interview_Interview_On_Location_13_921.jpg +200 392 21 
26 +660 389 26 32 +753 393 27 34 +935 388 18 29 +# 13--Interview/13_Interview_Interview_Sequences_13_268.jpg +520 190 72 102 +# 13--Interview/13_Interview_Interview_Sequences_13_134.jpg +342 100 204 276 +# 13--Interview/13_Interview_Interview_On_Location_13_736.jpg +209 220 348 542 +# 13--Interview/13_Interview_Interview_2_People_Visible_13_223.jpg +122 48 40 51 +278 108 32 39 +419 51 43 54 +570 108 34 40 +696 64 39 51 +822 108 31 39 +120 318 41 52 +292 376 31 37 +455 373 31 40 +652 319 38 48 +865 323 38 44 +# 13--Interview/13_Interview_Interview_On_Location_13_401.jpg +219 87 49 61 +355 148 36 51 +345 228 44 65 +373 70 36 49 +418 113 40 56 +482 61 36 52 +536 158 41 56 +567 63 39 50 +659 89 39 50 +640 282 47 51 +789 149 41 51 +829 86 41 54 +1001 71 8 39 +# 13--Interview/13_Interview_Interview_Sequences_13_35.jpg +470 80 50 56 +362 275 48 62 +506 413 57 61 +544 480 52 68 +620 498 43 66 +# 13--Interview/13_Interview_Interview_2_People_Visible_13_425.jpg +488 133 189 277 +# 13--Interview/13_Interview_Interview_2_People_Visible_13_409.jpg +304 38 385 604 +# 13--Interview/13_Interview_Interview_On_Location_13_166.jpg +398 58 262 430 +# 13--Interview/13_Interview_Interview_Sequences_13_859.jpg +124 113 30 44 +227 201 32 35 +256 167 25 28 +552 199 36 35 +478 110 37 46 +619 183 20 27 +636 178 20 30 +707 199 25 33 +719 155 21 26 +812 166 20 28 +823 131 32 43 +# 13--Interview/13_Interview_Interview_2_People_Visible_13_442.jpg +220 529 12 20 +959 434 3 3 +# 13--Interview/13_Interview_Interview_2_People_Visible_13_374.jpg +408 34 212 306 +# 13--Interview/13_Interview_Interview_2_People_Visible_13_381.jpg +390 180 184 244 +# 13--Interview/13_Interview_Interview_Sequences_13_937.jpg +241 275 497 722 +# 13--Interview/13_Interview_Interview_Sequences_13_807.jpg +418 110 170 266 +# 13--Interview/13_Interview_Interview_2_People_Visible_13_327.jpg +358 198 172 272 +588 144 160 266 +# 13--Interview/13_Interview_Interview_On_Location_13_74.jpg +630 26 126 150 +# 13--Interview/13_Interview_Interview_On_Location_13_605.jpg +210 232 70 82 +424 152 70 96 +592 76 88 120 +# 13--Interview/13_Interview_Interview_On_Location_13_238.jpg +656 112 70 102 +# 13--Interview/13_Interview_Interview_Sequences_13_11.jpg +284 48 186 208 +# 13--Interview/13_Interview_Interview_On_Location_13_539.jpg +419 664 443 579 +# 13--Interview/13_Interview_Interview_2_People_Visible_13_461.jpg +164 150 58 112 +# 13--Interview/13_Interview_Interview_On_Location_13_478.jpg +458 54 118 166 +# 13--Interview/13_Interview_Interview_2_People_Visible_13_204.jpg +458 134 138 206 +# 13--Interview/13_Interview_Interview_On_Location_13_313.jpg +36 355 21 32 +30 339 17 28 +57 354 29 38 +115 350 25 27 +154 355 20 29 +177 371 23 32 +212 365 21 30 +206 346 19 28 +219 446 26 35 +262 356 23 30 +308 346 22 32 +295 476 28 35 +363 350 21 32 +391 360 19 26 +413 365 23 31 +444 360 19 27 +493 363 24 30 +453 498 26 37 +592 479 22 32 +655 474 26 36 +518 361 20 30 +559 363 26 35 +597 373 25 31 +623 341 24 31 +634 321 24 30 +658 340 26 29 +738 333 24 31 +773 333 23 34 +805 325 23 27 +801 362 25 37 +829 336 27 35 +878 321 26 33 +930 345 26 33 +959 352 25 32 +# 13--Interview/13_Interview_Interview_Sequences_13_513.jpg +350 118 146 174 +# 13--Interview/13_Interview_Interview_Sequences_13_111.jpg +122 211 23 31 +217 150 31 48 +290 170 27 35 +389 220 22 26 +508 176 25 34 +596 33 41 53 +763 117 30 42 +# 13--Interview/13_Interview_Interview_2_People_Visible_13_325.jpg +290 119 40 59 +66 488 16 23 +88 507 9 11 +102 519 11 14 +138 528 9 11 +110 515 5 12 +157 527 8 11 +178 527 
10 13 +194 532 8 12 +225 522 14 17 +370 629 31 45 +422 568 10 16 +455 570 9 12 +497 578 14 16 +# 13--Interview/13_Interview_Interview_Sequences_13_7.jpg +106 247 23 29 +253 206 36 47 +384 265 21 23 +476 249 25 30 +637 192 37 53 +573 305 15 14 +783 278 13 23 +768 296 13 17 +778 316 12 14 +863 244 23 26 +916 255 24 27 +# 13--Interview/13_Interview_Interview_On_Location_13_129.jpg +396 0 198 232 +# 13--Interview/13_Interview_Interview_Sequences_13_3.jpg +182 30 320 482 +# 13--Interview/13_Interview_Interview_Sequences_13_717.jpg +682 451 15 22 +627 132 88 125 +846 283 17 17 +902 324 17 17 +806 409 23 25 +835 464 20 32 +299 144 175 248 +72 310 17 22 +132 311 17 18 +161 421 17 22 +161 492 15 14 +258 447 15 16 +235 474 22 29 +389 412 19 31 +604 410 19 23 +650 466 20 29 +# 13--Interview/13_Interview_Interview_On_Location_13_610.jpg +553 229 141 245 +315 141 177 275 +# 13--Interview/13_Interview_Interview_2_People_Visible_13_406.jpg +249 94 343 536 +# 13--Interview/13_Interview_Interview_Sequences_13_135.jpg +376 98 132 180 +# 13--Interview/13_Interview_Interview_Sequences_13_718.jpg +420 82 152 220 +760 68 148 206 +# 13--Interview/13_Interview_Interview_Sequences_13_2.jpg +0 22 26 46 +118 0 37 19 +237 23 35 49 +0 135 16 37 +13 239 22 30 +120 270 58 63 +225 190 26 40 +267 190 52 70 +316 136 26 39 +325 30 32 41 +450 0 35 19 +568 23 38 51 +656 26 38 46 +645 148 29 37 +492 171 52 61 +547 199 32 44 +638 239 52 73 +742 286 78 95 +914 219 40 49 +946 16 48 58 +# 13--Interview/13_Interview_Interview_2_People_Visible_13_155.jpg +343 172 355 481 +# 13--Interview/13_Interview_Interview_Sequences_13_15.jpg +782 280 70 90 +# 13--Interview/13_Interview_Interview_On_Location_13_433.jpg +432 84 128 190 +# 13--Interview/13_Interview_Interview_Sequences_13_586.jpg +565 484 85 131 +# 13--Interview/13_Interview_Interview_On_Location_13_861.jpg +430 88 225 374 +846 828 140 211 +822 1050 184 252 +875 641 146 205 +# 13--Interview/13_Interview_Interview_Sequences_13_864.jpg +322 133 37 41 +546 103 55 54 +608 141 42 49 +729 241 53 42 +# 13--Interview/13_Interview_Interview_Sequences_13_152.jpg +702 64 126 174 +226 36 110 164 +# 13--Interview/13_Interview_Interview_On_Location_13_554.jpg +280 112 306 415 +# 13--Interview/13_Interview_Interview_On_Location_13_491.jpg +664 34 138 196 +# 13--Interview/13_Interview_Interview_On_Location_13_933.jpg +411 99 102 153 +# 13--Interview/13_Interview_Interview_On_Location_13_208.jpg +680 88 134 210 +# 13--Interview/13_Interview_Interview_On_Location_13_728.jpg +3 126 701 825 +# 13--Interview/13_Interview_Interview_On_Location_13_542.jpg +320 132 292 432 +# 13--Interview/13_Interview_Interview_On_Location_13_505.jpg +667 402 126 204 +312 324 138 171 +# 13--Interview/13_Interview_Interview_On_Location_13_849.jpg +292 218 354 418 +# 13--Interview/13_Interview_Interview_2_People_Visible_13_475.jpg +640 206 137 250 +324 176 220 351 +# 13--Interview/13_Interview_Interview_On_Location_13_394.jpg +698 144 76 138 +610 222 72 112 +262 102 84 150 +# 13--Interview/13_Interview_Interview_Sequences_13_557.jpg +514 362 76 88 +# 13--Interview/13_Interview_Interview_Sequences_13_237.jpg +552 74 180 240 +# 13--Interview/13_Interview_Interview_2_People_Visible_13_107.jpg +437 85 115 168 +# 13--Interview/13_Interview_Interview_2_People_Visible_13_260.jpg +325 340 488 605 +# 13--Interview/13_Interview_Interview_On_Location_13_225.jpg +412 124 161 250 +# 13--Interview/13_Interview_Interview_Sequences_13_759.jpg +447 580 171 195 +# 13--Interview/13_Interview_Interview_Sequences_13_793.jpg +55 241 27 51 +879 
299 31 48 +# 13--Interview/13_Interview_Interview_Sequences_13_189.jpg +290 2 416 542 +# 13--Interview/13_Interview_Interview_On_Location_13_301.jpg +149 95 517 634 +# 13--Interview/13_Interview_Interview_Sequences_13_779.jpg +376 140 342 494 +# 13--Interview/13_Interview_Interview_On_Location_13_187.jpg +288 76 108 160 +652 40 92 160 +# 13--Interview/13_Interview_Interview_On_Location_13_912.jpg +301 270 403 632 +# 13--Interview/13_Interview_Interview_Sequences_13_1032.jpg +414 151 181 263 +# 13--Interview/13_Interview_Interview_On_Location_13_246.jpg +228 312 526 727 +# 13--Interview/13_Interview_Interview_Sequences_13_89.jpg +152 21 37 56 +368 228 40 63 +717 309 43 50 +797 302 41 59 +# 13--Interview/13_Interview_Interview_On_Location_13_852.jpg +268 208 230 282 +# 13--Interview/13_Interview_Interview_2_People_Visible_13_237.jpg +552 170 92 146 +# 13--Interview/13_Interview_Interview_On_Location_13_521.jpg +178 216 35 53 +387 346 18 22 +599 290 9 13 +635 288 12 16 +636 320 10 15 +605 326 9 12 +882 212 30 42 +702 39 95 150 +891 1 114 152 +414 365 12 15 +622 268 9 15 +437 261 8 11 +# 13--Interview/13_Interview_Interview_On_Location_13_510.jpg +377 84 303 425 +# 13--Interview/13_Interview_Interview_On_Location_13_284.jpg +451 60 85 123 +96 227 45 53 +# 13--Interview/13_Interview_Interview_Sequences_13_55.jpg +614 155 210 284 +540 373 64 78 +623 429 30 40 +992 443 32 43 +# 13--Interview/13_Interview_Interview_On_Location_13_636.jpg +187 42 55 75 +723 109 35 48 +798 114 32 42 +# 13--Interview/13_Interview_Interview_2_People_Visible_13_189.jpg +171 218 59 66 +253 222 46 58 +396 212 48 68 +515 216 43 65 +571 186 39 50 +674 242 43 55 +751 239 39 49 +815 253 42 57 +919 250 45 53 +686 127 13 21 +696 98 5 10 +686 82 6 11 +672 78 6 10 +653 86 7 10 +639 79 6 11 +619 76 8 12 +598 113 7 12 +591 115 6 12 +605 81 8 8 +592 87 6 10 +572 82 8 12 +459 69 7 13 +447 69 5 12 +431 67 7 10 +413 65 9 9 +308 75 7 13 +517 69 6 9 +534 87 8 8 +397 65 9 10 +356 80 9 15 +313 104 9 13 +374 73 10 14 +618 99 7 10 +# 13--Interview/13_Interview_Interview_On_Location_13_865.jpg +106 44 52 66 +490 76 108 158 +# 13--Interview/13_Interview_Interview_On_Location_13_559.jpg +534 208 100 116 +# 13--Interview/13_Interview_Interview_Sequences_13_541.jpg +484 252 22 30 +# 13--Interview/13_Interview_Interview_On_Location_13_282.jpg +666 76 158 236 +# 13--Interview/13_Interview_Interview_Sequences_13_813.jpg +390 154 205 299 +# 13--Interview/13_Interview_Interview_On_Location_13_138.jpg +47 334 41 52 +98 262 16 21 +335 228 15 21 +414 247 9 14 +505 250 17 20 +671 278 18 22 +381 264 6 9 +# 13--Interview/13_Interview_Interview_Sequences_13_691.jpg +274 50 380 524 +# 13--Interview/13_Interview_Interview_Sequences_13_270.jpg +311 119 367 482 +# 13--Interview/13_Interview_Interview_On_Location_13_190.jpg +282 230 47 57 +# 13--Interview/13_Interview_Interview_Sequences_13_495.jpg +539 69 42 51 +496 121 28 37 +431 151 32 33 +408 142 23 34 +353 131 25 28 +399 121 24 27 +0 118 22 38 +109 37 17 24 +94 44 7 9 +209 107 18 22 +88 153 23 25 +139 125 21 34 +# 13--Interview/13_Interview_Interview_On_Location_13_773.jpg +714 397 60 90 +# 13--Interview/13_Interview_Interview_On_Location_13_513.jpg +492 100 136 216 +# 13--Interview/13_Interview_Interview_2_People_Visible_13_239.jpg +545 66 236 337 +# 13--Interview/13_Interview_Interview_Sequences_13_33.jpg +359 169 219 292 +# 13--Interview/13_Interview_Interview_On_Location_13_33.jpg +106 70 86 118 +406 96 80 106 +786 52 76 118 +# 13--Interview/13_Interview_Interview_Sequences_13_867.jpg +383 255 245 350 
+23 461 19 24 +220 459 14 22 +173 421 17 28 +13 442 13 20 +285 403 35 49 +239 413 18 28 +124 87 16 17 +879 428 24 31 +882 332 17 20 +996 370 18 22 +555 71 8 12 +575 74 9 10 +597 62 10 14 +627 42 11 12 +650 39 9 13 +678 29 12 12 +755 6 8 11 +# 13--Interview/13_Interview_Interview_2_People_Visible_13_245.jpg +480 80 136 227 +240 123 139 189 +# 13--Interview/13_Interview_Interview_2_People_Visible_13_743.jpg +152 0 56 69 +547 13 34 69 +667 74 11 14 +719 147 8 12 +857 49 37 60 +# 13--Interview/13_Interview_Interview_On_Location_13_179.jpg +328 172 248 388 +678 300 146 208 +12 246 148 294 +# 13--Interview/13_Interview_Interview_Sequences_13_764.jpg +418 100 202 286 +# 13--Interview/13_Interview_Interview_Sequences_13_121.jpg +418 144 244 350 +# 13--Interview/13_Interview_Interview_Sequences_13_5.jpg +105 286 39 62 +484 40 84 100 +# 13--Interview/13_Interview_Interview_2_People_Visible_13_668.jpg +291 470 10 14 +88 686 8 16 +128 716 9 15 +415 474 11 14 +533 472 6 14 +598 471 15 20 +708 477 10 19 +937 454 17 23 +713 628 18 21 +28 554 12 14 +# 13--Interview/13_Interview_Interview_Sequences_13_347.jpg +334 160 80 118 +# 13--Interview/13_Interview_Interview_2_People_Visible_13_254.jpg +188 192 140 108 +367 123 100 174 +188 19 61 84 +441 30 105 143 +605 0 105 81 +729 210 51 81 +# 13--Interview/13_Interview_Interview_Sequences_13_373.jpg +488 190 76 90 +# 13--Interview/13_Interview_Interview_Sequences_13_37.jpg +250 184 82 98 +496 172 70 82 +792 198 68 66 +# 13--Interview/13_Interview_Interview_On_Location_13_847.jpg +148 120 84 130 +410 56 68 122 +634 144 68 106 +850 100 58 126 +# 13--Interview/13_Interview_Interview_Sequences_13_868.jpg +287 88 34 45 +404 293 11 14 +578 267 12 11 +735 549 7 10 +930 549 10 10 +779 252 10 7 +# 13--Interview/13_Interview_Interview_On_Location_13_426.jpg +306 294 82 108 +# 13--Interview/13_Interview_Interview_Sequences_13_936.jpg +349 86 20 33 +478 153 29 34 +574 196 22 29 +668 110 20 39 +# 13--Interview/13_Interview_Interview_Sequences_13_40.jpg +392 156 64 94 +580 158 70 122 +# 13--Interview/13_Interview_Interview_2_People_Visible_13_1001.jpg +125 366 15 24 +189 353 15 24 +210 369 13 22 +220 149 12 16 +295 143 15 20 +296 360 16 22 +377 150 13 17 +463 141 13 18 +475 350 17 29 +578 340 13 28 +627 324 17 27 +660 336 14 22 +724 344 15 20 +815 316 22 32 +958 334 15 20 +# 13--Interview/13_Interview_Interview_2_People_Visible_13_217.jpg +336 120 282 426 +# 13--Interview/13_Interview_Interview_Sequences_13_456.jpg +220 6 576 646 +# 13--Interview/13_Interview_Interview_On_Location_13_247.jpg +368 104 204 262 +# 13--Interview/13_Interview_Interview_2_People_Visible_13_252.jpg +31 28 52 68 +226 21 31 28 +435 15 24 27 +620 36 61 77 +870 15 65 84 +82 288 30 33 +277 295 30 33 +503 285 24 26 +806 276 31 41 +110 556 46 62 +324 560 47 57 +577 511 33 41 +863 525 34 38 +# 14--Traffic/14_Traffic_Traffic_14_834.jpg +684 421 19 23 +622 349 4 6 +602 353 4 7 +577 350 7 7 +696 352 5 6 +714 351 5 6 +733 350 5 7 +723 348 5 6 +340 352 6 7 +390 352 5 6 +378 352 4 5 +355 349 4 6 +73 362 7 8 +91 361 5 6 +124 353 6 7 +52 365 6 7 +212 360 4 6 +639 66 13 16 +598 126 9 9 +952 341 9 11 +868 344 5 7 +149 350 7 13 +797 408 12 14 +# 14--Traffic/14_Traffic_Traffic_14_380.jpg +930 297 64 80 +894 212 46 53 +724 231 40 55 +648 200 33 34 +537 193 34 43 +555 162 26 25 +770 121 13 20 +494 193 21 26 +454 161 17 19 +399 177 25 31 +294 237 88 86 +226 168 22 22 +190 163 24 24 +116 164 27 30 +48 171 30 27 +0 154 19 21 +# 14--Traffic/14_Traffic_Traffic_14_361.jpg +844 303 15 14 +635 326 3 4 +670 330 3 5 +681 328 4 6 +# 
14--Traffic/14_Traffic_Traffic_14_722.jpg +248 82 15 15 +436 112 5 8 +# 14--Traffic/14_Traffic_Traffic_14_170.jpg +371 738 6 7 +429 697 7 9 +530 702 7 8 +499 645 8 9 +515 623 6 7 +418 644 6 6 +412 647 5 6 +331 681 8 9 +419 622 7 8 +412 622 5 6 +351 608 6 7 +304 600 6 8 +246 563 7 7 +253 530 5 7 +314 537 6 7 +377 511 4 7 +376 559 6 7 +369 481 6 5 +330 504 5 5 +343 482 5 4 +350 484 5 6 +321 480 5 5 +250 445 5 5 +175 437 6 6 +211 435 5 5 +268 459 6 5 +292 469 5 9 +302 447 5 6 +291 448 4 6 +309 476 5 6 +314 471 4 6 +309 465 4 6 +312 466 4 6 +320 465 5 6 +365 426 4 5 +371 434 5 6 +327 412 5 5 +317 409 5 4 +322 407 4 4 +324 402 4 5 +343 399 4 4 +337 394 3 4 +315 392 4 4 +312 398 5 4 +289 339 4 4 +293 334 5 4 +246 337 4 5 +242 318 4 4 +254 412 5 5 +259 406 3 5 +209 394 5 4 +205 394 4 5 +155 382 5 6 +201 380 4 4 +239 377 4 4 +245 382 5 5 +250 393 4 5 +216 365 4 5 +219 361 5 6 +192 379 4 5 +196 370 5 5 +181 363 6 5 +212 353 4 5 +216 338 5 5 +216 330 3 4 +181 348 4 5 +175 342 6 5 +182 338 6 6 +533 533 6 9 +474 535 7 10 +# 14--Traffic/14_Traffic_Traffic_14_654.jpg +448 352 96 115 +# 14--Traffic/14_Traffic_Traffic_14_675.jpg +570 349 107 139 +# 14--Traffic/14_Traffic_Traffic_14_840.jpg +960 264 25 30 +924 270 33 35 +864 265 24 26 +807 285 29 30 +773 268 26 29 +751 259 21 26 +703 277 30 35 +710 237 22 29 +742 223 19 21 +793 214 17 20 +886 238 20 22 +840 257 22 27 +1014 222 10 22 +979 212 14 21 +977 187 9 12 +936 167 15 17 +890 172 17 21 +708 214 14 16 +670 268 25 28 +1012 242 11 19 +864 197 11 14 +662 244 25 26 +663 185 15 17 +608 287 29 33 +579 264 28 28 +629 244 22 24 +610 238 18 21 +598 221 19 21 +625 211 15 16 +492 276 31 35 +469 241 26 29 +532 223 18 21 +574 204 15 18 +518 208 17 19 +496 186 11 14 +608 157 7 8 +598 154 7 10 +612 180 8 9 +633 191 11 15 +383 298 26 30 +408 230 19 29 +495 227 17 23 +276 276 33 34 +315 254 25 28 +409 188 9 12 +145 278 32 39 +19 287 34 35 +51 252 30 35 +92 256 27 30 +134 240 23 28 +189 266 25 30 +210 252 24 31 +260 240 27 29 +287 214 17 20 +322 199 13 17 +52 201 16 17 +27 210 14 17 +0 198 12 17 +102 167 16 20 +471 156 6 8 +266 196 15 20 +195 230 22 25 +815 186 8 12 +754 149 11 13 +835 200 12 10 +538 153 8 10 +526 152 8 9 +428 150 6 6 +505 148 7 10 +135 198 14 16 +193 183 9 12 +30 191 8 10 +339 245 20 20 +227 203 17 17 +357 209 15 15 +376 192 12 10 +618 200 10 16 +# 14--Traffic/14_Traffic_Traffic_14_728.jpg +891 75 32 53 +866 138 22 26 +306 79 43 61 +2 106 38 47 +131 145 9 9 +155 203 26 34 +724 84 18 39 +# 14--Traffic/14_Traffic_Traffic_14_713.jpg +576 118 114 120 +222 386 94 124 +# 14--Traffic/14_Traffic_Traffic_14_850.jpg +594 144 86 122 +# 14--Traffic/14_Traffic_Traffic_14_55.jpg +207 467 15 14 +171 418 13 15 +240 401 12 12 +303 283 12 14 +571 352 11 13 +628 286 9 9 +550 217 10 12 +486 227 10 9 +163 172 10 10 +106 192 8 6 +441 171 8 11 +378 185 7 8 +411 165 9 11 +359 122 9 9 +296 134 9 8 +946 346 13 15 +889 223 11 13 +843 227 11 13 +826 134 12 15 +681 92 8 11 +629 100 12 14 +860 31 8 8 +168 227 9 11 +173 29 9 7 +87 70 6 7 +367 29 7 7 +572 25 6 8 +624 16 8 8 +584 74 9 11 +524 81 9 9 +# 14--Traffic/14_Traffic_Traffic_14_677.jpg +124 196 150 242 +238 38 178 262 +408 80 196 294 +612 204 212 280 +718 460 186 268 +# 14--Traffic/14_Traffic_Traffic_14_443.jpg +662 153 12 15 +510 132 12 16 +# 14--Traffic/14_Traffic_Traffic_14_504.jpg +784 245 33 42 +348 110 49 64 +988 304 10 9 +# 14--Traffic/14_Traffic_Traffic_14_644.jpg +588 388 19 24 +644 164 14 21 +523 158 15 20 +408 155 15 20 +654 99 14 18 +473 85 14 19 +336 107 16 17 +806 150 15 18 +440 32 14 18 +353 26 12 15 +# 
14--Traffic/14_Traffic_Traffic_14_253.jpg +318 120 104 120 +# 14--Traffic/14_Traffic_Traffic_14_505.jpg +624 563 82 116 +115 190 19 23 +647 265 20 24 +623 213 11 14 +659 192 7 10 +732 191 7 11 +596 238 11 16 +910 176 8 11 +985 183 8 12 +994 168 7 11 +926 191 7 10 +82 271 13 20 +187 271 14 12 +506 211 10 12 +428 197 6 9 +436 196 6 9 +358 203 8 8 +151 358 14 38 +613 196 9 10 +# 14--Traffic/14_Traffic_Traffic_14_267.jpg +230 479 17 21 +168 461 18 20 +153 438 18 20 +74 447 17 20 +75 393 9 11 +# 15--Stock_Market/15_Stock_Market_Stock_Market_15_569.jpg +0 254 84 251 +49 342 100 129 +116 373 53 80 +182 439 55 65 +230 373 46 57 +320 438 38 38 +362 458 18 23 +403 435 18 24 +391 470 22 29 +468 464 16 20 +# 15--Stock_Market/15_Stock_Market_Stock_Market_15_460.jpg +775 73 62 73 +579 112 51 73 +449 143 52 64 +334 120 56 74 +213 43 57 76 +145 127 60 74 +# 15--Stock_Market/15_Stock_Market_Stock_Market_15_542.jpg +878 254 47 53 +764 237 44 45 +618 285 34 39 +482 300 36 45 +382 270 34 45 +284 316 35 44 +223 275 31 39 +141 280 34 38 +# 15--Stock_Market/15_Stock_Market_Stock_Market_15_286.jpg +689 133 92 115 +623 211 59 82 +488 245 40 48 +397 261 28 37 +# 15--Stock_Market/15_Stock_Market_Stock_Market_15_731.jpg +494 314 270 388 +# 15--Stock_Market/15_Stock_Market_Stock_Market_15_706.jpg +332 180 166 230 +# 15--Stock_Market/15_Stock_Market_Stock_Market_15_483.jpg +510 354 171 498 +# 15--Stock_Market/15_Stock_Market_Stock_Market_15_241.jpg +138 346 58 84 +352 364 54 74 +562 380 62 80 +698 326 54 94 +# 15--Stock_Market/15_Stock_Market_Stock_Market_15_303.jpg +652 102 302 448 +# 15--Stock_Market/15_Stock_Market_Stock_Market_15_676.jpg +0 52 354 658 +# 15--Stock_Market/15_Stock_Market_Stock_Market_15_102.jpg +286 386 66 122 +# 15--Stock_Market/15_Stock_Market_Stock_Market_15_846.jpg +277 217 431 687 +# 15--Stock_Market/15_Stock_Market_Stock_Market_15_781.jpg +374 54 186 306 +# 15--Stock_Market/15_Stock_Market_Stock_Market_15_554.jpg +485 200 104 114 +217 73 157 208 +339 109 96 132 +0 175 69 94 +678 217 38 52 +667 150 13 14 +114 182 38 51 +738 255 23 27 +# 15--Stock_Market/15_Stock_Market_Stock_Market_15_382.jpg +290 88 104 168 +# 15--Stock_Market/15_Stock_Market_Stock_Market_15_751.jpg +260 104 160 226 +# 15--Stock_Market/15_Stock_Market_Stock_Market_15_313.jpg +279 115 48 51 +# 15--Stock_Market/15_Stock_Market_Stock_Market_15_526.jpg +594 104 110 164 +# 15--Stock_Market/15_Stock_Market_Stock_Market_15_301.jpg +186 98 106 160 +# 16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_752.jpg +136 114 64 112 +360 144 68 88 +684 162 64 90 +# 16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_239.jpg +566 106 110 154 +# 16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_195.jpg +240 28 37 66 +304 105 38 51 +457 182 30 48 +536 214 26 37 +600 236 23 38 +672 270 15 29 +869 278 13 23 +681 297 19 23 +# 16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_25.jpg +60 109 45 51 +150 93 40 58 +227 116 37 50 +332 154 43 48 +440 119 35 46 +504 97 33 43 +562 101 40 56 +648 135 41 50 +742 143 40 58 +842 137 39 44 +932 151 38 60 +# 16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_589.jpg +456 91 103 147 +# 16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_305.jpg +387 264 96 156 +592 279 90 132 +# 16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_84.jpg +434 165 250 356 +# 16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_422.jpg +291 159 381 510 +# 16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_474.jpg +351 212 357 484 +# 
16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_467.jpg +313 307 376 539 +# 16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_361.jpg +465 82 112 204 +# 16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_569.jpg +0 31 39 64 +88 23 71 86 +163 88 28 54 +191 35 41 56 +261 16 69 93 +336 81 59 57 +445 9 65 77 +564 149 141 170 +772 38 120 191 +830 98 117 197 +# 16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_94.jpg +144 292 72 94 +386 340 68 96 +560 350 72 98 +782 384 64 100 +# 16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_392.jpg +474 222 189 276 +174 456 93 117 +# 16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_482.jpg +425 171 270 364 +# 16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_4.jpg +15 252 12 16 +42 261 8 13 +79 344 17 29 +304 297 10 20 +370 302 15 26 +384 281 9 13 +453 296 10 17 +539 234 4 8 +150 277 6 8 +294 271 7 7 +287 272 5 6 +271 276 4 4 +393 267 4 5 +324 270 3 4 +343 268 4 4 +332 269 5 5 +405 267 5 4 +630 264 7 12 +681 262 8 10 +827 262 7 11 +910 268 9 13 +863 282 12 21 +947 269 6 15 +957 267 10 16 +199 267 4 5 +# 16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_134.jpg +426 211 156 211 +# 16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_512.jpg +358 120 235 358 +# 16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_495.jpg +385 151 209 299 +# 16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_85.jpg +165 180 54 78 +271 235 51 66 +409 227 49 73 +523 229 52 76 +681 229 58 78 +823 245 62 88 +# 16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_750.jpg +370 100 204 318 +# 16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_56.jpg +332 90 64 94 +770 74 60 110 +# 16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_309.jpg +274 172 274 385 +890 449 131 228 +721 312 111 207 +# 16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_124.jpg +442 67 260 404 +# 16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_226.jpg +202 92 58 79 +605 206 30 35 +751 373 25 36 +# 16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_135.jpg +351 81 270 378 +# 16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_490.jpg +337 179 353 473 +# 16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_317.jpg +217 105 37 50 +600 138 28 40 +844 117 29 44 +# 16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_231.jpg +298 136 14 18 +903 157 17 27 +747 220 26 28 +430 152 125 148 +556 108 121 157 +35 145 46 48 +126 146 58 59 +713 139 16 20 +794 147 15 19 +738 159 16 28 +719 220 25 29 +# 16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_143.jpg +258 223 427 629 +# 16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_338.jpg +361 136 268 381 +# 16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_116.jpg +392 163 130 190 +548 117 133 184 +# 16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_346.jpg +220 22 70 108 +652 60 200 308 +# 16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_447.jpg +331 187 301 404 +# 16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_591.jpg +369 220 306 438 +# 16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_637.jpg +396 65 244 394 +# 16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_141.jpg +0 230 30 59 +25 244 28 60 +89 261 38 51 +138 260 38 55 +186 281 37 44 +348 250 37 55 +393 242 41 51 +431 258 29 58 +447 254 25 59 +465 258 28 48 +497 279 18 42 +511 286 17 39 +545 300 23 35 +570 311 20 36 +607 317 18 31 +639 322 17 29 +648 320 12 29 +32 303 26 45 +272 46 55 76 +# 16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_64.jpg +428 182 232 339 
+# 16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_524.jpg +438 284 102 138 +848 314 86 140 +# 16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_59.jpg +402 159 183 297 +# 16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_270.jpg +434 72 256 458 +214 876 78 139 +# 16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_311.jpg +390 106 191 281 +# 16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_566.jpg +270 219 366 501 +# 16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_73.jpg +259 118 37 51 +410 144 36 52 +315 77 36 36 +203 52 19 28 +267 77 16 22 +239 86 15 16 +224 67 16 19 +240 58 15 19 +253 67 13 17 +184 69 16 21 +169 57 11 18 +127 95 13 20 +147 71 12 17 +141 52 13 16 +90 73 19 22 +6 79 15 21 +364 48 17 22 +392 42 16 18 +414 63 13 26 +431 81 13 19 +444 51 16 22 +462 56 14 20 +489 61 18 21 +533 64 13 16 +537 44 16 22 +472 29 14 18 +491 24 14 14 +344 38 13 15 +557 32 11 17 +565 26 16 22 +568 76 16 20 +594 79 13 16 +640 78 16 20 +624 72 14 19 +610 62 13 19 +570 51 14 16 +587 51 11 14 +556 56 11 18 +530 40 12 15 +601 21 16 20 +616 29 15 20 +638 44 13 19 +656 67 13 14 +654 21 15 20 +686 23 15 20 +701 32 14 17 +701 1 15 19 +727 91 41 51 +614 146 40 51 +771 1 14 15 +758 1 10 7 +988 34 15 20 +990 2 15 15 +863 37 17 19 +851 20 16 18 +812 31 13 19 +791 36 11 17 +972 19 13 19 +874 18 13 13 +889 33 13 19 +917 34 13 19 +750 42 18 20 +22 76 16 23 +656 39 13 15 +699 44 13 17 +721 14 16 17 +721 43 17 21 +# 16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_546.jpg +185 66 779 1076 +# 17--Ceremony/17_Ceremony_Ceremony_17_1007.jpg +243 372 174 237 +700 447 195 264 +# 17--Ceremony/17_Ceremony_Ceremony_17_469.jpg +265 347 38 28 +761 286 39 52 +798 298 34 57 +# 17--Ceremony/17_Ceremony_Ceremony_17_592.jpg +3 371 18 22 +56 308 21 27 +159 258 32 34 +194 337 21 23 +300 283 17 22 +277 334 11 18 +371 202 32 46 +509 288 16 17 +550 301 14 20 +591 321 12 16 +707 350 12 14 +727 319 10 15 +790 341 11 15 +# 17--Ceremony/17_Ceremony_Ceremony_17_368.jpg +344 149 83 103 +435 158 25 29 +576 96 106 125 +707 238 81 75 +135 13 22 30 +# 17--Ceremony/17_Ceremony_Ceremony_17_765.jpg +80 304 11 16 +137 307 10 14 +186 314 9 15 +239 306 11 13 +325 296 11 15 +374 288 12 17 +398 290 12 16 +454 283 11 18 +557 277 13 17 +668 289 14 17 +716 286 12 17 +796 287 14 20 +830 311 13 19 +876 298 14 20 +926 298 12 18 +86 468 41 61 +377 393 20 43 +# 17--Ceremony/17_Ceremony_Ceremony_17_344.jpg +642 258 70 140 +932 372 82 108 +596 52 98 110 +518 110 64 76 +324 514 54 38 +# 17--Ceremony/17_Ceremony_Ceremony_17_253.jpg +225 546 7 16 +230 531 10 13 +237 519 10 11 +255 499 9 15 +273 487 8 12 +292 474 9 11 +323 460 7 12 +344 448 8 9 +110 509 11 15 +117 492 7 13 +126 477 8 11 +141 466 8 13 +158 454 8 12 +177 439 9 14 +195 425 8 13 +239 412 8 11 +262 401 9 11 +287 396 8 11 +79 470 8 12 +93 453 8 11 +109 446 9 12 +140 430 10 12 +150 423 7 11 +193 400 9 11 +221 393 7 9 +245 382 8 8 +269 376 9 12 +311 387 8 8 +339 375 8 10 +322 363 8 9 +299 367 7 10 +77 410 9 13 +101 406 7 10 +153 382 7 11 +182 377 7 11 +200 367 10 11 +229 359 9 9 +261 351 9 11 +316 346 6 8 +151 359 5 8 +127 370 9 11 +102 381 9 13 +78 387 7 9 +59 397 7 11 +343 338 7 9 +282 345 8 10 +371 378 9 11 +367 443 9 9 +556 315 9 10 +569 313 9 9 +568 337 9 11 +549 352 9 10 +537 339 8 10 +581 350 8 12 +601 339 7 10 +616 351 9 12 +611 309 9 11 +628 330 9 12 +644 346 11 12 +641 312 8 10 +672 313 7 9 +690 333 8 13 +700 307 9 16 +733 309 8 13 +721 339 8 10 +775 387 9 10 +771 356 9 11 +800 357 9 12 +825 360 9 12 +816 344 7 13 +784 344 9 11 +757 300 11 11 +788 304 8 
10 +822 306 10 10 +850 306 10 13 +859 325 8 11 +847 347 7 11 +862 367 8 13 +877 358 8 12 +891 377 8 11 +904 360 9 12 +919 381 9 13 +937 365 10 12 +743 382 11 13 +761 441 7 10 +820 453 9 12 +805 390 9 11 +834 426 7 10 +882 435 9 11 +866 458 12 13 +704 346 11 13 +662 329 8 12 +677 346 11 15 +38 723 15 16 +17 719 14 22 +55 427 8 10 +# 17--Ceremony/17_Ceremony_Ceremony_17_234.jpg +260 48 66 92 +556 96 72 84 +108 390 76 86 +872 452 60 100 +# 17--Ceremony/17_Ceremony_Ceremony_17_588.jpg +96 178 35 49 +263 215 25 40 +193 342 28 42 +312 355 21 38 +437 235 19 35 +489 275 22 28 +574 219 21 37 +747 198 27 38 +952 174 40 46 +# 17--Ceremony/17_Ceremony_Ceremony_17_211.jpg +586 370 44 54 +838 157 54 63 +769 206 42 59 +685 223 42 49 +592 238 38 48 +553 233 34 45 +218 210 46 52 +135 203 29 61 +409 103 30 68 +# 17--Ceremony/17_Ceremony_Ceremony_17_1048.jpg +25 421 39 46 +129 400 34 40 +334 139 93 136 +467 95 108 147 +# 17--Ceremony/17_Ceremony_Ceremony_17_444.jpg +332 263 341 461 +# 17--Ceremony/17_Ceremony_Ceremony_17_452.jpg +221 378 13 26 +312 369 20 29 +309 336 14 20 +843 386 12 22 +932 391 16 22 +976 390 16 24 +# 17--Ceremony/17_Ceremony_Ceremony_17_57.jpg +729 125 40 43 +909 167 39 43 +577 160 37 35 +424 122 37 41 +286 102 41 48 +127 117 41 42 +# 17--Ceremony/17_Ceremony_Ceremony_17_944.jpg +471 427 12 16 +495 433 11 14 +518 426 9 15 +151 465 10 16 +173 472 11 13 +199 466 10 15 +233 460 10 15 +276 474 9 16 +0 525 15 30 +758 452 11 16 +813 458 14 19 +842 459 10 16 +881 524 11 21 +954 550 19 43 +326 505 9 15 +637 446 7 18 +509 260 7 9 +# 17--Ceremony/17_Ceremony_Ceremony_17_1037.jpg +461 180 199 242 +# 17--Ceremony/17_Ceremony_Ceremony_17_271.jpg +257 382 15 28 +516 378 21 34 +571 380 19 26 +597 372 22 33 +733 372 19 30 +58 448 9 14 +# 17--Ceremony/17_Ceremony_Ceremony_17_735.jpg +340 178 446 642 +# 17--Ceremony/17_Ceremony_Ceremony_17_300.jpg +734 139 42 50 +600 152 41 52 +427 137 44 57 +314 155 43 53 +188 150 43 60 +# 17--Ceremony/17_Ceremony_Ceremony_17_490.jpg +48 3 54 56 +209 2 54 75 +388 14 49 72 +485 18 35 62 +289 262 64 78 +517 227 70 75 +888 186 102 129 +# 17--Ceremony/17_Ceremony_Ceremony_17_972.jpg +399 202 127 169 +487 547 171 239 +# 17--Ceremony/17_Ceremony_Ceremony_17_415.jpg +338 909 107 118 +650 926 79 101 +762 138 180 256 +444 118 219 253 +65 127 194 250 +# 17--Ceremony/17_Ceremony_Ceremony_17_406.jpg +374 252 58 104 +712 382 52 98 +# 17--Ceremony/17_Ceremony_Ceremony_17_668.jpg +236 104 128 190 +662 208 102 128 +# 17--Ceremony/17_Ceremony_Ceremony_17_218.jpg +54 313 35 43 +183 311 31 41 +292 318 29 41 +369 364 31 34 +642 476 43 54 +763 463 40 57 +891 507 45 48 +1008 546 16 42 +905 663 85 82 +513 324 27 39 +562 330 27 35 +640 325 30 33 +724 300 24 36 +822 319 30 44 +925 351 30 37 +997 360 27 45 +10 365 36 45 +155 365 34 42 +253 384 33 41 +364 410 37 43 +492 401 31 40 +549 436 31 36 +763 364 26 32 +870 375 29 37 +704 427 33 37 +632 442 29 35 +855 421 34 41 +962 422 35 39 +938 487 38 40 +804 464 25 38 +38 419 35 41 +66 456 44 53 +198 429 41 56 +45 516 42 51 +134 549 42 53 +248 501 42 55 +325 480 37 48 +389 527 42 53 +462 459 34 38 +512 521 48 61 +# 17--Ceremony/17_Ceremony_Ceremony_17_1005.jpg +323 239 201 279 +546 142 56 71 +674 189 57 67 +789 175 50 77 +828 116 53 67 +863 186 53 68 +960 181 60 73 +437 103 31 39 +# 17--Ceremony/17_Ceremony_Ceremony_17_113.jpg +611 161 117 167 +355 210 113 134 +826 304 30 42 +763 244 19 43 +530 284 18 38 +953 290 17 27 +741 272 20 37 +851 270 20 22 +249 308 14 28 +984 318 10 27 +1013 283 11 20 +816 294 13 19 +568 297 10 15 +# 
17--Ceremony/17_Ceremony_Ceremony_17_171.jpg +929 380 95 148 +645 298 102 144 +520 318 93 118 +395 301 88 106 +254 245 70 84 +354 268 71 91 +197 231 61 83 +151 224 45 76 +23 219 62 76 +9 130 37 47 +92 122 33 48 +151 50 35 41 +247 148 42 50 +301 149 43 57 +331 139 56 67 +455 134 50 70 +475 157 57 81 +631 157 58 65 +692 188 53 64 +854 239 72 90 +957 153 66 114 +268 53 34 48 +292 30 27 44 +517 82 41 50 +669 73 37 49 +722 72 47 65 +765 49 42 75 +525 21 34 38 +579 2 33 39 +750 217 71 83 +# 17--Ceremony/17_Ceremony_Ceremony_17_1009.jpg +174 186 104 132 +# 17--Ceremony/17_Ceremony_Ceremony_17_227.jpg +574 462 12 14 +957 466 12 14 +908 450 11 14 +311 491 11 14 +166 500 11 15 +262 495 11 15 +115 498 12 15 +414 644 7 11 +822 610 11 12 +# 17--Ceremony/17_Ceremony_Ceremony_17_852.jpg +407 243 28 43 +459 240 38 43 +507 214 26 47 +850 238 29 45 +# 17--Ceremony/17_Ceremony_Ceremony_17_470.jpg +80 257 44 46 +22 295 32 36 +143 270 15 20 +229 230 41 46 +350 170 53 78 +482 252 47 60 +579 282 50 61 +640 299 27 29 +680 291 30 35 +714 267 34 42 +726 213 51 59 +939 268 54 71 +893 364 19 20 +# 17--Ceremony/17_Ceremony_Ceremony_17_418.jpg +11 116 24 32 +21 144 30 42 +87 133 27 36 +61 93 14 22 +145 163 27 36 +130 125 20 28 +169 108 21 31 +202 116 25 36 +250 103 21 29 +274 134 23 26 +299 143 19 26 +117 87 13 17 +72 76 14 17 +81 64 10 18 +86 118 25 27 +53 118 9 15 +124 66 16 20 +183 80 15 19 +154 93 14 16 +298 81 13 18 +353 142 26 33 +363 124 18 23 +316 112 19 26 +379 114 17 24 +404 99 14 20 +468 121 18 25 +508 115 23 29 +521 123 16 25 +563 124 13 19 +533 81 12 18 +581 103 13 15 +586 128 16 25 +556 174 27 39 +501 105 15 19 +626 131 17 24 +659 123 19 26 +685 135 17 22 +701 125 19 28 +701 150 25 32 +714 167 25 35 +765 129 18 26 +785 142 20 25 +784 192 25 38 +858 107 12 17 +867 139 17 23 +891 125 13 18 +904 130 14 14 +939 127 12 15 +965 122 13 20 +933 155 13 16 +984 148 17 23 +1003 148 15 19 +1000 170 16 22 +1015 165 9 22 +947 196 18 23 +953 210 20 25 +967 221 20 23 +991 206 15 20 +927 246 22 38 +969 151 10 16 +785 289 28 39 +# 17--Ceremony/17_Ceremony_Ceremony_17_46.jpg +266 141 17 25 +110 119 18 28 +65 130 18 26 +# 17--Ceremony/17_Ceremony_Ceremony_17_818.jpg +678 201 97 148 +47 330 82 101 +276 173 107 179 +# 17--Ceremony/17_Ceremony_Ceremony_17_782.jpg +634 86 136 174 +262 90 100 164 +# 17--Ceremony/17_Ceremony_Ceremony_17_220.jpg +582 290 10 13 +486 299 10 14 +427 313 9 12 +377 304 11 12 +652 285 9 14 +165 378 9 19 +56 372 10 15 +# 17--Ceremony/17_Ceremony_Ceremony_17_803.jpg +16 27 83 298 +317 22 109 134 +647 84 95 119 +89 109 16 20 +91 139 26 30 +161 69 9 13 +122 124 18 23 +150 124 16 18 +171 142 18 23 +208 127 14 16 +204 109 13 14 +162 104 15 18 +221 117 12 17 +228 106 12 15 +417 114 13 18 +444 116 11 17 +480 120 14 18 +490 128 18 21 +506 115 13 16 +527 115 13 15 +557 130 13 18 +569 115 14 17 +573 138 15 20 +541 153 23 31 +597 123 16 19 +605 144 21 27 +630 150 21 27 +942 175 24 29 +627 109 13 18 +194 128 14 21 +112 145 15 19 +# 18--Concerts/18_Concerts_Concerts_18_602.jpg +937 372 38 55 +896 477 42 62 +870 371 22 26 +837 340 17 19 +992 345 15 15 +928 339 10 19 +739 377 19 21 +772 334 16 21 +710 341 17 19 +682 354 11 18 +605 339 13 12 +566 335 11 13 +554 385 33 43 +511 393 21 33 +480 399 28 35 +471 361 20 26 +520 339 16 19 +519 323 13 18 +668 341 14 15 +653 348 14 14 +596 358 15 18 +407 393 18 25 +368 390 28 44 +346 380 17 25 +437 330 14 17 +452 347 16 20 +614 268 10 12 +483 262 11 12 +408 290 13 14 +399 327 11 15 +393 298 12 12 +543 279 9 13 +308 385 26 38 +249 304 21 26 +233 298 14 24 +262 260 12 18 +300 257 13 
14 +335 270 10 14 +370 273 10 15 +332 331 14 19 +211 274 11 16 +189 263 11 17 +170 269 11 14 +142 256 13 21 +108 258 13 15 +192 367 16 17 +50 264 11 17 +39 345 16 23 +458 497 27 31 +327 505 36 55 +297 531 38 55 +259 537 26 42 +166 402 26 35 +142 407 20 27 +131 395 17 24 +78 419 22 31 +51 380 19 30 +22 340 14 18 +579 259 13 17 +# 18--Concerts/18_Concerts_Concerts_18_381.jpg +324 76 124 156 +# 18--Concerts/18_Concerts_Concerts_18_27.jpg +365 99 449 671 +# 18--Concerts/18_Concerts_Concerts_18_1038.jpg +626 142 82 102 +536 88 76 88 +450 156 84 110 +276 162 86 118 +288 632 100 148 +90 660 102 158 +462 606 100 124 +632 572 94 108 +# 18--Concerts/18_Concerts_Concerts_18_313.jpg +126 60 92 126 +312 44 96 114 +488 98 186 198 +748 46 80 90 +912 94 100 128 +# 18--Concerts/18_Concerts_Concerts_18_151.jpg +998 331 18 25 +981 271 14 26 +1010 205 14 19 +871 257 20 25 +897 204 20 24 +954 151 17 17 +986 63 16 18 +851 65 17 19 +872 149 18 23 +783 147 18 23 +784 202 19 22 +852 330 17 24 +889 439 13 21 +867 415 16 22 +742 415 17 24 +718 441 10 20 +755 263 18 22 +727 321 20 25 +670 258 17 22 +671 209 17 24 +694 145 17 24 +720 59 16 23 +592 70 17 19 +588 145 17 23 +560 204 18 20 +504 139 19 22 +465 68 17 23 +399 149 17 23 +474 202 18 29 +336 207 16 17 +346 82 15 18 +591 328 18 21 +581 269 18 19 +471 264 18 19 +504 343 13 19 +472 321 18 23 +331 264 19 22 +362 337 19 22 +305 398 19 24 +226 328 21 25 +208 268 19 22 +122 333 18 23 +88 287 15 17 +153 403 17 20 +94 427 16 25 +265 431 13 24 +204 92 14 18 +216 159 13 10 +131 86 18 22 +105 56 15 16 +103 109 14 15 +70 100 15 15 +64 75 15 18 +34 80 14 19 +14 80 14 16 +19 98 13 16 +12 112 8 16 +55 8 14 17 +79 118 12 14 +9 351 19 24 +163 623 8 18 +# 18--Concerts/18_Concerts_Concerts_18_528.jpg +458 120 78 102 +# 18--Concerts/18_Concerts_Concerts_18_104.jpg +560 393 14 19 +935 449 15 22 +845 423 14 17 +949 542 12 17 +# 18--Concerts/18_Concerts_Concerts_18_855.jpg +98 148 70 96 +370 96 52 84 +432 124 78 122 +824 196 58 100 +738 122 74 108 +# 18--Concerts/18_Concerts_Concerts_18_670.jpg +810 319 28 22 +296 181 30 31 +691 132 11 14 +785 114 11 19 +# 18--Concerts/18_Concerts_Concerts_18_469.jpg +848 241 41 45 +662 240 40 62 +477 262 39 59 +9 298 40 56 +# 18--Concerts/18_Concerts_Concerts_18_349.jpg +537 69 117 168 +# 18--Concerts/18_Concerts_Concerts_18_522.jpg +498 669 73 87 +630 660 64 92 +378 602 56 84 +87 646 59 95 +898 602 62 106 +# 18--Concerts/18_Concerts_Concerts_18_612.jpg +192 306 379 603 +# 18--Concerts/18_Concerts_Concerts_18_60.jpg +718 403 15 27 +707 477 26 30 +502 397 23 41 +256 379 19 28 +93 361 18 25 +806 536 9 11 +975 621 15 17 +# 18--Concerts/18_Concerts_Concerts_18_815.jpg +314 12 162 232 +# 18--Concerts/18_Concerts_Concerts_18_252.jpg +491 184 136 181 +# 18--Concerts/18_Concerts_Concerts_18_1013.jpg +462 144 108 156 +# 18--Concerts/18_Concerts_Concerts_18_38.jpg +758 394 30 42 +726 316 17 25 +806 276 21 27 +893 353 49 57 +733 287 15 19 +594 300 21 29 +894 275 15 19 +882 246 8 11 +798 235 10 10 +839 219 7 7 +943 221 9 9 +712 257 12 14 +744 242 11 13 +644 186 20 29 +531 269 24 31 +561 347 21 32 +569 198 9 11 +530 215 14 15 +508 215 11 11 +457 237 12 15 +431 196 10 12 +478 287 10 10 +488 309 13 16 +364 188 14 13 +279 192 18 19 +308 207 15 19 +279 211 31 49 +148 325 23 30 +394 505 50 54 +416 446 50 43 +65 417 36 41 +96 391 25 40 +0 601 52 85 +286 526 51 79 +544 469 59 98 +576 174 7 7 +847 251 6 7 +772 210 6 6 +229 291 20 28 +814 230 8 12 +# 18--Concerts/18_Concerts_Concerts_18_257.jpg +462 50 86 132 +# 18--Concerts/18_Concerts_Concerts_18_258.jpg +155 289 66 142 
+784 351 50 61 +898 77 36 47 +792 35 28 30 +86 50 31 44 +# 18--Concerts/18_Concerts_Concerts_18_486.jpg +277 196 226 262 +# 18--Concerts/18_Concerts_Concerts_18_1016.jpg +193 170 140 176 +# 18--Concerts/18_Concerts_Concerts_18_706.jpg +469 173 216 288 +# 18--Concerts/18_Concerts_Concerts_18_447.jpg +353 483 37 45 +344 368 45 48 +212 385 38 40 +246 524 35 39 +119 547 40 39 +88 379 35 48 +399 189 41 42 +244 194 33 23 +70 196 48 55 +901 120 54 55 +# 18--Concerts/18_Concerts_Concerts_18_536.jpg +502 307 28 38 +# 18--Concerts/18_Concerts_Concerts_18_872.jpg +518 107 93 145 +# 18--Concerts/18_Concerts_Concerts_18_665.jpg +519 154 233 334 +# 18--Concerts/18_Concerts_Concerts_18_251.jpg +392 291 229 310 +# 18--Concerts/18_Concerts_Concerts_18_910.jpg +273 254 497 656 +# 18--Concerts/18_Concerts_Concerts_18_1015.jpg +824 598 46 55 +618 625 51 55 +399 630 47 63 +155 628 51 62 +# 18--Concerts/18_Concerts_Concerts_18_657.jpg +740 282 152 190 +# 18--Concerts/18_Concerts_Concerts_18_853.jpg +910 228 48 44 +753 215 49 52 +743 133 42 46 +598 249 49 46 +314 385 23 20 +245 382 18 20 +179 382 21 23 +103 378 20 23 +# 18--Concerts/18_Concerts_Concerts_18_555.jpg +321 93 339 465 +# 18--Concerts/18_Concerts_Concerts_18_1004.jpg +318 54 250 308 +# 18--Concerts/18_Concerts_Concerts_18_127.jpg +914 198 38 65 +944 191 42 57 +850 207 24 37 +805 185 41 65 +735 180 49 80 +664 165 51 75 +577 190 26 48 +528 230 55 60 +358 234 57 78 +449 237 39 72 +453 185 36 41 +306 181 41 48 +275 239 52 83 +243 131 36 52 +180 164 40 67 +146 276 71 82 +55 182 29 43 +13 192 41 63 +# 18--Concerts/18_Concerts_Concerts_18_655.jpg +234 150 344 434 +# 18--Concerts/18_Concerts_Concerts_18_504.jpg +776 469 19 28 +795 372 14 19 +722 313 16 18 +687 395 12 19 +969 294 13 11 +807 335 9 13 +791 330 11 13 +742 333 10 14 +848 312 9 11 +938 278 11 13 +970 272 10 10 +611 474 20 25 +521 613 37 45 +338 627 38 52 +464 363 11 11 +289 416 13 22 +215 404 17 23 +183 382 14 26 +171 375 12 13 +287 394 14 17 +324 352 9 10 +405 402 11 13 +400 390 12 16 +125 390 12 19 +88 407 7 9 +# 18--Concerts/18_Concerts_Concerts_18_402.jpg +313 197 472 663 +# 18--Concerts/18_Concerts_Concerts_18_433.jpg +608 140 194 280 +# 18--Concerts/18_Concerts_Concerts_18_66.jpg +890 350 47 59 +925 296 38 46 +971 484 53 63 +743 329 55 70 +809 270 38 50 +627 246 34 57 +640 182 16 22 +663 170 14 24 +664 154 11 15 +761 172 19 32 +827 218 18 31 +869 193 22 32 +639 424 63 71 +580 311 45 61 +508 179 19 31 +509 162 14 18 +595 164 12 19 +658 115 18 19 +480 200 23 32 +401 200 24 27 +435 256 38 53 +387 221 26 33 +443 108 17 20 +465 251 21 36 +303 296 29 40 +196 248 33 45 +225 307 49 66 +190 324 33 44 +228 203 22 25 +211 169 21 24 +289 158 15 21 +376 149 16 20 +360 186 19 26 +492 327 58 84 +441 362 61 96 +455 169 15 18 +170 201 22 29 +287 463 70 64 +75 319 51 61 +2 539 112 47 +102 257 29 31 +32 267 26 37 +21 216 26 24 +542 196 19 29 +# 18--Concerts/18_Concerts_Concerts_18_389.jpg +418 142 150 165 +# 18--Concerts/18_Concerts_Concerts_18_828.jpg +610 240 100 154 +# 18--Concerts/18_Concerts_Concerts_18_102.jpg +504 148 115 181 +# 18--Concerts/18_Concerts_Concerts_18_366.jpg +235 45 365 493 +# 18--Concerts/18_Concerts_Concerts_18_693.jpg +432 94 110 166 +# 18--Concerts/18_Concerts_Concerts_18_920.jpg +982 587 12 13 +894 610 12 15 +904 565 7 13 +928 564 11 12 +869 576 10 10 +824 574 10 14 +852 557 10 13 +950 525 8 11 +909 521 6 11 +818 530 7 10 +791 552 9 7 +769 624 11 13 +752 626 10 14 +758 570 9 13 +616 607 10 13 +665 563 6 12 +871 534 8 11 +279 506 16 22 +249 660 28 16 +590 565 6 8 +904 490 7 8 +868 526 
8 10 +# 18--Concerts/18_Concerts_Concerts_18_656.jpg +68 14 152 184 +706 462 88 124 +# 18--Concerts/18_Concerts_Concerts_18_554.jpg +562 486 30 42 +# 18--Concerts/18_Concerts_Concerts_18_784.jpg +249 277 366 401 +# 18--Concerts/18_Concerts_Concerts_18_350.jpg +122 82 386 450 +# 18--Concerts/18_Concerts_Concerts_18_403.jpg +538 98 158 246 +# 18--Concerts/18_Concerts_Concerts_18_133.jpg +642 920 30 41 +460 970 32 33 +151 1013 32 45 +816 1007 37 42 +657 356 58 76 +321 424 65 58 +# 19--Couple/19_Couple_Couple_19_156.jpg +520 141 28 48 +551 130 38 47 +# 19--Couple/19_Couple_Couple_19_881.jpg +252 195 96 127 +585 234 81 125 +# 19--Couple/19_Couple_Couple_19_1014.jpg +691 225 198 261 +437 401 198 273 +# 19--Couple/19_Couple_Couple_19_88.jpg +582 190 132 180 +684 120 132 180 +# 19--Couple/19_Couple_Couple_19_631.jpg +662 216 142 210 +618 50 128 250 +# 19--Couple/19_Couple_Couple_19_810.jpg +458 335 20 26 +480 346 19 21 +# 19--Couple/19_Couple_Couple_19_836.jpg +194 110 68 104 +243 86 70 113 +653 50 88 149 +812 86 74 135 +119 455 106 155 +288 466 81 151 +646 432 104 185 +812 473 88 169 +124 839 110 178 +290 860 99 169 +# 19--Couple/19_Couple_Couple_19_325.jpg +316 248 162 166 +382 144 176 140 +# 19--Couple/19_Couple_Couple_19_106.jpg +306 122 60 82 +582 170 58 86 +# 19--Couple/19_Couple_Couple_19_90.jpg +333 733 123 207 +441 787 135 186 +# 19--Couple/19_Couple_Couple_19_910.jpg +0 201 302 526 +335 214 312 496 +704 286 72 98 +957 116 44 53 +# 19--Couple/19_Couple_Couple_19_688.jpg +75 290 275 374 +593 135 266 353 +# 19--Couple/19_Couple_Couple_19_936.jpg +766 320 86 112 +514 212 94 128 +# 19--Couple/19_Couple_Couple_19_24.jpg +182 160 180 202 +232 212 208 168 +# 19--Couple/19_Couple_Couple_19_319.jpg +487 581 33 55 +496 613 42 37 +# 19--Couple/19_Couple_Couple_19_254.jpg +606 42 108 160 +572 68 68 150 +# 19--Couple/19_Couple_Couple_19_86.jpg +222 288 336 369 +420 501 423 498 +# 19--Couple/19_Couple_Couple_19_847.jpg +509 490 165 270 +286 641 177 241 +# 19--Couple/19_Couple_Couple_19_873.jpg +78 140 357 505 +392 95 467 575 +# 19--Couple/19_Couple_Couple_19_349.jpg +414 108 142 216 +552 60 138 218 +# 19--Couple/19_Couple_Couple_19_770.jpg +624 386 60 80 +698 376 56 76 +# 19--Couple/19_Couple_Couple_19_50.jpg +588 148 70 94 +670 236 74 108 +# 19--Couple/19_Couple_Couple_19_125.jpg +334 86 214 334 +678 114 200 312 +38 174 186 276 +# 19--Couple/19_Couple_Couple_19_301.jpg +367 113 99 153 +582 214 97 129 +# 19--Couple/19_Couple_Couple_19_743.jpg +858 301 33 41 +801 298 36 42 +733 198 15 24 +759 214 25 25 +757 248 30 35 +696 410 31 36 +669 393 30 33 +596 318 25 27 +637 323 20 28 +522 271 35 47 +449 302 28 40 +320 213 26 34 +403 304 33 38 +155 309 33 37 +126 318 31 37 +183 473 20 34 +280 414 22 39 +698 117 10 11 +# 19--Couple/19_Couple_Couple_19_548.jpg +264 96 190 268 +614 70 174 234 +# 19--Couple/19_Couple_Couple_19_509.jpg +208 48 692 880 +# 19--Couple/19_Couple_Couple_19_139.jpg +366 160 160 212 +514 130 132 204 +# 19--Couple/19_Couple_Couple_19_317.jpg +326 164 110 212 +470 120 182 206 +# 19--Couple/19_Couple_Couple_19_514.jpg +200 366 252 336 +2 296 198 314 +# 19--Couple/19_Couple_Couple_19_667.jpg +461 84 234 353 +644 96 252 359 +# 19--Couple/19_Couple_Couple_19_110.jpg +682 258 134 180 +158 200 134 184 +# 19--Couple/19_Couple_Couple_19_822.jpg +188 124 266 370 +412 138 270 364 +# 19--Couple/19_Couple_Couple_19_31.jpg +596 252 54 108 +# 19--Couple/19_Couple_Couple_19_835.jpg +448 92 140 206 +540 116 142 194 +# 19--Couple/19_Couple_Couple_19_832.jpg +770 158 200 114 +148 334 124 150 +# 
2--Demonstration/2_Demonstration_Demonstrators_2_413.jpg +0 257 13 32 +19 226 20 25 +42 254 34 43 +130 245 27 28 +111 242 15 17 +143 224 19 19 +279 225 19 22 +384 186 17 26 +360 192 16 17 +338 195 14 17 +283 198 13 21 +493 176 14 22 +530 170 7 11 +960 96 9 13 +961 123 11 14 +22 243 14 16 +# 2--Demonstration/2_Demonstration_Protesters_2_905.jpg +668 398 59 68 +484 436 48 63 +249 467 52 69 +# 2--Demonstration/2_Demonstration_Political_Rally_2_219.jpg +319 170 381 557 +# 2--Demonstration/2_Demonstration_Protesters_2_46.jpg +616 625 98 103 +804 607 71 113 +947 665 76 63 +926 593 94 123 +961 528 59 72 +481 566 87 141 +435 511 80 133 +738 444 91 132 +653 280 72 117 +725 207 66 77 +582 73 52 68 +567 43 50 59 +553 26 44 55 +470 21 35 54 +683 28 35 43 +643 0 26 35 +74 561 94 130 +582 561 91 145 +930 214 73 106 +958 125 59 78 +824 73 70 87 +928 51 53 66 +759 135 59 62 +756 53 59 68 +830 25 34 40 +1002 172 22 37 +995 454 28 57 +389 337 90 112 +360 354 64 82 +279 550 79 120 +256 478 72 92 +209 279 81 88 +54 257 67 97 +7 433 40 135 +262 158 68 96 +513 159 58 96 +464 153 61 77 +337 80 66 87 +390 124 47 68 +127 106 73 99 +80 57 60 72 +43 30 37 55 +267 16 56 73 +238 88 61 81 +203 19 35 50 +130 0 35 46 +615 101 55 89 +327 37 27 38 +# 2--Demonstration/2_Demonstration_Demonstrators_2_712.jpg +641 530 43 62 +701 536 24 27 +682 529 17 31 +761 467 44 60 +889 431 36 41 +989 368 35 61 +812 514 51 52 +175 601 34 51 +117 566 25 35 +# 2--Demonstration/2_Demonstration_Demonstrators_2_181.jpg +880 592 48 62 +940 541 33 47 +850 503 21 29 +783 501 17 23 +719 516 20 28 +762 563 26 33 +664 527 35 45 +632 508 8 13 +545 515 12 16 +530 563 22 33 +411 533 25 32 +465 533 13 17 +482 505 8 9 +471 514 8 10 +80 536 51 72 +55 557 14 21 +231 544 18 26 +260 533 10 14 +187 556 17 23 +279 527 18 25 +339 536 11 15 +377 536 12 18 +411 520 7 9 +378 520 12 15 +369 514 7 9 +230 529 9 13 +312 516 14 18 +421 515 10 14 +# 2--Demonstration/2_Demonstration_Protesters_2_131.jpg +952 555 19 21 +974 573 19 23 +865 568 18 25 +838 594 8 17 +691 545 9 16 +710 543 11 15 +825 517 10 12 +868 516 8 11 +881 519 10 12 +902 517 9 13 +936 525 10 12 +997 535 11 12 +785 504 7 8 +744 508 7 9 +724 509 6 8 +731 508 7 8 +741 548 9 14 +974 497 7 8 +958 528 7 11 +1008 563 10 17 +960 502 7 9 +710 510 5 8 +769 504 5 9 +777 517 6 9 +788 536 6 10 +803 506 5 7 +896 557 13 18 +997 520 10 10 +999 497 7 9 +949 518 9 10 +970 515 7 10 +905 488 7 8 +955 496 5 7 +480 561 12 19 +409 541 11 16 +360 561 10 13 +377 609 13 27 +414 582 14 22 +462 615 16 29 +556 551 9 16 +626 599 12 25 +622 521 8 13 +515 516 8 10 +465 534 8 12 +405 522 6 10 +389 519 7 11 +424 515 6 11 +319 591 13 26 +443 536 5 10 +365 525 5 11 +320 525 7 9 +502 523 6 10 +564 525 6 10 +182 587 17 22 +123 616 21 27 +86 583 21 24 +31 599 23 25 +43 570 21 20 +129 583 16 21 +265 583 16 19 +209 618 21 22 +265 662 26 20 +297 627 18 22 +309 556 10 16 +216 553 11 16 +188 561 13 18 +183 522 8 11 +149 536 10 12 +133 526 10 12 +172 542 9 13 +37 539 13 16 +255 525 9 11 +219 515 10 11 +200 509 6 8 +320 539 8 14 +65 519 9 9 +55 508 6 8 +172 511 5 11 +214 529 6 12 +184 510 6 10 +77 504 5 8 +6 533 8 13 +369 511 5 11 +31 504 6 6 +552 517 7 8 +564 514 6 8 +404 498 5 6 +752 525 6 10 +707 533 7 10 +672 536 7 9 +884 609 13 28 +331 581 11 17 +118 522 6 9 +102 518 5 9 +666 581 15 19 +# 2--Demonstration/2_Demonstration_Political_Rally_2_35.jpg +552 196 20 29 +625 199 21 28 +706 199 21 29 +# 2--Demonstration/2_Demonstration_Demonstrators_2_700.jpg +256 146 98 124 +# 2--Demonstration/2_Demonstration_Protesters_2_748.jpg +125 194 23 29 +300 237 
24 29 +747 249 24 31 +# 2--Demonstration/2_Demonstration_Demonstrators_2_486.jpg +28 260 10 16 +153 458 51 54 +199 419 38 48 +256 365 65 85 +370 386 101 121 +551 398 68 93 +1003 274 17 24 +924 321 46 75 +# 2--Demonstration/2_Demonstration_Protesters_2_260.jpg +116 186 100 114 +318 194 96 92 +584 116 92 102 +710 58 124 144 +# 2--Demonstration/2_Demonstration_Demonstrators_2_188.jpg +373 462 92 111 +155 418 35 45 +222 353 47 53 +285 342 41 47 +306 275 27 27 +225 248 34 38 +192 297 30 35 +4 430 34 39 +28 416 20 18 +483 391 39 43 +389 349 39 43 +523 334 17 22 +876 372 30 30 +855 227 19 23 +1006 363 17 21 +107 355 18 19 +775 1096 102 98 +# 2--Demonstration/2_Demonstration_Demonstrators_2_689.jpg +47 259 16 16 +86 236 16 21 +146 236 15 21 +174 233 15 19 +216 211 15 18 +257 227 17 24 +185 255 30 57 +348 196 16 20 +312 211 17 22 +295 243 34 56 +2 248 16 23 +123 313 38 50 +350 325 79 83 +# 2--Demonstration/2_Demonstration_Political_Rally_2_641.jpg +987 213 31 60 +929 185 43 54 +883 148 50 60 +848 127 27 51 +818 168 45 53 +645 164 50 58 +573 173 53 59 +591 150 49 61 +518 169 50 54 +428 191 49 53 +461 209 42 54 +294 152 43 65 +202 141 46 56 +121 131 46 64 +148 158 48 54 +11 153 41 52 +390 179 51 66 +724 171 34 61 +# 2--Demonstration/2_Demonstration_Political_Rally_2_18.jpg +766 420 19 21 +863 449 21 24 +915 434 18 24 +624 456 18 24 +528 393 19 27 +485 361 18 23 +501 449 18 24 +442 457 21 31 +460 485 20 27 +481 401 16 22 +423 428 17 19 +433 412 17 25 +424 369 16 22 +448 365 14 21 +386 404 14 23 +336 389 15 22 +353 346 18 21 +370 345 14 23 +452 295 14 22 +368 303 16 24 +314 264 17 19 +331 202 15 17 +265 207 15 21 +276 265 17 23 +227 253 17 21 +222 275 20 24 +239 311 18 23 +299 319 17 20 +299 337 19 20 +249 349 15 18 +229 363 18 20 +239 383 15 25 +272 398 17 24 +382 439 17 23 +364 429 18 24 +316 439 20 24 +242 422 20 25 +171 451 20 27 +158 408 17 24 +166 334 20 23 +129 430 16 23 +96 448 19 25 +41 449 18 23 +22 462 21 23 +23 435 13 18 +42 423 18 21 +147 382 20 27 +185 306 19 23 +188 225 14 24 +170 258 17 21 +132 294 18 25 +155 303 16 21 +65 338 19 23 +12 354 19 25 +26 312 18 23 +23 257 17 23 +41 238 17 25 +74 323 16 21 +82 290 18 20 +89 267 19 19 +104 239 17 22 +0 209 12 21 +# 2--Demonstration/2_Demonstration_Protesters_2_486.jpg +446 133 163 162 +815 115 118 118 +45 400 133 154 +232 425 55 60 +24 542 39 39 +597 239 80 80 +802 306 35 49 +# 2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_795.jpg +456 456 130 132 +# 2--Demonstration/2_Demonstration_Political_Rally_2_107.jpg +227 492 30 35 +171 592 58 72 +232 563 45 57 +130 514 26 36 +860 505 22 31 +730 560 41 43 +530 547 63 90 +935 486 16 22 +919 541 20 25 +835 508 14 12 +283 497 15 23 +128 585 40 59 +772 623 43 60 +# 2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_120.jpg +966 586 51 48 +900 511 41 41 +822 516 27 34 +940 475 25 30 +995 481 28 36 +838 467 19 22 +933 429 26 22 +980 421 28 22 +958 399 17 16 +914 391 21 21 +978 372 20 15 +1002 371 12 14 +988 343 15 14 +982 338 11 9 +960 351 16 15 +1020 354 4 8 +908 374 21 18 +941 339 10 10 +937 344 8 8 +932 343 8 9 +917 343 15 14 +898 343 13 13 +894 329 9 10 +890 397 22 20 +860 422 24 21 +867 394 16 13 +882 357 14 14 +871 352 10 13 +854 350 14 14 +861 340 8 8 +854 331 9 8 +841 322 10 9 +2 498 80 66 +870 313 6 6 +855 303 6 5 +828 319 7 7 +838 308 7 8 +840 301 5 5 +882 303 4 4 +806 323 11 12 +815 311 6 8 +851 319 6 7 +795 323 7 8 +783 314 8 9 +794 306 6 9 +777 340 13 13 +770 338 8 9 +758 327 10 12 +756 313 6 7 +777 320 5 8 +773 311 6 7 +789 342 8 9 +836 388 17 15 +816 395 19 18 +798 380 
15 17 +814 364 11 13 +824 359 9 9 +812 350 12 14 +773 366 15 12 +747 378 15 17 +766 408 18 19 +723 397 16 18 +747 363 15 15 +763 357 10 13 +715 356 11 11 +724 337 11 12 +738 329 8 8 +747 322 6 7 +712 324 9 11 +696 334 9 8 +741 308 7 8 +727 329 8 8 +707 319 8 7 +693 310 6 7 +692 301 7 7 +714 305 6 7 +708 304 7 8 +723 299 6 6 +790 299 5 5 +691 363 11 15 +715 432 21 24 +659 446 29 28 +821 449 19 21 +727 564 31 29 +620 529 25 31 +603 367 10 14 +592 314 6 7 +601 315 6 8 +583 309 8 9 +563 313 7 7 +570 322 7 8 +560 331 11 10 +586 297 5 5 +566 299 4 4 +569 294 5 5 +553 292 6 6 +609 294 6 6 +597 307 6 7 +593 300 7 7 +576 292 5 6 +483 296 22 25 +503 267 20 23 +470 257 19 23 +521 273 13 13 +466 238 8 14 +397 259 12 20 +361 284 13 17 +325 289 8 10 +290 300 14 15 +275 288 14 15 +219 270 19 19 +246 279 7 6 +215 274 7 8 +208 273 5 6 +120 296 14 15 +110 291 8 12 +115 284 7 6 +136 292 8 12 +183 299 11 15 +333 282 8 9 +348 293 9 11 +336 361 16 22 +212 418 20 28 +270 415 14 27 +27 286 18 16 +15 291 7 11 +65 289 11 12 +81 286 10 13 +56 271 7 7 +0 302 13 13 +25 272 7 9 +51 267 5 6 +80 273 9 9 +151 293 8 13 +0 273 6 10 +952 295 6 7 +173 295 12 19 +# 2--Demonstration/2_Demonstration_Political_Rally_2_83.jpg +111 78 29 43 +124 81 33 49 +170 0 29 37 +35 60 32 42 +0 71 39 46 +10 122 54 64 +43 178 53 64 +70 211 65 69 +208 222 65 77 +178 151 51 63 +131 172 44 51 +234 120 40 53 +203 113 32 39 +218 59 37 50 +327 167 49 62 +415 193 42 66 +384 144 38 56 +401 111 36 47 +376 95 35 40 +445 111 31 54 +237 13 21 35 +266 19 22 31 +290 30 26 32 +385 57 29 36 +426 31 30 44 +344 29 33 37 +94 284 71 91 +3 391 81 104 +167 390 81 97 +117 532 113 110 +97 615 107 69 +292 572 103 112 +403 296 93 126 +598 494 95 133 +465 182 51 69 +504 205 52 76 +560 279 72 92 +626 384 82 107 +633 248 43 52 +588 184 46 69 +553 184 33 40 +674 615 113 69 +885 556 110 128 +986 440 38 118 +725 366 76 109 +881 335 78 81 +980 237 40 65 +929 202 52 58 +950 156 56 64 +886 149 49 66 +812 135 43 53 +788 115 40 52 +473 79 42 49 +557 83 34 44 +475 44 18 28 +472 17 24 29 +631 64 26 36 +672 51 21 27 +742 150 42 48 +756 183 31 44 +825 61 27 37 +869 27 21 25 +816 36 26 31 +749 17 25 30 +780 38 23 33 +602 62 23 23 +567 39 17 26 +590 55 20 27 +585 17 20 25 +980 85 28 34 +1000 122 22 29 +919 1 20 26 +228 267 69 86 +825 340 39 47 +311 73 23 26 +488 44 32 31 +941 82 30 38 +# 2--Demonstration/2_Demonstration_Demonstrators_2_666.jpg +542 196 25 28 +914 116 46 65 +# 2--Demonstration/2_Demonstration_Protesters_2_884.jpg +704 42 18 38 +583 91 26 30 +869 152 22 34 +887 173 25 48 +717 188 37 36 +529 156 29 37 +518 219 31 37 +665 257 27 61 +932 420 92 146 +769 332 40 76 +366 84 20 32 +290 96 16 24 +198 89 15 24 +34 80 22 21 +168 232 31 58 +# 2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_173.jpg +4 219 12 16 +18 242 14 17 +109 245 38 57 +175 225 20 28 +135 211 13 17 +4 254 16 15 +39 219 10 11 +35 227 6 4 +108 220 9 12 +159 198 5 7 +171 216 11 17 +173 195 7 6 +52 203 6 9 +96 208 7 8 +64 226 8 11 +69 220 8 10 +181 209 11 13 +0 307 28 50 +51 350 45 56 +169 314 49 58 +205 241 23 26 +250 228 14 11 +254 252 32 48 +272 277 35 47 +363 292 38 60 +355 226 26 39 +215 202 9 15 +224 206 14 19 +289 198 15 19 +320 212 18 23 +360 197 17 17 +460 227 31 46 +505 214 39 60 +449 207 27 34 +437 194 13 19 +491 163 14 15 +517 162 11 9 +547 171 14 15 +576 191 18 19 +331 191 8 9 +376 180 7 7 +336 182 9 10 +242 212 9 11 +262 181 7 9 +674 153 16 27 +742 161 19 29 +640 172 19 24 +775 192 30 42 +584 231 40 57 +652 204 36 46 +671 274 44 70 +848 292 51 69 +870 209 40 55 +824 164 24 29 +115 203 11 11 
+38 201 9 9 +192 193 6 7 +237 190 5 5 +427 171 7 9 +427 154 9 11 +128 199 11 14 +153 112 12 19 +582 157 11 13 +858 220 17 33 +997 134 7 12 +996 180 5 7 +721 156 7 8 +441 176 9 10 +94 219 15 20 +117 219 9 12 +406 187 6 8 +369 171 5 7 +457 166 7 6 +396 199 9 11 +471 193 11 16 +# 2--Demonstration/2_Demonstration_Political_Rally_2_611.jpg +178 166 148 204 +# 2--Demonstration/2_Demonstration_Demonstrators_2_329.jpg +22 1 51 57 +64 49 39 43 +114 39 37 44 +134 89 48 56 +207 54 43 53 +251 106 64 68 +375 100 27 29 +606 177 9 14 +622 159 7 11 +708 173 22 32 +731 174 23 22 +666 153 25 35 +793 194 19 24 +861 175 34 38 +911 165 12 13 +930 181 35 42 +956 193 27 32 +980 223 21 26 +608 160 5 6 +585 148 7 8 +# 2--Demonstration/2_Demonstration_Demonstrators_2_414.jpg +15 41 26 24 +89 65 22 21 +135 41 22 30 +201 42 22 21 +249 54 19 22 +306 43 25 26 +398 31 23 29 +488 34 20 24 +602 41 21 26 +685 58 22 22 +763 47 21 22 +824 54 18 23 +937 44 26 28 +910 55 22 23 +1013 58 11 27 +# 2--Demonstration/2_Demonstration_Demonstrators_2_567.jpg +63 89 56 80 +237 95 49 67 +431 120 43 60 +537 52 65 81 +700 55 52 83 +902 45 49 93 +# 2--Demonstration/2_Demonstration_Political_Rally_2_741.jpg +578 483 38 52 +737 479 20 23 +124 511 29 45 +# 2--Demonstration/2_Demonstration_Protesters_2_618.jpg +0 21 38 48 +146 74 44 51 +244 48 41 56 +322 90 45 63 +0 164 32 69 +253 239 52 63 +365 165 50 70 +458 256 61 78 +444 192 50 64 +484 19 40 52 +377 1 28 24 +457 0 24 20 +515 171 54 65 +604 146 54 77 +598 9 48 61 +705 41 41 59 +808 0 44 51 +837 2 32 53 +963 50 44 51 +937 162 46 56 +821 242 47 52 +844 256 56 82 +# 2--Demonstration/2_Demonstration_Political_Rally_2_135.jpg +524 433 96 106 +154 554 71 90 +936 628 78 95 +36 686 67 68 +910 909 14 19 +811 887 20 27 +0 1011 32 62 +# 2--Demonstration/2_Demonstration_Protesters_2_811.jpg +542 283 7 9 +883 282 9 11 +819 269 9 10 +755 258 20 31 +289 296 30 37 +511 296 17 29 +613 368 30 27 +# 2--Demonstration/2_Demonstration_Protesters_2_351.jpg +105 25 51 56 +226 118 33 37 +389 0 38 50 +422 52 25 54 +602 493 58 32 +613 52 31 44 +# 2--Demonstration/2_Demonstration_Protesters_2_531.jpg +1001 579 20 27 +927 541 22 25 +878 538 21 25 +919 483 19 23 +867 462 17 22 +978 436 19 23 +963 423 14 20 +788 572 15 18 +766 539 21 25 +784 487 19 22 +749 488 16 23 +773 451 17 21 +786 440 15 19 +747 421 17 20 +684 475 15 20 +685 450 14 20 +997 647 19 26 +666 546 16 22 +570 551 20 22 +585 494 18 21 +644 479 16 22 +603 458 17 20 +618 435 19 24 +516 465 20 21 +517 514 19 14 +456 512 17 22 +467 483 17 21 +494 447 17 20 +478 427 17 19 +521 565 19 24 +365 584 21 25 +413 524 20 23 +399 508 17 23 +408 478 16 20 +415 447 18 19 +356 479 14 18 +337 514 16 19 +313 491 17 23 +280 499 19 23 +232 491 18 20 +245 475 18 22 +273 468 14 18 +279 439 14 19 +350 453 17 18 +375 424 16 19 +347 415 18 21 +428 413 18 19 +269 577 16 19 +294 655 22 22 +227 650 21 27 +170 513 17 21 +114 513 23 22 +124 542 19 22 +143 625 18 21 +161 645 22 23 +107 586 18 21 +13 570 20 24 +0 608 11 20 +23 518 18 21 +60 494 18 19 +82 461 17 20 +121 474 16 19 +156 474 17 21 +192 432 18 22 +144 409 16 18 +95 426 16 18 +18 437 19 18 +11 418 17 20 +22 475 17 20 +416 577 14 19 +1013 424 11 16 +989 391 18 21 +896 408 15 20 +889 386 15 20 +904 366 14 18 +845 413 18 22 +828 404 16 19 +794 395 15 18 +751 363 15 17 +987 334 14 19 +997 318 13 18 +960 315 13 17 +975 300 13 15 +897 310 15 17 +843 332 14 15 +809 359 15 17 +786 330 15 13 +835 314 15 16 +739 334 16 17 +757 319 12 14 +712 300 14 18 +682 390 16 19 +702 414 15 19 +695 323 15 18 +654 316 14 17 +662 355 14 17 +633 411 17 
18 +627 390 16 20 +666 382 13 19 +618 360 14 18 +605 332 14 17 +583 327 15 17 +570 308 14 17 +581 288 13 15 +614 303 13 19 +525 306 12 11 +536 324 14 17 +564 278 14 16 +469 374 16 17 +456 356 13 16 +499 315 16 18 +499 286 13 15 +470 282 14 16 +453 321 15 16 +475 313 13 17 +423 301 13 13 +417 282 12 15 +420 339 14 16 +372 383 14 17 +355 346 13 13 +343 399 15 19 +299 389 12 16 +285 390 13 17 +247 374 15 15 +278 328 16 17 +346 344 12 16 +356 300 16 16 +344 289 11 14 +266 278 14 16 +238 288 12 13 +264 326 11 16 +247 311 12 15 +318 258 11 13 +380 263 11 15 +400 252 12 16 +368 244 10 13 +501 373 14 15 +467 348 13 17 +151 324 13 15 +116 321 15 16 +135 304 14 15 +202 312 14 16 +201 298 13 15 +154 285 14 17 +209 269 16 17 +71 315 13 18 +55 318 14 17 +36 317 13 15 +39 288 13 14 +57 300 13 14 +91 279 15 17 +62 261 15 17 +49 374 14 18 +21 364 15 19 +38 357 14 18 +22 243 14 16 +44 228 12 14 +93 240 13 18 +137 251 11 15 +174 241 12 12 +126 219 13 14 +97 217 11 13 +74 224 11 12 +68 207 15 16 +48 190 13 16 +11 206 12 15 +58 183 11 15 +89 179 10 11 +185 212 10 13 +164 197 10 11 +207 188 11 15 +241 228 13 16 +229 247 11 12 +276 225 12 13 +308 212 12 12 +329 245 11 11 +322 227 12 14 +339 216 11 14 +356 226 11 13 +452 238 12 15 +458 225 9 10 +455 206 10 14 +425 209 13 14 +409 200 12 14 +412 232 10 12 +502 229 11 12 +541 219 11 15 +583 245 12 12 +592 262 12 16 +607 245 12 16 +584 209 11 13 +638 236 12 15 +633 215 12 15 +618 187 11 15 +552 182 12 13 +496 196 12 14 +411 158 10 14 +364 176 9 9 +321 177 11 15 +323 161 9 14 +356 160 10 12 +379 206 10 14 +374 298 10 13 +842 265 15 16 +867 247 15 18 +900 266 13 13 +933 261 15 16 +991 263 13 17 +958 239 14 17 +973 220 12 16 +1005 228 14 16 +944 227 11 13 +881 236 14 17 +873 226 14 17 +822 231 13 15 +810 257 12 16 +798 269 12 14 +801 291 13 19 +776 286 13 15 +774 260 13 17 +745 249 12 13 +724 253 12 14 +720 273 11 15 +682 248 12 13 +668 235 13 14 +731 234 11 14 +768 230 12 15 +787 205 10 11 +732 198 11 13 +714 191 11 13 +657 265 13 14 +676 282 13 14 +684 201 9 13 +742 192 11 13 +848 199 12 13 +890 193 12 15 +910 184 10 13 +897 173 14 11 +960 183 8 10 +990 184 9 12 +830 189 10 12 +863 181 13 14 +684 174 10 11 +749 175 11 13 +640 183 12 13 +991 168 10 14 +943 163 9 13 +965 146 7 12 +983 119 8 10 +1008 107 8 12 +964 110 7 9 +964 100 9 12 +972 105 8 10 +995 69 8 11 +942 94 10 11 +912 105 8 10 +894 112 9 11 +884 102 8 10 +912 88 8 9 +904 80 6 9 +895 79 8 11 +867 120 9 12 +875 143 9 13 +834 142 9 10 +823 135 9 11 +836 113 8 12 +870 109 9 12 +795 121 8 13 +815 108 9 11 +815 89 8 11 +816 75 7 9 +858 75 7 10 +781 80 7 9 +774 126 9 12 +771 95 8 11 +760 93 8 12 +742 75 10 12 +744 111 8 12 +759 123 8 10 +713 120 9 10 +691 121 9 11 +678 122 9 12 +678 143 8 13 +699 145 8 10 +709 171 9 11 +662 121 8 10 +650 80 8 12 +718 92 10 12 +637 96 8 11 +628 113 7 9 +596 179 12 13 +611 119 8 10 +599 113 7 11 +572 118 9 11 +574 106 8 10 +583 95 9 12 +601 97 7 10 +546 137 12 13 +516 139 8 11 +484 149 10 11 +489 127 7 8 +514 121 9 11 +542 120 8 11 +539 101 7 9 +552 91 7 10 +512 108 6 10 +519 95 9 12 +500 107 7 10 +483 111 7 9 +471 100 7 10 +468 74 6 8 +490 84 7 9 +477 89 7 8 +547 63 8 11 +528 65 8 9 +615 69 7 9 +784 91 9 12 +459 139 10 13 +422 132 10 12 +432 118 9 11 +406 124 8 11 +393 134 12 14 +358 138 9 11 +378 123 7 9 +388 120 9 12 +354 114 8 10 +336 138 9 10 +300 168 10 13 +295 188 10 11 +273 186 11 13 +290 165 7 9 +267 157 11 12 +280 141 9 11 +294 145 9 11 +293 117 10 13 +279 105 10 12 +332 126 10 12 +318 114 6 8 +392 106 7 9 +424 113 8 10 +427 85 8 10 +443 87 8 10 +459 88 6 9 +438 79 6 9 
+393 89 8 10 +399 79 9 9 +366 96 6 7 +304 137 6 8 +355 193 7 10 +124 196 11 14 +159 182 10 13 +184 165 11 11 +114 166 9 13 +140 146 10 12 +160 139 10 14 +168 130 10 13 +194 124 10 12 +181 128 8 10 +243 156 9 11 +238 185 9 12 +229 218 12 13 +32 165 10 13 +4 147 9 13 +44 50 8 12 +64 43 6 8 +57 31 7 7 +89 62 7 11 +3 79 8 12 +8 62 7 9 +209 118 7 10 +233 144 9 11 +244 119 10 12 +227 129 11 13 +209 92 10 13 +254 91 8 9 +83 36 8 13 +65 57 11 14 +175 66 8 11 +995 5 7 6 +974 86 9 11 +806 24 7 10 +799 35 7 9 +780 15 7 9 +735 27 8 9 +742 21 6 7 +704 27 7 8 +695 43 6 8 +691 32 6 8 +668 25 7 9 +677 17 6 8 +687 17 5 8 +760 1 8 9 +761 16 7 9 +913 17 7 9 +958 23 8 9 +891 2 7 10 +817 13 7 9 +759 152 10 12 +794 156 9 12 +858 168 10 13 +894 168 9 12 +849 107 8 9 +95 79 8 9 +269 57 8 9 +621 21 7 9 +643 32 7 8 +611 54 7 9 +587 58 6 8 +571 57 7 9 +509 58 6 8 +512 37 7 10 +577 68 8 9 +566 10 7 7 +580 13 7 9 +547 6 6 8 +529 15 6 7 +515 5 7 9 +503 3 7 7 +488 12 7 7 +506 29 6 7 +502 42 6 8 +513 24 6 8 +520 23 6 7 +437 31 8 8 +426 68 7 9 +397 68 8 9 +309 91 7 10 +264 80 7 9 +368 70 8 10 +363 85 7 8 +347 70 7 8 +327 72 7 9 +329 32 7 8 +348 37 7 9 +321 54 8 8 +376 29 6 8 +385 32 7 9 +387 47 7 9 +365 49 7 9 +362 26 6 7 +368 38 7 8 +498 184 10 13 +505 173 10 11 +460 111 8 10 +781 163 8 9 +802 188 6 10 +1016 51 5 10 +1008 60 8 11 +1004 50 7 9 +1010 34 7 9 +977 16 7 9 +943 4 8 10 +704 8 7 12 +731 5 5 8 +719 18 6 8 +639 70 7 9 +623 87 6 9 +696 101 9 10 +605 4 6 9 +521 78 9 12 +488 75 8 9 +466 25 7 9 +477 14 6 8 +534 39 7 10 +544 37 6 9 +440 117 8 10 +473 405 15 16 +757 407 12 19 +849 241 12 12 +925 336 15 19 +0 324 12 19 +129 383 11 18 +1 494 15 21 +# 2--Demonstration/2_Demonstration_Protesters_2_370.jpg +45 267 39 53 +125 240 39 55 +264 239 50 58 +337 215 37 79 +453 233 20 22 +503 213 61 72 +579 240 72 77 +680 222 11 12 +697 183 67 71 +# 2--Demonstration/2_Demonstration_Political_Rally_2_187.jpg +387 23 378 637 +670 674 44 55 +588 741 41 52 +60 879 36 44 +215 1255 84 89 +638 1175 41 54 +224 1404 44 79 +# 2--Demonstration/2_Demonstration_Demonstrators_2_307.jpg +107 557 9 17 +102 506 10 15 +68 484 9 13 +195 599 14 14 +208 503 9 14 +204 456 12 13 +78 433 11 14 +152 444 9 15 +296 447 8 12 +247 466 9 14 +284 348 10 12 +260 410 10 13 +191 383 10 13 +147 413 9 11 +75 309 8 12 +121 286 8 12 +115 258 9 11 +157 265 7 12 +269 395 7 11 +236 315 9 13 +175 237 11 12 +241 205 9 9 +251 172 9 10 +178 155 11 11 +55 201 8 11 +6 173 6 10 +32 161 11 13 +51 130 8 9 +88 119 11 13 +122 188 11 9 +197 92 9 9 +258 99 6 9 +246 68 9 12 +146 105 7 8 +34 100 7 8 +73 87 9 10 +101 61 7 9 +124 50 7 10 +99 39 7 9 +113 29 7 7 +64 26 8 8 +27 54 4 7 +152 20 8 10 +244 36 6 9 +207 23 8 9 +134 6 6 8 +191 5 8 10 +262 28 6 8 +91 61 6 6 +146 86 8 9 +79 115 5 8 +45 50 6 8 +146 25 6 6 +492 592 13 16 +389 604 12 12 +523 416 10 10 +453 414 7 11 +388 398 12 13 +424 336 11 15 +456 444 10 8 +483 329 6 9 +534 315 8 10 +337 334 10 16 +325 342 9 15 +334 311 11 9 +339 293 7 11 +438 234 7 11 +452 232 7 12 +344 222 8 11 +357 221 9 11 +323 215 6 10 +298 191 10 12 +394 197 9 10 +413 176 7 10 +373 164 8 8 +395 169 7 8 +401 180 5 8 +385 186 7 8 +330 159 8 7 +370 152 6 7 +354 144 7 11 +357 125 8 10 +320 142 7 10 +301 152 7 5 +312 172 7 7 +441 214 8 12 +451 145 8 9 +477 130 10 10 +482 105 7 11 +467 85 10 10 +430 105 7 10 +415 110 7 7 +379 96 10 10 +402 74 9 10 +436 73 10 9 +376 46 8 8 +344 49 8 10 +483 48 8 11 +347 38 8 9 +280 19 6 8 +355 11 8 11 +472 21 6 9 +428 0 6 7 +561 642 10 8 +714 512 11 13 +834 461 8 10 +757 566 6 12 +778 476 7 9 +841 500 6 9 +840 512 6 9 +966 570 8 13 +1005 
507 11 9 +933 498 11 11 +973 392 10 11 +992 383 6 9 +1006 354 9 13 +847 327 9 11 +861 313 9 8 +912 310 9 11 +945 329 8 9 +985 336 10 11 +933 373 5 9 +982 530 6 10 +630 320 5 9 +736 273 9 14 +567 290 8 10 +682 245 8 12 +669 229 7 11 +538 245 8 10 +641 191 8 10 +618 174 8 9 +606 152 8 9 +542 196 7 9 +537 200 6 7 +575 136 7 7 +588 115 6 7 +617 107 8 9 +588 93 8 9 +553 86 9 10 +544 133 7 9 +525 76 8 11 +556 61 8 11 +604 86 7 10 +618 83 7 10 +609 61 7 9 +556 37 9 11 +528 39 7 8 +490 44 6 9 +501 49 6 10 +620 32 7 10 +506 6 7 8 +526 4 5 9 +624 16 6 8 +653 0 6 8 +670 11 5 7 +647 46 6 9 +633 44 5 6 +644 90 8 11 +686 96 8 10 +702 80 8 11 +694 58 6 8 +671 55 8 6 +684 41 7 9 +689 23 6 7 +707 12 6 9 +712 61 7 8 +726 24 7 12 +883 484 6 10 +910 435 6 9 +826 306 7 10 +849 274 6 9 +872 271 9 12 +917 282 10 10 +916 262 9 9 +949 263 10 14 +835 243 8 7 +795 300 9 8 +878 233 7 10 +923 241 6 11 +936 252 9 10 +996 250 8 9 +963 233 8 10 +955 255 10 9 +978 238 6 10 +974 223 6 8 +908 219 7 9 +817 222 8 11 +845 215 9 10 +744 226 10 11 +798 213 7 9 +791 191 7 11 +742 197 10 11 +725 178 8 10 +752 171 9 11 +810 169 9 10 +889 169 7 10 +675 122 9 10 +713 148 8 10 +717 137 6 7 +725 151 6 7 +697 146 6 9 +772 159 9 8 +744 121 8 10 +650 120 8 12 +810 152 6 8 +741 63 9 9 +727 60 6 8 +741 20 5 7 +797 42 6 9 +866 92 5 4 +860 74 6 8 +876 78 6 5 +842 77 6 7 +827 88 6 8 +816 96 7 6 +809 79 7 7 +793 69 6 8 +766 61 5 7 +856 19 5 9 +855 62 7 8 +851 81 7 8 +833 96 5 6 +847 101 5 6 +876 49 7 9 +888 19 6 8 +850 6 8 8 +776 2 7 7 +759 11 7 8 +796 6 9 8 +859 126 5 9 +812 90 5 6 +832 78 5 6 +907 66 6 8 +901 62 5 7 +900 52 6 7 +1008 477 7 9 +988 26 5 8 +1012 82 7 9 +986 92 7 7 +983 78 5 9 +961 0 5 6 +959 23 7 6 +1019 13 5 7 +978 21 6 8 +971 17 5 7 +947 0 6 6 +929 0 5 7 +797 14 6 7 +850 40 5 7 +938 114 8 9 +937 100 8 9 +909 74 7 9 +927 199 8 11 +933 173 7 8 +938 188 5 7 +973 176 5 9 +925 133 4 6 +982 200 6 9 +992 187 6 8 +199 259 8 11 +237 304 9 13 +513 387 9 10 +518 378 6 11 +479 395 9 12 +497 390 8 11 +497 410 6 11 +511 395 9 12 +457 255 6 9 +548 225 8 9 +520 197 6 10 +529 198 6 8 +700 637 6 11 +952 606 7 9 +289 568 13 9 +# 2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_617.jpg +7 404 44 38 +55 417 56 60 +156 392 48 60 +143 390 31 57 +230 414 34 41 +280 450 50 85 +311 440 41 61 +365 430 27 40 +416 455 50 48 +486 423 50 52 +540 442 42 46 +591 486 58 64 +646 468 44 48 +733 509 19 39 +810 503 145 112 +731 84 61 77 +295 435 21 23 +# 2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_615.jpg +1 269 36 38 +37 251 31 38 +122 251 30 40 +178 248 30 33 +291 223 32 38 +432 253 27 37 +508 268 23 28 +558 242 26 31 +613 256 21 31 +680 256 19 26 +718 273 13 20 +942 248 31 31 +896 246 30 29 +826 255 29 26 +761 257 27 30 +# 2--Demonstration/2_Demonstration_Demonstrators_2_499.jpg +259 356 24 30 +363 363 20 27 +421 355 23 30 +453 364 21 24 +485 384 13 20 +514 387 19 23 +553 355 15 29 +603 365 21 25 +634 364 21 26 +656 369 13 23 +698 379 11 21 +760 346 26 25 +717 379 10 14 +812 348 24 31 +833 326 22 34 +910 361 20 32 +963 343 15 30 +978 321 38 53 +# 2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_183.jpg +284 319 99 129 +106 512 50 49 +9 530 24 31 +# 2--Demonstration/2_Demonstration_Protesters_2_57.jpg +932 261 24 36 +898 233 25 36 +806 266 25 37 +710 193 31 39 +654 244 28 33 +591 313 16 30 +514 255 32 45 +484 274 23 27 +387 237 34 45 +360 265 26 33 +224 232 35 41 +72 226 37 46 +30 242 31 38 +# 2--Demonstration/2_Demonstration_Political_Rally_2_763.jpg +527 157 94 125 +668 187 58 69 +742 218 47 58 +800 246 74 98 +288 111 90 
118 +250 152 45 64 +440 200 49 58 +487 215 66 89 +645 236 29 37 +# 2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_438.jpg +452 268 45 59 +464 550 61 69 +# 2--Demonstration/2_Demonstration_Protesters_2_163.jpg +507 265 23 22 +553 334 31 37 +767 397 24 51 +222 129 31 38 +960 18 23 21 +864 71 11 11 +901 130 10 11 +773 93 9 10 +808 117 9 10 +902 61 9 10 +756 113 9 12 +755 164 7 9 +787 148 9 10 +680 164 9 14 +690 150 10 11 +616 113 10 12 +641 112 10 14 +539 123 10 14 +589 123 12 14 +525 118 10 13 +499 124 10 12 +414 141 12 11 +429 131 11 16 +529 184 13 14 +558 123 11 11 +599 105 11 14 +366 153 15 19 +284 158 12 19 +155 185 20 24 +257 172 16 15 +274 160 12 20 +49 176 21 23 +80 204 25 31 +100 195 16 23 +128 200 14 18 +14 206 13 17 +650 369 48 27 +999 27 14 14 +513 145 13 16 +454 136 14 11 +312 159 14 14 +# 2--Demonstration/2_Demonstration_Demonstrators_2_306.jpg +140 414 14 18 +154 418 9 16 +186 365 10 13 +236 376 7 9 +211 374 8 9 +84 369 8 9 +245 372 7 10 +70 376 5 8 +75 371 8 11 +37 376 6 8 +23 374 8 7 +271 367 6 6 +# 2--Demonstration/2_Demonstration_Demonstrators_2_781.jpg +255 172 52 65 +237 122 36 59 +393 194 22 33 +463 200 40 52 +544 144 56 79 +697 224 41 50 +535 213 31 39 +438 225 7 10 +532 192 19 23 +109 230 17 16 +44 204 7 7 +921 274 41 39 +933 226 25 32 +815 228 18 18 +662 147 10 9 +752 259 26 24 +864 244 10 17 +495 202 15 29 +758 232 23 24 +# 2--Demonstration/2_Demonstration_Demonstrators_2_28.jpg +0 365 13 21 +35 360 26 32 +39 391 32 40 +66 352 18 25 +133 407 26 39 +139 371 14 22 +229 320 7 9 +215 341 11 14 +240 343 11 16 +172 344 9 14 +192 354 11 14 +216 370 16 19 +170 379 27 33 +260 361 20 27 +286 367 17 26 +252 396 20 30 +262 391 19 26 +171 327 7 14 +279 348 9 13 +197 384 12 23 +85 479 35 46 +180 519 38 46 +38 547 38 54 +181 600 46 71 +326 311 6 9 +330 342 13 17 +368 381 17 23 +428 311 6 7 +422 352 14 21 +385 430 26 32 +296 426 24 32 +262 453 22 28 +331 443 21 33 +330 513 31 46 +470 320 8 11 +454 344 15 19 +445 369 16 26 +478 367 16 24 +499 369 12 19 +489 385 22 31 +458 394 22 35 +440 461 19 37 +514 443 28 37 +602 380 24 33 +570 397 13 21 +548 363 15 23 +562 367 16 22 +588 360 13 25 +620 362 14 21 +598 503 37 49 +495 562 34 54 +657 607 53 61 +699 577 33 50 +501 353 8 12 +527 344 7 12 +562 321 7 11 +572 341 11 16 +589 320 6 10 +672 377 16 25 +684 452 19 21 +648 368 15 23 +620 323 6 11 +621 319 7 9 +641 326 8 9 +594 357 12 16 +669 335 6 11 +653 341 7 9 +670 350 9 14 +658 330 7 6 +699 358 12 20 +758 355 9 18 +769 350 11 18 +798 331 5 10 +813 330 4 9 +804 358 9 16 +818 360 7 14 +805 390 12 21 +758 404 18 23 +754 431 23 31 +839 388 12 22 +834 367 9 14 +831 329 7 10 +842 328 6 11 +828 339 7 11 +879 354 6 7 +884 365 11 17 +993 340 10 15 +1002 359 9 11 +865 396 23 33 +938 406 17 19 +911 423 17 17 +895 435 21 44 +980 478 24 37 +1005 446 19 32 +905 368 12 15 +848 517 33 50 +# 2--Demonstration/2_Demonstration_Political_Rally_2_5.jpg +135 463 24 49 +148 490 29 27 +313 227 30 48 +438 117 31 52 +22 483 36 34 +866 449 27 38 +823 385 29 37 +749 411 26 34 +692 395 30 39 +586 411 31 35 +564 406 29 37 +555 357 23 24 +477 370 27 33 +873 364 24 27 +648 310 20 27 +678 290 15 23 +361 341 29 29 +363 301 21 24 +55 298 22 20 +0 298 8 22 +716 383 22 28 +671 335 20 22 +883 330 21 20 +713 421 17 26 +907 406 23 30 +723 265 11 16 +929 248 9 13 +948 173 10 13 +928 174 9 11 +979 174 9 13 +951 232 8 11 +928 234 9 12 +851 224 11 13 +906 174 8 14 +882 177 9 11 +862 183 10 11 +582 279 19 28 +756 160 10 14 +746 266 12 17 +496 229 8 12 +592 349 23 28 +450 361 23 38 +343 302 18 21 +26 231 9 12 +990 165 9 12 
+864 246 11 14 +904 237 7 8 +839 248 9 10 +936 430 24 33 +747 380 23 29 +719 334 15 17 +185 228 29 40 +36 275 19 20 +33 259 18 22 +132 186 7 10 +741 158 8 10 +778 169 7 11 +795 161 7 12 +812 160 8 11 +887 168 8 10 +784 242 12 13 +883 237 6 7 +813 266 11 12 +802 481 27 36 +# 2--Demonstration/2_Demonstration_Protesters_2_912.jpg +881 330 25 38 +839 321 25 43 +790 331 14 23 +721 273 30 47 +646 336 16 29 +595 307 30 46 +546 309 25 28 +523 314 20 24 +997 349 26 32 +946 310 10 11 +916 338 9 19 +634 316 15 22 +614 318 19 25 +509 337 21 34 +439 308 13 20 +474 311 30 36 +368 330 16 18 +350 347 24 29 +298 349 14 17 +288 322 12 16 +269 336 9 13 +253 336 15 17 +263 380 17 39 +147 431 24 34 +172 362 16 24 +64 348 13 17 +40 348 19 20 +270 319 6 7 +323 334 8 12 +164 364 11 19 +131 369 13 23 +# 2--Demonstration/2_Demonstration_Demonstrators_2_314.jpg +97 500 17 43 +19 486 19 35 +55 417 18 33 +20 391 26 55 +146 370 27 41 +173 386 23 51 +250 387 24 46 +268 417 19 47 +230 684 32 70 +258 437 18 32 +0 334 12 27 +4 326 23 28 +62 307 19 27 +128 303 29 37 +111 265 27 28 +67 234 21 26 +131 252 18 27 +149 271 15 23 +174 259 22 22 +206 271 27 32 +186 294 15 24 +196 334 21 32 +69 337 26 25 +71 291 21 25 +1 254 15 22 +0 278 11 17 +18 277 12 12 +232 271 14 21 +265 274 25 24 +244 354 14 19 +262 316 16 26 +387 284 16 30 +337 293 20 27 +414 261 26 27 +378 351 18 29 +479 304 28 39 +531 254 22 33 +765 306 21 29 +605 343 28 75 +877 408 24 43 +# 2--Demonstration/2_Demonstration_Protesters_2_56.jpg +238 394 60 68 +50 404 60 72 +390 398 72 90 +282 552 70 70 +488 468 78 86 +624 328 66 82 +744 438 80 84 +886 276 66 82 +814 194 62 82 +# 2--Demonstration/2_Demonstration_Protesters_2_901.jpg +835 94 46 63 +883 0 53 28 +579 169 41 48 +785 341 60 68 +675 338 54 54 +445 343 50 67 +274 348 55 72 +594 376 47 58 +148 328 46 57 +894 686 69 109 +531 652 63 85 +766 545 35 50 +401 360 42 65 +35 336 50 63 +772 118 36 48 +# 2--Demonstration/2_Demonstration_Demonstrators_2_713.jpg +59 215 8 7 +371 245 9 10 +449 274 6 8 +459 268 7 10 +483 281 7 10 +559 278 7 7 +363 392 26 32 +564 342 18 31 +402 310 20 28 +300 310 14 22 +515 332 27 39 +107 384 39 62 +862 317 15 17 +208 293 7 9 +189 310 6 7 +168 293 8 9 +216 266 9 11 +244 267 11 11 +56 233 6 7 +214 280 7 8 +921 313 18 19 +912 277 6 8 +260 286 7 9 +47 328 10 18 +16 201 10 12 +50 215 8 12 +95 233 5 7 +349 243 8 9 +392 285 7 9 +454 294 6 6 +417 281 4 6 +607 276 6 8 +595 293 5 8 +605 293 5 9 +572 279 7 8 +576 266 6 8 +473 250 4 5 +615 273 5 9 +578 290 8 7 +830 262 9 10 +592 257 7 8 +1006 245 6 7 +901 258 7 9 +862 263 8 11 +958 268 9 7 +1011 266 6 8 +171 241 6 7 +86 289 7 10 +181 238 5 7 +235 278 7 8 +# 2--Demonstration/2_Demonstration_Protesters_2_12.jpg +920 332 50 64 +914 309 43 46 +1009 316 14 52 +960 301 35 47 +831 329 42 64 +768 293 41 39 +768 333 42 50 +635 357 52 63 +722 315 18 18 +691 283 34 49 +644 320 45 49 +592 312 46 49 +488 314 47 61 +398 268 36 43 +373 321 48 67 +413 340 46 56 +494 280 24 28 +372 303 35 28 +295 284 49 61 +326 320 41 44 +175 304 55 51 +136 308 41 47 +211 285 30 34 +21 354 52 68 +47 310 56 63 +666 584 27 60 +# 2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_176.jpg +65 300 34 41 +134 332 14 15 +199 343 7 10 +262 327 9 12 +320 336 16 20 +356 300 30 35 +480 319 20 26 +540 290 33 39 +571 292 25 34 +630 313 25 30 +387 310 22 28 +750 276 33 46 +815 323 19 23 +936 326 20 21 +1014 310 9 19 +# 2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_225.jpg +259 410 17 20 +255 394 10 9 +214 347 8 12 +113 402 10 17 +136 411 14 18 +354 442 14 18 +443 436 17 16 +428 420 
17 16 +462 415 14 12 +206 451 13 18 +229 439 18 16 +101 438 10 14 +278 393 12 13 +135 399 9 10 +158 400 7 13 +37 409 17 27 +499 436 18 22 +558 424 17 20 +577 410 11 19 +538 343 8 9 +569 360 8 7 +595 346 9 10 +587 394 13 12 +466 372 13 12 +566 376 10 13 +630 413 10 11 +660 360 15 9 +920 411 14 14 +916 388 15 14 +803 364 9 4 +828 340 6 4 +864 359 8 8 +904 366 6 10 +1003 353 7 7 +813 346 8 4 +782 365 9 14 +703 352 8 9 +765 395 9 13 +782 400 9 10 +741 382 6 10 +857 427 15 22 +869 422 11 19 +883 439 14 12 +696 423 15 23 +890 367 10 10 +879 357 6 6 +850 414 11 13 +945 433 15 22 +995 404 12 17 +919 333 8 11 +981 358 8 7 +996 360 5 8 +995 387 8 9 +1016 405 7 13 +802 430 13 19 +763 418 15 13 +817 419 7 14 +679 425 10 12 +718 387 9 8 +729 427 14 18 +321 441 16 23 +407 391 12 9 +1011 501 7 12 +986 503 14 17 +997 452 8 10 +1014 475 7 8 +934 408 9 8 +928 376 10 15 +1005 395 7 7 +868 398 5 6 +822 373 9 16 +801 386 6 9 +752 364 9 10 +846 327 7 6 +886 339 7 10 +843 341 6 9 +815 331 6 5 +820 335 7 8 +770 365 6 6 +775 352 6 6 +792 347 6 5 +751 322 7 7 +659 308 5 7 +643 316 6 9 +604 328 6 9 +586 316 6 5 +546 378 12 11 +606 395 9 11 +627 401 7 9 +636 412 10 10 +651 396 8 12 +662 398 6 7 +652 380 7 10 +665 388 10 12 +633 374 8 10 +676 379 6 8 +682 380 7 12 +676 401 8 12 +499 357 8 11 +674 345 15 24 +690 420 6 7 +648 413 8 11 +629 390 7 8 +622 382 8 10 +628 366 6 11 +739 356 7 11 +713 344 8 12 +694 408 14 9 +711 394 10 11 +451 354 10 11 +437 350 9 8 +472 385 10 11 +431 369 10 14 +399 363 7 11 +410 373 6 6 +358 403 11 9 +322 401 7 7 +332 425 9 15 +395 430 10 11 +520 401 12 14 +536 398 10 12 +491 407 10 12 +280 365 9 13 +252 363 10 11 +248 355 7 8 +289 352 10 9 +287 362 7 10 +414 350 8 10 +356 368 6 7 +270 442 10 13 +198 391 11 14 +175 425 10 11 +176 342 14 15 +360 327 7 7 +159 377 10 13 +303 378 10 8 +323 368 12 20 +381 382 13 12 +54 348 13 19 +821 420 11 9 +891 447 14 14 +799 433 11 13 +956 349 7 7 +604 434 13 13 +576 403 9 11 +585 409 10 13 +586 371 11 9 +200 368 6 11 +598 371 7 10 +741 342 5 7 +54 379 10 14 +158 319 10 10 +163 291 8 13 +345 417 14 19 +# 2--Demonstration/2_Demonstration_Protesters_2_213.jpg +366 302 70 104 +238 297 58 67 +593 545 64 83 +754 542 33 50 +960 589 51 70 +526 469 21 28 +496 474 11 20 +116 454 21 28 +65 422 11 16 +49 400 11 15 +19 404 10 11 +0 425 9 14 +68 394 9 12 +60 388 8 12 +99 397 9 10 +680 375 6 9 +711 379 6 8 +772 377 9 9 +806 376 9 11 +843 375 10 13 +887 376 10 12 +950 376 8 15 +969 380 7 9 +997 379 9 11 +1019 372 5 13 +# 2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_471.jpg +93 148 48 93 +179 174 88 114 +423 258 51 89 +587 290 59 73 +751 233 71 80 +647 326 49 65 +819 451 25 30 +824 396 25 30 +791 406 26 32 +785 459 22 32 +850 444 26 32 +620 449 24 34 +622 497 26 39 +590 493 24 34 +433 457 28 34 +450 513 28 39 +414 517 28 36 +391 465 28 35 +375 520 23 32 +488 515 19 36 +473 464 22 33 +471 410 18 37 +219 413 37 45 +238 481 31 52 +200 480 28 45 +259 377 31 45 +1006 275 18 64 +# 2--Demonstration/2_Demonstration_Protesters_2_228.jpg +344 138 140 226 +644 78 178 312 +# 2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_665.jpg +168 459 37 49 +163 385 25 63 +292 605 27 49 +330 575 42 58 +333 623 44 59 +382 554 119 167 +476 423 221 319 +769 469 14 21 +720 597 54 65 +792 567 72 96 +846 600 35 59 +846 464 33 134 +# 2--Demonstration/2_Demonstration_Protesters_2_826.jpg +627 71 29 49 +583 71 21 41 +562 53 20 30 +549 63 14 22 +535 73 13 16 +518 65 14 26 +607 135 35 50 +543 165 45 59 +499 159 29 34 +512 120 23 31 +544 102 16 28 +422 87 18 24 +363 91 25 20 +404 87 11 
18 +257 66 10 13 +207 84 23 15 +178 119 14 14 +230 102 17 23 +86 82 17 19 +52 88 33 33 +490 81 9 15 +892 232 66 89 +705 192 52 80 +727 4 25 32 +# 2--Demonstration/2_Demonstration_Political_Rally_2_382.jpg +381 499 152 160 +# 2--Demonstration/2_Demonstration_Political_Rally_2_365.jpg +405 333 6 7 +419 320 5 7 +437 336 5 6 +450 353 8 12 +424 365 9 11 +413 356 7 11 +378 358 8 9 +342 362 8 9 +374 379 9 11 +434 330 5 7 +374 331 6 7 +354 336 6 6 +362 396 11 14 +345 400 11 13 +352 409 12 14 +330 416 10 12 +322 418 11 13 +351 425 10 14 +353 442 12 19 +373 430 12 16 +386 401 13 19 +405 396 12 14 +539 355 8 10 +554 346 8 10 +553 334 6 7 +733 108 247 347 +463 296 73 95 +205 343 84 90 +0 355 81 265 +# 2--Demonstration/2_Demonstration_Political_Rally_2_341.jpg +864 336 19 25 +790 337 19 18 +566 332 23 28 +501 345 20 27 +467 355 19 21 +403 362 11 15 +238 369 9 12 +205 364 15 16 +182 355 17 19 +128 341 21 26 +106 348 16 22 +62 370 13 15 +# 2--Demonstration/2_Demonstration_Protesters_2_369.jpg +26 508 116 105 +272 445 114 111 +525 367 127 141 +314 388 42 57 +422 410 9 9 +571 342 20 18 +185 87 44 54 +206 369 20 36 +605 272 27 25 +612 306 30 36 +656 309 43 46 +635 344 52 71 +737 457 37 73 +958 211 66 90 +762 265 40 52 +768 238 45 31 +# 2--Demonstration/2_Demonstration_Political_Rally_2_114.jpg +285 91 4 6 +311 95 3 6 +341 66 4 6 +344 93 5 5 +359 96 5 6 +328 97 5 8 +236 124 3 3 +232 118 4 4 +649 320 27 34 +598 285 14 26 +828 276 13 19 +951 348 26 41 +276 175 8 9 +475 181 8 9 +64 212 9 11 +# 2--Demonstration/2_Demonstration_Demonstrators_2_195.jpg +920 190 68 136 +687 75 96 110 +392 196 73 99 +325 199 55 75 +288 226 22 34 +549 278 34 51 +# 2--Demonstration/2_Demonstration_Political_Rally_2_22.jpg +76 390 102 107 +377 326 61 91 +863 404 52 81 +579 465 41 78 +598 483 32 53 +427 441 18 34 +456 380 20 27 +576 413 22 32 +763 362 41 54 +723 424 20 39 +# 2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_450.jpg +188 273 45 70 +292 353 19 27 +319 375 16 20 +583 272 35 55 +729 318 17 30 +812 361 8 9 +861 360 9 13 +1014 361 8 14 +982 343 7 12 +954 339 16 21 +# 2--Demonstration/2_Demonstration_Demonstrators_2_425.jpg +16 375 10 10 +55 391 14 10 +48 400 10 9 +33 424 12 16 +47 432 16 19 +15 473 39 50 +155 424 15 19 +140 376 12 17 +109 381 16 21 +94 382 13 13 +167 369 10 13 +191 358 9 8 +173 345 6 8 +191 345 4 7 +178 335 6 4 +162 336 4 5 +149 339 5 4 +197 331 3 7 +213 349 7 7 +201 403 21 30 +201 392 18 19 +225 424 36 54 +275 419 20 27 +250 368 19 24 +289 349 14 17 +272 346 10 14 +254 344 10 14 +270 330 7 10 +289 331 5 8 +305 333 7 9 +326 326 7 8 +340 334 7 11 +360 342 9 12 +362 322 10 10 +390 336 14 17 +379 325 10 12 +400 368 20 23 +356 430 41 65 +344 393 30 40 +446 349 18 24 +463 346 12 16 +429 329 14 14 +411 331 7 13 +415 315 7 7 +453 320 10 15 +445 314 5 9 +483 315 10 11 +510 352 10 13 +527 330 17 25 +485 371 19 23 +462 411 22 28 +563 464 38 60 +430 560 71 116 +577 385 26 35 +622 391 17 19 +633 395 34 44 +617 330 16 24 +656 304 19 23 +698 314 19 25 +641 305 14 19 +613 302 13 20 +593 310 12 11 +575 330 15 21 +542 312 11 17 +516 316 14 15 +581 296 10 12 +692 281 10 8 +710 300 7 13 +702 374 27 34 +537 301 6 7 +524 308 7 7 +631 299 8 10 +711 279 8 14 +616 295 10 10 +511 305 9 9 +490 295 7 10 +505 328 14 20 +790 426 69 79 +992 339 32 83 +863 298 33 43 +783 305 44 48 +730 335 22 24 +774 312 19 26 +741 303 17 19 +796 273 17 19 +842 283 18 19 +860 273 31 30 +938 260 26 26 +733 282 10 15 +775 269 8 11 +738 269 8 9 +129 348 9 12 +156 352 9 9 +332 357 18 25 +230 349 11 22 +# 
2--Demonstration/2_Demonstration_Protesters_2_345.jpg +246 56 62 68 +432 56 62 92 +680 80 64 84 +# 2--Demonstration/2_Demonstration_Political_Rally_2_267.jpg +751 308 34 36 +196 362 20 25 +97 396 22 34 +773 396 32 40 +449 319 20 31 +# 2--Demonstration/2_Demonstration_Political_Rally_2_566.jpg +385 201 331 453 +# 2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_117.jpg +910 614 36 41 +932 473 36 38 +881 491 37 42 +967 434 29 30 +801 459 33 30 +777 532 28 26 +717 402 26 26 +698 455 31 43 +661 419 26 32 +608 458 29 31 +625 412 23 33 +597 434 23 29 +515 442 26 32 +530 380 18 26 +439 427 21 32 +374 423 21 32 +380 401 12 17 +431 408 21 23 +371 369 16 18 +409 358 17 20 +489 363 17 20 +283 440 21 30 +226 414 22 30 +338 408 20 26 +165 420 18 24 +126 429 20 24 +62 415 19 28 +172 388 19 23 +200 366 10 13 +185 345 12 15 +137 348 10 13 +15 527 24 30 +23 417 19 22 +50 403 12 13 +64 337 7 10 +104 319 7 11 +39 311 8 9 +276 337 12 13 +363 348 11 11 +433 332 10 13 +405 324 7 11 +493 329 11 15 +510 320 7 10 +648 318 8 9 +670 314 7 8 +682 324 10 12 +684 302 8 9 +667 331 12 17 +767 314 9 11 +781 310 10 12 +790 299 7 10 +760 299 8 11 +780 298 6 7 +776 296 6 8 +990 268 17 19 +380 332 9 12 +78 385 6 10 +# 2--Demonstration/2_Demonstration_Protesters_2_817.jpg +378 538 41 49 +290 554 34 49 +519 93 31 34 +# 2--Demonstration/2_Demonstration_Political_Rally_2_867.jpg +914 375 31 31 +950 377 20 19 +797 432 33 34 +824 403 23 24 +855 383 14 12 +818 360 12 14 +924 515 46 47 +885 579 79 91 +647 501 32 35 +617 508 23 26 +177 559 56 59 +64 527 53 39 +128 488 17 20 +71 460 18 14 +166 385 11 14 +44 424 10 15 +171 514 40 55 +332 561 43 38 +866 364 12 13 +885 357 7 9 +796 372 12 12 +982 358 7 8 +153 401 7 10 +119 406 9 10 +5 398 9 11 +0 526 22 24 +# 2--Demonstration/2_Demonstration_Political_Rally_2_791.jpg +778 378 66 70 +649 281 71 80 +546 309 73 82 +397 352 63 77 +205 418 76 84 +399 143 63 77 +592 47 78 81 +993 522 28 30 +957 514 25 27 +933 552 37 27 +824 543 36 36 +857 524 25 32 +1006 412 15 18 +963 433 18 20 +926 496 17 21 +854 495 20 24 +1014 467 10 22 +744 546 35 33 +673 555 34 24 +692 517 23 35 +651 495 28 35 +588 480 22 29 +489 544 38 35 +401 532 30 31 +334 546 37 33 +324 473 26 27 +317 436 17 20 +328 404 14 14 +18 547 39 32 +42 466 24 32 +89 489 27 31 +59 441 16 23 +100 455 18 22 +11 514 22 27 +301 561 33 18 +881 312 20 31 +703 462 20 31 +675 429 19 22 +1005 495 19 31 +470 520 28 33 +44 517 24 30 +# 2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_419.jpg +31 410 53 68 +85 421 50 65 +27 597 79 76 +137 412 32 63 +306 612 36 72 +282 245 52 76 +455 281 54 72 +638 479 41 75 +595 161 55 88 +788 173 66 88 +986 574 22 38 +979 309 45 34 +# 2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_619.jpg +550 14 7 10 +595 24 10 10 +736 49 9 13 +678 96 12 16 +621 115 11 15 +573 131 11 13 +215 165 11 15 +205 150 9 14 +417 116 12 12 +454 114 9 14 +500 113 11 13 +528 127 10 15 +553 111 12 18 +431 96 12 14 +759 118 12 14 +857 102 12 17 +808 86 11 14 +822 140 11 15 +648 136 11 17 +945 223 13 17 +991 196 11 16 +786 181 12 18 +896 187 12 19 +849 249 12 18 +204 226 11 20 +476 544 14 19 +982 256 14 16 +985 285 13 20 +737 140 10 12 +784 149 11 12 +910 116 15 20 +908 134 8 12 +936 204 8 8 +561 197 10 14 +580 153 13 15 +494 133 13 13 +529 114 7 7 +336 134 11 14 +338 113 9 9 +340 104 8 11 +346 101 6 5 +351 90 8 11 +395 128 9 12 +422 167 11 14 +285 138 7 9 +213 150 8 12 +443 289 10 17 +250 252 12 21 +206 203 10 14 +159 200 10 12 +96 247 17 22 +207 271 7 15 +487 215 14 20 +463 205 15 18 +293 177 11 16 +418 395 16 16 +# 
2--Demonstration/2_Demonstration_Protesters_2_86.jpg +683 414 31 37 +966 556 58 53 +805 424 40 42 +968 353 31 40 +903 371 29 38 +980 306 20 25 +932 322 27 31 +893 306 17 20 +916 293 18 22 +1013 296 10 23 +1007 260 15 18 +968 262 13 16 +909 264 13 16 +934 266 9 10 +945 249 9 11 +990 253 10 13 +1008 244 9 10 +1016 235 7 10 +978 244 9 11 +988 235 8 9 +1008 220 7 8 +953 231 8 11 +941 237 6 10 +924 233 9 11 +899 234 12 14 +885 254 11 16 +864 265 14 17 +839 285 16 18 +965 220 7 9 +914 224 8 9 +899 215 7 7 +918 196 8 8 +881 241 9 9 +875 229 9 10 +885 211 7 10 +904 213 7 9 +873 213 6 8 +860 211 7 9 +912 206 6 7 +808 324 28 32 +825 302 17 22 +731 342 28 36 +765 301 21 26 +839 244 12 14 +817 266 13 15 +785 278 12 17 +765 284 15 16 +729 295 15 17 +740 279 14 19 +773 250 12 15 +818 218 9 9 +794 242 9 9 +783 209 6 7 +756 219 6 6 +760 204 5 6 +775 210 6 7 +764 241 9 13 +731 257 12 13 +700 253 12 14 +702 237 10 12 +704 224 7 9 +745 226 8 9 +785 227 7 8 +793 219 5 7 +785 218 6 7 +779 220 6 7 +772 222 5 5 +764 197 4 5 +726 231 8 10 +722 208 6 8 +710 202 6 7 +742 204 6 6 +746 210 6 7 +757 208 6 7 +735 207 6 7 +727 197 5 7 +875 200 5 5 +896 184 4 5 +854 184 5 6 +834 180 5 7 +814 176 5 7 +753 174 5 6 +779 173 5 6 +538 446 44 49 +676 308 28 31 +461 378 35 40 +601 330 25 33 +627 317 19 17 +493 319 23 32 +531 303 22 26 +566 289 19 23 +452 303 19 21 +681 269 16 19 +655 279 20 24 +677 254 11 11 +652 247 12 12 +605 270 17 21 +620 256 15 16 +672 225 9 10 +651 225 8 10 +610 254 8 10 +632 251 8 6 +631 232 9 10 +595 235 9 13 +573 274 17 17 +571 243 12 13 +611 228 6 9 +573 231 9 10 +566 237 6 12 +550 232 8 12 +662 219 7 8 +695 213 5 7 +653 216 6 7 +636 215 7 7 +634 223 9 9 +624 211 8 10 +608 214 9 8 +579 217 9 8 +664 203 6 7 +683 210 5 7 +531 282 18 18 +512 262 16 18 +510 243 13 13 +522 235 12 13 +557 245 8 11 +534 229 9 12 +543 212 7 10 +515 231 9 10 +486 257 12 16 +483 238 11 11 +493 230 9 13 +489 225 8 9 +501 213 6 9 +462 249 14 15 +446 268 18 22 +458 233 10 12 +490 215 6 8 +483 217 6 8 +453 225 7 9 +451 216 6 7 +461 222 7 9 +349 477 51 44 +320 364 32 40 +390 332 26 30 +284 336 30 34 +399 302 23 24 +331 300 21 26 +276 294 19 21 +360 273 17 19 +328 266 15 19 +427 258 14 15 +294 277 15 14 +397 246 12 15 +374 252 14 12 +321 257 15 14 +270 278 14 14 +282 248 10 13 +247 257 15 18 +261 257 14 15 +357 251 11 13 +422 241 10 10 +429 244 9 12 +41 602 75 73 +46 482 46 51 +184 377 36 37 +118 341 28 32 +218 331 24 30 +0 361 13 34 +186 314 23 28 +121 298 21 23 +73 313 23 27 +23 298 24 26 +0 281 14 21 +96 280 18 20 +151 276 15 15 +197 277 18 17 +53 279 21 22 +25 278 16 17 +129 277 15 14 +101 247 14 16 +72 269 15 12 +25 265 16 16 +230 235 10 12 +262 235 9 9 +253 241 7 8 +185 234 12 14 +199 232 11 12 +155 235 14 18 +134 233 15 15 +168 235 9 10 +175 217 9 10 +220 214 9 10 +255 230 8 10 +221 229 10 12 +246 216 6 8 +275 227 9 14 +210 232 7 8 +207 220 6 8 +158 216 7 9 +188 216 8 9 +357 233 10 12 +375 243 9 9 +401 227 9 12 +426 228 9 8 +306 233 12 13 +293 232 9 11 +374 233 8 9 +412 233 8 10 +423 218 6 8 +409 222 6 7 +412 209 6 8 +402 210 6 6 +333 220 8 9 +316 222 11 10 +371 221 8 8 +354 219 7 7 +348 221 5 6 +363 221 7 7 +392 210 5 7 +394 221 5 9 +383 215 5 6 +375 211 6 6 +284 216 6 8 +277 216 5 6 +294 213 5 7 +301 207 5 8 +311 210 5 7 +338 214 5 6 +437 213 5 6 +439 220 5 6 +348 205 4 4 +382 203 5 4 +323 211 4 5 +309 203 6 6 +101 193 13 14 +130 181 9 11 +154 185 8 10 +95 184 9 9 +105 184 9 9 +67 187 13 16 +77 180 12 15 +48 186 10 13 +36 183 12 16 +17 195 15 17 +17 186 13 14 +235 202 6 7 +225 181 6 7 +181 177 7 8 +202 203 6 7 +239 180 4 5 
+235 184 5 6 +181 207 7 8 +157 206 5 7 +211 190 4 5 +209 201 5 5 +255 207 8 8 +266 193 6 10 +314 195 5 5 +323 185 4 5 +300 200 4 5 +280 196 7 8 +276 211 5 5 +297 191 5 6 +289 188 4 5 +331 186 3 3 +349 196 3 4 +359 191 6 6 +325 194 4 5 +707 190 4 4 +702 191 3 4 +696 190 5 4 +690 187 5 6 +680 189 4 4 +684 185 3 4 +687 179 7 9 +671 176 2 3 +665 188 3 4 +671 188 4 4 +669 199 5 8 +676 202 6 10 +517 206 6 7 +427 201 4 6 +# 2--Demonstration/2_Demonstration_Political_Rally_2_637.jpg +281 355 35 43 +161 396 37 45 +1 391 34 41 +732 110 48 66 +905 400 25 31 +853 586 33 22 +726 410 33 37 +612 349 37 40 +660 403 27 29 +451 368 33 58 +453 354 36 59 +365 414 46 54 +# 2--Demonstration/2_Demonstration_Protesters_2_525.jpg +994 462 30 30 +999 400 23 26 +991 382 17 21 +949 350 22 22 +918 379 22 23 +946 433 30 27 +922 483 30 26 +880 524 34 36 +962 556 46 20 +766 512 19 30 +758 459 29 33 +848 428 27 27 +844 396 21 23 +848 367 17 20 +794 365 20 20 +780 342 17 18 +762 403 25 25 +886 330 12 17 +862 330 13 16 +833 340 15 18 +896 349 23 17 +714 362 20 22 +718 332 14 17 +783 320 15 15 +672 322 15 19 +712 311 15 16 +656 428 27 29 +659 487 33 35 +553 489 30 33 +577 405 24 27 +592 343 16 20 +535 334 15 19 +524 337 17 19 +524 395 18 20 +417 517 29 36 +482 439 28 31 +478 397 19 24 +388 438 23 28 +467 328 17 18 +387 349 15 19 +420 337 16 15 +630 378 19 14 +298 490 30 35 +230 489 31 27 +325 417 27 27 +310 385 21 23 +339 347 16 19 +251 373 19 21 +203 384 22 26 +318 328 14 17 +366 325 16 16 +148 395 20 13 +80 382 20 23 +105 351 20 21 +144 340 13 17 +184 339 15 15 +53 436 29 29 +53 414 23 24 +1 373 21 25 +60 345 18 19 +21 337 14 15 +32 322 14 14 +81 318 14 15 +123 321 14 16 +127 302 9 11 +168 288 11 12 +224 313 10 16 +248 312 12 14 +244 293 11 14 +267 288 10 12 +215 300 10 12 +21 298 13 12 +53 300 11 13 +38 282 9 9 +26 288 9 12 +75 280 9 11 +134 288 9 11 +116 273 8 9 +189 325 14 14 +276 274 9 10 +310 267 8 11 +344 267 9 12 +357 277 9 10 +324 297 8 10 +361 308 16 17 +191 258 8 9 +169 257 8 9 +111 259 8 8 +189 242 8 8 +230 256 8 9 +210 262 8 9 +270 261 6 7 +156 332 14 12 +501 317 15 17 +578 297 12 14 +564 303 11 11 +534 309 13 16 +544 291 9 11 +527 279 10 13 +569 284 8 12 +503 285 11 13 +475 295 12 12 +456 309 13 16 +442 294 10 13 +454 272 10 11 +413 317 12 13 +371 293 11 12 +384 283 8 11 +418 275 9 11 +560 328 11 17 +639 300 14 16 +662 309 14 16 +1006 242 18 19 +995 226 15 19 +968 249 16 21 +958 236 16 16 +865 229 14 18 +887 219 14 17 +810 229 15 18 +824 179 12 13 +776 235 13 15 +771 221 12 17 +767 304 12 17 +721 226 14 16 +684 220 16 18 +644 220 13 15 +632 228 10 14 +616 213 11 15 +598 213 12 14 +589 218 11 13 +561 209 10 13 +523 207 12 14 +501 221 10 12 +507 210 10 12 +446 213 11 12 +478 216 9 12 +466 211 8 10 +430 222 8 10 +409 208 10 13 +714 219 12 15 +393 214 10 11 +374 213 10 11 +343 212 8 11 +329 212 8 9 +305 212 10 11 +321 211 8 11 +287 204 9 10 +268 206 8 10 +282 206 8 11 +260 206 7 9 +248 208 7 7 +272 220 6 8 +254 219 7 8 +90 302 11 11 +75 310 9 13 +31 268 10 11 +4 280 10 10 +80 265 7 8 +80 215 8 8 +108 217 8 10 +46 220 6 8 +54 222 6 7 +148 257 7 8 +162 210 8 9 +185 209 6 8 +208 210 8 9 +227 212 7 9 +121 214 9 10 +67 206 8 8 +48 209 6 8 +57 266 7 8 +45 272 9 10 +241 286 9 12 +240 272 10 11 +160 281 9 8 +150 272 7 7 +297 310 11 14 +392 277 9 11 +371 268 10 10 +268 257 9 10 +0 347 12 15 +83 521 24 19 +266 353 14 16 +292 292 11 13 +332 258 9 12 +101 280 10 11 +75 298 10 11 +134 216 6 8 +152 211 7 10 +193 201 8 11 +363 200 8 13 +574 210 7 12 +936 213 11 18 +971 215 10 15 +891 184 12 16 +# 
2--Demonstration/2_Demonstration_Demonstrators_2_290.jpg +411 149 79 101 +698 167 68 92 +21 502 16 17 +56 533 20 27 +55 502 14 12 +172 376 80 78 +510 573 86 99 +458 507 24 28 +483 492 16 21 +378 514 17 19 +257 467 13 23 +272 481 8 7 +361 509 11 13 +339 504 11 14 +392 527 7 9 +408 515 6 11 +415 508 8 14 +434 517 4 7 +568 504 22 22 +517 511 5 7 +493 513 9 9 +544 507 4 5 +505 515 4 6 +519 507 6 9 +544 516 5 5 +732 517 48 61 +639 481 44 39 +702 490 8 11 +615 503 5 7 +623 507 3 6 +857 473 58 58 +938 462 16 16 +813 487 5 6 +984 451 2 4 +914 464 12 9 +799 487 5 5 +# 2--Demonstration/2_Demonstration_Protesters_2_24.jpg +890 1121 92 122 +870 1053 31 32 +665 1053 76 101 +452 1129 74 91 +815 658 65 81 +680 861 31 50 +584 870 44 57 +481 756 75 107 +392 817 61 71 +154 816 83 91 +36 849 44 55 +637 694 80 78 +603 637 64 73 +503 671 61 79 +363 591 70 71 +671 476 57 78 +378 274 96 152 +547 276 88 145 +633 141 100 107 +216 160 86 105 +13 402 111 143 +184 462 45 66 +295 264 91 139 +# 2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_306.jpg +0 260 62 107 +175 167 123 148 +318 204 37 46 +362 183 57 70 +457 218 40 28 +590 236 45 65 +663 213 63 86 +748 261 37 64 +826 226 101 127 +617 204 48 59 +# 2--Demonstration/2_Demonstration_Protesters_2_204.jpg +614 193 74 108 +388 210 65 109 +179 209 79 96 +942 363 32 77 +743 249 27 61 +# 2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_924.jpg +662 154 184 170 +# 2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_816.jpg +109 0 33 25 +160 0 34 26 +93 64 39 48 +15 174 39 46 +306 149 36 48 +319 211 40 51 +193 156 29 50 +276 159 29 37 +181 201 47 58 +139 283 51 64 +218 257 46 66 +350 273 44 60 +358 396 46 62 +307 1 38 38 +360 5 27 31 +389 1 22 17 +448 65 33 50 +497 156 39 51 +460 217 48 59 +439 197 29 54 +719 1 41 54 +642 11 33 41 +882 1 32 37 +688 62 29 49 +814 76 38 53 +864 169 39 57 +794 230 38 50 +679 200 42 54 +642 144 41 44 +631 171 35 49 +780 279 48 63 +574 122 42 51 +616 232 33 59 +1006 81 18 54 +972 87 33 53 +972 149 39 54 +0 36 19 57 +# 2--Demonstration/2_Demonstration_Political_Rally_2_800.jpg +362 212 148 194 +# 2--Demonstration/2_Demonstration_Political_Rally_2_64.jpg +850 385 27 34 +793 261 20 31 +634 455 30 39 +583 391 33 38 +638 394 20 28 +520 353 24 32 +634 297 11 12 +925 186 10 13 +# 2--Demonstration/2_Demonstration_Political_Rally_2_896.jpg +298 288 82 102 +692 58 92 106 +# 2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_30.jpg +771 84 41 60 +596 83 18 18 +435 63 20 22 +48 66 8 10 +# 2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_162.jpg +443 139 46 59 +525 178 31 42 +556 183 18 27 +606 152 32 43 +93 225 26 29 +3 226 23 29 +150 218 14 16 +963 196 18 19 +931 194 14 22 +129 208 12 15 +162 219 14 17 +192 208 9 10 +471 198 34 39 +# 2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_98.jpg +866 602 28 50 +662 601 30 46 +635 630 28 25 +607 582 34 43 +654 552 25 33 +509 530 28 37 +504 628 35 28 +593 517 28 41 +560 487 31 37 +596 473 25 36 +507 484 28 30 +553 442 22 33 +520 447 23 34 +702 486 20 35 +441 455 25 35 +411 514 24 28 +393 464 24 29 +363 494 30 38 +358 485 16 27 +331 575 42 43 +286 569 36 41 +268 530 30 38 +325 474 20 31 +321 438 24 31 +377 419 22 34 +312 410 8 12 +270 425 10 11 +213 597 42 54 +169 604 26 48 +166 578 20 34 +194 505 30 33 +135 518 23 30 +54 521 18 32 +8 479 22 25 +21 458 22 32 +0 429 20 26 +116 457 16 22 +3 259 6 11 +2 239 8 9 +25 242 8 10 +31 263 8 11 +47 276 7 11 +38 299 7 11 +54 304 10 10 +56 281 9 13 +67 299 9 12 +58 317 8 12 +103 296 9 13 +119 295 10 12 +130 298 10 13 
+76 277 9 11 +60 239 7 8 +77 239 8 10 +103 266 8 12 +115 259 7 11 +126 267 5 9 +154 270 7 12 +158 294 9 13 +197 300 9 10 +191 286 9 11 +221 308 9 13 +252 306 10 12 +224 281 6 11 +169 261 8 10 +192 229 7 10 +225 224 7 8 +294 283 8 11 +319 294 8 14 +338 288 9 13 +349 303 8 11 +281 251 7 10 +263 243 9 8 +347 276 8 12 +155 225 8 13 +373 289 7 11 +387 263 6 9 +409 259 9 10 +418 292 8 12 +419 276 7 11 +411 238 7 9 +444 238 7 8 +469 281 6 9 +455 317 10 13 +510 302 6 8 +519 299 8 10 +476 265 5 10 +492 253 6 8 +531 251 7 9 +526 236 7 9 +558 254 7 10 +578 256 9 10 +604 271 8 13 +598 287 7 10 +577 321 10 14 +707 317 10 14 +721 322 12 10 +744 258 8 10 +684 245 7 10 +670 248 9 12 +665 244 7 8 +836 281 11 13 +850 331 10 14 +888 329 10 14 +936 324 9 12 +797 293 6 11 +779 276 5 11 +784 224 7 12 +810 237 6 11 +870 252 7 11 +894 229 7 10 +927 224 8 10 +960 299 12 12 +960 216 9 11 +840 262 8 10 +932 208 8 11 +976 252 9 14 +991 235 8 10 +976 222 7 10 +980 201 8 12 +998 278 8 11 +994 199 6 9 +1015 105 6 9 +998 118 7 8 +1003 109 6 10 +984 106 7 9 +972 142 8 11 +943 137 7 9 +873 142 8 9 +870 178 7 11 +907 114 7 10 +922 87 7 9 +971 80 6 8 +997 57 7 10 +955 44 6 8 +926 63 7 10 +896 32 5 9 +886 39 6 7 +855 55 6 8 +864 93 5 9 +831 161 9 10 +807 106 7 9 +818 80 7 10 +828 62 7 10 +792 57 7 9 +779 93 6 7 +779 134 9 9 +755 146 9 10 +695 138 7 9 +724 167 10 9 +763 83 6 8 +753 61 6 8 +739 65 6 9 +723 41 6 7 +735 79 7 7 +739 112 9 10 +799 101 6 10 +761 134 8 10 +673 127 8 9 +678 61 5 7 +671 70 6 8 +643 56 7 8 +642 81 6 7 +625 57 7 9 +589 85 7 7 +616 111 6 8 +639 133 6 7 +617 131 6 10 +628 170 7 9 +582 147 8 8 +579 159 7 10 +578 127 9 10 +598 106 8 9 +592 97 7 8 +564 87 8 8 +560 67 7 9 +548 68 7 8 +555 35 6 7 +549 141 7 10 +558 159 6 9 +750 183 9 9 +698 182 8 11 +662 182 7 10 +619 181 7 10 +590 195 8 8 +528 164 8 8 +524 175 8 9 +527 140 6 9 +535 94 7 8 +515 106 6 8 +485 102 7 10 +479 114 6 9 +473 138 6 8 +455 117 6 8 +439 128 8 9 +441 109 6 10 +517 80 6 7 +527 61 5 8 +495 73 8 8 +491 61 7 9 +469 68 5 8 +455 79 5 8 +437 64 6 8 +428 57 7 8 +425 82 7 11 +389 82 7 8 +403 42 5 7 +361 40 6 8 +404 161 6 8 +392 162 6 7 +313 46 6 6 +301 73 7 8 +298 106 6 8 +422 177 7 9 +419 197 7 8 +416 212 7 11 +353 202 8 9 +279 205 6 8 +361 142 7 9 +289 114 7 9 +266 102 6 8 +276 89 6 8 +255 87 7 8 +222 100 7 8 +216 115 6 8 +183 123 6 9 +202 95 5 9 +171 85 7 7 +182 48 6 7 +245 61 6 7 +199 84 6 6 +276 58 6 8 +237 107 6 8 +212 139 6 8 +173 173 8 9 +162 65 5 8 +153 121 7 10 +151 170 7 9 +106 158 6 9 +125 121 7 8 +117 115 6 8 +122 99 7 9 +132 64 7 9 +117 63 7 8 +101 65 7 8 +89 62 8 7 +80 66 7 8 +67 100 6 8 +96 162 8 9 +74 185 7 10 +79 178 6 8 +57 159 5 9 +173 190 7 8 +197 200 6 8 +166 218 7 9 +129 197 8 9 +60 177 5 8 +59 264 5 9 +59 60 7 9 +48 62 7 8 +18 98 7 8 +35 58 7 9 +10 63 8 7 +74 139 5 8 +60 118 7 7 +33 117 7 9 +14 113 6 9 +0 133 4 9 +19 178 6 10 +0 183 3 9 +147 211 8 12 +207 221 8 8 +140 185 6 9 +169 206 7 11 +259 103 7 8 +217 89 7 6 +491 230 8 8 +406 215 8 9 +640 303 10 13 +589 275 7 12 +728 83 8 10 +177 373 15 18 +139 381 23 22 +294 366 15 20 +894 139 8 9 +706 55 7 9 +719 89 5 7 +173 241 9 9 +404 231 5 7 +477 288 10 14 +# 2--Demonstration/2_Demonstration_Demonstrators_2_517.jpg +616 71 11 15 +563 72 9 12 +571 93 17 20 +533 69 7 9 +510 70 7 8 +700 82 10 16 +713 81 10 11 +766 66 16 19 +502 109 42 53 +652 144 29 50 +702 131 51 58 +787 82 29 35 +802 109 34 40 +884 94 35 37 +921 106 22 21 +862 89 10 18 +878 97 11 16 +969 76 14 26 +985 87 27 28 +1015 90 9 20 +777 285 78 104 +464 364 64 76 +428 76 14 20 +259 118 51 69 +459 75 7 10 +248 83 10 15 +242 
94 12 15 +219 100 11 14 +220 111 22 26 +165 127 50 52 +110 93 26 33 +132 287 84 93 +1010 179 13 44 +486 96 11 15 +515 86 10 12 +482 114 18 31 +635 69 7 10 +651 71 8 9 +744 77 8 11 +703 69 4 6 +# 2--Demonstration/2_Demonstration_Political_Rally_2_451.jpg +52 224 138 196 +204 90 114 164 +514 276 160 180 +226 442 184 226 +726 108 124 170 +850 120 122 160 +834 52 66 74 +804 6 70 88 +# 2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_476.jpg +214 653 27 29 +327 618 20 27 +392 606 17 26 +375 559 16 19 +193 557 20 24 +61 549 23 24 +138 495 18 29 +194 501 20 24 +276 503 21 26 +331 498 23 22 +404 459 20 23 +424 494 23 24 +245 487 22 24 +217 481 18 24 +136 467 23 26 +200 449 18 22 +36 421 21 18 +64 440 17 20 +92 480 18 20 +109 434 20 18 +80 421 17 18 +146 436 21 17 +227 426 18 24 +263 430 14 21 +280 437 20 18 +331 435 19 19 +362 445 15 20 +141 596 24 21 +245 589 23 27 +278 557 18 24 +398 438 20 15 +419 412 20 16 +284 405 15 21 +314 376 17 19 +377 377 15 19 +367 406 18 21 +342 401 17 19 +223 406 19 18 +28 366 16 12 +46 380 14 18 +79 382 15 19 +101 405 18 17 +131 405 17 19 +138 383 14 17 +184 407 14 17 +187 428 14 19 +242 390 17 18 +208 377 14 17 +123 368 15 15 +304 612 20 22 +186 649 20 22 +444 463 15 19 +461 431 18 21 +491 474 18 24 +501 499 18 23 +468 545 23 24 +524 568 20 28 +492 655 24 25 +590 631 25 22 +513 614 23 22 +564 560 18 24 +625 565 22 28 +695 607 27 25 +757 581 29 29 +808 648 28 25 +878 536 20 31 +825 592 26 27 +781 507 25 26 +708 551 23 27 +720 578 1 2 +655 532 25 29 +649 497 26 25 +573 514 25 18 +626 458 19 25 +554 470 19 21 +526 467 16 18 +481 457 20 18 +581 444 22 16 +516 401 22 23 +639 431 17 21 +671 432 24 24 +678 466 19 20 +736 442 21 29 +729 512 19 19 +749 471 20 25 +788 449 18 20 +831 441 20 19 +882 477 23 25 +883 418 20 26 +802 419 21 20 +734 424 18 16 +772 409 16 17 +864 395 22 25 +906 395 15 16 +932 407 18 20 +947 451 23 23 +985 417 20 24 +991 376 21 25 +954 501 18 23 +1009 444 15 28 +995 527 24 23 +991 595 24 31 +920 623 25 32 +867 301 19 17 +914 321 21 23 +984 327 19 21 +945 360 19 23 +955 387 19 22 +888 359 19 23 +906 298 18 18 +945 297 18 16 +977 266 18 17 +924 262 13 15 +848 328 17 19 +811 294 18 17 +800 325 18 15 +734 285 14 18 +753 298 19 16 +755 268 17 15 +781 256 14 12 +809 252 15 13 +815 271 15 17 +646 328 18 20 +638 375 20 21 +648 397 0 2 +692 382 17 20 +746 379 18 22 +662 367 20 19 +691 339 14 20 +604 335 17 21 +576 330 19 20 +577 364 16 23 +591 390 20 23 +620 307 20 16 +700 249 13 13 +662 249 16 11 +631 240 15 13 +840 268 15 13 +861 270 15 13 +973 224 15 19 +975 173 13 12 +928 168 12 12 +909 189 12 17 +898 203 14 15 +907 240 14 17 +825 185 14 12 +854 181 15 14 +806 225 14 13 +815 176 10 12 +782 188 12 11 +990 155 15 15 +551 299 16 17 +520 302 16 20 +493 296 17 18 +471 333 17 16 +502 350 13 19 +540 367 19 19 +486 387 19 18 +428 366 17 22 +441 345 18 23 +422 345 14 17 +475 282 16 23 +433 302 17 19 +415 271 16 15 +457 266 13 19 +554 258 13 16 +531 252 14 19 +505 240 15 15 +557 233 15 14 +581 259 14 15 +588 294 15 11 +559 285 15 15 +472 376 17 19 +99 307 16 17 +82 345 18 19 +165 354 18 15 +191 328 15 19 +172 305 18 19 +126 292 14 17 +168 276 15 15 +210 305 16 15 +88 264 13 18 +51 259 16 14 +44 283 19 15 +20 276 18 13 +47 324 16 17 +70 331 15 14 +68 302 14 13 +79 295 17 17 +275 297 16 16 +326 297 16 18 +327 337 18 16 +367 328 14 15 +388 307 14 15 +372 295 14 15 +387 279 13 15 +340 275 13 15 +302 277 15 14 +322 254 13 18 +238 315 17 18 +281 362 18 20 +251 349 20 17 +387 349 15 18 +506 334 19 15 +614 283 19 17 +509 220 15 12 +538 182 13 14 +544 210 14 14 
+576 212 14 15 +479 219 14 16 +455 245 17 14 +438 231 14 17 +455 177 14 14 +626 197 12 17 +653 188 11 15 +131 252 14 12 +196 263 14 13 +190 287 16 14 +255 244 12 16 +95 235 14 14 +46 236 13 12 +27 227 15 14 +194 237 11 15 +211 240 15 16 +245 233 11 16 +269 229 13 17 +111 135 11 12 +88 197 16 13 +865 226 13 13 +833 238 15 16 +911 216 11 16 +935 229 13 15 +951 248 13 15 +886 181 10 12 +742 243 16 12 +664 222 15 12 +672 273 16 17 +735 207 15 16 +772 155 12 12 +794 134 13 13 +725 155 14 11 +759 153 10 11 +746 177 12 10 +762 195 14 12 +694 217 13 13 +704 184 14 16 +674 183 14 11 +688 175 12 15 +681 162 11 9 +704 148 9 13 +867 310 2 0 +699 285 13 9 +521 287 14 11 +613 230 12 11 +516 201 14 12 +437 215 15 14 +402 230 13 11 +395 259 16 18 +385 227 15 17 +414 199 14 13 +385 192 14 15 +294 213 13 14 +320 204 11 12 +334 226 13 13 +363 251 12 17 +347 249 13 13 +363 206 14 13 +297 191 11 12 +336 183 12 12 +316 174 12 10 +272 170 13 13 +296 163 13 12 +262 207 12 14 +230 202 11 13 +217 224 11 12 +294 247 13 12 +314 245 10 13 +362 177 14 11 +237 169 14 13 +257 169 9 11 +251 188 10 14 +336 361 16 15 +363 371 13 17 +305 345 17 20 +12 254 17 14 +74 224 15 17 +88 221 19 16 +145 211 11 13 +153 188 15 15 +132 184 13 13 +172 182 13 13 +179 163 14 13 +132 158 13 14 +41 165 14 16 +18 165 17 13 +9 184 15 15 +37 207 16 14 +59 215 13 15 +15 225 13 15 +141 237 13 14 +182 225 13 16 +112 182 14 12 +113 157 12 11 +194 188 15 16 +204 167 12 11 +263 140 11 12 +222 267 14 11 +559 152 14 14 +502 167 14 13 +403 174 11 13 +449 155 12 13 +321 132 13 13 +424 152 11 12 +392 156 15 10 +505 182 10 16 +881 150 12 12 +895 162 13 11 +952 105 14 12 +992 117 13 11 +922 102 11 10 +891 112 13 8 +872 91 12 9 +902 78 10 12 +858 118 11 13 +601 159 13 14 +642 166 10 14 +89 133 12 12 +75 134 10 10 +57 114 13 13 +76 102 10 13 +17 100 9 12 +365 138 14 9 +332 112 12 12 +303 91 13 13 +305 116 14 10 +621 266 16 17 +170 122 13 13 +206 113 14 15 +186 97 15 14 +189 78 13 13 +172 73 12 12 +167 100 13 12 +141 106 12 13 +114 107 10 12 +65 67 12 12 +46 62 9 11 +44 13 11 9 +77 53 10 12 +102 33 9 12 +68 87 11 13 +57 90 9 11 +224 71 11 14 +200 47 13 12 +141 74 11 12 +99 111 13 10 +269 114 12 13 +257 90 13 14 +258 79 16 13 +270 54 12 14 +403 128 11 12 +428 134 12 13 +433 75 11 12 +441 52 10 11 +369 70 10 11 +397 117 10 11 +415 96 12 13 +437 109 12 12 +378 43 11 13 +348 44 10 12 +327 79 14 12 +529 165 13 15 +541 144 13 11 +548 127 15 12 +648 108 13 14 +679 121 10 13 +502 93 10 13 +479 82 12 12 +552 100 14 14 +620 179 15 14 +590 110 12 13 +768 116 12 13 +714 100 12 12 +681 93 12 11 +942 199 11 12 +934 217 12 15 +971 209 9 12 +1008 225 8 8 +993 245 9 13 +1005 251 11 11 +1006 271 12 15 +889 265 14 17 +906 268 11 11 +892 232 14 12 +883 216 10 11 +879 200 10 11 +841 201 12 13 +805 210 13 12 +771 176 8 10 +801 165 10 14 +841 150 9 9 +847 133 6 6 +869 146 10 9 +918 140 9 12 +943 145 12 10 +930 127 7 7 +966 74 10 9 +952 70 10 10 +932 74 11 12 +913 78 8 12 +865 242 14 11 +849 240 8 11 +4 541 15 18 +0 488 17 16 +4 405 11 12 +7 332 16 21 +10 315 8 10 +5 292 15 15 +131 326 12 11 +154 311 13 10 +145 295 13 11 +213 280 13 15 +237 299 16 17 +406 291 9 10 +426 296 15 12 +458 298 9 12 +558 320 17 14 +524 368 13 17 +596 247 14 15 +644 219 12 10 +608 211 12 11 +774 549 22 25 +475 637 18 13 +647 311 16 14 +724 237 8 10 +1003 191 12 11 +877 289 14 12 +716 175 12 12 +666 202 9 9 +817 148 11 9 +602 183 11 11 +566 178 7 10 +582 155 10 8 +598 137 8 8 +616 153 11 9 +627 140 11 14 +622 156 15 11 +646 139 11 13 +656 163 9 10 +720 143 10 16 +718 133 9 7 +746 122 9 8 +736 109 10 9 +694 128 
9 7 +744 162 13 10 +749 199 9 8 +730 190 10 11 +849 159 7 12 +791 104 7 10 +788 118 11 11 +829 76 9 6 +833 91 7 9 +841 84 7 7 +854 81 8 7 +865 80 9 6 +884 83 8 10 +914 60 8 8 +881 61 8 9 +1001 97 14 10 +837 50 11 11 +863 41 9 8 +839 39 11 7 +800 69 10 8 +797 72 7 8 +773 63 8 8 +782 50 9 8 +805 33 10 8 +773 34 9 7 +773 98 10 7 +748 70 11 10 +740 51 7 8 +754 48 6 7 +748 15 7 6 +740 13 7 7 +728 31 4 6 +718 15 9 8 +702 9 7 7 +689 13 10 8 +690 26 10 9 +703 49 6 5 +707 58 10 9 +722 57 8 8 +700 73 12 11 +710 91 12 9 +693 111 5 4 +706 117 6 4 +701 126 7 10 +673 113 11 9 +662 108 7 8 +665 95 10 10 +679 75 8 7 +687 55 9 8 +676 56 9 11 +645 79 12 10 +643 61 10 9 +651 42 9 10 +660 29 9 7 +631 34 7 7 +626 57 7 10 +611 70 9 6 +603 89 12 8 +621 95 11 11 +636 98 8 9 +649 97 10 8 +639 86 4 4 +633 90 8 11 +632 119 9 9 +620 110 9 9 +612 120 10 11 +647 12 6 10 +668 16 7 6 +632 9 9 8 +611 13 9 8 +621 23 5 7 +627 28 10 8 +622 50 16 9 +676 148 5 7 +576 166 13 9 +580 125 13 14 +586 105 5 6 +560 117 10 6 +565 132 10 8 +566 145 8 5 +568 86 10 7 +567 102 11 5 +588 79 8 8 +564 73 13 10 +580 68 6 5 +570 57 9 9 +595 63 6 7 +585 25 9 10 +603 19 8 10 +582 10 6 5 +562 18 10 6 +550 11 10 11 +541 10 6 8 +528 11 8 6 +523 47 10 8 +536 34 8 9 +513 68 11 11 +534 69 7 7 +541 76 10 13 +534 98 13 10 +538 119 9 8 +542 107 7 6 +516 107 12 13 +508 114 8 9 +511 129 10 12 +524 134 11 8 +517 150 12 8 +520 164 9 11 +489 147 12 8 +478 140 7 6 +472 154 13 9 +481 183 10 11 +490 180 8 16 +479 204 16 8 +483 100 11 11 +475 109 5 7 +480 110 11 13 +500 72 10 10 +506 51 7 5 +492 41 7 7 +568 6 7 10 +473 4 10 7 +483 71 8 10 +471 74 7 8 +534 60 9 7 +472 49 10 9 +455 77 10 9 +447 74 7 6 +443 83 12 14 +438 99 13 11 +414 86 13 9 +403 89 9 8 +408 69 12 11 +409 50 10 8 +427 55 11 9 +410 45 8 4 +414 36 8 8 +425 46 12 8 +432 36 6 6 +427 28 9 7 +443 34 7 4 +415 21 6 6 +441 22 6 5 +450 26 5 6 +485 36 5 7 +372 33 7 5 +379 13 8 8 +403 17 5 6 +345 27 9 9 +337 37 8 11 +332 41 7 7 +323 36 7 6 +321 56 9 7 +322 46 8 7 +351 74 8 10 +386 72 8 10 +389 62 8 8 +386 93 9 11 +380 89 5 5 +362 87 8 7 +352 92 9 10 +346 85 11 12 +334 92 9 10 +318 97 10 10 +319 70 9 8 +339 69 7 5 +298 64 9 9 +293 55 8 9 +294 39 11 13 +281 47 8 9 +271 35 5 6 +292 27 11 9 +315 15 6 8 +299 10 8 9 +276 88 11 10 +289 99 9 7 +272 99 10 9 +285 129 11 10 +301 137 11 12 +312 133 9 10 +283 83 7 8 +283 75 9 6 +255 45 13 16 +266 41 7 6 +385 34 8 7 +369 60 8 6 +357 70 8 9 +373 118 9 8 +353 129 8 11 +327 155 9 11 +345 166 9 10 +358 166 12 8 +234 114 9 9 +247 107 14 11 +234 91 12 15 +211 81 9 8 +150 98 10 8 +154 145 10 7 +152 165 10 14 +156 161 11 8 +170 144 9 8 +213 140 7 8 +196 151 13 10 +229 144 10 9 +241 139 10 9 +195 126 10 10 +147 124 12 9 +89 158 13 8 +101 171 10 12 +75 149 14 22 +97 97 9 7 +125 100 7 10 +128 88 9 7 +96 61 12 9 +89 70 15 14 +125 64 8 6 +136 65 8 8 +185 62 10 8 +174 30 9 7 +185 31 11 9 +146 21 7 7 +121 26 6 4 +83 27 7 6 +79 43 10 9 +243 18 9 8 +239 41 8 6 +242 52 9 9 +242 71 8 7 +159 31 9 7 +146 56 9 7 +154 70 7 11 +227 50 7 6 +181 14 7 7 +28 23 6 7 +10 51 11 14 +19 74 10 12 +27 97 7 9 +38 95 6 7 +49 103 11 11 +24 123 11 12 +101 79 12 9 +449 142 12 10 +467 131 7 9 +284 211 12 17 +# 2--Demonstration/2_Demonstration_Political_Rally_2_224.jpg +100 172 80 106 +334 84 80 116 +606 108 86 116 +780 242 62 104 +900 260 70 92 +# 2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_364.jpg +218 328 132 190 +472 286 124 176 +678 344 128 194 +# 2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_771.jpg +53 487 11 16 +133 460 18 20 +190 94 21 41 +143 82 28 54 +276 248 14 17 +256 276 
12 16 +294 285 14 19 +271 301 14 17 +278 322 12 12 +297 310 10 15 +268 334 13 20 +341 322 8 17 +346 320 13 18 +373 312 12 17 +308 295 14 22 +247 290 11 21 +245 326 10 21 +252 345 7 15 +248 374 17 19 +351 348 13 16 +317 360 14 16 +273 359 16 19 +331 374 11 16 +380 357 16 22 +284 353 15 21 +293 334 9 11 +392 296 9 12 +370 287 12 17 +674 473 24 33 +800 498 22 21 +725 316 22 42 +688 168 24 37 +731 175 21 37 +759 171 18 32 +667 177 19 34 +916 133 15 34 +895 147 13 28 +589 28 32 65 +337 6 38 67 +797 8 27 53 +7 486 9 16 +470 132 95 134 +975 0 22 30 +# 2--Demonstration/2_Demonstration_Protesters_2_796.jpg +407 242 153 186 +359 75 29 38 +352 129 29 36 +487 131 34 42 +390 9 36 48 +522 101 30 31 +562 94 26 28 +601 61 26 31 +578 141 29 33 +608 141 39 37 +673 76 61 68 +# 2--Demonstration/2_Demonstration_Protesters_2_589.jpg +574 324 60 60 +# 2--Demonstration/2_Demonstration_Protesters_2_1033.jpg +834 424 13 19 +807 429 10 12 +783 429 9 12 +562 427 8 10 +548 416 12 13 +495 344 25 38 +419 314 26 41 +362 266 28 53 +317 194 49 87 +108 51 90 102 +# 2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_159.jpg +267 367 56 70 +327 391 22 43 +117 391 27 48 +215 417 20 23 +241 390 25 43 +465 384 18 21 +530 418 31 33 +565 391 25 38 +585 337 52 80 +670 380 32 44 +821 354 97 97 +958 308 66 97 +938 354 26 39 +175 158 45 59 +427 180 41 59 +289 199 21 23 +335 406 21 15 +53 389 17 24 +417 197 20 30 +# 2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_914.jpg +78 73 22 25 +164 55 16 32 +231 98 8 13 +239 103 8 15 +257 27 42 56 +379 136 4 7 +392 136 4 5 +51 336 14 20 +104 363 9 13 +123 333 11 15 +162 332 14 18 +185 342 11 14 +233 345 6 11 +250 342 8 12 +278 343 7 10 +314 339 7 13 +388 303 9 13 +377 319 11 16 +481 341 9 14 +509 315 8 10 +513 328 10 13 +546 318 7 10 +548 337 9 13 +577 315 8 7 +606 303 13 16 +637 315 10 12 +726 124 6 6 +902 109 14 17 +718 364 15 14 +777 356 15 18 +831 353 19 20 +911 350 18 19 +438 344 13 14 +465 312 12 13 +# 2--Demonstration/2_Demonstration_Protesters_2_156.jpg +613 344 47 47 +736 441 59 54 +433 319 39 69 +249 315 43 56 +135 329 34 41 +54 295 37 39 +0 321 20 27 +986 0 38 53 +624 112 21 28 +60 143 26 25 +99 162 14 18 +33 150 14 15 +271 172 10 11 +674 140 7 10 +688 140 7 9 +714 141 6 10 +563 142 9 10 +# 2--Demonstration/2_Demonstration_Protesters_2_646.jpg +228 192 50 71 +336 184 21 33 +391 178 20 24 +118 170 41 49 +90 195 28 36 +344 251 112 146 +474 211 78 109 +556 212 52 57 +652 217 80 94 +591 177 23 31 +450 163 47 76 +280 170 15 20 +294 176 10 14 +977 218 47 62 +741 196 19 24 +189 187 20 20 +# 2--Demonstration/2_Demonstration_Protesters_2_92.jpg +965 309 26 39 +935 327 18 28 +828 335 29 43 +740 359 54 52 +687 338 36 47 +650 294 20 36 +590 321 20 29 +580 321 18 22 +498 314 15 32 +466 301 20 28 +429 303 13 17 +381 309 24 34 +346 334 20 30 +317 314 24 42 +246 303 28 35 +374 310 16 22 +256 430 61 72 +82 518 71 123 +82 419 41 47 +201 326 27 37 +67 342 31 47 +0 304 16 35 +92 296 11 16 +# 2--Demonstration/2_Demonstration_Political_Rally_2_329.jpg +513 118 7 8 +487 121 8 10 +534 117 7 9 +553 104 6 6 +561 95 6 7 +575 94 5 7 +584 117 7 7 +603 121 8 13 +595 123 6 8 +524 106 4 6 +535 129 5 8 +542 125 6 8 +548 129 7 8 +556 125 9 11 +572 122 8 11 +587 127 8 11 +566 145 16 22 +542 150 20 21 +517 146 13 17 +497 144 21 21 +477 136 18 21 +457 138 14 16 +465 149 15 15 +410 195 33 34 +611 168 16 21 +665 192 29 25 +617 124 8 11 +736 118 10 11 +746 119 13 17 +740 133 11 13 +768 129 12 14 +782 119 10 11 +803 134 9 13 +785 145 16 20 +772 177 23 30 +693 191 20 20 +801 204 41 49 +831 180 28 38 +844 
127 15 20 +864 138 9 12 +881 161 26 32 +904 144 11 15 +919 147 8 12 +908 125 9 12 +816 123 10 11 +846 222 31 42 +936 137 12 16 +947 143 14 16 +966 144 14 18 +973 134 12 12 +988 133 11 11 +997 139 12 13 +1012 150 11 14 +996 176 19 26 +951 193 25 28 +924 208 34 41 +976 218 34 42 +932 281 61 78 +865 333 65 71 +737 389 104 129 +692 322 46 54 +784 286 38 41 +614 209 26 36 +649 230 36 53 +579 226 31 36 +464 239 39 43 +481 308 82 107 +307 312 64 75 +137 341 60 75 +123 227 50 69 +128 176 32 38 +196 196 37 42 +268 197 42 47 +301 186 26 33 +327 178 31 35 +60 201 29 46 +0 189 32 46 +22 150 17 17 +55 167 24 28 +66 151 19 23 +110 154 11 13 +68 133 13 15 +111 131 13 13 +54 140 11 12 +50 133 10 13 +46 127 9 12 +23 124 10 10 +11 135 12 12 +7 112 10 12 +28 112 9 12 +# 2--Demonstration/2_Demonstration_Protesters_2_508.jpg +950 170 36 43 +988 144 25 31 +957 113 23 28 +935 160 22 30 +866 145 24 41 +788 139 22 25 +716 161 39 44 +653 156 20 26 +436 213 29 24 +495 211 14 16 +316 182 34 39 +176 215 31 23 +237 180 25 33 +# 2--Demonstration/2_Demonstration_Protesters_2_881.jpg +332 288 34 47 +43 298 26 29 +# 2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_606.jpg +13 286 33 41 +44 263 45 43 +103 310 47 42 +187 248 30 41 +220 242 35 43 +220 294 42 64 +108 230 22 39 +150 256 22 25 +337 287 76 86 +311 238 31 35 +447 225 27 47 +549 293 78 106 +504 229 40 73 +637 252 52 51 +711 258 33 45 +743 275 36 49 +766 259 59 62 +707 218 23 29 +997 284 26 53 +904 267 56 59 +827 236 36 46 +524 212 6 6 +# 2--Demonstration/2_Demonstration_Political_Rally_2_700.jpg +154 54 80 118 +510 64 76 112 +602 102 60 78 +768 174 64 80 +# 2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_242.jpg +17 488 28 32 +164 523 54 83 +271 508 30 37 +432 539 38 52 +800 337 53 96 +745 432 9 11 +775 447 17 17 +12 497 6 31 +977 414 9 10 +# 2--Demonstration/2_Demonstration_Protesters_2_65.jpg +930 317 44 63 +734 215 74 103 +560 254 62 83 +431 313 29 52 +320 294 34 46 +275 315 37 49 +272 279 32 37 +161 270 40 55 +125 300 33 41 +96 327 22 29 +# 2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_942.jpg +219 99 473 659 +# 2--Demonstration/2_Demonstration_Protesters_2_822.jpg +977 363 14 18 +999 366 19 20 +945 381 30 44 +1006 393 14 40 +710 410 27 63 +723 379 71 119 +671 360 42 56 +585 426 49 79 +517 397 33 43 +301 333 48 85 +457 395 26 31 +205 385 41 56 +133 363 27 35 +95 426 63 63 +27 358 39 63 +80 392 21 28 +372 382 35 59 +# 2--Demonstration/2_Demonstration_Political_Rally_2_204.jpg +137 436 8 14 +227 436 10 15 +234 363 4 6 +126 366 5 6 +190 361 5 5 +56 409 11 12 +405 393 7 9 +339 401 13 11 +446 400 6 9 +466 395 10 10 +497 400 9 11 +661 393 7 7 +519 322 5 5 +569 339 4 5 +556 339 4 4 +766 390 9 12 +921 389 9 10 +1002 388 8 10 +# 2--Demonstration/2_Demonstration_Political_Rally_2_842.jpg +506 294 42 49 +367 595 17 22 +293 625 16 19 +855 382 9 12 +866 380 11 16 +# 2--Demonstration/2_Demonstration_Demonstrators_2_545.jpg +82 278 8 9 +96 273 10 17 +85 311 10 11 +116 349 11 16 +157 321 11 16 +173 347 16 19 +207 309 14 21 +266 349 15 20 +213 353 14 16 +132 304 12 13 +98 351 20 21 +157 284 12 16 +177 309 14 15 +47 345 12 16 +355 295 10 11 +390 304 10 14 +339 324 12 11 +311 294 9 13 +386 369 18 23 +399 352 12 15 +350 364 13 18 +295 322 14 20 +292 360 22 28 +256 404 23 31 +402 401 18 25 +388 288 7 10 +300 266 13 13 +277 264 11 11 +420 294 13 16 +437 355 12 18 +439 310 11 15 +472 284 9 10 +454 278 10 15 +711 298 10 12 +760 352 18 25 +786 348 14 21 +825 286 11 14 +847 298 14 17 +772 301 14 14 +796 290 11 14 +855 331 17 19 +687 303 14 11 +743 
289 9 13 +741 329 14 15 +920 292 13 15 +899 300 11 13 +921 348 20 22 +795 409 19 31 +886 439 22 48 +934 423 19 21 +842 374 20 26 +67 423 28 46 +149 418 30 33 +101 391 27 30 +63 491 33 52 +257 493 35 55 +290 431 31 39 +726 396 13 20 +361 418 24 31 +415 449 25 39 +212 380 16 27 +192 410 25 38 +# 2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_430.jpg +0 141 13 67 +108 245 50 63 +234 216 33 43 +286 208 53 70 +351 184 40 60 +489 145 59 79 +608 130 45 62 +656 112 35 63 +677 115 56 95 +765 145 45 57 +859 169 38 39 +857 217 55 60 +34 357 44 89 +# 2--Demonstration/2_Demonstration_Protesters_2_738.jpg +219 33 79 97 +309 59 55 75 +474 116 91 112 +766 170 70 100 +809 81 44 47 +32 1224 81 204 +250 884 114 149 +725 1028 119 150 +925 1063 99 156 +672 698 42 32 +# 2--Demonstration/2_Demonstration_Political_Rally_2_335.jpg +933 32 16 19 +915 27 20 20 +936 61 16 20 +905 65 21 23 +880 82 21 23 +865 62 24 30 +858 51 20 28 +832 58 22 25 +817 75 24 26 +859 119 26 29 +915 101 25 26 +941 117 29 36 +919 196 38 41 +879 149 28 39 +842 169 36 41 +889 221 35 35 +845 215 44 41 +817 105 25 33 +773 54 26 33 +783 19 14 13 +749 51 19 27 +730 80 28 35 +729 27 23 24 +665 64 25 33 +745 146 28 31 +799 125 25 29 +768 135 29 27 +772 157 30 29 +753 178 34 39 +778 196 33 44 +698 218 38 38 +767 105 28 26 +658 41 17 22 +593 42 19 22 +578 67 29 22 +615 84 28 31 +644 77 25 37 +657 132 30 35 +681 122 27 32 +635 131 28 30 +567 119 24 28 +577 136 30 40 +604 161 35 41 +661 183 37 38 +572 191 37 43 +546 222 40 41 +541 229 0 2 +521 208 31 36 +476 34 21 19 +467 28 16 23 +472 58 15 20 +403 73 15 18 +414 94 20 24 +457 114 26 28 +486 127 30 36 +339 57 22 29 +334 35 20 25 +305 49 17 22 +323 68 20 23 +329 106 29 35 +304 107 20 33 +338 148 32 45 +363 162 35 32 +445 152 31 44 +248 92 32 39 +207 13 15 17 +178 28 15 22 +186 24 21 26 +185 57 23 26 +170 78 20 23 +210 97 23 25 +189 114 21 33 +136 98 28 33 +108 66 22 25 +117 17 14 14 +12 36 21 22 +29 37 19 22 +15 53 18 21 +28 60 17 19 +67 68 25 25 +69 87 27 33 +39 79 26 30 +23 79 22 32 +0 77 20 22 +32 148 28 32 +0 141 22 38 +61 143 36 36 +108 147 29 33 +145 147 32 41 +73 186 38 36 +243 182 33 32 +172 190 38 45 +144 230 33 37 +65 211 41 41 +36 219 33 42 +0 228 30 48 +85 235 45 52 +110 273 45 45 +158 265 37 39 +194 264 37 46 +234 234 39 44 +210 284 44 62 +282 171 30 31 +271 192 32 43 +275 248 42 44 +271 263 48 62 +347 210 31 32 +355 228 42 49 +339 287 47 49 +405 260 50 59 +475 174 36 47 +489 243 42 55 +532 261 31 36 +456 272 32 35 +475 297 41 51 +641 239 30 30 +658 235 46 49 +756 252 42 46 +706 322 46 55 +564 330 50 58 +878 308 38 52 +895 338 53 66 +928 405 50 59 +826 434 52 57 +822 490 73 62 +786 571 72 78 +610 412 57 53 +670 418 64 70 +735 446 64 83 +658 528 68 75 +378 336 55 56 +448 380 43 58 +509 373 56 71 +485 440 51 69 +559 482 54 71 +552 574 79 85 +332 435 62 58 +353 502 63 73 +279 430 53 64 +253 558 70 80 +309 322 55 66 +276 329 47 49 +117 351 53 61 +65 394 52 64 +27 372 1 1 +4 331 49 61 +4 403 54 62 +147 481 29 76 +0 492 54 68 +104 538 46 50 +0 564 50 81 +460 652 45 34 +682 65 18 26 +157 404 52 65 +925 279 39 54 +1015 290 9 41 +909 4 21 29 +986 6 22 24 +716 12 14 20 +756 13 16 20 +943 2 18 28 +27 0 18 17 +64 25 22 24 +0 30 8 13 +# 2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_655.jpg +84 125 18 17 +72 183 18 19 +14 179 8 9 +281 111 20 32 +381 380 27 23 +516 99 17 22 +686 71 21 27 +# 2--Demonstration/2_Demonstration_Protesters_2_268.jpg +74 204 41 56 +189 208 25 58 +282 243 30 40 +366 239 24 36 +396 232 32 40 +471 230 32 52 +534 257 28 33 +592 206 36 44 +691 233 25 35 
+843 209 37 45 +987 257 24 26 +# 2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_58.jpg +964 288 50 48 +905 325 40 54 +767 276 45 52 +866 247 38 49 +942 192 33 49 +776 242 33 37 +773 197 33 34 +982 151 29 27 +837 173 35 38 +824 141 20 32 +863 119 24 27 +862 99 14 23 +1007 6 9 11 +1001 0 8 8 +984 9 8 10 +969 0 8 7 +902 0 7 6 +820 6 10 9 +782 1 10 8 +769 36 14 17 +743 54 12 19 +744 16 10 10 +704 1 9 11 +714 14 8 10 +692 3 9 10 +671 5 7 10 +688 29 7 11 +688 51 16 19 +694 80 16 18 +640 78 14 19 +634 106 16 24 +691 131 12 21 +782 148 17 26 +685 167 35 43 +701 247 36 50 +654 323 49 69 +623 270 39 50 +589 256 31 38 +591 178 32 37 +580 129 22 28 +545 97 12 19 +542 71 9 17 +507 79 14 20 +470 124 20 24 +457 58 14 19 +536 6 10 13 +459 5 9 11 +393 130 22 29 +351 63 4 18 +351 61 13 18 +351 38 9 16 +359 9 9 12 +356 0 8 7 +327 3 9 11 +296 0 10 13 +242 7 9 11 +256 40 12 17 +253 76 19 18 +264 95 12 16 +316 127 25 31 +256 134 16 17 +214 72 17 22 +183 91 22 23 +211 1 6 10 +374 155 16 26 +334 185 34 37 +316 225 40 44 +159 54 18 16 +128 53 14 18 +149 82 16 20 +154 114 28 31 +148 163 30 32 +161 238 46 45 +423 184 31 33 +435 235 18 36 +471 172 23 35 +517 180 24 30 +90 154 38 35 +111 85 20 26 +78 83 17 25 +108 55 15 20 +46 44 20 19 +0 55 13 17 +16 85 14 16 +0 89 14 20 +74 124 15 26 +0 174 28 42 +45 239 58 67 +162 295 55 76 +249 252 44 51 +303 309 50 60 +385 305 40 39 +287 383 56 84 +181 441 52 55 +29 515 102 109 +451 317 55 71 +516 234 44 59 +578 575 47 75 +783 451 67 73 +26 40 10 13 +673 85 14 22 +# 2--Demonstration/2_Demonstration_Protesters_2_179.jpg +644 155 30 44 +847 157 23 39 +983 189 13 27 +850 123 29 33 +514 199 33 37 +158 188 30 47 +102 316 12 15 +81 324 13 14 +52 319 11 10 +4 320 12 14 +# 2--Demonstration/2_Demonstration_Protesters_2_293.jpg +218 522 90 115 +0 413 31 101 +0 366 34 69 +76 331 59 80 +247 463 40 61 +288 431 58 78 +162 372 39 55 +123 356 33 49 +268 361 31 56 +347 355 42 58 +405 474 44 82 +43 330 35 47 +59 309 26 45 +121 261 21 37 +319 314 39 52 +475 365 46 48 +454 431 53 77 +431 254 20 18 +597 380 45 75 +564 410 53 81 +684 357 30 46 +748 341 25 33 +838 404 41 67 +808 340 22 35 +866 387 33 63 +860 458 74 90 +778 470 73 63 +732 498 45 115 +521 466 89 96 +807 578 100 104 +398 205 15 12 +677 451 41 68 +751 421 59 72 +408 321 47 71 +583 300 27 40 +# 2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_420.jpg +2 84 4 10 +27 119 24 28 +7 135 9 18 +99 102 12 12 +15 143 22 35 +0 300 43 52 +72 183 23 43 +193 121 18 23 +209 270 28 55 +211 148 20 35 +311 171 25 42 +167 103 9 11 +226 97 16 23 +301 94 14 14 +307 108 14 20 +370 172 25 34 +420 135 25 31 +390 198 33 55 +407 233 47 72 +473 162 39 43 +553 237 44 61 +516 144 23 31 +517 112 8 17 +570 162 24 37 +597 169 24 47 +598 138 13 20 +630 105 13 17 +639 127 25 32 +648 274 46 42 +692 136 24 32 +720 162 30 46 +703 95 9 10 +811 186 27 39 +775 127 15 17 +828 116 12 16 +802 130 9 22 +859 96 10 13 +737 99 11 15 +838 152 12 23 +840 160 19 42 +909 112 15 20 +862 119 13 9 +910 171 24 33 +882 92 8 10 +921 235 30 49 +976 134 16 27 +991 145 18 30 +919 95 12 15 +746 270 37 56 +833 310 57 42 +1007 98 13 13 +107 284 55 68 +114 153 26 44 +508 86 11 15 +# 2--Demonstration/2_Demonstration_Protesters_2_258.jpg +66 152 108 118 +168 222 122 108 +600 188 86 114 +810 236 106 138 +# 2--Demonstration/2_Demonstration_Political_Rally_2_456.jpg +150 358 28 36 +88 367 20 44 +354 206 16 25 +206 212 13 16 +197 242 17 19 +397 307 29 38 +428 257 14 19 +491 332 20 23 +476 273 13 22 +500 260 16 22 +442 207 11 14 +562 240 13 16 +528 243 7 9 +600 246 13 14 +619 250 13 17 
+520 225 11 9 +643 310 16 19 +652 249 12 18 +667 220 13 17 +706 253 14 15 +950 231 15 19 +168 238 11 18 +# 2--Demonstration/2_Demonstration_Demonstrators_2_378.jpg +12 209 35 35 +0 305 27 67 +103 140 23 23 +100 207 33 32 +93 245 33 38 +155 204 33 38 +136 204 24 20 +159 168 24 23 +194 192 26 37 +209 140 24 25 +266 219 32 44 +220 247 35 45 +151 269 40 45 +86 297 49 48 +183 375 51 59 +195 403 65 67 +100 470 71 69 +218 497 70 69 +328 472 40 48 +260 362 59 57 +303 277 44 46 +280 290 30 41 +357 229 33 37 +329 218 29 43 +341 193 25 26 +292 199 25 22 +306 169 23 27 +393 241 42 42 +369 288 44 47 +411 193 36 40 +432 310 36 50 +421 291 39 46 +384 319 51 58 +506 357 42 57 +503 302 50 57 +482 321 48 44 +536 260 36 49 +481 206 39 46 +568 242 33 30 +601 273 32 36 +646 266 41 43 +605 325 49 65 +672 337 44 53 +596 423 54 54 +646 437 52 61 +650 490 60 73 +703 498 40 61 +494 425 55 61 +550 374 41 55 +436 470 65 51 +496 520 81 56 +39 186 37 38 +736 285 23 40 +714 403 39 59 +762 319 40 47 +748 360 49 53 +750 415 48 60 +807 480 63 80 +942 406 54 60 +902 353 42 45 +900 405 39 54 +837 387 43 51 +867 340 40 46 +948 227 31 45 +1004 220 20 24 +926 212 30 27 +885 211 27 27 +923 171 23 28 +961 152 17 32 +907 131 19 23 +1004 125 18 34 +807 263 34 49 +# 2--Demonstration/2_Demonstration_Political_Rally_2_286.jpg +428 719 87 109 +809 655 81 112 +# 2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_512.jpg +103 344 57 69 +150 293 40 52 +224 290 31 39 +402 239 26 30 +932 277 34 35 +968 374 56 92 +92 296 33 45 +265 242 24 37 +946 309 41 54 +916 195 14 15 +947 202 12 14 +594 237 14 16 +536 241 14 17 +675 282 23 42 +837 277 19 19 +882 346 28 43 +922 257 13 17 +511 222 11 11 +443 219 8 9 +498 240 7 9 +248 196 12 11 +89 240 20 26 +154 188 20 27 +203 246 16 21 +214 229 14 17 +305 224 14 17 +993 207 11 13 +901 231 8 10 +548 226 14 17 +636 308 27 37 +# 2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_402.jpg +188 158 54 54 +316 160 56 56 +472 130 52 46 +392 114 54 60 +606 116 58 54 +668 144 64 54 +798 122 60 62 +# 2--Demonstration/2_Demonstration_Protesters_2_779.jpg +1 351 117 135 +103 421 53 73 +420 349 21 26 +553 398 84 108 +730 371 91 94 +699 437 28 35 +612 234 29 33 +858 343 16 18 +932 414 14 18 +912 423 21 26 +977 425 43 46 +# 2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_644.jpg +102 234 30 43 +21 33 17 23 +232 63 25 36 +208 51 12 15 +147 182 25 34 +172 173 25 32 +332 92 18 26 +367 247 23 29 +386 243 16 20 +402 242 22 26 +430 232 21 29 +445 285 21 27 +515 271 26 31 +510 251 22 21 +472 273 16 22 +472 160 18 21 +501 156 14 22 +526 149 15 24 +568 123 17 18 +463 232 17 31 +560 277 25 34 +643 237 21 24 +656 271 23 35 +653 297 24 32 +503 202 14 27 +550 242 19 26 +714 245 21 37 +764 287 26 35 +834 143 18 21 +819 278 24 31 +851 272 21 23 +877 312 28 33 +931 277 21 27 +579 251 23 27 +257 286 29 36 +960 290 18 27 +# 2--Demonstration/2_Demonstration_Protesters_2_221.jpg +707 370 45 86 +889 288 51 78 +225 520 85 56 +83 331 35 71 +282 329 38 77 +411 299 33 76 +516 200 58 87 +572 146 38 64 +400 128 41 61 +223 127 45 65 +192 121 52 63 +2 195 45 86 +525 426 57 87 +743 178 42 75 +712 205 31 49 +740 143 44 48 +858 166 46 59 +828 129 32 45 +909 106 36 50 +919 176 34 69 +991 152 30 65 +950 138 41 44 +400 198 53 85 +297 127 30 61 +236 80 32 44 +121 94 34 42 +80 165 37 71 +0 109 16 56 +387 52 36 55 +462 76 33 62 +311 13 30 39 +120 14 24 39 +14 17 23 40 +135 52 24 30 +156 60 21 27 +692 95 28 44 +654 90 21 32 +545 73 25 40 +715 5 24 26 +892 55 15 18 +865 74 20 21 +675 62 20 23 +724 54 29 37 +627 0 18 15 +596 2 12 
17 +141 235 40 70 +231 6 17 30 +179 11 16 29 +118 428 45 82 +962 76 16 20 +907 38 14 16 +# 2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_687.jpg +58 59 6 6 +101 61 6 7 +121 64 5 6 +153 71 6 7 +151 84 4 5 +57 84 4 7 +122 80 6 8 +135 95 6 8 +118 120 5 8 +108 117 5 9 +97 119 5 8 +59 118 5 9 +45 130 6 7 +148 117 6 8 +111 152 6 8 +114 172 8 7 +149 160 7 8 +91 178 7 10 +62 180 5 7 +36 115 4 6 +38 180 6 10 +52 187 7 9 +1 207 6 9 +13 207 3 6 +0 180 3 9 +100 74 7 9 +172 107 5 7 +184 83 6 8 +201 84 6 6 +194 137 8 9 +183 167 6 7 +195 167 5 6 +122 168 5 8 +140 161 5 8 +247 61 5 6 +211 80 7 8 +231 80 5 6 +242 80 5 6 +232 92 5 7 +237 113 5 8 +209 135 7 9 +246 86 5 8 +250 83 5 8 +268 82 5 8 +272 77 4 8 +288 74 4 7 +295 77 4 8 +303 83 4 8 +313 78 5 10 +309 89 5 6 +289 97 4 8 +266 94 6 8 +305 97 5 8 +321 83 5 7 +331 82 4 5 +340 79 5 6 +347 78 4 5 +323 100 4 6 +349 98 4 6 +371 80 4 6 +379 82 5 7 +391 78 4 7 +414 85 5 6 +414 78 4 6 +386 90 5 8 +389 106 5 6 +377 132 6 9 +398 141 6 8 +409 129 6 7 +380 163 6 6 +360 163 6 6 +349 175 7 6 +510 70 5 7 +504 63 5 7 +498 73 6 7 +509 85 4 6 +485 92 7 6 +493 79 4 8 +479 77 4 7 +483 72 4 5 +463 79 5 6 +463 70 4 4 +453 81 6 6 +444 72 3 5 +441 81 4 4 +431 85 4 4 +429 74 4 7 +445 101 6 9 +475 104 5 7 +512 152 6 7 +501 158 7 7 +488 151 7 8 +471 154 7 6 +439 142 6 7 +452 172 8 10 +423 167 8 9 +502 143 5 8 +531 132 6 8 +521 152 8 11 +523 130 5 8 +525 89 5 7 +542 90 3 5 +524 74 5 9 +554 76 4 6 +543 79 4 5 +538 82 4 5 +472 88 5 6 +456 92 4 7 +440 89 5 7 +432 95 5 6 +439 102 5 8 +462 127 5 7 +479 127 3 6 +478 136 3 5 +479 151 4 6 +404 162 7 10 +405 185 6 10 +440 197 7 10 +447 207 7 9 +479 210 8 9 +453 220 8 9 +510 189 5 8 +542 189 6 9 +535 190 6 9 +446 155 6 8 +528 62 6 7 +541 61 5 5 +556 41 4 6 +567 69 5 7 +571 87 4 5 +526 33 5 7 +487 44 6 6 +484 31 5 6 +462 40 5 5 +466 33 5 7 +510 15 4 7 +529 41 4 6 +408 64 6 8 +377 64 6 9 +395 65 4 7 +390 41 4 7 +577 47 5 5 +589 56 4 6 +603 50 5 6 +596 60 4 5 +611 63 4 5 +584 85 4 6 +607 70 4 5 +632 70 4 6 +655 81 4 7 +652 115 6 5 +631 117 6 7 +620 118 5 7 +614 113 3 6 +661 132 5 8 +653 127 6 9 +641 132 5 7 +658 152 5 9 +637 161 7 8 +638 147 5 7 +625 138 4 7 +617 149 6 8 +612 127 5 7 +607 134 6 7 +588 129 4 7 +573 136 6 6 +595 165 7 7 +585 158 7 6 +575 147 5 8 +568 161 4 5 +550 141 5 7 +592 143 4 5 +686 79 6 6 +707 80 5 6 +683 90 4 6 +693 104 5 8 +714 90 5 8 +720 103 4 6 +727 104 3 5 +743 113 5 7 +746 108 5 9 +735 132 8 6 +775 71 4 8 +787 96 5 7 +773 113 7 7 +773 138 6 9 +769 160 6 10 +781 179 6 9 +759 157 6 11 +688 125 7 10 +680 134 6 9 +691 138 4 7 +690 155 7 8 +677 148 6 9 +656 164 5 7 +668 171 7 10 +675 184 5 8 +682 185 5 8 +703 170 4 7 +746 195 7 10 +726 204 8 9 +794 82 4 8 +815 82 5 7 +833 89 6 8 +869 75 5 7 +884 74 6 7 +891 56 4 9 +876 85 5 8 +895 81 5 9 +889 88 5 6 +852 101 7 7 +888 109 6 8 +876 121 8 9 +864 115 6 8 +838 115 6 6 +887 134 6 6 +899 154 5 10 +890 160 5 9 +898 167 7 9 +867 181 6 9 +872 210 8 8 +843 209 7 11 +835 182 6 8 +837 172 6 9 +833 156 5 9 +838 151 6 7 +832 151 6 6 +810 113 8 13 +805 137 6 7 +809 158 6 8 +812 178 8 10 +796 177 6 9 +793 193 8 9 +762 212 8 12 +980 44 5 7 +962 51 6 10 +936 46 4 5 +926 59 4 6 +908 45 4 7 +1011 88 6 6 +978 79 6 9 +967 81 7 9 +1013 120 5 9 +961 80 4 6 +949 75 5 8 +941 81 5 7 +929 80 5 8 +919 76 5 8 +916 89 5 5 +974 112 4 7 +928 119 4 8 +902 142 5 7 +939 146 8 8 +967 147 5 7 +958 149 5 7 +921 139 6 8 +923 116 4 7 +885 155 3 5 +922 182 8 9 +974 179 6 9 +1007 199 8 10 +949 203 8 8 +998 242 6 10 +1019 241 4 9 +1001 257 6 8 +1008 266 9 9 +1001 279 9 9 +1008 302 10 12 +1015 276 9 
12 +976 231 9 9 +969 247 9 11 +955 233 8 10 +929 235 9 11 +947 248 9 10 +942 253 6 8 +942 272 8 10 +919 266 10 9 +944 295 9 13 +968 326 8 11 +928 292 10 14 +918 279 9 13 +896 265 8 11 +883 276 9 12 +849 219 6 9 +871 220 7 10 +870 247 10 12 +860 265 9 13 +901 278 7 10 +910 281 8 11 +873 310 8 11 +911 322 8 13 +857 226 5 7 +862 233 6 7 +855 236 8 12 +827 294 10 11 +877 270 4 7 +828 218 4 8 +820 221 6 10 +829 237 4 8 +834 251 8 10 +808 227 5 6 +767 246 8 11 +750 245 8 12 +755 255 6 9 +782 267 7 9 +771 271 11 13 +756 270 9 12 +741 271 8 11 +846 319 9 10 +844 328 10 14 +799 306 9 12 +791 318 7 12 +797 329 10 12 +789 300 6 12 +764 313 8 15 +751 329 7 8 +741 319 8 11 +748 319 8 11 +738 295 8 13 +765 301 8 11 +902 334 11 16 +860 353 10 13 +948 326 9 13 +933 337 11 15 +958 336 8 8 +936 350 10 15 +946 367 9 16 +997 405 10 16 +1004 412 10 14 +1013 462 9 22 +959 466 12 15 +924 405 14 19 +928 426 9 16 +904 413 9 16 +881 376 11 15 +881 436 11 17 +867 458 10 15 +557 93 5 7 +546 97 5 6 +507 115 6 7 +582 176 6 8 +599 187 8 13 +950 90 6 10 +955 273 6 10 +956 608 17 25 +964 631 20 22 +765 347 10 13 +790 354 7 9 +819 357 11 13 +825 375 11 14 +822 401 10 15 +812 412 11 16 +791 386 9 12 +779 370 5 9 +768 380 11 15 +771 388 12 15 +783 392 9 14 +789 404 7 10 +768 413 11 15 +788 421 13 13 +778 434 13 17 +741 415 13 18 +794 458 14 19 +826 451 13 17 +813 476 13 17 +740 457 13 15 +558 155 6 7 +591 154 4 6 +638 192 8 9 +646 207 5 5 +660 220 7 9 +682 221 8 9 +696 219 7 10 +721 219 5 10 +708 232 8 11 +693 246 8 11 +726 255 8 10 +719 262 9 10 +715 273 7 10 +723 285 9 13 +706 285 5 8 +688 284 9 12 +686 260 9 9 +671 257 6 8 +669 273 8 10 +682 275 5 10 +676 297 10 12 +669 301 12 12 +669 308 10 13 +673 328 10 13 +702 313 10 13 +720 317 7 10 +705 305 8 8 +708 328 9 13 +592 214 9 11 +618 234 7 12 +633 254 6 10 +650 233 7 9 +663 255 6 8 +639 266 8 10 +649 277 8 10 +654 317 9 15 +644 321 11 11 +647 301 8 9 +654 294 6 12 +628 286 7 13 +637 288 8 8 +620 256 7 10 +619 272 7 9 +619 282 7 11 +617 300 9 12 +608 313 10 14 +601 308 10 12 +607 269 8 11 +596 269 10 12 +581 251 8 12 +576 228 9 10 +560 225 4 8 +550 229 7 10 +555 238 7 10 +564 263 10 13 +585 271 7 11 +591 293 10 13 +584 287 10 12 +572 296 10 14 +576 312 9 13 +558 312 10 13 +568 311 8 11 +557 274 9 13 +555 292 9 11 +561 289 6 9 +549 302 8 11 +549 311 9 13 +739 343 9 13 +719 337 4 10 +711 371 13 16 +693 341 8 11 +652 350 9 15 +669 361 11 13 +675 371 10 13 +671 388 11 15 +694 392 11 15 +701 404 8 12 +693 414 7 9 +696 421 9 16 +685 410 10 15 +562 341 8 9 +577 353 11 14 +584 368 8 16 +616 350 9 12 +627 355 8 11 +644 392 11 15 +637 400 9 13 +614 404 10 13 +557 384 11 15 +577 420 10 13 +609 384 9 13 +604 438 9 12 +605 422 9 12 +566 406 13 16 +564 483 11 18 +598 458 12 19 +607 491 11 16 +586 507 15 18 +672 475 12 14 +618 531 13 14 +645 522 13 18 +703 483 12 19 +726 458 9 18 +689 533 15 21 +712 524 15 22 +754 511 15 20 +765 498 13 19 +765 540 15 21 +700 474 14 15 +634 380 9 9 +599 403 15 17 +596 393 13 16 +661 491 10 15 +766 574 16 21 +836 531 6 12 +773 245 7 9 +728 224 6 11 +759 261 9 11 +306 140 5 8 +329 151 5 8 +339 174 4 7 +331 174 5 8 +322 179 5 9 +288 167 5 9 +271 150 4 6 +262 147 4 7 +242 159 6 8 +275 178 6 8 +351 184 5 8 +315 196 7 10 +279 208 7 9 +224 163 4 8 +227 204 8 10 +221 188 7 13 +375 223 7 12 +378 236 8 10 +385 239 8 10 +379 260 8 10 +366 266 8 12 +375 275 8 10 +380 272 7 11 +374 288 9 11 +383 291 9 13 +310 281 9 10 +315 291 7 11 +276 287 10 13 +285 269 8 11 +282 285 10 10 +251 265 8 12 +256 279 8 10 +254 287 6 9 +230 282 8 12 +208 164 4 9 +158 188 7 7 +143 190 6 9 
+149 205 8 12 +162 236 8 10 +129 241 6 11 +92 218 6 8 +224 307 8 13 +183 291 8 12 +180 281 7 11 +185 269 8 14 +177 271 6 12 +162 266 5 12 +155 258 8 14 +147 275 6 13 +149 274 9 15 +121 258 7 13 +88 272 9 15 +109 312 10 12 +156 296 8 13 +100 295 10 15 +116 271 8 11 +391 119 6 13 +350 119 6 9 +305 175 4 9 +391 201 7 11 +409 250 10 11 +412 264 7 10 +428 270 8 11 +406 282 9 13 +417 287 8 11 +416 274 10 14 +420 302 10 12 +408 310 9 10 +418 317 9 13 +393 280 9 12 +392 314 9 13 +361 299 7 13 +363 319 8 12 +347 305 9 13 +337 295 11 12 +334 312 9 12 +301 300 9 11 +295 313 9 12 +281 325 8 13 +304 317 9 13 +559 215 5 7 +529 229 8 13 +480 226 6 9 +467 234 7 10 +490 238 9 14 +476 240 9 8 +458 249 7 11 +459 260 7 10 +473 266 9 11 +486 274 9 12 +501 278 10 14 +524 262 10 12 +535 262 8 15 +549 273 7 11 +533 281 9 10 +522 292 9 11 +521 286 10 10 +507 303 9 13 +500 314 10 15 +491 319 10 11 +451 279 8 12 +438 290 10 13 +467 294 10 9 +480 294 10 12 +447 314 12 15 +432 320 10 16 +463 305 11 11 +477 301 8 14 +457 363 9 13 +474 348 9 12 +488 353 10 13 +504 342 10 13 +529 359 11 15 +538 339 9 15 +545 346 12 15 +521 374 10 15 +538 382 11 19 +520 399 13 16 +478 394 12 15 +463 399 11 13 +443 392 12 15 +31 228 6 10 +40 225 6 8 +85 233 8 10 +53 248 8 12 +10 262 7 10 +76 269 7 12 +63 292 11 12 +77 327 11 14 +91 336 9 13 +117 342 9 10 +51 329 6 12 +28 327 9 12 +10 335 8 12 +14 241 5 8 +215 314 7 10 +248 316 10 14 +232 332 8 11 +217 324 8 12 +205 337 9 12 +209 348 10 15 +235 357 11 15 +225 371 10 17 +232 399 11 13 +189 373 10 9 +156 336 10 12 +100 362 10 12 +155 360 10 12 +176 403 12 17 +212 412 10 16 +239 418 9 14 +196 441 14 18 +214 468 12 16 +200 472 15 17 +162 443 11 16 +155 453 9 16 +145 475 13 18 +84 452 12 16 +25 421 10 15 +76 398 12 17 +34 492 15 19 +91 506 14 19 +135 547 14 19 +162 483 14 22 +188 507 14 17 +228 499 13 17 +93 600 18 26 +249 301 9 13 +291 304 9 13 +273 326 7 14 +309 338 11 12 +328 333 10 12 +380 332 9 15 +333 349 11 15 +358 359 13 13 +386 360 10 14 +408 367 10 15 +229 351 8 10 +258 383 10 12 +259 401 11 15 +262 421 10 15 +260 446 14 17 +277 372 11 11 +315 357 8 14 +295 381 10 14 +282 391 8 10 +307 414 11 13 +312 435 11 17 +304 424 11 16 +320 415 10 14 +342 382 11 14 +332 405 14 10 +339 416 9 15 +359 464 14 20 +370 434 11 17 +382 409 10 17 +386 394 11 14 +391 422 9 17 +411 398 12 16 +427 403 14 19 +394 405 15 15 +437 429 12 15 +440 454 13 19 +421 484 14 16 +399 450 13 18 +417 508 15 21 +309 502 14 22 +502 406 11 16 +536 433 11 18 +543 477 13 19 +517 467 16 19 +490 460 12 16 +482 487 13 22 +486 512 15 18 +533 513 13 19 +419 542 16 19 +444 528 15 22 +380 566 14 18 +364 532 13 19 +354 521 13 19 +280 534 16 19 +251 545 14 23 +422 44 6 8 +503 263 10 16 +498 242 8 10 +# 2--Demonstration/2_Demonstration_Demonstrators_2_100.jpg +412 154 262 344 +438 730 180 167 +687 330 121 196 +# 2--Demonstration/2_Demonstration_Protesters_2_476.jpg +274 388 42 49 +408 383 45 52 +442 387 27 36 +321 395 34 40 +197 361 29 34 +188 417 24 32 +110 376 47 60 +25 339 20 42 +96 316 40 44 +685 386 22 28 +767 362 25 24 +787 469 23 29 +921 379 22 25 +599 340 21 31 +879 381 20 27 +610 395 26 31 +# 2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_1.jpg +607 250 41 59 +783 262 38 57 +746 287 28 41 +564 239 37 51 +531 271 29 40 +804 231 23 62 +959 274 45 56 +927 271 31 50 +893 255 38 44 +848 96 35 40 +895 122 41 38 +961 175 43 32 +1003 117 20 42 +480 286 42 53 +481 255 40 36 +409 248 39 48 +357 258 41 50 +299 265 38 42 +344 248 35 43 +205 128 45 50 +173 167 35 49 +205 250 45 44 +145 237 33 58 +718 46 15 15 +615 156 7 8 +623 
167 5 8 +544 156 16 15 +481 35 33 40 +347 14 37 39 +357 78 17 18 +224 2 32 41 +46 252 27 54 +111 304 35 39 +243 275 24 33 +265 329 16 21 +315 409 22 26 +402 379 11 14 +377 388 18 22 +361 364 13 16 +386 360 8 14 +398 362 9 9 +411 362 10 9 +353 375 6 8 +357 393 6 9 +814 532 43 53 +737 579 37 57 +694 531 36 68 +620 534 34 59 +517 476 39 43 +517 572 36 69 +322 478 43 48 +311 520 40 61 +227 552 16 20 +120 487 44 62 +195 466 36 56 +196 605 29 62 +# 2--Demonstration/2_Demonstration_Demonstrators_2_309.jpg +132 265 28 32 +152 239 19 19 +82 233 17 21 +67 218 10 10 +299 270 5 7 +452 168 132 121 +817 292 40 55 +991 384 33 50 +955 312 4 6 +916 359 4 5 +930 371 8 9 +988 320 5 7 +1007 319 5 5 +963 402 5 9 +966 383 7 8 +33 227 12 12 +# 2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_453.jpg +400 262 37 61 +477 223 81 116 +575 323 28 40 +608 290 21 37 +648 279 20 36 +658 305 23 28 +692 313 19 22 +722 340 14 20 +754 327 22 27 +738 312 14 20 +722 317 15 18 +754 307 9 15 +808 312 16 22 +13 293 36 57 +61 310 32 37 +186 279 55 66 +273 338 14 16 +98 342 12 15 +918 318 15 25 +559 324 8 15 +336 321 23 23 +295 323 15 14 +# 2--Demonstration/2_Demonstration_Protesters_2_16.jpg +284 506 90 105 +626 584 81 111 +# 2--Demonstration/2_Demonstration_Demonstrators_2_470.jpg +396 44 82 130 +632 36 80 118 +882 114 84 150 +460 134 114 146 +480 280 184 258 +8 158 286 416 +# 2--Demonstration/2_Demonstration_Protesters_2_54.jpg +871 137 17 19 +812 107 14 21 +688 165 17 23 +678 127 18 22 +601 141 17 22 +522 138 21 21 +549 59 13 17 +522 70 15 20 +439 134 17 23 +419 100 17 25 +393 77 14 18 +363 79 12 17 +315 139 18 26 +296 87 13 19 +333 67 13 18 +343 67 8 16 +281 54 7 9 +408 262 27 36 +352 232 21 17 +174 50 7 9 +90 59 10 10 +359 43 10 10 +229 163 18 21 +170 162 19 24 +143 164 19 25 +151 208 21 27 +23 212 21 28 +17 161 20 24 +72 96 14 20 +950 57 9 12 +1000 72 6 10 +# 2--Demonstration/2_Demonstration_Political_Rally_2_807.jpg +238 182 126 216 +462 174 156 280 +542 152 160 336 +644 168 130 324 +# 2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_54.jpg +1009 194 14 22 +983 233 31 37 +862 136 41 46 +852 137 20 32 +775 141 35 35 +730 166 33 37 +667 140 29 29 +635 157 25 26 +612 167 20 21 +624 117 13 38 +575 203 32 45 +566 156 22 27 +530 153 21 25 +504 145 23 24 +446 138 34 40 +580 105 16 37 +533 126 12 21 +512 119 10 17 +495 116 12 20 +475 111 11 23 +614 134 9 19 +602 139 13 20 +522 84 18 42 +490 63 15 32 +402 120 10 10 +367 116 9 12 +374 94 12 17 +394 67 10 15 +310 42 7 8 +335 40 9 11 +315 16 10 16 +334 9 8 12 +366 63 11 15 +379 64 12 19 +400 172 18 21 +381 167 14 15 +360 181 18 20 +320 210 44 45 +320 157 19 21 +272 197 19 24 +242 153 29 40 +268 149 21 21 +244 119 12 13 +235 118 9 12 +434 149 8 12 +426 147 9 12 +195 60 19 21 +170 113 21 22 +184 178 21 27 +171 150 21 24 +130 157 26 27 +107 163 23 28 +106 92 22 25 +123 97 17 21 +717 164 27 31 +# 2--Demonstration/2_Demonstration_Protesters_2_460.jpg +71 90 31 32 +132 75 19 20 +160 83 21 22 +281 78 12 14 +34 68 28 29 +273 237 41 46 +484 240 34 38 +481 97 39 44 +639 58 42 48 +436 79 20 17 +706 66 22 28 +788 68 31 38 +840 70 29 33 +884 208 28 34 +804 181 39 44 +995 92 14 15 +957 93 19 34 +751 93 10 12 +# 2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_915.jpg +417 330 241 311 +60 344 17 18 +200 271 19 28 +299 284 21 28 +263 268 13 14 +768 226 10 13 +799 237 8 11 +914 208 11 14 +# 2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_158.jpg +516 182 44 59 +184 256 21 38 +184 220 20 23 +7 200 30 42 +63 266 12 16 +34 250 10 12 +472 236 28 30 +108 256 8 
13 +252 259 11 13 +676 255 12 14 +695 260 8 11 +639 253 6 11 +502 237 10 15 +984 225 38 30 +856 241 20 19 +806 250 11 13 +309 259 7 10 +58 256 9 12 +396 257 5 5 +# 2--Demonstration/2_Demonstration_Protesters_2_178.jpg +551 540 66 79 +630 514 61 75 +740 508 68 66 +918 559 65 84 +265 492 66 89 +62 534 70 97 +228 636 67 35 +772 120 40 43 +602 115 35 53 +683 34 35 36 +461 77 45 38 +371 73 36 39 +239 91 35 39 +142 64 34 40 +76 79 42 42 +0 94 27 37 +460 157 39 39 +# 2--Demonstration/2_Demonstration_Demonstrators_2_163.jpg +894 257 33 55 +842 280 35 50 +571 187 97 116 +556 291 23 33 +341 204 72 90 +191 262 64 81 +98 252 43 61 +21 227 40 57 +# 2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_304.jpg +35 405 30 46 +80 385 30 45 +192 284 52 62 +260 384 36 50 +297 391 37 40 +373 364 35 46 +432 381 42 53 +524 349 48 62 +547 351 45 58 +683 407 40 50 +723 399 38 56 +764 301 106 111 +919 380 32 53 +# 2--Demonstration/2_Demonstration_Political_Rally_2_137.jpg +12 381 89 113 +233 305 72 90 +299 295 61 92 +372 338 47 62 +418 319 53 66 +396 280 25 32 +555 324 31 47 +651 275 36 48 +196 287 30 35 +0 308 25 43 +757 272 35 43 +458 276 20 28 +479 282 22 28 +907 282 29 32 +929 267 26 33 +841 269 31 31 +787 280 30 40 +706 272 24 25 +369 277 25 30 +# 2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_799.jpg +28 212 11 13 +196 225 14 14 +163 225 12 18 +154 257 13 17 +166 279 14 13 +119 271 15 14 +57 263 10 15 +60 252 12 16 +211 314 13 17 +265 283 11 17 +276 302 15 19 +368 202 6 9 +448 188 8 11 +485 197 8 9 +464 242 12 15 +468 270 13 14 +433 228 9 9 +364 304 14 19 +557 198 8 11 +595 204 7 10 +620 237 11 14 +641 222 6 14 +649 229 10 13 +659 197 7 10 +608 253 10 17 +610 284 13 14 +666 323 12 14 +727 299 9 11 +709 302 8 11 +731 286 10 15 +647 309 11 14 +688 200 6 8 +800 234 11 13 +858 245 8 8 +856 272 8 12 +875 309 16 16 +872 233 9 14 +910 240 7 9 +# 2--Demonstration/2_Demonstration_Protesters_2_561.jpg +929 237 16 19 +889 193 17 20 +814 264 15 17 +861 315 16 19 +747 234 18 21 +773 227 17 19 +710 241 17 19 +643 223 17 20 +733 369 14 19 +767 390 17 17 +641 380 19 20 +991 460 15 20 +952 536 18 19 +941 504 20 21 +911 476 12 18 +938 487 15 15 +864 496 19 24 +898 519 17 18 +849 534 18 19 +767 529 17 16 +738 516 19 21 +803 494 16 19 +790 458 20 21 +838 453 14 17 +826 430 12 17 +849 413 13 18 +835 397 15 19 +782 413 16 19 +754 444 15 19 +726 412 12 17 +718 467 20 21 +664 499 17 19 +646 472 16 21 +680 430 16 19 +697 422 14 15 +649 425 13 17 +624 442 16 19 +584 460 18 20 +577 416 17 20 +580 388 16 19 +598 502 19 15 +543 493 18 21 +528 446 15 18 +497 482 18 22 +443 461 20 23 +434 441 16 14 +464 380 17 20 +427 371 16 19 +380 405 15 20 +546 306 18 21 +500 325 17 19 +480 304 16 21 +440 288 19 24 +408 302 16 18 +393 256 14 18 +339 448 17 19 +289 441 17 20 +306 410 17 17 +254 389 17 20 +229 465 16 14 +211 447 14 16 +299 338 18 21 +226 345 17 17 +286 320 16 22 +235 291 18 22 +262 235 16 17 +217 266 15 16 +316 315 17 13 +295 259 12 15 +161 426 16 18 +177 394 19 22 +187 360 17 20 +104 387 18 19 +102 431 18 19 +139 312 19 21 +119 358 15 19 +180 309 16 19 +175 301 14 16 +91 318 20 21 +19 326 17 20 +26 380 16 18 +40 365 17 19 +16 261 15 16 +72 264 14 20 +94 225 15 18 +132 213 16 19 +115 200 11 15 +39 412 18 20 +101 297 11 16 +0 336 8 18 +25 304 14 19 +553 140 17 20 +534 133 17 21 +489 138 19 21 +445 143 17 17 +401 132 18 19 +992 542 17 11 +953 656 18 22 +898 663 14 19 +870 641 16 20 +866 608 15 22 +201 84 16 20 +285 94 17 19 +318 95 14 17 +338 120 14 14 +852 359 18 23 +# 
2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_367.jpg +43 276 21 38 +51 252 17 20 +91 262 14 19 +123 262 23 31 +181 307 30 28 +202 272 14 20 +261 282 23 33 +306 306 21 26 +292 287 14 12 +383 254 74 82 +507 248 46 52 +500 281 9 23 +780 342 52 63 +# 2--Demonstration/2_Demonstration_Demonstrators_2_547.jpg +0 214 25 42 +166 157 50 63 +217 142 53 69 +415 188 28 42 +456 199 30 41 +485 164 41 54 +640 160 42 62 +681 138 55 79 +821 134 45 62 +879 178 39 54 +932 203 30 41 +995 199 21 62 +766 209 22 33 +# 2--Demonstration/2_Demonstration_Demonstrators_2_196.jpg +0 707 6 9 +28 664 6 9 +61 668 5 8 +36 735 12 14 +86 718 11 10 +130 735 10 10 +157 747 12 11 +182 726 10 12 +152 722 10 11 +109 672 9 10 +134 677 11 11 +146 639 6 8 +123 660 6 6 +129 655 5 7 +185 646 7 9 +210 696 10 11 +251 718 9 11 +270 688 9 10 +317 713 9 9 +349 727 10 12 +374 715 7 10 +363 650 9 10 +17 568 7 6 +164 491 6 7 +64 486 6 6 +73 445 6 6 +91 447 7 7 +119 471 5 6 +147 472 6 7 +178 484 6 6 +176 469 5 6 +193 466 5 6 +167 462 5 5 +144 449 5 5 +161 445 6 5 +2 507 5 6 +0 472 5 6 +35 453 4 6 +37 396 6 6 +29 348 3 4 +62 379 4 4 +77 397 5 6 +93 381 4 5 +68 369 4 4 +93 364 4 5 +93 334 5 5 +112 332 3 5 +112 363 3 4 +117 368 4 4 +134 377 4 6 +126 363 5 5 +127 329 5 5 +144 367 4 4 +145 357 5 6 +152 340 4 5 +158 328 4 5 +162 354 4 5 +171 377 4 5 +191 374 5 5 +181 353 5 4 +181 346 6 5 +187 323 5 4 +23 258 3 3 +40 261 3 4 +91 306 3 4 +97 285 3 3 +112 284 3 5 +279 582 9 10 +240 498 6 6 +194 496 6 6 +343 512 9 9 +358 540 8 8 +385 540 6 7 +411 559 7 7 +387 490 9 8 +372 446 5 5 +332 484 6 7 +340 510 5 7 +304 479 6 7 +297 462 5 6 +218 475 6 7 +219 445 5 7 +238 446 6 7 +231 429 5 5 +277 458 5 5 +231 473 4 5 +267 475 5 6 +233 460 5 6 +263 477 4 5 +248 427 5 7 +297 416 5 5 +296 407 6 6 +305 412 5 7 +332 387 4 5 +287 388 5 6 +289 444 5 5 +223 350 4 5 +202 348 3 3 +198 344 4 4 +200 333 4 3 +217 323 4 5 +238 322 4 4 +253 340 5 5 +282 323 5 5 +296 313 3 5 +302 304 4 4 +261 315 3 4 +258 308 3 5 +234 308 4 3 +224 309 4 4 +203 303 3 3 +207 296 3 4 +189 297 3 4 +186 287 3 3 +247 294 4 3 +257 286 5 6 +275 307 3 4 +304 294 3 3 +295 285 5 5 +281 284 6 5 +221 250 4 3 +188 251 3 3 +190 237 2 3 +209 251 3 3 +164 228 3 3 +177 253 2 4 +193 226 3 3 +11 286 4 4 +6 228 3 2 +14 227 2 2 +61 222 2 2 +57 216 3 2 +55 209 2 2 +62 210 2 2 +114 230 3 3 +104 232 2 3 +148 214 2 3 +125 219 2 2 +116 212 2 2 +129 210 2 2 +212 222 3 4 +200 221 3 3 +274 252 4 3 +252 282 5 4 +314 274 5 8 +223 281 4 4 +237 271 6 7 +252 275 3 4 +310 321 4 3 +326 340 4 5 +340 337 3 5 +305 352 4 5 +324 309 3 3 +320 306 4 6 +335 289 3 4 +350 291 2 3 +354 298 3 3 +365 309 3 4 +344 319 4 4 +344 330 4 5 +339 212 4 4 +349 207 3 5 +365 237 4 5 +375 226 3 4 +366 230 4 5 +348 225 3 4 +354 223 5 5 +363 218 4 5 +362 206 2 2 +347 236 3 4 +432 314 4 5 +449 308 4 5 +376 280 4 4 +395 274 4 5 +408 258 4 5 +415 276 4 4 +426 260 4 4 +441 260 3 4 +435 271 4 4 +433 295 4 5 +407 335 4 3 +410 316 4 5 +486 318 4 5 +456 345 4 4 +472 336 4 6 +463 326 4 4 +352 386 6 6 +373 367 4 5 +394 363 4 4 +404 365 4 5 +433 417 6 6 +464 375 4 5 +439 483 5 9 +468 473 6 9 +495 459 6 7 +531 451 5 8 +523 522 6 7 +530 544 6 7 +585 555 9 11 +437 533 8 10 +414 616 7 7 +396 687 7 11 +477 646 9 10 +531 740 8 11 +431 752 8 10 +443 613 9 9 +482 603 7 10 +523 606 8 11 +563 640 8 11 +487 574 6 6 +509 579 6 8 +437 583 9 9 +420 759 7 8 +409 756 7 10 +446 597 7 7 +495 582 5 7 +489 354 4 5 +485 345 3 4 +526 349 4 4 +517 374 4 5 +549 369 4 5 +559 361 4 5 +549 402 4 6 +585 394 6 7 +621 404 6 6 +591 420 3 6 +626 415 3 5 +601 439 6 7 +620 441 6 7 +630 
425 6 6 +569 454 7 7 +644 491 7 7 +634 519 6 7 +675 449 6 9 +675 535 8 8 +631 569 8 8 +636 550 5 9 +705 536 5 7 +722 504 7 10 +753 525 6 8 +746 542 6 9 +834 513 6 7 +880 538 7 8 +928 570 6 9 +893 541 7 9 +879 558 5 8 +618 655 8 10 +598 634 7 10 +631 613 8 10 +642 632 8 8 +635 672 9 12 +683 715 7 10 +678 739 6 8 +762 737 8 10 +747 727 8 10 +769 644 6 10 +769 619 5 9 +749 635 9 10 +739 622 8 11 +705 616 5 9 +682 594 7 9 +830 746 6 11 +829 683 6 8 +857 712 7 11 +946 651 7 11 +930 632 7 11 +906 634 6 10 +899 643 5 11 +831 618 7 9 +820 639 4 8 +907 672 9 10 +991 731 6 10 +956 713 6 8 +965 668 5 8 +917 728 8 8 +926 737 5 9 +899 726 8 8 +# 2--Demonstration/2_Demonstration_Demonstrators_2_41.jpg +952 497 46 62 +889 515 12 14 +820 494 48 59 +630 437 42 60 +391 377 92 160 +402 181 46 51 +206 195 45 50 +151 538 7 9 +255 93 34 36 +# 2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_282.jpg +300 220 208 208 +10 262 162 212 +# 2--Demonstration/2_Demonstration_Political_Rally_2_659.jpg +957 241 66 83 +729 247 52 51 +771 221 43 57 +888 230 17 22 +453 278 66 77 +366 277 22 21 +449 274 20 26 +408 280 15 18 +233 286 36 43 +255 251 34 38 +191 277 35 43 +162 292 28 27 +37 249 101 109 +0 328 29 35 +26 314 28 34 +3 296 19 23 +738 207 7 10 +497 206 139 147 +# 2--Demonstration/2_Demonstration_Political_Rally_2_90.jpg +542 275 50 69 +788 379 18 30 +873 382 11 14 +922 380 12 13 +326 365 13 15 +298 361 12 14 +200 354 11 16 +157 359 11 15 +109 359 13 16 +63 319 13 17 +37 370 14 19 +22 314 14 16 +# 2--Demonstration/2_Demonstration_Political_Rally_2_690.jpg +1004 415 10 11 +945 369 16 14 +813 404 68 78 +753 370 9 12 +699 367 6 7 +688 366 7 8 +644 395 27 35 +650 377 7 12 +487 377 139 145 +180 351 8 12 +116 354 7 8 +81 354 9 11 +109 394 7 9 +62 360 9 10 +95 413 9 11 +0 362 9 17 +729 428 15 14 +222 360 6 7 +# 2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_604.jpg +665 120 51 107 +404 140 52 70 +295 40 67 112 +95 168 44 61 +168 171 29 35 +11 177 22 35 +26 169 15 29 +537 137 22 26 +469 149 20 27 +518 94 12 13 +776 113 13 16 +713 105 17 20 +565 145 11 19 +480 95 11 14 +563 94 10 11 +316 160 20 24 +73 135 14 18 +90 139 16 17 +# 2--Demonstration/2_Demonstration_Protesters_2_559.jpg +728 412 58 98 +822 218 90 102 +926 196 66 98 +292 288 96 76 +# 2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_494.jpg +38 374 108 136 +480 272 62 84 +# 2--Demonstration/2_Demonstration_Protesters_2_362.jpg +808 412 77 77 +867 538 20 30 +885 523 22 29 +385 523 12 16 +368 446 5 11 +417 498 13 13 +309 330 23 27 +375 471 6 10 +448 507 13 15 +710 556 17 26 +# 2--Demonstration/2_Demonstration_Protesters_2_840.jpg +914 153 26 33 +792 169 19 28 +678 191 18 29 +320 173 13 25 +256 196 14 22 +184 186 19 22 +100 187 21 22 +# 2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_441.jpg +2 192 30 40 +111 190 39 47 +142 168 21 36 +235 156 37 46 +287 190 33 38 +320 177 19 24 +328 211 38 43 +403 170 34 38 +501 166 34 45 +571 181 31 40 +612 180 37 45 +718 166 22 35 +789 162 41 47 +856 172 35 45 +932 151 34 45 +851 361 28 25 +493 581 47 30 +898 603 45 35 +# 2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_314.jpg +130 334 203 267 +559 281 145 200 +798 556 19 27 +842 557 32 33 +878 555 24 31 +# 2--Demonstration/2_Demonstration_Demonstrators_2_488.jpg +169 32 39 63 +229 44 119 149 +471 124 63 91 +383 3 56 47 +511 0 142 140 +981 126 43 99 +0 75 39 49 +92 125 25 27 +# 2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_813.jpg +261 330 16 37 +333 345 21 27 +350 362 15 25 +536 351 18 30 +700 390 16 18 +656 450 
16 17 +713 478 14 14 +834 370 21 31 +472 366 15 21 +# 2--Demonstration/2_Demonstration_Protesters_2_800.jpg +228 169 43 44 +377 186 28 35 +515 225 24 22 +732 99 182 201 +929 235 16 15 +# 2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_487.jpg +16 394 103 174 +454 1067 139 163 +630 1010 87 133 +723 1222 84 163 +198 92 301 432 +# 2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_600.jpg +681 344 28 48 +820 308 28 41 +888 273 46 77 +955 262 24 30 +342 307 22 33 +480 299 26 32 +193 290 37 21 +226 389 28 42 +151 308 32 39 +44 236 67 88 +58 152 15 18 +192 146 16 18 +239 161 16 18 +256 108 10 13 +229 103 10 11 +172 105 10 11 +126 111 8 11 +81 103 9 11 +28 103 11 14 +26 43 9 11 +83 44 10 10 +59 61 7 7 +114 35 8 11 +153 57 6 10 +220 49 10 13 +284 153 16 19 +316 105 9 12 +381 149 15 17 +406 108 8 9 +436 105 9 12 +432 164 15 14 +507 145 16 19 +499 111 9 10 +541 112 9 11 +563 125 14 17 +639 150 16 20 +597 109 9 12 +623 96 7 14 +658 114 10 12 +688 104 10 12 +703 128 10 16 +733 145 11 20 +799 150 13 15 +761 119 7 9 +733 111 10 12 +853 139 12 19 +945 145 13 18 +999 85 10 14 +960 122 10 13 +902 125 9 10 +922 113 7 10 +919 159 11 12 +893 143 11 14 +904 154 8 9 +865 222 24 27 +978 119 6 7 +887 109 7 8 +1013 124 8 12 +834 120 9 12 +796 116 8 9 +773 122 8 9 +633 108 7 10 +581 112 9 12 +483 113 8 7 +336 95 6 6 +375 117 7 6 +40 163 10 11 +0 160 11 20 +4 29 12 16 +185 64 8 10 +241 57 10 12 +273 57 9 12 +303 97 6 9 +456 114 7 9 +779 112 7 9 +912 139 6 8 +485 384 38 40 +943 400 36 49 +# 2--Demonstration/2_Demonstration_Political_Rally_2_319.jpg +615 456 37 49 +875 386 14 18 +867 368 5 7 +992 375 7 8 +1000 371 4 5 +1007 371 5 6 +1013 388 6 8 +1018 367 6 6 +854 429 21 26 +954 401 10 13 +669 445 30 35 +712 523 42 56 +830 563 51 73 +1001 487 12 25 +391 464 27 40 +535 449 21 28 +514 469 20 32 +273 453 30 45 +118 447 21 28 +158 486 27 35 +62 458 17 32 +48 457 19 30 +25 458 16 35 +110 370 9 10 +248 371 10 12 +49 561 21 47 +370 408 14 19 +54 372 12 12 +90 356 6 9 +77 386 12 18 +230 357 9 12 +316 360 8 9 +432 411 21 24 +37 392 10 16 +223 376 9 10 +381 363 6 10 +139 686 30 42 +# 2--Demonstration/2_Demonstration_Political_Rally_2_410.jpg +4 525 58 78 +10 416 34 35 +115 421 22 27 +149 413 14 17 +30 384 14 14 +49 385 12 13 +149 395 9 11 +175 412 12 13 +73 401 11 15 +97 400 13 14 +119 404 12 15 +133 502 22 22 +179 498 51 55 +249 500 74 88 +286 416 10 12 +258 420 15 17 +198 416 17 19 +328 420 9 10 +351 433 42 45 +216 411 15 18 +408 420 12 15 +404 451 13 17 +466 456 38 41 +509 427 19 25 +532 418 10 13 +543 418 9 12 +504 488 38 56 +563 417 16 26 +600 436 24 22 +633 447 95 119 +729 469 45 65 +809 447 20 27 +560 512 42 78 +830 430 18 19 +855 415 39 52 +915 413 8 11 +930 448 31 37 +954 426 23 28 +1008 414 10 12 +740 417 21 26 +# 2--Demonstration/2_Demonstration_Demonstrators_2_244.jpg +123 213 25 40 +30 194 5 6 +75 158 10 13 +96 170 23 24 +163 177 11 15 +204 172 15 18 +250 177 27 31 +428 123 50 54 +305 168 9 11 +544 146 22 22 +603 176 24 26 +615 139 21 22 +673 157 15 18 +691 146 24 34 +712 187 22 34 +727 158 61 83 +844 145 24 24 +906 171 24 27 +940 124 22 27 +906 139 16 11 +1011 118 12 20 +970 132 17 22 +524 157 14 16 +# 2--Demonstration/2_Demonstration_Protesters_2_684.jpg +108 499 18 20 +0 483 22 21 +231 423 17 18 +360 436 12 14 +304 493 18 23 +238 548 29 15 +49 601 39 32 +77 404 14 13 +376 539 30 21 +427 460 15 14 +307 391 14 15 +366 373 10 12 +447 377 15 16 +536 375 11 15 +521 427 15 19 +564 457 12 15 +477 444 14 11 +114 359 9 8 +164 352 12 12 +205 375 14 15 +41 365 10 9 +8 374 10 10 +6 338 10 8 +17 335 7 8 
+36 326 8 8 +45 329 7 7 +162 337 8 8 +200 348 10 10 +212 308 8 7 +100 299 6 6 +95 326 8 10 +187 343 7 9 +178 329 7 7 +178 304 6 8 +309 325 9 9 +293 321 9 9 +306 346 9 10 +397 362 9 10 +373 356 10 9 +340 356 9 11 +335 336 9 12 +397 329 10 11 +384 318 7 10 +417 339 11 12 +425 348 7 11 +442 325 8 9 +464 337 11 14 +413 377 9 11 +383 285 6 8 +289 301 8 11 +379 338 8 9 +499 357 13 13 +454 307 8 12 +603 482 18 15 +649 476 20 18 +663 443 18 19 +735 433 18 23 +773 414 15 19 +739 378 12 16 +693 365 12 15 +575 362 10 12 +680 544 25 32 +747 480 22 25 +617 421 12 16 +839 569 67 62 +869 428 23 28 +908 417 20 21 +948 474 26 29 +856 386 17 24 +853 367 13 13 +813 340 12 16 +829 326 10 12 +808 307 8 11 +887 338 11 10 +1004 337 12 15 +1002 310 13 18 +957 275 8 8 +648 349 9 11 +689 330 9 11 +754 326 10 12 +731 295 8 11 +715 407 12 17 +646 331 10 10 +592 317 8 9 +505 325 8 9 +535 326 8 9 +572 345 8 9 +603 333 9 12 +867 306 7 9 +601 319 11 12 +658 314 6 8 +393 298 9 11 +412 321 10 14 +252 336 9 12 +52 338 10 9 +# 2--Demonstration/2_Demonstration_Demonstrators_2_170.jpg +926 174 85 85 +867 195 54 61 +736 146 61 76 +721 125 40 48 +589 107 46 57 +556 153 14 21 +539 153 13 15 +488 165 37 48 +322 119 55 83 +303 138 6 8 +258 133 17 21 +191 116 34 44 +39 121 19 27 +6 125 19 21 +72 123 13 19 +90 133 10 14 +# 2--Demonstration/2_Demonstration_Political_Rally_2_301.jpg +677 323 18 16 +503 269 11 13 +247 246 9 12 +179 236 10 14 +211 253 7 9 +136 271 7 9 +197 285 8 9 +165 277 9 11 +141 234 10 13 +339 281 10 13 +352 276 8 11 +369 254 8 12 +418 310 10 13 +438 303 7 10 +475 266 8 12 +453 256 11 16 +403 247 12 13 +313 255 11 11 +472 304 9 13 +29 263 7 14 +# 2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_140.jpg +331 519 14 19 +381 477 14 13 +376 514 12 16 +302 610 15 18 +331 606 21 26 +325 645 16 18 +375 631 20 25 +364 602 14 15 +378 572 15 16 +412 562 18 22 +429 624 20 20 +445 613 17 9 +434 593 14 17 +412 658 21 14 +453 643 17 19 +481 642 16 13 +491 634 17 13 +521 634 8 11 +530 617 18 27 +507 576 14 19 +521 573 20 25 +524 529 13 15 +547 527 16 17 +572 539 13 15 +560 499 13 21 +549 479 14 15 +415 481 12 14 +443 481 9 14 +453 487 11 16 +494 475 14 18 +512 502 13 17 +762 625 20 22 +802 646 19 22 +723 631 20 16 +697 592 16 13 +623 628 21 19 +657 642 26 21 +698 652 13 15 +745 636 21 34 +732 647 22 25 +625 612 18 14 +603 664 28 8 +302 664 23 8 +897 284 8 10 +933 284 10 10 +929 312 9 10 +861 327 10 10 +929 327 10 9 +906 323 7 15 +554 420 10 12 +567 430 12 16 +986 262 9 15 +969 264 8 12 +681 164 5 8 +699 168 5 5 +702 154 5 5 +732 169 4 4 +518 283 7 9 +968 217 4 4 +974 216 7 9 +973 197 6 9 +784 265 10 9 +826 280 8 10 +821 275 8 9 +824 270 11 6 +807 278 8 10 +753 272 8 11 +871 240 8 9 +835 250 7 9 +685 253 7 11 +667 252 9 10 +654 197 6 9 +703 220 5 6 +674 211 6 11 +710 198 3 5 +702 203 6 6 +728 252 8 10 +720 258 8 9 +716 250 6 6 +721 244 3 4 +685 242 7 10 +695 246 9 8 +742 244 9 6 +727 234 8 10 +896 203 6 6 +756 172 3 3 +793 172 5 7 +791 175 5 9 +818 191 6 7 +865 182 6 4 +882 183 9 7 +806 169 6 6 +867 225 5 7 +880 225 5 5 +828 190 8 10 +844 201 7 6 +843 164 6 6 +805 298 11 6 +801 291 4 7 +233 463 14 18 +232 485 16 18 +157 545 14 20 +202 541 18 19 +137 583 22 25 +160 593 14 17 +181 617 19 16 +183 655 23 17 +218 625 19 18 +243 604 16 22 +205 587 13 20 +249 566 14 16 +280 526 16 17 +300 520 14 17 +302 561 18 18 +532 493 15 19 +595 545 11 7 +597 556 19 9 +616 526 16 22 +649 530 14 15 +686 505 17 18 +628 473 14 15 +638 486 13 20 +683 478 14 16 +724 475 14 16 +661 566 13 14 +674 546 9 14 +618 569 15 22 +654 603 16 18 +710 573 16 
16 +741 547 17 22 +768 544 18 18 +778 523 17 17 +954 639 27 31 +930 643 17 24 +901 644 30 27 +956 592 16 20 +915 608 17 11 +911 578 14 17 +858 599 17 16 +843 631 24 28 +833 541 17 22 +807 567 14 24 +786 563 16 18 +756 576 17 23 +761 597 19 18 +739 228 8 9 +679 223 9 10 +684 215 10 10 +761 264 6 7 +769 265 6 6 +759 251 5 6 +769 237 8 8 +764 228 8 7 +791 232 7 7 +799 245 6 7 +796 239 6 7 +808 245 4 6 +738 192 5 5 +762 210 4 11 +779 205 8 9 +794 220 5 6 +825 241 6 6 +843 225 5 7 +847 245 5 5 +862 243 5 4 +889 233 6 6 +894 246 7 9 +932 248 7 7 +914 230 6 10 +935 209 6 7 +946 210 6 6 +947 192 5 4 +939 200 4 6 +925 187 4 6 +928 204 3 6 +890 191 5 6 +534 300 8 9 +455 398 13 13 +493 433 7 13 +502 442 13 9 +512 388 10 11 +549 398 11 15 +515 366 7 11 +559 357 12 13 +569 288 8 8 +578 330 10 13 +583 315 10 12 +542 259 6 9 +565 249 7 7 +859 292 9 10 +877 289 8 9 +592 458 13 16 +608 435 12 14 +644 464 12 12 +702 462 14 18 +555 384 13 15 +564 469 13 14 +601 382 11 13 +590 385 9 11 +657 334 8 7 +594 346 10 12 +623 378 11 13 +638 386 12 15 +673 370 12 14 +680 395 11 8 +687 419 12 18 +713 391 11 11 +740 397 9 13 +596 289 7 8 +601 278 6 8 +606 316 8 11 +613 288 7 9 +787 321 9 10 +804 303 8 12 +821 325 10 9 +682 352 10 12 +672 318 8 11 +583 231 7 8 +597 245 7 8 +654 275 9 11 +686 310 9 10 +679 300 9 11 +721 314 9 13 +686 283 7 9 +693 271 7 10 +751 287 8 10 +761 300 8 9 +771 297 10 8 +852 268 10 11 +610 250 7 9 +629 249 8 6 +964 234 6 6 +940 231 7 7 +953 218 7 7 +962 218 4 5 +851 218 11 13 +936 219 6 6 +792 282 6 5 +641 239 8 6 +570 302 7 6 +723 353 7 7 +747 324 7 7 +486 531 16 18 +413 529 15 21 +390 618 13 22 +281 594 18 26 +219 596 7 8 +831 577 14 18 +332 227 7 9 +680 205 5 8 +689 206 8 5 +975 254 6 6 +992 256 8 10 +1002 259 9 11 +888 273 6 7 +840 331 8 9 +918 332 6 7 +780 253 5 7 +735 271 8 9 +817 204 7 8 +774 188 10 11 +740 200 8 8 +766 192 7 11 +789 200 5 9 +871 192 8 12 +885 204 6 7 +889 218 5 6 +897 217 4 6 +921 200 6 7 +912 197 6 6 +903 208 5 8 +956 209 4 5 +956 244 6 7 +953 249 6 6 +865 213 5 6 +836 203 6 7 +855 191 5 7 +841 191 6 6 +807 219 6 7 +795 205 5 8 +828 208 8 12 +776 219 8 9 +765 251 8 9 +776 249 7 9 +735 208 12 16 +755 205 8 11 +750 156 7 11 +800 149 5 5 +812 158 6 6 +820 232 4 5 +804 235 4 5 +929 228 4 6 +931 221 4 5 +1016 225 5 8 +655 222 6 6 +689 167 6 7 +668 175 4 6 +768 174 6 6 +746 177 4 6 +777 157 3 5 +974 154 3 5 +694 145 7 7 +207 638 11 16 +844 614 15 13 +464 552 10 14 +840 306 7 5 +1000 220 6 7 +692 300 7 7 +968 489 14 14 +30 660 23 12 +# 2--Demonstration/2_Demonstration_Demonstrators_2_518.jpg +188 0 30 31 +87 248 64 84 +198 279 28 69 +243 189 28 73 +336 148 35 68 +391 133 50 72 +476 144 45 60 +611 127 25 50 +697 135 36 54 +748 126 34 45 +765 49 22 51 +836 79 14 47 +848 90 18 37 +944 44 26 42 +619 408 55 75 +984 254 40 65 +939 417 59 87 +569 137 25 48 +954 122 13 18 +1010 83 14 29 +# 2--Demonstration/2_Demonstration_Political_Rally_2_264.jpg +709 505 16 20 +652 467 13 17 +613 472 12 16 +581 462 14 19 +515 453 8 13 +513 415 12 14 +619 428 12 15 +678 591 19 22 +736 608 22 16 +622 567 15 22 +645 578 13 17 +601 519 17 17 +532 511 17 21 +440 504 17 19 +386 550 20 23 +334 457 16 19 +394 426 15 18 +455 451 16 17 +334 402 10 13 +731 439 10 13 +786 461 11 12 +812 480 13 15 +237 577 20 24 +140 596 23 28 +219 546 18 22 +179 510 16 22 +66 472 14 18 +26 459 18 18 +240 391 13 16 +212 371 10 12 +43 365 10 13 +92 355 13 18 +98 416 14 18 +144 394 13 19 +153 334 13 14 +280 348 10 13 +211 329 11 12 +124 322 10 11 +17 298 12 13 +599 369 13 16 +480 399 10 14 +403 298 9 14 +495 332 8 14 +550 340 8 13 
+988 525 13 18 +969 456 13 16 +862 415 11 14 +906 408 11 14 +958 385 11 14 +968 430 11 12 +693 374 8 12 +751 359 8 12 +793 367 7 9 +733 354 8 11 +701 341 8 12 +929 596 22 28 +855 495 14 15 +# 2--Demonstration/2_Demonstration_Demonstrators_2_654.jpg +153 106 33 42 +152 151 35 50 +243 127 33 36 +296 149 41 45 +450 164 38 35 +465 119 26 30 +596 119 29 34 +588 171 36 41 +710 131 23 25 +717 191 30 38 +826 131 30 27 +957 144 32 38 +953 191 39 36 +# 2--Demonstration/2_Demonstration_Political_Rally_2_940.jpg +400 174 260 314 +60 8 166 200 +208 36 122 152 +302 390 186 230 +# 2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_586.jpg +26 103 13 17 +25 130 19 18 +140 143 23 33 +173 127 17 19 +131 121 15 15 +109 129 16 15 +119 146 20 23 +126 103 11 11 +168 102 10 11 +68 96 7 9 +69 130 16 20 +241 122 20 23 +200 122 10 12 +187 103 8 17 +283 155 18 29 +387 133 15 23 +344 95 13 14 +356 88 10 12 +411 107 12 16 +439 126 11 17 +448 115 13 20 +471 148 26 33 +380 185 30 37 +580 152 16 31 +523 120 13 18 +540 93 10 11 +639 164 20 35 +692 146 28 35 +758 119 17 22 +786 113 14 15 +804 216 16 38 +834 203 32 37 +960 104 16 20 +999 143 15 30 +476 227 15 22 +263 232 14 20 +432 98 10 11 +311 171 15 23 +570 122 14 18 +598 115 12 17 +528 108 10 12 +379 103 10 12 +281 123 14 17 +239 84 12 14 +159 219 9 15 +98 91 6 6 +158 60 6 7 +167 64 4 4 +62 96 4 4 +53 98 6 7 +20 79 5 6 +88 97 6 5 +399 165 12 14 +293 89 7 11 +289 103 8 9 +12 88 10 12 +4 112 10 13 +# 2--Demonstration/2_Demonstration_Protesters_2_577.jpg +810 547 27 29 +608 561 19 20 +477 449 18 21 +161 466 21 25 +# 2--Demonstration/2_Demonstration_Political_Rally_2_695.jpg +883 480 35 45 +866 363 32 43 +816 291 26 35 +813 274 27 30 +963 650 61 34 +764 560 57 73 +612 569 32 63 +683 471 43 49 +583 369 32 39 +497 375 34 40 +438 347 33 38 +420 462 39 57 +563 332 27 34 +693 333 30 32 +314 534 38 46 +297 422 39 44 +359 423 37 46 +327 472 42 50 +185 441 41 45 +171 435 40 47 +127 397 41 36 +223 349 32 43 +283 415 39 32 +99 357 39 42 +70 462 46 51 +9 437 40 41 +0 379 36 44 +407 377 33 37 +404 298 27 35 +189 226 23 28 +161 256 24 26 +101 235 23 24 +168 216 20 24 +257 283 31 35 +255 210 13 16 +807 174 20 21 +687 185 19 20 +704 201 20 22 +684 149 17 19 +635 173 17 19 +608 203 17 20 +792 201 19 21 +662 211 21 23 +806 229 24 29 +853 163 21 28 +672 286 27 19 +1010 107 11 13 +958 129 14 17 +964 94 14 17 +216 133 15 16 +186 138 17 20 +141 130 14 16 +200 115 10 9 +134 110 11 11 +123 121 10 9 +43 242 28 27 +59 221 23 25 +15 269 30 28 +14 226 20 25 +57 136 16 16 +22 125 16 16 +522 457 40 44 +472 96 11 13 +471 47 9 11 +463 86 8 10 +561 8 7 7 +589 189 16 18 +583 174 18 21 +611 131 12 15 +718 282 15 23 +267 160 18 20 +255 89 10 17 +241 99 9 11 +188 50 12 10 +217 52 11 12 +230 37 8 11 +57 60 15 15 +384 3 7 6 +439 4 6 10 +479 590 21 60 +203 269 27 31 +480 329 27 42 +855 261 30 35 +836 386 36 47 +392 523 44 44 +0 18 16 17 +328 262 27 31 +# 2--Demonstration/2_Demonstration_Protesters_2_91.jpg +959 214 38 38 +784 241 22 27 +815 250 14 17 +731 251 14 16 +666 273 10 19 +632 252 13 17 +594 258 12 17 +535 243 15 18 +406 241 15 20 +341 242 27 31 +# 2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_895.jpg +1 281 9 18 +24 284 20 26 +53 268 12 18 +68 284 16 20 +67 310 16 23 +47 295 14 20 +112 283 19 23 +139 271 13 21 +198 273 13 17 +218 281 9 15 +208 371 21 40 +256 267 10 15 +275 302 23 34 +156 289 8 12 +230 316 16 27 +207 306 23 30 +299 311 17 24 +374 318 21 32 +279 288 10 13 +330 292 15 20 +337 278 18 21 +388 275 11 14 +300 272 20 25 +408 286 8 14 +416 278 11 16 +428 309 11 15 +452 301 
10 22 +462 297 17 21 +519 290 15 21 +574 353 16 24 +485 349 51 68 +608 280 11 18 +678 276 10 13 +746 297 16 17 +673 331 32 42 +633 337 28 38 +616 330 19 31 +803 305 22 27 +822 313 12 15 +808 335 22 33 +830 345 31 30 +926 310 13 16 +965 305 16 20 +866 328 25 32 +1011 294 12 16 +248 343 21 30 +# 2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_102.jpg +1003 499 13 19 +981 514 16 24 +954 491 17 21 +758 578 17 26 +700 538 14 22 +668 544 26 27 +798 524 21 22 +624 653 15 28 +635 582 13 26 +425 610 22 33 +265 584 20 27 +240 585 17 29 +222 512 14 20 +233 605 17 24 +80 605 20 30 +84 589 19 20 +755 536 16 19 +359 534 17 27 +408 610 15 27 +# 2--Demonstration/2_Demonstration_Political_Rally_2_406.jpg +806 360 12 14 +747 337 13 17 +810 616 214 152 +780 591 36 49 +622 606 137 162 +557 611 39 52 +490 580 24 30 +508 693 59 75 +325 544 26 31 +378 620 46 67 +251 546 20 27 +266 591 91 126 +250 609 35 53 +207 628 27 25 +138 562 22 25 +124 586 25 32 +68 555 20 24 +0 584 17 34 +20 670 48 91 +# 2--Demonstration/2_Demonstration_Protesters_2_519.jpg +549 120 59 75 +796 100 51 66 +898 176 39 45 +161 138 65 73 +364 250 34 35 +# 2--Demonstration/2_Demonstration_Political_Rally_2_171.jpg +1003 629 16 19 +948 625 18 19 +850 663 58 77 +742 734 41 34 +618 672 56 83 +550 666 28 36 +447 652 29 42 +399 708 52 60 +41 715 45 52 +147 713 31 48 +100 629 48 47 +126 571 16 20 +# 2--Demonstration/2_Demonstration_Political_Rally_2_985.jpg +984 375 35 44 +803 555 138 184 +550 474 61 74 +314 404 44 121 +392 395 18 28 +360 405 25 31 +415 421 13 15 +115 448 35 37 +70 471 40 45 +18 446 27 41 +93 424 20 26 +208 378 7 8 +198 380 8 9 +187 379 8 8 +76 543 14 18 +95 550 14 20 +114 557 13 16 +57 561 15 15 +7 414 9 13 +65 421 14 15 +657 299 12 14 +640 296 12 13 +624 289 13 15 +609 304 12 12 +153 424 14 16 +130 424 10 13 +384 372 5 6 +394 373 7 8 +# 2--Demonstration/2_Demonstration_Political_Rally_2_584.jpg +651 489 126 144 +232 416 54 56 +204 193 42 52 +# 2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_114.jpg +914 236 30 36 +908 124 23 35 +943 138 22 32 +825 123 24 34 +811 232 24 32 +769 379 47 60 +745 294 24 24 +587 290 71 94 +499 326 27 54 +364 142 81 142 +123 306 84 126 +# 2--Demonstration/2_Demonstration_Political_Rally_2_726.jpg +114 86 130 164 +518 428 138 186 +# 2--Demonstration/2_Demonstration_Protesters_2_117.jpg +240 324 116 94 +726 312 74 114 +# 2--Demonstration/2_Demonstration_Protesters_2_456.jpg +847 446 96 133 +859 336 28 35 +779 226 44 73 +308 486 114 163 +657 476 54 79 +765 514 46 54 +973 472 51 91 +1 486 70 69 +# 2--Demonstration/2_Demonstration_Demonstrators_2_162.jpg +654 354 47 53 +622 362 22 33 +585 351 38 48 +501 391 41 61 +555 442 41 61 +479 360 7 12 +235 354 85 71 +# 2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_395.jpg +553 340 53 87 +750 338 51 78 +872 338 61 74 +804 478 136 132 +981 277 43 105 +954 246 29 45 +# 2--Demonstration/2_Demonstration_Demonstrators_2_231.jpg +42 474 55 54 +95 469 11 14 +138 472 11 16 +225 466 48 58 +266 477 37 53 +494 456 113 133 +359 465 9 11 +925 431 45 47 +980 420 15 19 +917 426 32 45 +902 436 12 15 +# 2--Demonstration/2_Demonstration_Protesters_2_542.jpg +608 179 62 81 +919 97 20 22 +853 89 19 24 +13 107 64 76 +301 148 53 70 +# 2--Demonstration/2_Demonstration_Protesters_2_563.jpg +808 316 118 111 +872 221 67 65 +982 203 37 36 +819 188 31 36 +726 247 72 84 +669 258 51 59 +669 169 30 36 +633 188 26 31 +771 200 31 38 +392 199 66 84 +565 222 29 35 +498 187 33 33 +301 235 26 28 +237 215 25 31 +228 259 39 42 +100 243 87 103 +0 182 59 168 +# 
2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_200.jpg +94 31 12 18 +23 71 15 19 +4 92 12 20 +86 130 29 43 +180 223 33 37 +222 189 30 40 +16 128 19 24 +71 269 39 49 +276 221 36 44 +13 370 42 48 +285 170 29 31 +275 70 15 18 +309 98 15 20 +399 51 14 17 +387 154 26 30 +357 153 21 27 +390 226 23 36 +356 26 16 14 +463 12 9 16 +351 117 19 25 +502 7 13 16 +902 116 21 23 +844 133 18 26 +887 163 22 34 +853 326 39 47 +939 297 47 55 +951 167 35 50 +812 0 12 10 +904 1 13 13 +929 83 16 18 +924 79 11 17 +314 363 41 52 +398 478 51 44 +671 385 45 64 +791 387 46 53 +806 500 61 50 +73 90 14 22 +801 14 11 17 +894 301 39 38 +546 168 15 23 +# 2--Demonstration/2_Demonstration_Protesters_2_148.jpg +708 251 100 142 +332 144 63 83 +411 203 29 46 +608 239 56 74 +393 379 89 121 +969 169 55 115 +668 215 63 97 +153 220 33 53 +47 225 25 50 +85 250 26 24 +475 190 49 62 +554 254 19 25 +946 234 9 11 +# 2--Demonstration/2_Demonstration_Demonstrators_2_595.jpg +270 164 196 194 +664 84 84 92 +# 2--Demonstration/2_Demonstration_Political_Rally_2_884.jpg +134 228 156 200 +# 2--Demonstration/2_Demonstration_Political_Rally_2_339.jpg +619 289 33 65 +700 360 9 12 +955 434 40 47 +981 413 37 55 +886 452 33 42 +790 473 66 114 +857 420 10 12 +781 434 8 10 +778 417 6 7 +128 445 9 10 +156 433 10 13 +204 424 7 8 +222 423 8 8 +# 2--Demonstration/2_Demonstration_Demonstrators_2_12.jpg +292 564 134 121 +# 2--Demonstration/2_Demonstration_Demonstrators_2_456.jpg +2 286 112 108 +168 208 92 118 +342 30 110 144 +846 222 100 136 +460 356 78 84 +634 414 80 116 +904 480 78 97 +208 456 72 78 +# 2--Demonstration/2_Demonstration_Political_Rally_2_491.jpg +747 538 18 24 +791 525 18 25 +862 525 19 24 +633 375 21 46 +653 506 27 48 +566 386 20 38 +589 475 43 107 +544 492 28 60 +387 459 21 29 +907 543 19 20 +970 536 14 21 +# 2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_496.jpg +318 87 50 72 +217 172 47 86 +541 101 30 56 +672 164 50 48 +732 249 57 97 +940 82 49 60 +# 2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_578.jpg +71 309 20 29 +148 327 22 25 +214 358 19 22 +255 334 22 29 +324 321 23 33 +231 274 16 20 +294 222 10 12 +384 252 15 21 +423 307 24 30 +502 305 24 30 +569 309 28 31 +580 279 18 23 +586 251 14 17 +492 256 19 25 +613 222 11 14 +704 214 11 13 +668 296 29 32 +779 347 27 32 +853 336 30 33 +748 253 16 19 +809 232 15 20 +882 249 15 20 +906 225 12 12 +813 211 9 11 +697 244 15 17 +959 224 17 21 +982 231 16 21 +940 319 28 38 +314 222 10 10 +282 224 7 9 +400 239 9 10 +437 228 5 6 +549 224 7 6 +574 217 10 12 +667 191 7 8 +656 212 5 6 +673 206 4 5 +662 212 4 7 +521 192 5 5 +491 192 4 6 +886 208 7 13 +750 188 7 8 +735 188 5 7 +804 203 6 8 +0 310 23 26 +96 271 15 16 +306 245 12 17 +527 199 4 6 +513 206 6 8 +549 198 6 8 +560 192 4 6 +# 2--Demonstration/2_Demonstration_Political_Rally_2_117.jpg +882 188 8 43 +837 202 27 36 +958 189 21 28 +790 212 23 27 +808 189 13 16 +726 219 29 34 +699 209 19 25 +781 205 12 15 +790 192 11 13 +588 197 33 39 +631 206 24 29 +478 193 10 12 +535 195 8 10 +546 189 6 7 +525 191 6 8 +376 218 31 32 +416 206 20 25 +453 197 11 16 +374 184 5 9 +363 204 7 8 +398 191 6 10 +287 195 23 30 +175 198 9 13 +143 210 15 18 +123 219 14 19 +62 199 20 25 +23 200 23 27 +204 200 11 14 +# 2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_368.jpg +6 219 13 16 +35 251 13 17 +52 213 10 9 +72 235 9 11 +87 216 6 10 +93 229 11 19 +129 209 6 8 +147 220 9 15 +173 244 18 11 +201 247 20 10 +301 234 15 21 +374 226 17 20 +428 232 8 9 +460 246 14 9 +495 242 9 13 +519 223 19 23 +565 247 11 12 +607 242 25 24 +643 
234 19 23 +723 241 14 16 +734 220 31 35 +777 247 11 14 +828 243 13 13 +878 243 20 33 +952 266 6 9 +957 282 10 12 +976 261 14 18 +1015 261 6 11 +1008 263 8 12 +# 2--Demonstration/2_Demonstration_Demonstrators_2_546.jpg +153 176 16 24 +185 189 13 16 +228 154 17 22 +301 228 17 23 +324 224 11 19 +328 198 19 32 +382 186 12 20 +432 179 22 29 +479 168 17 28 +527 159 16 23 +605 150 12 31 +538 266 16 23 +442 149 13 23 +699 158 24 34 +744 141 14 24 +672 158 19 29 +801 198 27 40 +842 175 15 24 +943 168 26 36 +942 150 11 20 +900 220 12 24 +986 152 12 19 +987 167 10 20 +# 2--Demonstration/2_Demonstration_Demonstrators_2_672.jpg +73 175 27 39 +224 165 31 44 +267 168 30 38 +140 242 58 60 +453 172 27 35 +581 217 60 79 +644 181 137 169 +# 2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_32.jpg +0 1262 57 109 +0 1092 17 56 +51 1049 38 48 +25 1080 25 31 +169 1087 54 75 +307 1099 51 79 +242 1250 78 102 +290 1085 39 56 +271 1061 39 52 +457 1069 49 62 +193 1041 33 32 +149 1038 37 43 +302 1035 19 26 +338 1036 27 29 +372 1035 19 22 +409 1032 15 24 +427 1044 36 34 +293 1032 11 14 +205 1014 18 18 +257 1049 25 30 +518 1063 28 36 +528 1118 30 40 +628 1067 26 34 +692 1046 17 26 +637 1038 16 18 +523 1158 59 83 +703 1106 36 54 +727 1106 56 65 +774 1060 18 22 +748 1077 23 25 +835 1101 18 23 +866 1117 23 27 +895 1025 26 27 +938 1083 18 21 +969 1126 22 26 +964 1071 15 20 +981 1063 15 17 +997 1061 11 14 +1008 1106 16 28 +723 1056 19 17 +286 821 16 15 +355 867 14 18 +841 1057 15 16 +576 1081 22 30 +547 1061 20 29 +524 1033 17 16 +491 1020 12 18 +# 2--Demonstration/2_Demonstration_Protesters_2_493.jpg +924 165 35 41 +982 160 42 51 +878 217 39 52 +825 186 36 41 +771 133 34 39 +869 93 23 30 +917 77 25 27 +937 133 34 43 +963 70 17 21 +940 67 21 24 +821 76 28 30 +841 78 17 22 +780 87 23 25 +736 94 23 25 +952 57 12 17 +928 51 16 18 +889 51 18 21 +845 48 16 19 +823 49 14 19 +808 49 17 18 +804 21 11 14 +845 20 11 14 +787 61 15 22 +761 46 15 17 +746 50 16 18 +720 137 34 39 +709 108 30 34 +720 80 15 20 +714 62 15 19 +704 52 17 20 +693 38 9 9 +711 30 9 11 +687 27 12 15 +635 106 21 28 +651 93 24 26 +648 181 44 48 +666 161 31 41 +554 135 37 40 +602 84 26 29 +580 129 22 36 +522 118 23 35 +614 67 18 22 +659 64 22 22 +663 48 19 22 +637 53 15 20 +615 54 15 19 +585 61 20 22 +567 54 15 16 +542 77 16 20 +553 76 13 19 +521 88 20 23 +508 76 18 22 +518 69 20 21 +545 56 12 17 +550 42 9 15 +565 31 11 14 +609 43 12 15 +654 35 10 12 +656 21 9 11 +751 198 32 39 +453 138 39 39 +384 149 40 44 +409 115 24 29 +486 87 21 30 +501 99 17 27 +374 99 26 29 +341 117 32 42 +302 158 39 48 +277 119 27 30 +313 87 22 27 +339 93 23 27 +386 75 18 23 +433 71 20 24 +482 61 17 23 +501 12 8 12 +554 17 8 10 +386 51 17 19 +426 51 12 16 +421 35 11 15 +438 35 15 18 +322 58 16 17 +291 71 23 26 +264 74 25 27 +278 41 9 12 +305 48 13 19 +334 42 12 15 +547 113 33 39 +470 119 27 40 +258 101 22 24 +229 87 18 26 +247 84 15 21 +201 83 22 25 +219 67 20 23 +248 63 13 18 +255 46 10 16 +237 47 12 15 +161 83 21 24 +134 106 22 28 +188 117 21 28 +210 117 19 27 +92 113 27 33 +130 86 19 27 +81 92 21 26 +66 65 22 24 +17 102 26 25 +30 70 22 27 +12 56 19 23 +128 66 13 16 +96 56 13 15 +173 61 11 13 +176 76 15 25 +186 59 16 18 +159 47 16 13 +139 49 10 11 +149 64 14 16 +210 31 11 11 +143 38 8 8 +78 43 13 15 +371 59 14 18 +381 32 11 15 +283 30 9 12 +355 25 9 11 +508 40 12 17 +619 23 11 15 +366 49 15 17 +845 144 33 43 +0 413 80 163 +34 54 12 14 +0 50 12 16 +0 89 14 24 +4 138 23 24 +# 2--Demonstration/2_Demonstration_Protesters_2_583.jpg +799 245 65 76 +586 201 43 47 +814 129 9 12 +831 114 10 
12 +903 129 12 14 +866 118 6 8 +895 111 7 9 +928 110 8 9 +997 117 9 12 +958 109 6 9 +799 129 9 10 +793 111 6 8 +425 137 11 12 +511 121 7 9 +# 2--Demonstration/2_Demonstration_Political_Rally_2_746.jpg +581 83 76 89 +586 369 64 73 +514 361 56 75 +280 208 16 22 +355 217 11 15 +333 192 14 15 +269 196 10 15 +833 193 13 15 +966 124 41 52 +782 1355 69 90 +722 1378 32 51 +848 2255 58 79 +753 2320 31 39 +591 2297 27 37 +646 2284 26 36 +356 2330 89 95 +168 2374 91 106 +202 2255 37 54 +131 2304 38 48 +57 2335 40 58 +306 2280 26 31 +255 2170 32 42 +323 2166 30 42 +488 1349 12 11 +178 1331 9 16 +179 1311 10 13 +829 212 15 22 +# 2--Demonstration/2_Demonstration_Protesters_2_352.jpg +133 460 35 53 +135 407 41 52 +614 616 41 67 +678 530 40 64 +922 507 46 42 +928 205 14 14 +589 188 44 58 +677 232 11 13 +394 230 7 9 +149 228 11 10 +69 233 10 9 +11 224 4 6 +# 2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_79.jpg +943 601 52 60 +900 628 29 33 +862 570 18 24 +854 510 9 11 +818 523 11 13 +755 497 8 10 +729 553 8 9 +701 542 13 11 +738 553 10 14 +744 615 9 16 +673 564 13 15 +661 570 11 16 +641 576 11 15 +604 566 12 15 +648 521 11 10 +691 511 9 10 +570 588 13 14 +639 625 11 15 +552 594 12 12 +538 598 9 15 +756 636 23 38 +856 476 11 10 +# 2--Demonstration/2_Demonstration_Political_Rally_2_577.jpg +986 1300 26 29 +934 1251 31 36 +871 1238 28 27 +898 1266 25 28 +642 1187 27 30 +746 1204 25 28 +556 1155 25 27 +482 1144 25 32 +377 1128 25 29 +181 1096 29 38 +85 1081 29 36 +# 2--Demonstration/2_Demonstration_Political_Rally_2_816.jpg +510 42 96 107 +584 107 68 98 +701 501 55 57 +802 559 56 75 +942 585 64 70 +833 491 51 72 +949 495 41 39 +933 451 46 51 +754 437 37 46 +683 469 36 47 +232 432 67 82 +95 466 79 115 +57 299 60 72 +155 399 41 63 +721 434 28 34 +785 425 31 40 +687 426 26 37 +875 435 34 50 +1002 509 22 59 +791 477 33 44 +# 2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_385.jpg +3 348 8 8 +26 363 8 10 +25 379 13 12 +72 348 11 14 +20 403 15 14 +16 386 9 5 +77 405 10 15 +93 394 18 18 +101 354 8 8 +87 345 6 5 +95 341 4 6 +74 452 15 15 +85 455 22 23 +23 553 23 36 +100 521 24 28 +130 503 23 28 +213 473 24 25 +190 521 28 30 +144 436 10 14 +169 406 11 15 +179 383 11 13 +155 370 7 9 +207 396 12 15 +224 375 11 7 +208 358 10 10 +182 358 4 7 +219 362 7 8 +230 360 9 9 +265 385 9 10 +264 372 9 8 +225 353 7 5 +293 352 5 7 +282 345 8 7 +273 341 4 5 +294 365 10 10 +327 348 4 6 +296 406 9 11 +331 358 8 10 +313 405 11 10 +322 429 20 21 +337 474 20 23 +360 456 24 27 +353 406 16 15 +373 365 5 9 +384 365 8 9 +414 404 12 14 +455 393 13 12 +456 412 11 19 +437 351 6 9 +419 363 8 7 +414 345 7 6 +374 354 6 8 +432 372 7 9 +444 365 7 9 +455 370 8 11 +475 375 9 13 +465 356 7 4 +538 416 15 16 +546 388 11 13 +515 364 7 8 +494 367 7 9 +534 351 7 10 +557 444 11 20 +286 540 28 38 +397 533 29 36 +527 338 8 8 +532 333 6 6 +546 336 5 6 +556 346 5 8 +590 368 12 11 +568 418 19 17 +592 390 15 15 +621 410 13 12 +633 417 15 19 +620 344 9 10 +580 331 4 4 +634 353 7 6 +672 381 10 12 +686 336 6 7 +679 351 8 7 +507 545 29 31 +581 457 22 26 +615 498 24 26 +688 466 20 25 +679 559 31 40 +724 480 17 21 +709 348 10 10 +708 359 9 10 +714 373 10 12 +703 332 4 8 +718 335 6 8 +734 356 9 6 +729 352 5 6 +740 367 8 12 +735 424 21 20 +785 388 10 13 +764 355 8 9 +787 348 6 8 +780 349 7 9 +768 334 7 6 +803 370 13 11 +819 392 13 18 +806 432 19 19 +809 349 8 8 +817 342 7 7 +847 362 7 7 +826 351 5 8 +848 349 6 7 +876 354 7 9 +872 339 5 8 +872 392 17 16 +865 421 16 18 +889 349 5 6 +862 333 7 8 +754 474 26 32 +780 463 21 23 +864 462 13 16 +902 478 9 
23 +886 491 18 22 +903 345 6 9 +953 345 7 9 +959 355 6 10 +976 355 10 10 +985 368 11 11 +988 351 9 10 +983 343 6 7 +959 337 6 9 +1013 370 11 11 +920 421 14 15 +970 428 22 22 +939 460 15 28 +1017 337 5 5 +831 594 30 54 +217 454 25 27 +689 408 12 18 +667 400 10 17 +699 380 12 18 +739 395 10 16 +# 2--Demonstration/2_Demonstration_Political_Rally_2_752.jpg +810 196 182 269 +644 169 181 234 +488 153 114 151 +340 138 90 129 +15 398 48 63 +98 392 27 43 +# 2--Demonstration/2_Demonstration_Protesters_2_174.jpg +657 47 118 157 +509 183 57 70 +991 202 27 27 +441 279 35 40 +42 370 40 48 +18 441 25 31 +0 461 28 33 +235 256 65 69 +585 248 70 78 +# 2--Demonstration/2_Demonstration_Political_Rally_2_891.jpg +488 23 164 205 +306 270 136 183 +181 385 47 56 +233 291 32 45 +31 390 50 53 +31 263 22 37 +58 265 16 20 +158 259 12 17 +# 2--Demonstration/2_Demonstration_Protesters_2_291.jpg +435 619 301 394 +3 904 196 323 +# 2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_360.jpg +39 541 10 8 +31 565 30 27 +40 594 29 24 +74 572 31 28 +102 553 15 13 +39 624 35 44 +119 606 23 29 +143 605 34 33 +144 575 22 23 +88 551 10 10 +61 549 13 15 +141 551 15 13 +54 637 59 65 +195 636 48 46 +248 563 24 27 +327 650 44 33 +322 631 23 27 +344 589 21 38 +319 567 20 23 +338 558 16 21 +373 561 13 11 +367 582 15 13 +514 606 43 46 +433 604 24 42 +478 580 29 35 +500 558 21 23 +436 580 13 13 +433 558 11 11 +418 547 11 8 +380 546 12 11 +400 538 7 6 +276 536 14 20 +279 568 7 10 +369 536 6 5 +310 551 9 10 +518 531 11 13 +536 540 22 26 +555 574 24 13 +505 535 7 10 +421 534 5 8 +563 649 47 33 +569 532 7 7 +581 557 21 21 +577 542 11 9 +615 551 21 19 +614 538 9 8 +599 537 10 11 +619 598 25 33 +649 622 52 52 +647 575 17 12 +636 545 10 18 +656 541 10 12 +665 546 17 18 +681 567 28 25 +673 532 7 5 +650 537 6 6 +756 573 16 21 +750 535 10 9 +715 537 18 17 +694 539 14 10 +685 532 7 7 +779 587 25 30 +781 553 22 22 +764 535 9 8 +781 530 8 8 +787 630 22 33 +857 599 66 74 +956 586 56 53 +979 569 30 32 +799 532 15 17 +855 538 11 18 +905 521 7 8 +961 534 11 16 +817 524 7 10 +867 526 12 14 +878 516 8 7 +929 575 16 35 +849 377 23 24 +912 358 10 11 +888 377 27 27 +932 350 15 16 +# 2--Demonstration/2_Demonstration_Political_Rally_2_57.jpg +895 252 32 45 +928 215 22 32 +945 169 28 35 +826 123 37 42 +917 169 26 37 +783 144 30 44 +757 143 27 39 +661 184 35 37 +595 132 25 47 +527 152 31 39 +493 166 32 42 +465 154 23 33 +445 80 20 25 +360 142 33 39 +382 123 33 40 +281 147 28 35 +276 201 30 44 +215 145 32 46 +145 132 33 40 +79 140 30 44 +101 116 30 40 +11 128 18 36 +0 122 14 44 +107 156 21 31 +486 312 48 60 +1019 141 5 32 +256 131 21 31 +642 146 20 34 +587 147 19 35 +275 141 21 34 +481 135 26 39 +579 129 14 15 +# 2--Demonstration/2_Demonstration_Demonstrators_2_330.jpg +484 412 110 148 +700 284 102 132 +642 346 56 82 +440 340 58 80 +# 2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_76.jpg +18 372 124 116 +166 314 118 150 +322 380 82 108 +572 220 108 174 +706 200 110 182 +# 2--Demonstration/2_Demonstration_Demonstrators_2_268.jpg +75 288 34 34 +0 311 6 6 +9 308 5 5 +31 266 16 18 +70 257 21 26 +103 269 9 12 +136 272 10 14 +160 265 16 17 +150 269 8 9 +166 286 17 22 +241 263 11 14 +113 290 10 12 +258 262 4 5 +269 262 26 25 +292 266 11 17 +362 264 23 27 +410 249 8 10 +426 296 4 5 +432 289 3 4 +447 261 11 16 +508 267 8 13 +574 283 26 30 +602 258 9 10 +538 252 12 14 +525 257 5 6 +681 246 22 26 +673 256 5 7 +645 250 4 8 +641 295 6 6 +645 298 3 5 +568 258 3 4 +628 297 4 7 +706 254 6 5 +784 258 4 6 +839 278 24 28 +748 312 5 8 +772 303 4 5 +730 259 5 6 
+771 262 5 6 +782 260 4 5 +766 259 3 5 +971 266 46 43 +947 264 18 22 +880 257 18 15 +821 260 5 5 +723 360 22 29 +928 255 7 9 +916 253 8 10 +315 264 5 7 +220 273 11 11 +222 252 10 14 +15 330 11 12 +# 2--Demonstration/2_Demonstration_Political_Rally_2_823.jpg +573 363 51 60 +264 143 68 82 +690 251 17 22 +705 249 17 25 +# 2--Demonstration/2_Demonstration_Political_Rally_2_545.jpg +906 143 77 106 +924 97 76 83 +784 147 34 48 +713 145 48 49 +675 196 49 53 +658 154 43 47 +590 143 35 37 +534 164 40 41 +480 147 52 58 +451 109 60 68 +328 83 66 68 +326 133 44 46 +290 139 32 37 +265 162 21 23 +190 90 56 59 +111 77 71 86 +588 72 31 33 +569 171 23 23 +# 2--Demonstration/2_Demonstration_Political_Rally_2_609.jpg +963 617 18 28 +805 544 26 29 +766 567 24 30 +932 572 20 28 +888 596 22 27 +886 542 23 29 +852 559 19 30 +844 520 22 24 +921 493 18 22 +981 490 19 23 +846 487 19 22 +884 660 31 20 +931 646 25 28 +769 497 18 16 +885 461 19 18 +961 451 16 18 +988 468 16 16 +1014 477 10 23 +829 476 18 18 +901 476 16 14 +936 469 16 15 +778 466 21 20 +753 453 18 17 +714 463 18 20 +737 478 19 20 +737 405 15 16 +787 440 17 17 +689 413 16 18 +633 393 18 19 +675 404 15 16 +710 407 12 14 +770 412 14 15 +749 373 14 16 +719 429 14 15 +671 458 21 22 +614 434 17 19 +641 436 17 18 +736 619 30 33 +768 599 21 29 +742 540 21 25 +725 518 23 21 +535 343 11 11 +508 341 14 13 +491 390 13 15 +460 366 13 15 +418 395 14 14 +447 345 10 11 +440 405 14 15 +408 420 13 14 +384 403 12 14 +436 436 15 15 +587 386 15 15 +554 364 13 14 +607 326 13 14 +531 404 18 19 +962 413 20 19 +905 433 19 21 +939 377 18 19 +997 352 15 17 +997 379 17 18 +966 350 16 16 +813 332 12 14 +759 326 13 14 +806 361 14 15 +800 299 12 15 +804 281 11 12 +769 300 12 14 +717 301 11 13 +820 313 11 11 +391 366 11 12 +413 360 12 11 +319 374 14 14 +342 362 9 12 +391 335 11 12 +302 363 12 13 +279 386 13 15 +641 351 13 14 +663 324 12 14 +263 370 11 12 +256 353 11 12 +360 388 11 15 +325 336 12 13 +111 250 93 111 +158 152 26 30 +350 327 10 10 +330 294 10 10 +351 287 8 9 +405 296 11 15 +361 279 8 8 +712 333 9 10 +735 309 11 12 +796 284 11 13 +816 407 15 14 +878 362 13 12 +786 389 16 10 +851 397 13 13 +867 419 13 15 +955 477 16 18 +496 306 11 13 +474 324 10 13 +487 343 14 16 +288 358 12 13 +471 346 11 13 +301 421 13 17 +542 321 12 17 +676 367 12 16 +570 346 10 13 +582 362 12 15 +553 350 11 12 +556 322 12 15 +762 530 19 22 +911 336 13 16 +998 283 12 14 +959 290 9 13 +976 310 11 15 +303 480 18 19 +287 329 11 14 +413 382 15 17 +453 387 13 17 +479 391 14 15 +598 321 11 15 +411 441 17 21 +454 444 15 16 +456 284 10 14 +473 276 9 10 +846 609 18 28 +978 572 22 21 +957 542 22 23 +827 434 13 15 +884 388 16 19 +706 385 15 18 +865 477 17 18 +331 408 10 14 +615 391 13 17 +516 397 11 16 +363 311 9 11 +613 368 11 13 +558 440 18 21 +755 415 10 12 +# 2--Demonstration/2_Demonstration_Political_Rally_2_172.jpg +390 256 355 522 +# 2--Demonstration/2_Demonstration_Political_Rally_2_391.jpg +673 265 116 141 +583 360 54 63 +589 393 37 50 +983 222 41 100 +867 656 157 112 +413 617 150 147 +314 104 94 107 +21 109 81 122 +536 388 24 35 +# 2--Demonstration/2_Demonstration_Protesters_2_714.jpg +188 406 64 94 +396 476 60 72 +626 376 62 86 +# 2--Demonstration/2_Demonstration_Political_Rally_2_960.jpg +706 240 63 84 +543 288 50 53 +633 321 26 27 +591 312 26 37 +585 786 37 49 +206 209 144 179 +111 271 52 61 +396 275 60 67 +0 249 62 124 +# 20--Family_Group/20_Family_Group_Family_Group_20_108.jpg +254 66 66 92 +104 320 72 100 +282 326 106 84 +456 220 62 88 +684 190 70 102 +820 116 72 98 +726 390 76 100 +# 
20--Family_Group/20_Family_Group_Family_Group_20_360.jpg +389 228 58 61 +486 308 53 67 +532 264 66 79 +732 272 69 89 +# 20--Family_Group/20_Family_Group_Family_Group_20_326.jpg +870 339 23 23 +422 378 21 24 +148 219 34 34 +220 223 28 35 +345 236 25 34 +394 250 25 31 +437 240 26 29 +452 280 25 29 +515 253 20 25 +631 200 37 40 +701 229 34 36 +816 229 24 29 +859 204 29 33 +805 291 25 27 +# 20--Family_Group/20_Family_Group_Family_Group_20_1003.jpg +192 66 132 152 +336 88 142 200 +480 102 132 212 +628 148 100 148 +# 20--Family_Group/20_Family_Group_Family_Group_20_760.jpg +278 278 74 100 +458 176 74 102 +638 136 76 104 +# 20--Family_Group/20_Family_Group_Family_Group_20_579.jpg +142 166 68 92 +338 172 54 88 +454 178 58 88 +556 204 74 100 +650 128 60 102 +776 130 64 86 +862 186 74 104 +236 452 68 88 +488 412 58 98 +726 398 66 86 +# 20--Family_Group/20_Family_Group_Family_Group_20_483.jpg +121 427 77 98 +204 318 65 92 +348 154 81 119 +687 237 75 106 +812 250 83 98 +512 156 81 114 +# 20--Family_Group/20_Family_Group_Family_Group_20_1037.jpg +0 170 83 145 +140 121 76 118 +219 43 73 112 +343 45 70 100 +568 48 64 84 +759 104 67 77 +968 108 56 129 +913 137 72 98 +# 20--Family_Group/20_Family_Group_Family_Group_20_672.jpg +110 951 24 27 +351 1028 28 32 +254 1025 25 34 +163 1032 28 34 +869 313 43 50 +801 290 39 47 +755 352 40 47 +688 280 44 44 +748 234 37 39 +682 243 41 42 +641 240 35 44 +609 217 35 38 +679 187 24 27 +714 186 29 40 +663 144 34 37 +564 276 36 44 +587 344 37 45 +476 270 38 49 +508 206 36 41 +538 213 28 39 +436 158 34 40 +423 270 40 41 +403 231 29 38 +362 230 31 31 +359 279 37 41 +370 349 42 48 +277 296 36 43 +310 258 33 39 +284 216 31 35 +305 147 32 39 +214 311 41 46 +191 229 35 42 +144 276 34 42 +103 275 42 49 +238 492 38 51 +168 498 37 38 +109 493 37 44 +573 490 40 51 +854 849 27 34 +827 847 25 30 +810 903 24 32 +743 894 23 30 +772 885 15 24 +735 846 25 32 +676 870 29 30 +652 853 23 31 +608 848 25 31 +568 880 29 28 +555 829 25 29 +818 1042 27 33 +739 1048 27 31 +644 1035 28 37 +506 949 19 25 +506 1016 33 32 +449 1028 28 36 +496 842 25 32 +458 831 22 33 +429 843 25 32 +396 876 23 30 +355 850 27 34 +306 826 26 30 +309 870 29 30 +263 876 27 31 +233 867 21 29 +205 865 28 31 +134 874 22 29 +88 864 28 28 +# 20--Family_Group/20_Family_Group_Family_Group_20_72.jpg +738 150 42 50 +581 274 48 59 +519 226 51 52 +475 174 53 53 +424 265 48 51 +387 323 44 52 +270 306 35 29 +328 204 43 50 +283 205 24 37 +211 230 42 59 +706 218 44 51 +# 20--Family_Group/20_Family_Group_Family_Group_20_282.jpg +60 356 35 51 +139 339 40 38 +272 301 31 41 +340 284 27 28 +389 271 22 32 +465 267 21 27 +584 300 24 20 +700 265 27 35 +814 301 32 28 +246 376 20 51 +# 20--Family_Group/20_Family_Group_Family_Group_20_648.jpg +106 498 118 146 +326 484 94 136 +452 474 100 142 +586 284 92 146 +726 178 124 182 +# 20--Family_Group/20_Family_Group_Family_Group_20_843.jpg +141 329 180 255 +520 166 161 257 +371 758 174 244 +753 675 196 244 +581 1088 149 188 +# 20--Family_Group/20_Family_Group_Family_Group_20_696.jpg +111 391 83 108 +270 283 76 127 +369 410 99 105 +483 324 73 111 +642 308 73 111 +798 328 80 105 +560 572 76 89 +# 20--Family_Group/20_Family_Group_Family_Group_20_739.jpg +372 232 134 192 +528 204 124 176 +# 20--Family_Group/20_Family_Group_Family_Group_20_339.jpg +30 96 44 47 +145 168 40 35 +239 140 41 41 +363 133 38 42 +449 177 31 34 +521 193 34 38 +618 186 31 34 +698 170 35 36 +767 153 38 41 +855 110 44 40 +666 217 22 25 +834 178 24 25 +# 20--Family_Group/20_Family_Group_Family_Group_20_227.jpg +300 206 122 182 +638 172 
132 206 +476 478 106 124 +# 20--Family_Group/20_Family_Group_Family_Group_20_427.jpg +824 93 50 65 +734 266 41 45 +578 224 41 43 +461 354 38 39 +363 169 41 47 +244 178 41 46 +94 160 48 54 +# 20--Family_Group/20_Family_Group_Family_Group_20_33.jpg +87 335 36 37 +123 299 33 41 +166 299 33 37 +219 337 30 40 +251 310 33 37 +327 289 27 34 +361 335 27 34 +437 309 27 32 +470 319 28 37 +550 343 25 28 +553 378 37 46 +262 401 35 42 +352 449 35 41 +456 400 30 37 +592 412 35 44 +702 405 31 39 +635 342 26 29 +663 310 27 33 +745 389 27 34 +772 347 28 30 +830 346 33 38 +791 410 39 42 +898 336 34 39 +# 20--Family_Group/20_Family_Group_Family_Group_20_101.jpg +38 66 41 49 +135 93 37 41 +229 66 36 44 +384 30 36 47 +492 52 37 46 +555 76 37 39 +637 77 38 44 +732 82 43 46 +845 101 40 48 +971 60 47 56 +894 293 51 60 +784 274 43 56 +667 260 42 58 +562 247 39 45 +203 213 52 62 +72 271 44 51 +# 20--Family_Group/20_Family_Group_Family_Group_20_90.jpg +692 620 92 138 +450 548 76 92 +164 658 62 46 +442 28 92 164 +# 20--Family_Group/20_Family_Group_Family_Group_20_453.jpg +450 444 133 150 +# 20--Family_Group/20_Family_Group_Family_Group_20_775.jpg +71 335 32 37 +149 367 25 29 +342 375 22 31 +629 308 33 34 +423 395 212 255 +291 607 199 227 +851 457 22 28 +# 20--Family_Group/20_Family_Group_Family_Group_20_636.jpg +193 131 14 23 +268 111 17 23 +301 92 15 18 +348 103 13 17 +390 112 15 14 +431 120 13 15 +465 105 15 16 +509 107 15 16 +554 99 14 13 +621 110 16 16 +664 120 14 17 +732 109 15 17 +713 88 14 16 +775 128 11 21 +805 84 15 15 +839 81 13 24 +886 98 20 20 +947 68 17 20 +# 20--Family_Group/20_Family_Group_Family_Group_20_193.jpg +316 800 115 132 +617 858 120 155 +562 843 80 161 +# 20--Family_Group/20_Family_Group_Family_Group_20_277.jpg +156 192 55 57 +271 130 46 58 +385 214 44 45 +587 256 44 52 +746 157 66 77 +1000 182 24 44 +# 20--Family_Group/20_Family_Group_Family_Group_20_849.jpg +382 484 54 76 +524 528 70 72 +# 20--Family_Group/20_Family_Group_Family_Group_20_411.jpg +365 73 58 70 +527 99 65 68 +675 87 65 66 +763 409 50 53 +600 393 54 56 +481 399 57 57 +321 414 57 55 +222 404 50 57 +# 20--Family_Group/20_Family_Group_Family_Group_20_22.jpg +760 270 33 38 +714 285 31 35 +640 292 28 38 +620 234 32 35 +544 245 25 29 +468 229 31 32 +409 260 23 27 +345 256 28 33 +310 247 30 35 +248 283 31 32 +243 324 22 22 +178 291 29 34 +610 526 22 24 +# 20--Family_Group/20_Family_Group_Family_Group_20_750.jpg +351 75 168 252 +563 200 171 247 +# 20--Family_Group/20_Family_Group_Family_Group_20_599.jpg +391 272 89 93 +678 490 85 98 +640 644 72 67 +410 675 72 74 +216 687 77 88 +# 20--Family_Group/20_Family_Group_Family_Group_20_412.jpg +668 284 53 75 +411 175 67 85 +255 163 66 87 +# 20--Family_Group/20_Family_Group_Family_Group_20_100.jpg +162 150 70 90 +306 156 72 92 +494 172 86 100 +688 194 82 106 +874 36 102 134 +# 20--Family_Group/20_Family_Group_Family_Group_20_540.jpg +47 280 37 43 +129 235 36 43 +31 236 35 34 +190 251 35 43 +252 238 35 42 +318 271 28 37 +331 244 29 34 +383 330 28 38 +394 241 30 38 +458 252 28 37 +499 227 32 39 +572 240 30 34 +655 218 29 37 +707 209 31 38 +551 318 28 27 +574 349 27 32 +576 492 33 36 +744 556 32 32 +1 542 33 45 +139 554 42 43 +278 533 41 46 +339 557 33 34 +639 579 33 27 +460 514 34 34 +757 256 29 34 +779 217 32 34 +812 229 33 38 +895 234 35 41 +971 250 36 35 +# 20--Family_Group/20_Family_Group_Family_Group_20_64.jpg +48 142 56 54 +276 31 41 44 +249 179 41 43 +318 165 45 50 +489 175 50 55 +655 129 50 58 +340 287 50 50 +464 325 59 82 +308 331 66 74 +176 437 55 61 +173 312 51 56 +150 235 46 42 +5 
377 62 61 +617 395 70 90 +758 494 75 76 +859 490 41 39 +720 340 58 59 +541 171 41 48 +613 327 48 46 +754 286 53 53 +800 60 43 45 +984 211 22 36 +888 315 50 55 +# 20--Family_Group/20_Family_Group_Family_Group_20_255.jpg +604 436 59 55 +489 419 63 68 +308 446 54 62 +140 451 64 67 +199 346 45 43 +370 311 46 42 +516 132 49 55 +660 237 46 50 +787 226 51 52 +891 379 64 62 +745 389 59 68 +# 20--Family_Group/20_Family_Group_Family_Group_20_87.jpg +465 384 90 132 +321 435 72 78 +198 517 60 81 +628 507 78 111 +730 541 84 105 +# 20--Family_Group/20_Family_Group_Family_Group_20_799.jpg +902 492 86 101 +545 176 99 132 +846 363 14 16 +352 348 7 7 +433 350 8 10 +390 362 27 33 +516 508 109 139 +114 633 80 93 +60 314 10 14 +305 346 4 6 +253 344 6 9 +234 335 6 10 +190 333 5 7 +170 335 6 8 +85 169 15 22 +93 168 18 25 +# 20--Family_Group/20_Family_Group_Family_Group_20_663.jpg +781 261 37 48 +653 194 41 43 +526 188 50 54 +543 328 43 48 +481 300 33 36 +338 245 44 56 +235 294 45 51 +# 20--Family_Group/20_Family_Group_Family_Group_20_272.jpg +116 41 47 56 +276 30 49 59 +384 93 42 58 +481 53 42 63 +548 37 48 65 +654 86 43 58 +727 26 41 62 +832 47 44 56 +# 20--Family_Group/20_Family_Group_Family_Group_20_730.jpg +128 136 196 248 +398 178 180 260 +542 210 200 274 +# 20--Family_Group/20_Family_Group_Family_Group_20_374.jpg +91 331 322 456 +483 27 270 362 +# 20--Family_Group/20_Family_Group_Family_Group_20_1026.jpg +220 235 75 75 +368 285 60 88 +591 250 55 68 +678 233 73 115 +473 483 65 88 +203 453 63 85 +# 20--Family_Group/20_Family_Group_Family_Group_20_387.jpg +283 346 118 168 +491 263 100 138 +463 456 108 150 +709 328 108 128 +588 661 108 130 +# 20--Family_Group/20_Family_Group_Family_Group_20_556.jpg +127 246 50 60 +183 241 39 47 +238 301 40 37 +270 331 46 47 +351 320 32 31 +354 267 46 38 +431 261 44 55 +503 284 39 43 +623 273 42 49 +673 259 36 43 +779 270 33 31 +744 239 41 46 +931 245 44 43 +414 365 47 43 +605 439 41 42 +703 501 40 42 +729 360 42 47 +211 498 44 45 +243 561 46 47 +120 316 47 55 +# 20--Family_Group/20_Family_Group_Family_Group_20_294.jpg +386 316 30 32 +465 307 30 34 +382 225 31 43 +396 187 31 36 +484 210 33 39 +534 182 31 33 +584 229 28 34 +654 221 30 33 +698 199 31 34 +563 280 32 34 +677 262 32 34 +550 364 29 32 +652 366 29 32 +743 250 30 38 +790 193 32 42 +775 278 35 34 +842 257 31 36 +174 183 31 39 +116 269 33 28 +214 238 38 39 +279 221 34 41 +135 392 36 44 +238 338 30 32 +350 233 37 45 +# 20--Family_Group/20_Family_Group_Family_Group_20_914.jpg +859 175 31 32 +815 164 29 33 +628 230 25 29 +604 375 24 24 +526 290 25 33 +589 232 25 31 +460 206 28 35 +387 317 28 38 +263 260 23 28 +229 199 29 30 +160 121 24 29 +147 173 24 26 +# 20--Family_Group/20_Family_Group_Family_Group_20_835.jpg +741 275 37 40 +180 272 33 34 +72 336 35 54 +3 287 35 39 +839 308 21 45 +799 121 14 16 +880 122 30 36 +# 20--Family_Group/20_Family_Group_Family_Group_20_447.jpg +394 512 166 204 +196 310 304 242 +522 372 252 256 +170 106 268 192 +438 32 208 300 +642 200 198 188 +# 20--Family_Group/20_Family_Group_Family_Group_20_318.jpg +180 160 72 104 +424 44 78 108 +750 272 66 94 +872 200 106 134 +# 20--Family_Group/20_Family_Group_Family_Group_20_493.jpg +18 188 45 48 +222 178 33 33 +248 140 42 49 +220 310 52 50 +379 124 48 55 +566 105 40 61 +619 150 48 56 +754 162 46 53 +877 123 29 57 +972 182 37 44 +586 463 23 50 +# 20--Family_Group/20_Family_Group_Family_Group_20_109.jpg +179 29 60 84 +291 160 63 79 +404 92 64 87 +450 150 50 62 +# 20--Family_Group/20_Family_Group_Family_Group_20_27.jpg +909 335 65 84 +786 187 76 104 +718 230 
59 82 +595 315 72 96 +342 392 53 82 +302 152 66 96 +178 136 79 108 +522 41 58 72 +# 20--Family_Group/20_Family_Group_Family_Group_20_759.jpg +103 259 49 56 +308 295 50 55 +445 312 44 47 +573 305 46 48 +602 386 38 40 +759 236 51 56 +798 107 44 48 +657 125 43 50 +502 89 38 46 +359 73 46 49 +222 165 39 45 +97 113 47 48 +# 20--Family_Group/20_Family_Group_Family_Group_20_1015.jpg +222 363 26 28 +382 279 71 105 +516 351 88 101 +647 333 98 100 +987 243 14 22 +# 20--Family_Group/20_Family_Group_Family_Group_20_544.jpg +147 113 66 82 +256 58 75 83 +385 133 64 79 +558 90 67 90 +703 63 73 88 +851 109 69 90 +695 257 71 82 +637 325 60 68 +# 20--Family_Group/20_Family_Group_Family_Group_20_702.jpg +754 344 60 64 +658 248 72 87 +518 399 54 57 +275 239 75 85 +# 20--Family_Group/20_Family_Group_Family_Group_20_62.jpg +106 112 39 40 +189 170 41 43 +240 129 33 40 +305 180 38 41 +335 135 34 41 +366 210 28 27 +416 141 34 42 +447 170 35 43 +540 173 31 40 +517 137 31 41 +443 251 37 39 +627 173 35 42 +624 132 31 40 +717 142 33 43 +724 241 27 27 +763 190 36 43 +838 155 35 41 +866 207 39 48 +645 311 33 35 +559 312 34 39 +631 389 29 34 +241 222 38 45 +171 295 25 25 +# 21--Festival/21_Festival_Festival_21_491.jpg +444 523 207 255 +# 21--Festival/21_Festival_Festival_21_943.jpg +978 210 5 8 +942 200 5 7 +881 178 9 16 +829 196 10 13 +805 192 9 13 +758 188 9 11 +740 191 8 13 +709 198 10 12 +570 195 5 9 +563 189 7 8 +551 185 6 11 +541 192 8 10 +517 190 9 11 +495 195 6 7 +421 193 7 11 +390 188 5 10 +355 181 9 11 +324 195 7 11 +229 188 7 10 +190 197 10 12 +159 201 9 10 +171 195 9 8 +114 196 9 14 +102 180 10 15 +68 198 11 12 +294 193 16 17 +242 245 7 9 +488 194 5 9 +# 21--Festival/21_Festival_Festival_21_22.jpg +30 317 9 10 +266 361 10 14 +304 317 8 9 +261 272 7 7 +322 266 7 8 +350 380 14 15 +209 492 37 50 +312 491 25 39 +332 439 20 22 +353 448 16 24 +394 272 6 6 +411 269 5 6 +451 268 7 7 +526 348 11 14 +881 456 15 23 +914 399 12 18 +910 420 12 17 +932 421 19 21 +886 376 9 12 +972 496 19 23 +1006 484 15 22 +709 195 6 6 +625 207 8 7 +429 204 6 8 +579 212 6 5 +276 425 15 24 +197 276 10 12 +# 21--Festival/21_Festival_Festival_21_193.jpg +206 440 33 43 +32 493 28 62 +172 390 15 25 +130 377 14 18 +207 382 16 24 +268 388 11 18 +277 378 14 22 +376 403 18 28 +394 450 17 47 +550 405 9 23 +622 436 26 43 +716 457 23 38 +818 427 22 31 +793 381 9 16 +801 347 11 15 +955 358 14 14 +990 404 14 24 +962 328 8 10 +# 21--Festival/21_Festival_Festival_21_785.jpg +328 422 277 337 +# 21--Festival/21_Festival_Festival_21_225.jpg +383 446 46 62 +667 215 46 60 +621 466 54 67 +747 281 48 63 +844 307 55 66 +822 281 7 10 +965 232 9 14 +423 233 9 11 +# 21--Festival/21_Festival_Festival_21_601.jpg +16 330 18 21 +304 337 26 24 +403 353 26 26 +512 368 16 21 +645 350 16 21 +768 336 16 25 +871 336 17 22 +933 321 24 29 +# 21--Festival/21_Festival_Festival_21_741.jpg +354 363 123 213 +# 21--Festival/21_Festival_Festival_21_777.jpg +497 477 15 16 +575 473 18 18 +670 485 19 20 +900 519 19 21 +# 21--Festival/21_Festival_Festival_21_664.jpg +211 182 255 390 +615 238 279 404 +# 21--Festival/21_Festival_Festival_21_373.jpg +79 183 37 51 +403 246 24 22 +398 203 17 22 +550 224 13 14 +598 273 26 25 +646 196 8 10 +830 171 18 21 +848 187 12 16 +871 182 17 20 +858 220 21 22 +935 170 19 19 +686 223 15 13 +566 192 9 11 +727 188 8 11 +747 191 9 11 +803 186 7 10 +# 21--Festival/21_Festival_Festival_21_100.jpg +668 84 142 190 +442 432 120 168 +108 294 62 84 +# 21--Festival/21_Festival_Festival_21_797.jpg +963 146 15 42 +836 166 40 47 +778 285 43 62 +743 219 28 37 +663 69 31 
45 +503 265 46 60 +641 138 105 127 +418 223 53 60 +401 194 24 42 +286 202 26 37 +291 241 50 57 +313 325 55 63 +186 84 19 37 +173 293 41 50 +95 340 51 54 +85 248 48 57 +115 273 30 40 +205 225 16 34 +44 186 19 44 +# 21--Festival/21_Festival_Festival_21_513.jpg +214 118 75 102 +511 111 74 92 +760 114 39 90 +62 166 31 44 +370 152 13 30 +648 212 27 38 +986 368 18 23 +# 21--Festival/21_Festival_Festival_21_811.jpg +877 271 32 30 +751 258 28 32 +595 266 34 32 +233 400 32 22 +95 351 35 36 +559 363 13 19 +662 366 18 16 +827 333 11 16 +488 274 30 30 +# 21--Festival/21_Festival_Festival_21_340.jpg +170 248 7 9 +193 243 8 10 +214 226 6 8 +207 259 11 16 +259 266 13 11 +233 240 9 12 +269 281 10 14 +295 263 14 17 +289 235 8 11 +304 229 7 11 +336 228 7 9 +361 266 12 15 +334 255 11 15 +351 247 8 13 +381 238 7 11 +400 254 10 12 +429 287 13 18 +403 325 15 24 +341 290 11 12 +417 235 8 11 +434 226 7 11 +458 235 6 12 +482 234 8 10 +471 249 9 9 +462 265 12 11 +566 266 10 14 +594 257 9 13 +621 260 11 9 +660 332 13 18 +661 279 11 14 +703 291 11 18 +709 240 7 10 +742 246 7 10 +633 221 5 6 +793 210 60 107 +784 278 5 7 +774 302 6 7 +763 260 5 7 +774 270 3 4 +16 358 10 39 +17 348 18 26 +56 356 17 28 +73 348 18 24 +96 326 16 14 +54 298 12 16 +76 275 8 11 +98 283 11 13 +124 272 9 14 +141 259 9 12 +125 261 7 8 +171 265 10 14 +179 286 10 12 +187 284 11 13 +165 330 15 16 +170 372 20 27 +223 337 16 26 +236 335 14 26 +253 356 15 26 +162 248 6 10 +502 170 5 6 +664 239 7 10 +767 281 4 7 +657 259 7 7 +479 264 8 10 +447 256 8 13 +289 295 13 18 +219 299 9 15 +# 21--Festival/21_Festival_Festival_21_354.jpg +272 142 455 561 +# 21--Festival/21_Festival_Festival_21_881.jpg +519 49 220 262 +# 21--Festival/21_Festival_Festival_21_201.jpg +40 376 34 49 +217 425 21 28 +323 401 32 41 +292 345 18 22 +391 369 27 31 +437 339 17 28 +398 338 14 27 +502 349 23 28 +553 312 20 28 +610 330 18 20 +642 295 16 24 +645 326 18 24 +680 296 26 38 +717 381 24 28 +770 310 38 47 +905 309 15 23 +918 281 17 22 +942 302 22 34 +277 363 13 19 +# 21--Festival/21_Festival_Festival_21_395.jpg +128 514 15 16 +308 425 31 36 +73 568 19 26 +407 267 32 38 +548 128 29 37 +514 280 41 52 +687 267 33 42 +560 398 39 54 +763 417 34 40 +665 504 15 16 +419 592 15 13 +921 479 13 17 +978 505 17 24 +1004 524 14 14 +959 491 15 21 +996 504 15 21 +0 496 16 26 +54 491 13 17 +20 466 12 16 +# 21--Festival/21_Festival_Festival_21_414.jpg +595 330 44 92 +285 341 24 47 +167 325 31 32 +39 355 46 42 +135 334 43 42 +# 21--Festival/21_Festival_Festival_21_604.jpg +660 494 8 16 +764 556 16 22 +698 579 9 13 +9 544 18 23 +109 474 10 13 +64 478 11 15 +46 408 12 16 +103 410 8 11 +419 445 8 10 +300 596 11 16 +397 440 7 8 +392 429 8 9 +400 482 7 9 +412 465 6 9 +244 430 6 9 +152 565 16 19 +23 449 9 12 +330 429 6 11 +337 425 9 13 +368 480 9 13 +376 475 9 10 +479 464 7 14 +34 566 9 23 +56 563 17 23 +149 424 12 13 +200 495 13 16 +191 512 9 14 +199 541 12 15 +277 623 13 20 +731 584 15 20 +792 535 15 16 +746 608 11 20 +140 493 13 16 +133 550 13 15 +256 514 9 13 +289 504 9 13 +344 497 9 13 +295 470 7 12 +277 480 7 10 +344 471 8 10 +307 446 12 14 +354 451 12 12 +392 484 8 11 +221 512 9 15 +295 497 10 15 +860 511 12 16 +705 479 7 10 +764 447 7 11 +842 449 7 10 +896 459 10 13 +798 649 16 21 +899 658 16 21 +921 537 13 12 +974 503 9 13 +951 523 10 18 +872 463 8 11 +864 495 9 14 +895 486 9 14 +960 534 10 20 +959 643 15 24 +1002 454 8 11 +987 474 10 14 +907 489 9 12 +54 448 9 16 +219 454 9 15 +206 436 9 12 +245 464 9 12 +177 493 9 11 +629 444 10 13 +662 458 11 13 +684 460 9 12 +689 507 11 15 +715 511 11 13 +764 
556 14 20 +717 581 13 19 +8 478 8 16 +10 476 10 15 +63 506 8 14 +56 507 9 15 +27 471 13 7 +83 477 10 11 +167 557 9 17 +335 523 14 14 +243 558 9 20 +247 584 13 17 +263 559 10 16 +262 452 7 9 +506 442 7 11 +26 669 24 10 +20 632 13 20 +71 590 16 24 +134 588 14 18 +127 618 9 17 +27 495 13 16 +39 481 12 13 +46 469 10 12 +209 596 14 19 +383 502 10 10 +255 483 9 14 +4 412 8 11 +34 450 6 11 +83 414 9 12 +192 431 14 14 +219 454 9 15 +206 436 9 13 +248 464 7 12 +162 628 23 24 +203 626 26 28 +111 413 9 10 +925 588 13 21 +104 538 12 17 +960 441 7 11 +699 551 10 15 +235 536 14 13 +139 475 11 12 +157 451 8 12 +# 21--Festival/21_Festival_Festival_21_462.jpg +413 149 119 176 +# 21--Festival/21_Festival_Festival_21_378.jpg +62 286 10 13 +89 271 13 15 +30 210 10 14 +47 215 10 11 +61 210 7 9 +80 215 10 9 +98 210 7 8 +122 209 7 10 +127 231 9 11 +137 211 9 12 +149 211 6 9 +172 214 10 14 +192 214 10 11 +210 204 8 11 +225 210 8 8 +233 224 9 11 +255 207 8 8 +259 218 8 9 +283 204 9 11 +288 211 11 14 +300 208 10 11 +336 209 7 8 +351 217 9 8 +364 209 8 9 +367 217 9 13 +376 207 8 10 +383 220 9 12 +310 264 9 9 +396 223 7 7 +404 219 7 11 +446 213 9 13 +426 277 11 9 +478 215 10 12 +486 222 6 8 +493 217 7 10 +519 219 9 11 +546 228 9 11 +581 216 8 10 +597 226 7 10 +630 224 9 10 +636 244 21 25 +681 228 9 11 +682 276 8 9 +691 216 9 10 +708 223 9 10 +718 220 10 11 +763 295 10 12 +743 214 11 14 +780 215 8 13 +821 219 9 13 +838 227 10 11 +859 222 10 12 +872 226 10 13 +896 226 9 10 +913 230 10 16 +947 226 8 11 +968 219 11 12 +996 238 11 11 +837 275 14 11 +896 276 22 25 +3 193 12 12 +899 241 9 10 +409 207 6 8 +# 21--Festival/21_Festival_Festival_21_42.jpg +104 1332 16 20 +79 1327 12 15 +44 1323 12 14 +42 1338 13 15 +35 1353 16 13 +31 1337 8 15 +141 1279 14 16 +78 1276 12 15 +39 1282 14 18 +39 1269 13 11 +62 1269 12 16 +79 1255 10 11 +160 1268 12 15 +114 1256 10 15 +113 1234 11 13 +101 1244 12 13 +81 1228 11 15 +64 1228 13 17 +50 1238 10 10 +50 1212 12 15 +206 1281 8 15 +127 1228 10 14 +175 1226 12 12 +160 1243 11 12 +18 1167 14 14 +10 1190 7 16 +97 1169 15 15 +124 1186 9 12 +142 1175 11 14 +20 1106 10 13 +145 1151 13 14 +108 1115 13 14 +43 1095 10 13 +41 1072 8 10 +54 1061 8 9 +16 1053 7 8 +12 1051 5 12 +24 1044 9 9 +92 1093 10 11 +95 1071 9 12 +82 1053 8 10 +94 1047 8 11 +112 1057 10 10 +118 1052 8 9 +121 1050 9 9 +129 1076 9 11 +138 1065 9 11 +127 1099 10 12 +160 1088 10 13 +151 1119 9 12 +181 1138 9 13 +195 1137 9 12 +200 1130 12 12 +168 1203 12 16 +193 1155 9 12 +220 1123 9 11 +175 1113 10 12 +179 1111 8 9 +197 1086 9 12 +164 1070 8 10 +179 1069 9 11 +193 1069 11 12 +166 1051 10 12 +173 1040 10 14 +204 1066 10 13 +209 1057 10 13 +200 1047 10 11 +229 1055 11 12 +226 1041 8 12 +211 1038 10 10 +237 1032 9 10 +240 1050 10 12 +247 1044 11 13 +260 1036 10 11 +160 1189 16 16 +43 972 8 9 +59 967 10 11 +65 968 9 8 +74 962 7 8 +75 1029 7 8 +103 1018 9 13 +118 1014 8 9 +132 1011 8 9 +153 1020 8 10 +104 995 9 11 +192 1032 8 10 +208 1017 8 11 +182 1024 8 10 +219 1018 6 10 +238 1017 9 8 +256 1010 8 10 +258 992 8 8 +208 984 9 9 +220 979 7 9 +183 997 5 10 +104 953 10 9 +200 970 7 7 +32 828 6 7 +35 797 5 7 +259 962 6 10 +264 964 8 7 +233 953 9 10 +254 946 8 8 +259 950 5 6 +272 954 9 8 +230 935 8 8 +241 937 7 7 +230 941 4 5 +204 938 8 8 +193 958 8 10 +193 911 5 11 +201 914 8 7 +257 920 8 9 +311 931 10 13 +268 903 6 7 +259 903 6 7 +286 897 8 10 +279 884 7 10 +260 870 8 8 +290 875 6 6 +210 870 6 8 +144 872 8 7 +304 864 7 7 +279 854 6 6 +228 842 7 6 +264 836 7 8 +277 832 9 9 +301 850 8 8 +310 847 8 6 +328 830 7 8 +343 832 6 5 +308 821 7 8 +297 
819 8 6 +267 826 5 6 +255 821 6 7 +246 820 6 6 +224 821 8 6 +179 821 5 8 +215 848 8 8 +200 858 8 6 +289 826 6 7 +281 818 6 8 +288 815 6 7 +298 809 7 6 +305 808 6 7 +312 801 7 8 +286 807 7 7 +280 811 7 6 +260 808 5 7 +264 801 6 5 +258 796 6 6 +216 788 6 8 +277 794 7 8 +296 793 7 7 +290 794 6 7 +324 790 6 6 +330 781 6 10 +290 784 5 5 +233 761 7 8 +218 771 9 7 +228 757 6 10 +258 758 7 6 +328 820 7 6 +538 1287 13 15 +308 1321 9 14 +386 1299 10 14 +552 1326 10 16 +900 1325 17 20 +875 1331 14 17 +797 1348 12 15 +772 1319 13 15 +800 1288 13 18 +753 1317 14 13 +725 1343 11 14 +698 1347 16 17 +668 1349 11 13 +626 1338 11 15 +661 1333 13 13 +697 1320 14 18 +703 1292 13 17 +670 1289 10 19 +656 1308 11 12 +769 1281 13 16 +726 1277 12 15 +683 1273 12 13 +638 1260 13 17 +664 1264 12 13 +602 1269 12 13 +630 1235 13 18 +627 1234 9 13 +660 1255 9 12 +715 1256 12 12 +750 1265 11 13 +774 1222 12 19 +710 1219 10 14 +672 1226 8 15 +614 1228 11 13 +592 1208 13 14 +573 1203 13 13 +626 1196 12 14 +653 1195 12 17 +677 1208 9 13 +699 1195 10 11 +742 1194 12 13 +764 1196 12 18 +728 1178 12 16 +642 1175 11 15 +594 1164 13 15 +645 1154 10 12 +666 1159 8 15 +722 1168 11 14 +668 1140 9 12 +720 1121 10 13 +733 1107 7 9 +752 1096 8 11 +686 1102 12 16 +705 1103 7 8 +696 1075 9 15 +680 1073 11 13 +657 1063 9 9 +636 1071 10 10 +608 1080 10 9 +638 1110 9 10 +594 1080 9 12 +687 1050 11 12 +646 1044 11 14 +607 1043 10 13 +590 1055 7 8 +628 1026 10 8 +574 1024 9 8 +571 1004 9 8 +586 1006 8 10 +604 1006 9 10 +624 1012 10 11 +690 1022 8 12 +701 1011 7 8 +689 1000 8 12 +645 998 11 13 +606 994 7 8 +620 979 9 10 +633 982 7 9 +597 987 9 11 +576 985 6 7 +557 980 9 8 +571 972 8 10 +580 955 7 8 +601 951 9 9 +614 962 8 8 +642 943 7 7 +665 945 8 8 +617 939 7 10 +630 928 8 9 +607 936 9 8 +591 922 8 8 +598 906 8 9 +610 911 7 7 +627 915 7 8 +633 916 6 7 +653 921 7 7 +640 922 7 9 +655 931 6 6 +594 886 7 7 +623 895 7 9 +631 900 8 9 +648 890 6 7 +639 887 7 7 +535 914 7 8 +543 865 8 9 +574 868 6 10 +589 867 5 7 +608 874 6 9 +600 877 7 6 +597 872 6 6 +635 870 6 8 +609 864 6 6 +584 832 7 9 +345 752 4 5 +352 747 4 5 +858 1225 12 13 +898 1203 14 21 +945 1155 11 14 +986 1162 10 13 +917 1137 11 13 +872 1174 9 13 +835 1091 12 16 +827 1090 12 12 +822 1063 9 11 +838 1055 10 13 +973 1047 10 15 +888 1027 8 11 +923 991 9 11 +792 1013 11 13 +811 1010 11 12 +828 1003 9 12 +845 975 8 11 +844 961 8 8 +829 950 8 10 +820 944 8 8 +872 906 8 9 +797 971 8 9 +789 964 7 9 +768 955 7 9 +789 950 7 9 +770 941 7 10 +747 951 8 9 +755 925 8 9 +748 914 7 10 +775 903 6 8 +801 874 6 7 +826 869 5 8 +741 903 6 7 +728 898 8 10 +712 897 6 8 +712 884 7 8 +733 878 6 8 +704 886 6 7 +721 869 6 8 +715 860 6 5 +698 844 7 8 +671 858 6 8 +726 833 6 8 +690 841 5 8 +683 859 6 7 +678 853 6 5 +671 848 5 6 +677 826 5 7 +637 843 6 6 +628 812 6 7 +647 816 5 5 +661 824 5 5 +694 826 6 7 +690 821 5 7 +693 808 6 7 +678 805 6 9 +663 793 6 7 +692 798 6 7 +704 788 5 5 +639 832 5 6 +644 831 5 6 +651 831 6 5 +654 822 5 7 +699 829 5 6 +704 814 5 7 +664 810 6 5 +620 789 5 6 +637 776 6 7 +587 771 4 5 +648 779 5 6 +675 775 4 5 +686 789 6 5 +646 760 5 5 +642 769 6 5 +365 995 9 11 +453 1009 7 10 +526 1003 9 8 +441 970 8 7 +510 812 5 6 +508 803 5 6 +497 807 4 6 +619 1161 10 16 +0 1329 10 14 +0 1266 6 17 +120 926 8 10 +111 917 7 10 +106 915 6 9 +102 905 8 10 +126 883 8 9 +100 875 6 8 +# 21--Festival/21_Festival_Festival_21_275.jpg +247 268 42 54 +686 232 10 13 +751 229 10 14 +860 345 8 9 +867 349 10 8 +883 344 7 9 +903 347 8 11 +899 350 4 6 +920 353 7 9 +926 349 6 5 +968 332 14 17 +964 336 8 13 +997 346 19 20 
+688 107 9 11 +711 116 7 9 +656 97 7 9 +767 114 7 9 +789 99 9 11 +# 21--Festival/21_Festival_Festival_21_97.jpg +234 29 42 68 +504 576 39 52 +607 758 15 19 +135 603 48 53 +260 610 43 57 +# 21--Festival/21_Festival_Festival_21_218.jpg +102 154 41 59 +310 114 34 58 +315 192 34 39 +404 234 22 33 +540 206 29 52 +569 188 32 60 +634 172 35 42 +856 117 44 54 +946 146 38 43 +426 256 31 45 +530 248 30 35 +# 21--Festival/21_Festival_Festival_21_290.jpg +112 186 85 143 +297 206 65 130 +467 286 58 96 +552 264 63 98 +809 284 67 121 +# 21--Festival/21_Festival_Festival_21_107.jpg +83 237 65 96 +179 212 59 86 +349 205 38 60 +129 101 28 42 +470 195 43 67 +563 193 32 59 +594 181 48 73 +685 212 37 55 +720 211 33 59 +785 194 33 51 +841 185 19 31 +810 198 16 37 +882 191 17 28 +917 181 24 30 +941 188 21 22 +958 181 23 30 +997 188 23 25 +# 21--Festival/21_Festival_Festival_21_727.jpg +578 102 112 122 +# 21--Festival/21_Festival_Festival_21_254.jpg +52 508 14 21 +213 497 26 28 +223 372 50 66 +493 233 96 133 +# 21--Festival/21_Festival_Festival_21_936.jpg +378 205 299 393 +251 269 139 205 +# 21--Festival/21_Festival_Festival_21_605.jpg +177 121 63 73 +312 74 72 87 +447 38 79 108 +633 64 65 89 +808 82 60 82 +940 260 28 39 +992 269 30 32 +934 209 17 21 +986 187 17 22 +758 164 18 27 +286 143 11 16 +146 194 15 14 +77 161 12 16 +86 269 16 26 +58 241 18 24 +71 229 14 27 +28 230 15 22 +6 249 14 24 +0 236 9 16 +# 21--Festival/21_Festival_Festival_21_585.jpg +116 286 15 21 +28 184 8 10 +315 228 10 12 +0 347 12 21 +0 395 9 25 +131 282 14 18 +141 264 14 15 +158 240 13 14 +92 258 16 18 +46 286 16 21 +214 292 16 19 +203 315 15 25 +181 337 20 27 +15 303 18 16 +18 327 19 19 +256 339 17 25 +258 307 17 26 +308 324 19 23 +309 279 15 20 +8 207 10 12 +25 197 8 10 +35 198 9 11 +32 210 11 11 +51 206 9 12 +75 204 7 9 +92 200 9 9 +130 204 7 12 +100 216 9 12 +76 222 7 10 +175 191 8 14 +266 264 12 17 +230 243 14 21 +116 201 7 8 +154 198 8 10 +183 255 11 15 +196 192 8 13 +218 246 10 14 +106 255 9 15 +96 230 7 10 +123 335 22 26 +198 248 13 17 +262 233 11 12 +288 246 15 18 +60 260 12 14 +59 350 19 23 +22 236 12 14 +52 234 11 13 +67 229 11 13 +26 260 15 17 +# 21--Festival/21_Festival_Festival_21_219.jpg +66 396 14 19 +52 395 12 17 +35 398 11 15 +319 540 9 11 +317 446 10 13 +564 279 85 113 +558 446 8 11 +583 436 9 12 +610 437 8 11 +631 451 8 11 +645 441 8 12 +671 447 8 11 +666 444 7 9 +696 443 6 9 +731 168 13 16 +827 139 10 13 +862 140 9 13 +872 144 9 13 +943 141 11 14 +358 531 6 8 +412 542 7 8 +375 548 8 8 +358 548 6 9 +397 532 7 8 +947 239 5 6 +904 282 5 6 +853 124 5 6 +879 120 6 7 +# 21--Festival/21_Festival_Festival_21_830.jpg +477 106 307 536 +823 506 54 80 +927 497 52 93 +4 497 60 69 +# 21--Festival/21_Festival_Festival_21_526.jpg +623 631 16 18 +123 633 12 18 +21 550 6 10 +261 679 12 14 +423 676 7 12 +467 774 7 20 +578 611 8 9 +465 605 5 11 +409 608 4 13 +653 638 6 17 +739 593 6 14 +987 622 7 14 +705 550 6 9 +# 21--Festival/21_Festival_Festival_21_976.jpg +535 278 56 61 +817 107 34 44 +878 19 16 23 +839 0 16 21 +763 45 22 27 +473 34 24 26 +526 12 19 22 +360 72 28 33 +366 3 16 21 +323 18 18 24 +148 48 25 28 +23 22 17 23 +162 4 16 20 +# 21--Festival/21_Festival_Festival_21_640.jpg +666 486 11 12 +# 21--Festival/21_Festival_Festival_21_660.jpg +386 331 354 463 +# 21--Festival/21_Festival_Festival_21_210.jpg +392 165 69 109 +121 376 23 27 +233 387 17 28 +13 369 25 38 +759 184 79 110 +# 21--Festival/21_Festival_Festival_21_331.jpg +140 318 24 29 +161 282 21 22 +149 404 89 105 +236 278 29 34 +261 275 35 36 +308 394 34 40 +353 293 41 58 +517 
300 36 34 +490 289 24 23 +461 273 10 11 +662 281 37 45 +744 226 57 72 +986 269 38 46 +882 241 30 43 +59 364 39 46 +# 21--Festival/21_Festival_Festival_21_140.jpg +350 482 20 20 +430 463 19 27 +517 498 48 46 +# 21--Festival/21_Festival_Festival_21_562.jpg +173 158 39 44 +50 243 29 30 +400 272 18 32 +357 292 14 19 +609 121 51 57 +944 244 33 47 +479 340 7 6 +455 362 4 5 +786 373 10 13 +820 387 12 12 +# 22--Picnic/22_Picnic_Picnic_22_541.jpg +90 634 42 65 +360 1109 12 19 +263 1081 5 10 +269 1092 5 9 +903 846 18 17 +447 1184 3 5 +454 1188 3 4 +459 1195 3 4 +467 1199 4 3 +479 1198 4 5 +486 1196 5 8 +# 22--Picnic/22_Picnic_Picnic_22_152.jpg +130 238 43 61 +121 374 25 61 +420 226 27 35 +592 142 40 47 +861 364 45 93 +# 22--Picnic/22_Picnic_Picnic_22_313.jpg +303 311 26 35 +512 243 16 21 +619 192 24 30 +700 276 25 34 +646 339 21 30 +770 358 14 34 +159 305 26 38 +76 212 10 15 +35 205 8 14 +0 203 8 16 +567 242 11 11 +# 22--Picnic/22_Picnic_Picnic_22_564.jpg +398 220 156 188 +646 334 134 198 +# 22--Picnic/22_Picnic_Picnic_22_290.jpg +277 426 21 35 +347 413 27 36 +415 410 29 41 +616 416 29 39 +# 22--Picnic/22_Picnic_Picnic_22_928.jpg +619 352 69 65 +615 291 48 48 +541 188 4 4 +# 22--Picnic/22_Picnic_Picnic_22_140.jpg +316 345 21 29 +418 350 26 29 +569 371 21 27 +786 387 24 29 +921 377 17 24 +868 379 19 24 +922 429 14 27 +635 390 23 22 +# 22--Picnic/22_Picnic_Picnic_22_732.jpg +27 572 10 9 +60 658 13 16 +149 592 10 11 +283 604 10 16 +342 613 7 15 +407 571 9 10 +368 566 8 10 +426 573 9 12 +214 551 7 9 +244 569 6 10 +228 588 6 11 +296 544 8 12 +516 564 9 10 +526 604 7 12 +585 543 8 9 +726 583 7 13 +851 620 11 14 +792 550 7 10 +606 610 9 16 +327 583 7 11 +51 549 6 10 +115 564 7 9 +191 605 7 13 +216 632 13 14 +214 610 11 10 +150 522 6 7 +580 561 9 10 +6 519 6 8 +77 519 6 7 +371 588 5 11 +# 22--Picnic/22_Picnic_Picnic_22_654.jpg +411 437 101 120 +# 22--Picnic/22_Picnic_Picnic_22_308.jpg +384 331 32 39 +948 189 68 104 +514 296 36 34 +562 272 27 39 +587 235 24 32 +697 253 17 24 +742 267 14 19 +815 260 14 18 +891 257 10 13 +890 310 11 16 +638 238 22 28 +242 254 11 11 +859 274 8 11 +868 269 10 14 +# 22--Picnic/22_Picnic_Picnic_22_933.jpg +648 192 42 44 +686 290 45 55 +632 405 47 52 +257 42 135 161 +716 34 180 208 +0 483 22 24 +# 22--Picnic/22_Picnic_Picnic_22_36.jpg +131 508 28 66 +183 475 33 58 +290 507 22 32 +213 683 27 54 +458 440 36 45 +385 282 8 13 +553 284 12 13 +460 312 7 9 +636 226 35 45 +639 657 37 44 +481 353 7 10 +46 365 7 18 +# 22--Picnic/22_Picnic_Picnic_22_208.jpg +706 192 76 116 +118 290 66 96 +# 22--Picnic/22_Picnic_Picnic_22_354.jpg +473 323 51 52 +# 22--Picnic/22_Picnic_Picnic_22_537.jpg +129 1000 168 183 +381 760 156 198 +571 601 204 171 +# 22--Picnic/22_Picnic_Picnic_22_357.jpg +383 270 30 33 +486 217 30 44 +547 227 31 41 +# 22--Picnic/22_Picnic_Picnic_22_483.jpg +534 414 57 80 +# 22--Picnic/22_Picnic_Picnic_22_561.jpg +392 504 102 125 +209 459 115 173 +# 22--Picnic/22_Picnic_Picnic_22_10.jpg +276 114 60 104 +404 120 54 82 +570 218 60 70 +776 170 52 84 +# 22--Picnic/22_Picnic_Picnic_22_688.jpg +646 6 166 221 +# 22--Picnic/22_Picnic_Picnic_22_444.jpg +141 346 9 14 +209 402 15 16 +296 396 8 13 +377 416 10 17 +427 372 9 12 +504 340 7 9 +533 332 10 13 +487 414 8 15 +502 389 11 12 +810 348 10 12 +607 326 9 12 +567 335 10 12 +644 349 6 10 +548 419 9 13 +569 433 7 17 +404 351 7 12 +620 428 12 14 +# 22--Picnic/22_Picnic_Picnic_22_241.jpg +234 258 52 78 +364 98 74 82 +# 22--Picnic/22_Picnic_Picnic_22_594.jpg +497 275 53 68 +797 316 36 62 +# 22--Picnic/22_Picnic_Picnic_22_310.jpg +267 206 48 70 +450 212 
22 32 +39 283 13 16 +10 273 11 19 +145 279 15 16 +27 250 12 13 +107 279 14 18 +255 251 11 26 +651 229 38 52 +521 278 19 22 +598 187 20 29 +701 195 13 28 +762 192 22 34 +856 227 25 24 +498 239 7 8 +417 238 5 7 +825 218 6 7 +732 228 3 5 +# 23--Shoppers/23_Shoppers_Shoppers_23_640.jpg +354 235 164 211 +619 274 86 155 +750 366 80 83 +831 387 71 92 +217 277 89 107 +6 336 95 119 +# 23--Shoppers/23_Shoppers_Shoppers_23_854.jpg +292 144 294 368 +# 23--Shoppers/23_Shoppers_Shoppers_23_271.jpg +20 276 29 40 +166 308 23 31 +266 273 23 32 +500 227 44 72 +603 222 62 83 +374 312 11 17 +943 341 9 9 +925 347 6 9 +938 360 6 10 +921 367 7 12 +838 353 11 10 +956 429 10 12 +975 423 11 14 +832 428 17 18 +846 374 15 17 +825 372 10 15 +891 374 8 14 +# 23--Shoppers/23_Shoppers_Shoppers_23_812.jpg +476 141 114 135 +# 23--Shoppers/23_Shoppers_Shoppers_23_122.jpg +258 38 78 94 +730 4 134 126 +# 23--Shoppers/23_Shoppers_Shoppers_23_514.jpg +646 52 54 92 +# 23--Shoppers/23_Shoppers_Shoppers_23_302.jpg +297 629 309 374 +# 23--Shoppers/23_Shoppers_Shoppers_23_708.jpg +397 41 31 47 +# 23--Shoppers/23_Shoppers_Shoppers_23_823.jpg +669 172 85 135 +265 516 71 110 +302 471 85 102 +# 23--Shoppers/23_Shoppers_Shoppers_23_243.jpg +274 425 30 39 +567 418 27 35 +745 423 31 38 +# 23--Shoppers/23_Shoppers_Shoppers_23_328.jpg +547 192 11 16 +577 183 14 18 +621 184 14 19 +315 209 33 43 +478 204 27 42 +334 169 23 34 +321 172 16 32 +130 338 76 100 +# 23--Shoppers/23_Shoppers_Shoppers_23_599.jpg +142 19 43 85 +659 43 33 63 +960 170 18 24 +782 363 21 27 +804 359 22 28 +# 23--Shoppers/23_Shoppers_Shoppers_23_25.jpg +453 58 34 69 +368 105 14 26 +887 80 24 32 +# 23--Shoppers/23_Shoppers_Shoppers_23_232.jpg +153 191 14 35 +482 87 21 35 +696 187 7 9 +771 191 6 13 +826 199 11 23 +644 183 5 11 +781 196 4 9 +# 23--Shoppers/23_Shoppers_Shoppers_23_500.jpg +437 119 40 71 +506 130 36 51 +476 224 15 19 +506 214 16 17 +91 207 22 22 +# 23--Shoppers/23_Shoppers_Shoppers_23_10.jpg +62 383 40 53 +275 308 37 47 +469 332 25 35 +586 310 19 43 +639 296 24 37 +1003 334 14 24 +165 342 15 30 +535 311 17 19 +761 329 17 24 +# 23--Shoppers/23_Shoppers_Shoppers_23_801.jpg +539 222 93 135 +308 177 120 141 +# 23--Shoppers/23_Shoppers_Shoppers_23_167.jpg +362 184 102 102 +# 23--Shoppers/23_Shoppers_Shoppers_23_65.jpg +351 167 31 39 +478 95 28 59 +878 129 34 46 +# 23--Shoppers/23_Shoppers_Shoppers_23_223.jpg +474 350 82 114 +# 23--Shoppers/23_Shoppers_Shoppers_23_817.jpg +658 23 170 346 +# 23--Shoppers/23_Shoppers_Shoppers_23_91.jpg +202 205 19 31 +226 308 20 24 +309 268 23 31 +367 227 30 47 +636 117 45 59 +971 243 24 37 +# 23--Shoppers/23_Shoppers_Shoppers_23_777.jpg +254 158 154 226 +548 394 206 238 +# 23--Shoppers/23_Shoppers_Shoppers_23_259.jpg +202 96 160 218 +518 80 150 188 +762 66 140 196 +# 23--Shoppers/23_Shoppers_Shoppers_23_364.jpg +580 184 100 116 +456 228 60 62 +278 210 76 96 +# 23--Shoppers/23_Shoppers_Shoppers_23_665.jpg +346 180 180 180 +# 23--Shoppers/23_Shoppers_Shoppers_23_60.jpg +147 504 23 43 +392 579 15 25 +695 607 5 9 +718 619 6 8 +708 609 8 9 +775 611 7 13 +583 572 10 15 +666 614 6 9 +665 633 5 7 +# 23--Shoppers/23_Shoppers_Shoppers_23_571.jpg +78 612 7 14 +277 571 23 34 +354 484 20 26 +419 518 17 30 +503 509 10 25 +563 566 9 23 +574 562 11 27 +589 560 11 27 +628 571 12 26 +644 574 9 22 +642 459 18 66 +683 452 21 79 +881 352 89 143 +# 23--Shoppers/23_Shoppers_Shoppers_23_461.jpg +650 187 17 24 +688 141 18 26 +744 145 18 31 +604 174 12 23 +759 80 7 12 +772 85 9 10 +448 185 8 14 +385 158 9 14 +918 143 12 19 +# 
23--Shoppers/23_Shoppers_Shoppers_23_485.jpg +490 114 78 126 +814 144 60 120 +312 128 82 134 +38 136 80 112 +# 23--Shoppers/23_Shoppers_Shoppers_23_607.jpg +238 313 184 217 +# 23--Shoppers/23_Shoppers_Shoppers_23_802.jpg +348 188 251 316 +585 340 208 223 +# 23--Shoppers/23_Shoppers_Shoppers_23_459.jpg +112 114 11 17 +103 118 10 14 +135 122 8 16 +339 138 5 7 +377 130 5 7 +782 127 7 10 +816 135 8 10 +775 108 7 9 +800 123 8 10 +408 299 4 6 +402 268 3 6 +468 312 5 8 +449 305 5 7 +699 366 8 12 +256 432 6 8 +276 432 7 11 +307 418 7 10 +324 417 6 9 +252 491 11 14 +553 562 5 8 +674 550 5 8 +663 552 4 6 +673 594 5 8 +689 593 6 7 +576 485 2 5 +655 521 5 6 +671 503 4 9 +661 494 4 6 +# 23--Shoppers/23_Shoppers_Shoppers_23_561.jpg +235 241 60 63 +228 136 32 19 +460 124 33 19 +462 221 64 70 +701 116 34 19 +684 215 65 68 +235 518 30 25 +449 504 31 26 +445 615 58 57 +676 498 29 23 +652 609 51 53 +# 23--Shoppers/23_Shoppers_Shoppers_23_880.jpg +189 114 72 136 +# 23--Shoppers/23_Shoppers_Shoppers_23_511.jpg +786 6 8 11 +821 17 8 13 +796 5 6 7 +674 28 8 10 +533 13 6 12 +562 34 5 9 +616 6 8 10 +173 627 11 13 +47 590 11 17 +733 31 6 8 +374 32 3 8 +261 34 5 7 +248 36 7 8 +291 137 8 11 +247 164 11 13 +97 37 7 11 +139 51 5 7 +48 43 10 12 +294 177 8 9 +344 214 10 10 +326 216 9 11 +364 242 9 12 +386 251 8 11 +422 283 7 14 +451 304 5 10 +484 324 8 13 +504 318 9 14 +651 309 7 10 +716 336 10 15 +750 279 7 10 +758 288 5 11 +632 332 7 9 +627 312 5 11 +566 328 5 10 +763 187 4 5 +817 225 8 9 +845 223 5 8 +832 239 7 9 +918 334 7 12 +842 319 10 14 +57 305 7 9 +80 325 8 9 +56 324 8 9 +142 347 10 11 +183 380 8 14 +280 402 8 13 +475 542 4 8 +497 578 7 10 +452 560 8 12 +366 478 5 7 +749 575 9 12 +709 600 11 12 +851 560 6 12 +129 526 8 10 +200 550 8 10 +37 605 10 10 +78 543 7 10 +477 9 7 11 +480 51 7 10 +316 45 6 9 +339 51 6 8 +363 37 5 7 +161 96 11 13 +199 119 9 13 +235 98 7 10 +64 282 6 8 +15 297 8 11 +# 23--Shoppers/23_Shoppers_Shoppers_23_450.jpg +573 197 5 8 +589 197 4 6 +595 200 5 7 +663 357 15 17 +456 333 6 15 +431 680 18 24 +855 633 18 28 +784 591 23 30 +247 433 19 25 +344 268 7 8 +# 23--Shoppers/23_Shoppers_Shoppers_23_543.jpg +384 482 319 381 +# 23--Shoppers/23_Shoppers_Shoppers_23_43.jpg +67 149 36 46 +73 211 36 68 +166 229 57 74 +353 231 64 82 +388 132 19 52 +579 144 37 64 +606 120 27 52 +792 134 25 42 +806 136 25 40 +944 121 18 33 +870 146 19 27 +925 199 48 67 +712 338 48 50 +154 390 69 91 +238 149 12 31 +# 23--Shoppers/23_Shoppers_Shoppers_23_197.jpg +437 84 168 201 +# 23--Shoppers/23_Shoppers_Shoppers_23_294.jpg +205 154 42 50 +154 153 30 55 +571 136 33 38 +752 126 34 56 +767 140 22 54 +# 23--Shoppers/23_Shoppers_Shoppers_23_22.jpg +415 288 38 53 +350 293 24 44 +620 252 42 67 +340 254 30 53 +26 113 35 55 +241 242 28 56 +728 249 23 54 +796 294 37 62 +# 24--Soldier_Firing/24_Soldier_Firing_Soldier_Firing_24_268.jpg +481 449 143 153 +# 24--Soldier_Firing/24_Soldier_Firing_Soldier_Firing_24_703.jpg +236 52 66 88 +344 56 72 88 +480 8 74 80 +634 38 68 76 +# 24--Soldier_Firing/24_Soldier_Firing_Soldier_Firing_24_633.jpg +874 396 50 61 +0 275 29 45 +757 394 46 50 +472 448 27 48 +842 428 37 40 +# 24--Soldier_Firing/24_Soldier_Firing_Soldier_Firing_24_372.jpg +288 138 82 92 +# 24--Soldier_Firing/24_Soldier_Firing_Soldier_Firing_24_329.jpg +870 200 128 176 +# 24--Soldier_Firing/24_Soldier_Firing_Soldier_Firing_24_405.jpg +321 378 63 84 +# 24--Soldier_Firing/24_Soldier_Firing_Soldier_Firing_24_887.jpg +615 538 82 91 +# 24--Soldier_Firing/24_Soldier_Firing_Soldier_Firing_24_702.jpg +831 83 41 46 +639 115 20 26 +385 169 16 21 
+417 169 12 21 +346 127 7 7 +373 148 7 8 +175 58 47 56 +401 174 12 17 +349 165 8 9 +472 170 9 10 +# 24--Soldier_Firing/24_Soldier_Firing_Soldier_Firing_24_67.jpg +488 214 66 82 +# 24--Soldier_Firing/24_Soldier_Firing_Soldier_Firing_24_254.jpg +174 104 68 96 +# 24--Soldier_Firing/24_Soldier_Firing_Soldier_Firing_24_264.jpg +278 64 58 69 +518 220 13 18 +583 220 14 20 +744 141 55 77 +# 24--Soldier_Firing/24_Soldier_Firing_Soldier_Firing_24_281.jpg +387 300 13 15 +185 314 13 15 +714 413 15 18 +# 24--Soldier_Firing/24_Soldier_Firing_Soldier_Firing_24_95.jpg +526 202 80 112 +# 24--Soldier_Firing/24_Soldier_Firing_Soldier_Firing_24_691.jpg +824 383 33 41 +881 453 31 42 +956 299 23 25 +877 326 32 35 +820 303 25 24 +776 321 31 34 +742 293 28 35 +727 307 20 24 +804 365 31 37 +667 320 25 27 +622 306 24 26 +579 267 22 22 +562 328 24 25 +547 272 23 25 +503 314 27 27 +643 310 17 23 +419 340 26 29 +440 311 23 27 +411 247 23 26 +365 306 25 24 +293 278 27 32 +271 307 31 36 +264 270 27 30 +236 219 26 27 +104 121 30 51 +# 24--Soldier_Firing/24_Soldier_Firing_Soldier_Firing_24_890.jpg +750 56 63 72 +477 158 20 24 +389 163 21 24 +302 152 25 24 +215 136 27 25 +129 154 26 28 +42 149 27 28 +61 218 48 51 +# 24--Soldier_Firing/24_Soldier_Firing_Soldier_Firing_24_129.jpg +345 318 96 144 +# 24--Soldier_Firing/24_Soldier_Firing_Soldier_Firing_24_315.jpg +306 265 63 66 +148 14 53 61 +277 85 16 18 +296 80 11 15 +11 63 33 26 +# 24--Soldier_Firing/24_Soldier_Firing_Soldier_Firing_24_824.jpg +210 154 408 580 +# 24--Soldier_Firing/24_Soldier_Firing_Soldier_Firing_24_1037.jpg +726 122 146 180 +# 24--Soldier_Firing/24_Soldier_Firing_Soldier_Firing_24_644.jpg +552 275 21 38 +396 188 20 35 +274 171 30 38 +207 153 20 33 +54 98 21 28 +126 110 30 31 +# 24--Soldier_Firing/24_Soldier_Firing_Soldier_Firing_24_601.jpg +860 303 35 38 +736 321 36 40 +597 308 39 39 +985 338 11 16 +981 295 10 13 +393 312 30 34 +472 304 32 37 +295 319 30 35 +308 291 26 35 +518 286 33 37 +26 333 13 13 +176 319 32 40 +87 315 29 34 +409 288 33 38 +# 24--Soldier_Firing/24_Soldier_Firing_Soldier_Firing_24_931.jpg +497 96 71 87 +638 161 66 78 +364 65 25 29 +438 52 19 25 +258 49 21 26 +733 56 19 25 +915 96 11 15 +877 110 8 10 +841 112 8 9 +# 24--Soldier_Firing/24_Soldier_Firing_Soldier_Firing_24_10.jpg +168 164 52 64 +# 24--Soldier_Firing/24_Soldier_Firing_Soldier_Firing_24_523.jpg +324 178 172 94 +# 24--Soldier_Firing/24_Soldier_Firing_Soldier_Firing_24_368.jpg +390 158 136 134 +# 24--Soldier_Firing/24_Soldier_Firing_Soldier_Firing_24_15.jpg +484 200 111 161 +# 24--Soldier_Firing/24_Soldier_Firing_Soldier_Firing_24_431.jpg +214 98 70 104 +# 24--Soldier_Firing/24_Soldier_Firing_Soldier_Firing_24_812.jpg +760 90 60 94 +# 24--Soldier_Firing/24_Soldier_Firing_Soldier_Firing_24_901.jpg +104 246 88 84 +454 110 98 104 +700 156 70 88 +844 234 64 78 +928 182 82 100 +# 24--Soldier_Firing/24_Soldier_Firing_Soldier_Firing_24_763.jpg +807 110 50 49 +555 322 36 50 +401 168 45 49 +123 113 37 54 +# 24--Soldier_Firing/24_Soldier_Firing_Soldier_Firing_24_115.jpg +274 102 86 122 +# 24--Soldier_Firing/24_Soldier_Firing_Soldier_Firing_24_133.jpg +479 307 54 46 +547 380 36 58 +# 24--Soldier_Firing/24_Soldier_Firing_Soldier_Firing_24_904.jpg +278 4 456 470 +# 24--Soldier_Firing/24_Soldier_Firing_Soldier_Firing_24_540.jpg +174 697 75 123 +432 661 93 99 +# 25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_734.jpg +26 438 19 32 +152 435 17 28 +306 396 15 30 +543 389 26 33 +691 562 21 28 +# 25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_467.jpg +768 55 33 55 +# 
25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_563.jpg +227 61 48 68 +# 25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_527.jpg +108 283 39 49 +330 194 46 72 +508 268 28 30 +698 157 28 53 +753 190 31 37 +865 152 41 59 +# 25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_640.jpg +440 12 240 334 +# 25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_463.jpg +780 150 42 55 +798 115 36 53 +349 178 49 62 +# 25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_1045.jpg +48 46 286 344 +658 36 294 412 +# 25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_700.jpg +56 395 12 20 +108 401 17 26 +82 427 24 37 +170 390 26 38 +218 405 19 26 +382 420 12 24 +409 422 12 23 +431 415 14 24 +477 414 20 27 +538 369 34 46 +584 413 17 25 +598 415 8 21 +910 421 11 17 +886 385 19 27 +# 25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_761.jpg +166 68 46 52 +312 65 44 55 +327 186 50 43 +492 104 44 47 +687 94 44 51 +804 92 53 53 +# 25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_882.jpg +772 508 98 62 +# 25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_440.jpg +556 78 60 66 +30 180 60 62 +# 25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_469.jpg +272 100 72 98 +606 182 54 74 +914 176 64 74 +# 25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_728.jpg +614 94 110 144 +# 25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_883.jpg +268 118 108 156 +534 48 64 96 +# 25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_419.jpg +470 86 86 120 +# 25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_936.jpg +211 109 56 70 +# 25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_437.jpg +369 381 30 41 +703 283 25 47 +# 25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_271.jpg +521 336 20 24 +141 51 18 23 +# 25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_121.jpg +688 80 58 80 +# 25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_873.jpg +43 227 12 15 +63 212 13 15 +102 205 17 18 +218 199 15 15 +231 188 24 23 +332 183 11 13 +408 138 37 38 +488 132 44 48 +747 106 28 30 +# 25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_1026.jpg +202 224 42 48 +406 241 24 28 +586 231 17 26 +535 135 13 17 +581 142 8 11 +# 25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_912.jpg +333 268 31 43 +# 25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_1046.jpg +252 396 26 40 +862 350 38 49 +749 396 32 40 +709 428 31 39 +# 25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_16.jpg +266 136 86 120 +# 25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_683.jpg +676 410 204 204 +# 25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_614.jpg +265 85 47 56 +# 25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_173.jpg +302 116 84 132 +# 25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_374.jpg +489 71 54 69 +231 279 45 64 +51 290 51 60 +# 25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_869.jpg +27 212 20 23 +159 212 17 22 +221 229 17 23 +317 233 36 46 +667 171 131 164 +138 159 9 9 +# 25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_986.jpg +399 128 143 169 +# 25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_324.jpg +528 142 96 44 +# 25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_585.jpg +549 126 114 146 +# 25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_59.jpg +462 84 76 100 +# 25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_747.jpg +319 122 56 71 +405 190 48 60 +848 277 15 17 +918 250 8 13 +# 25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_325.jpg +98 202 49 66 +277 192 46 59 +425 127 59 70 +526 
199 50 53 +671 205 41 47 +763 275 36 41 +820 297 32 37 +849 305 32 36 +881 322 24 30 +906 326 14 28 +915 339 12 24 +# 25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_707.jpg +331 135 49 74 +694 180 35 53 +700 45 16 18 +# 25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_18.jpg +229 50 55 47 +587 437 12 14 +506 378 16 15 +830 413 14 14 +751 398 19 17 +905 443 16 16 +# 25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_515.jpg +56 308 14 21 +366 297 14 20 +373 320 12 20 +394 312 11 19 +414 318 13 18 +441 333 9 17 +459 314 11 19 +476 320 14 21 +521 318 11 16 +537 313 13 18 +563 340 14 17 +574 323 16 21 +614 320 15 21 +636 313 18 20 +673 314 16 22 +698 316 17 26 +719 300 20 31 +734 326 14 24 +786 307 18 27 +803 293 17 27 +835 311 15 28 +860 304 16 23 +884 325 19 31 +910 308 13 25 +# 25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_174.jpg +111 38 30 29 +292 96 23 23 +373 88 20 20 +394 171 33 29 +199 163 26 23 +631 202 41 45 +# 25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_436.jpg +302 152 132 90 +# 25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_513.jpg +127 124 176 198 +504 275 204 245 +# 25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_343.jpg +302 596 39 48 +679 536 42 57 +70 809 17 17 +891 829 39 35 +# 25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_169.jpg +292 90 76 132 +# 25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_1029.jpg +254 360 58 67 +246 181 45 57 +151 202 56 57 +455 181 49 52 +515 387 39 59 +118 441 65 72 +608 300 40 39 +659 311 38 39 +549 329 35 37 +579 141 31 45 +493 184 38 45 +655 198 27 36 +713 188 42 43 +734 152 66 64 +873 170 43 47 +387 183 9 10 +435 162 7 10 +413 183 6 8 +# 25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_9.jpg +720 140 58 168 +# 25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_679.jpg +31 121 17 19 +83 175 31 39 +120 198 40 46 +199 172 49 57 +307 98 31 33 +307 124 39 55 +435 61 38 44 +492 146 28 40 +597 151 85 104 +866 33 41 53 +180 152 5 7 +163 164 4 7 +144 154 5 7 +# 25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_993.jpg +186 48 110 170 +# 26--Soldier_Drilling/26_Soldier_Drilling_Soldiers_Drilling_26_750.jpg +0 223 128 210 +140 325 10 16 +161 309 12 20 +323 278 47 61 +456 251 102 114 +768 168 58 163 +# 26--Soldier_Drilling/26_Soldier_Drilling_Soldiers_Drilling_26_336.jpg +637 218 38 41 +84 234 37 45 +# 26--Soldier_Drilling/26_Soldier_Drilling_Soldiers_Drilling_26_567.jpg +15 279 15 20 +171 242 14 20 +355 292 95 87 +445 135 40 71 +549 108 119 99 +907 135 42 51 +511 157 36 47 +# 26--Soldier_Drilling/26_Soldier_Drilling_Soldiers_Drilling_26_764.jpg +67 142 11 11 +84 133 13 17 +690 125 23 31 +741 169 9 13 +539 148 8 12 +# 26--Soldier_Drilling/26_Soldier_Drilling_Soldiers_Drilling_26_719.jpg +461 214 28 40 +63 239 22 25 +126 250 20 27 +205 148 23 29 +383 250 18 20 +816 243 27 29 +760 298 32 32 +# 26--Soldier_Drilling/26_Soldier_Drilling_Soldiers_Drilling_26_85.jpg +58 349 32 43 +215 357 30 37 +385 379 11 16 +1 600 28 57 +598 397 32 33 +328 389 6 7 +349 399 5 6 +# 26--Soldier_Drilling/26_Soldier_Drilling_Soldiers_Drilling_26_942.jpg +99 199 39 41 +353 200 39 42 +229 214 24 44 +# 26--Soldier_Drilling/26_Soldier_Drilling_Soldiers_Drilling_26_64.jpg +49 164 49 54 +109 179 29 36 +454 177 54 66 +585 189 26 33 +660 164 51 68 +# 26--Soldier_Drilling/26_Soldier_Drilling_Soldiers_Drilling_26_886.jpg +686 100 54 74 +# 26--Soldier_Drilling/26_Soldier_Drilling_Soldiers_Drilling_26_307.jpg +72 391 30 31 +175 378 25 30 +289 364 33 29 +413 354 29 33 +530 360 25 41 +729 360 15 21 +907 343 21 24 
+390 210 26 34 +263 350 23 32 +# 26--Soldier_Drilling/26_Soldier_Drilling_Soldiers_Drilling_26_710.jpg +281 421 603 746 +# 26--Soldier_Drilling/26_Soldier_Drilling_Soldiers_Drilling_26_520.jpg +824 98 128 176 +198 204 106 140 +# 26--Soldier_Drilling/26_Soldier_Drilling_Soldiers_Drilling_26_262.jpg +801 285 24 29 +854 291 22 28 +100 256 14 29 +866 251 20 28 +918 283 29 34 +951 294 25 30 +966 236 20 25 +1001 262 23 39 +835 277 19 29 +353 232 13 13 +87 214 15 17 +122 238 17 21 +5 264 23 26 +36 273 22 27 +61 254 19 29 +36 249 14 17 +75 261 25 28 +126 265 21 25 +148 249 16 23 +160 260 16 20 +167 266 25 29 +168 212 13 19 +177 226 16 21 +215 210 16 16 +218 230 15 23 +114 222 13 17 +224 262 19 24 +245 256 22 30 +527 267 15 24 +537 268 22 29 +575 272 23 30 +609 273 22 29 +618 316 25 30 +634 282 23 26 +649 237 14 15 +679 220 9 15 +686 223 13 16 +691 276 25 30 +509 266 20 28 +474 309 24 29 +689 248 21 24 +723 283 26 33 +763 249 20 25 +759 325 23 29 +782 272 20 25 +250 215 14 18 +301 213 12 17 +326 219 17 18 +372 219 13 16 +297 263 23 26 +321 258 13 25 +334 264 15 28 +342 254 23 32 +274 292 24 32 +385 217 11 16 +405 193 13 16 +398 226 13 18 +438 219 16 18 +451 220 10 16 +474 229 12 16 +495 224 14 16 +486 241 14 19 +387 263 20 26 +421 257 16 26 +432 261 18 27 +447 271 23 29 +527 220 13 14 +584 229 11 15 +600 226 14 14 +594 243 12 16 +49 212 17 20 +42 223 16 17 +44 235 17 19 +714 262 20 25 +# 26--Soldier_Drilling/26_Soldier_Drilling_Soldiers_Drilling_26_529.jpg +176 237 41 47 +363 123 44 65 +439 162 43 59 +528 126 49 55 +469 328 54 60 +747 313 55 63 +# 26--Soldier_Drilling/26_Soldier_Drilling_Soldiers_Drilling_26_259.jpg +406 348 130 153 +# 26--Soldier_Drilling/26_Soldier_Drilling_Soldiers_Drilling_26_204.jpg +374 264 21 30 +437 198 19 24 +448 144 15 19 +498 107 10 12 +587 88 8 9 +597 116 15 17 +622 155 14 21 +889 150 7 16 +941 170 12 19 +918 202 23 35 +963 149 10 16 +966 197 17 24 +982 202 16 19 +943 116 7 12 +968 117 7 12 +1015 124 9 16 +952 270 19 35 +1010 284 14 40 +989 157 12 20 +898 180 15 29 +891 251 23 38 +920 224 13 28 +240 68 7 13 +195 80 9 10 +# 26--Soldier_Drilling/26_Soldier_Drilling_Soldiers_Drilling_26_1022.jpg +253 416 36 47 +161 567 20 30 +61 574 24 35 +25 242 11 21 +41 258 13 24 +57 282 11 19 +83 302 15 24 +122 292 14 20 +143 314 16 25 +194 322 13 19 +285 339 13 18 +313 350 12 19 +390 347 11 18 +475 353 12 16 +943 507 8 17 +926 535 11 19 +610 455 8 10 +494 457 7 10 +506 440 7 14 +514 430 5 10 +58 223 12 18 +68 270 12 20 +73 249 11 19 +84 256 11 13 +82 273 10 19 +92 240 17 30 +134 264 10 18 +144 273 8 13 +175 292 10 22 +188 296 8 11 +199 302 4 10 +219 310 12 18 +321 335 12 16 +342 349 8 17 +330 350 8 13 +643 454 7 10 +624 452 9 12 +584 456 8 11 +830 554 11 18 +888 554 15 24 +859 521 13 18 +707 581 14 21 +661 559 11 24 +846 546 11 18 +867 550 12 21 +250 315 11 17 +749 511 9 16 +765 502 10 16 +807 497 8 17 +559 456 9 10 +682 463 7 12 +695 453 6 15 +707 452 8 13 +87 494 18 23 +32 456 22 30 +21 505 24 29 +# 26--Soldier_Drilling/26_Soldier_Drilling_Soldiers_Drilling_26_178.jpg +876 110 108 154 +702 46 84 136 +554 86 88 120 +450 96 90 110 +344 140 80 92 +290 118 68 96 +186 122 68 74 +122 76 70 86 +640 102 74 96 +756 94 72 112 +# 26--Soldier_Drilling/26_Soldier_Drilling_Soldiers_Drilling_26_832.jpg +440 206 58 98 +540 250 56 84 +788 214 68 92 +# 26--Soldier_Drilling/26_Soldier_Drilling_Soldiers_Drilling_26_223.jpg +16 128 22 27 +68 133 19 26 +91 132 21 25 +113 140 13 16 +125 151 19 21 +163 143 20 21 +203 124 21 28 +277 113 25 32 +381 111 25 29 +404 92 29 37 +514 79 35 41 +614 94 29 37 +718 98 
34 45 +912 113 36 43 +# 26--Soldier_Drilling/26_Soldier_Drilling_Soldiers_Drilling_26_405.jpg +140 345 26 36 +400 204 44 53 +550 180 54 55 +853 264 25 31 +# 26--Soldier_Drilling/26_Soldier_Drilling_Soldiers_Drilling_26_345.jpg +161 195 29 32 +342 199 26 33 +467 191 28 37 +660 185 29 38 +853 194 32 39 +# 26--Soldier_Drilling/26_Soldier_Drilling_Soldiers_Drilling_26_606.jpg +220 74 126 170 +642 102 124 184 +# 26--Soldier_Drilling/26_Soldier_Drilling_Soldiers_Drilling_26_192.jpg +70 299 32 38 +91 404 27 33 +516 292 35 48 +574 355 32 37 +637 290 36 45 +563 322 20 30 +# 26--Soldier_Drilling/26_Soldier_Drilling_Soldiers_Drilling_26_393.jpg +74 128 64 118 +378 64 80 132 +510 64 72 134 +750 38 70 136 +886 118 72 122 +# 26--Soldier_Drilling/26_Soldier_Drilling_Soldiers_Drilling_26_689.jpg +628 207 25 32 +167 158 22 34 +245 115 76 101 +336 145 87 137 +392 163 55 101 +# 26--Soldier_Drilling/26_Soldier_Drilling_Soldiers_Drilling_26_390.jpg +488 174 56 56 +426 406 60 68 +# 26--Soldier_Drilling/26_Soldier_Drilling_Soldiers_Drilling_26_236.jpg +166 294 11 14 +361 368 101 91 +474 203 41 75 +587 180 126 110 +979 242 44 53 +544 229 42 50 +# 26--Soldier_Drilling/26_Soldier_Drilling_Soldiers_Drilling_26_893.jpg +430 113 34 54 +# 26--Soldier_Drilling/26_Soldier_Drilling_Soldiers_Drilling_26_184.jpg +417 299 101 127 +# 26--Soldier_Drilling/26_Soldier_Drilling_Soldiers_Drilling_26_359.jpg +661 334 78 99 +515 681 61 79 +# 26--Soldier_Drilling/26_Soldier_Drilling_Soldiers_Drilling_26_991.jpg +51 144 9 12 +97 146 8 12 +113 136 7 11 +127 132 10 12 +174 123 9 15 +213 119 13 18 +243 121 12 17 +272 112 14 18 +304 94 15 18 +346 104 15 18 +391 114 11 12 +436 103 12 17 +478 99 14 16 +538 100 11 14 +551 95 14 17 +584 85 14 16 +608 92 10 15 +630 85 12 16 +673 78 12 16 +701 76 13 18 +729 83 11 13 +784 65 13 16 +835 61 13 17 +945 50 12 17 +194 118 7 14 +370 113 9 13 +408 118 8 10 +506 89 9 14 +664 81 7 14 +869 95 7 8 +# 26--Soldier_Drilling/26_Soldier_Drilling_Soldiers_Drilling_26_934.jpg +1 169 23 63 +68 164 43 60 +146 127 34 61 +145 389 81 109 +478 59 93 127 +679 214 73 82 +731 172 49 77 +972 212 49 60 +802 551 97 117 +# 26--Soldier_Drilling/26_Soldier_Drilling_Soldiers_Drilling_26_149.jpg +238 220 92 116 +466 188 100 128 +758 236 100 118 +# 26--Soldier_Drilling/26_Soldier_Drilling_Soldiers_Drilling_26_9.jpg +505 222 33 40 +620 212 24 29 +# 26--Soldier_Drilling/26_Soldier_Drilling_Soldiers_Drilling_26_619.jpg +64 238 22 26 +127 249 21 28 +211 147 19 26 +382 251 18 20 +460 209 30 44 +762 298 30 30 +816 243 26 29 +705 247 10 19 +# 26--Soldier_Drilling/26_Soldier_Drilling_Soldiers_Drilling_26_610.jpg +592 46 62 76 +# 26--Soldier_Drilling/26_Soldier_Drilling_Soldiers_Drilling_26_245.jpg +593 280 9 14 +159 257 15 31 +288 354 97 100 +793 522 45 38 +# 26--Soldier_Drilling/26_Soldier_Drilling_Soldiers_Drilling_26_31.jpg +236 187 36 49 +382 175 38 51 +528 211 44 50 +659 197 43 42 +808 158 42 50 +# 27--Spa/27_Spa_Spa_27_420.jpg +56 324 306 212 +# 27--Spa/27_Spa_Spa_27_212.jpg +252 84 466 256 +# 27--Spa/27_Spa_Spa_27_360.jpg +162 260 318 218 +# 27--Spa/27_Spa_Spa_27_691.jpg +366 182 200 210 +# 27--Spa/27_Spa_Spa_27_728.jpg +192 122 682 446 +# 27--Spa/27_Spa_Spa_27_782.jpg +406 170 86 114 +# 27--Spa/27_Spa_Spa_27_329.jpg +449 87 349 434 +# 27--Spa/27_Spa_Spa_27_121.jpg +790 338 88 108 +128 76 70 110 +# 27--Spa/27_Spa_Spa_27_168.jpg +456 382 54 68 +516 395 58 75 +# 27--Spa/27_Spa_Spa_27_851.jpg +451 143 31 44 +722 140 26 50 +859 155 36 47 +14 63 53 110 +682 47 19 21 +# 27--Spa/27_Spa_Spa_27_656.jpg +196 48 56 74 +356 179 22 29 +939 211 
57 76 +# 27--Spa/27_Spa_Spa_27_716.jpg +198 359 29 39 +769 486 18 17 +182 509 21 25 +# 27--Spa/27_Spa_Spa_27_157.jpg +246 42 260 120 +# 27--Spa/27_Spa_Spa_27_393.jpg +542 138 348 380 +# 27--Spa/27_Spa_Spa_27_225.jpg +553 407 36 33 +# 27--Spa/27_Spa_Spa_27_768.jpg +566 246 51 79 +736 316 48 82 +198 332 51 83 +339 268 48 76 +# 27--Spa/27_Spa_Spa_27_38.jpg +510 204 188 248 +# 27--Spa/27_Spa_Spa_27_512.jpg +364 644 89 89 +# 27--Spa/27_Spa_Spa_27_486.jpg +460 189 44 81 +771 33 55 86 +# 27--Spa/27_Spa_Spa_27_322.jpg +38 196 442 328 +# 27--Spa/27_Spa_Spa_27_109.jpg +322 406 148 120 +188 356 114 72 +# 28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_868.jpg +684 31 43 64 +511 246 35 44 +864 176 40 46 +# 28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_590.jpg +47 39 30 36 +30 270 29 35 +199 293 26 45 +238 249 30 35 +549 171 29 36 +549 308 30 33 +598 613 34 34 +771 462 32 38 +916 216 30 32 +# 28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_656.jpg +270 250 517 484 +# 28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_989.jpg +480 116 106 170 +58 210 92 122 +# 28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_327.jpg +168 292 99 108 +655 270 37 33 +735 277 33 37 +829 192 90 102 +722 313 15 15 +775 288 29 32 +982 139 40 58 +953 249 22 25 +1001 276 22 47 +2 282 17 23 +92 309 34 39 +86 248 43 46 +351 262 34 37 +349 99 107 196 +# 28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_90.jpg +414 122 170 246 +# 28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_683.jpg +248 381 315 392 +# 28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_643.jpg +176 64 106 140 +360 46 106 142 +520 140 98 140 +# 28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_751.jpg +523 231 70 82 +190 140 53 57 +14 158 43 56 +127 0 39 51 +# 28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_357.jpg +434 108 180 180 +# 28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_866.jpg +448 130 106 168 +# 28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_959.jpg +132 32 330 426 +496 50 460 656 +# 28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_118.jpg +5 371 74 101 +483 325 93 126 +637 310 99 118 +711 199 31 59 +542 225 42 55 +14 204 76 83 +329 169 28 44 +49 84 30 37 +849 457 48 61 +94 130 33 32 +580 247 21 35 +463 131 47 62 +# 28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_862.jpg +435 125 129 161 +212 491 43 61 +295 508 46 62 +# 28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_535.jpg +184 453 184 208 +317 144 248 285 +29 421 56 69 +# 28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_826.jpg +355 187 241 322 +# 28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_877.jpg +750 88 112 156 +# 28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_124.jpg +123 108 667 931 +# 28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_244.jpg +591 78 50 65 +394 85 54 66 +251 114 54 53 +893 316 26 31 +953 292 32 33 +928 278 22 31 +# 28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_86.jpg +104 816 163 245 +# 28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_480.jpg +978 240 30 34 +696 168 86 107 +435 95 71 140 +490 217 59 73 +8 203 28 34 +668 240 29 41 +539 258 24 26 +173 171 17 21 +911 197 16 12 +# 28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_448.jpg +366 180 88 118 +500 218 90 114 +# 28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_7.jpg +170 95 10 12 +158 99 8 9 +204 101 9 12 +213 95 9 15 +76 47 8 11 +59 33 8 15 +286 39 11 11 +265 127 16 13 +260 95 10 13 +275 85 8 10 +281 99 10 11 +304 110 12 13 +322 51 10 12 +330 92 11 19 +184 37 10 14 +340 29 6 11 +331 22 7 10 +223 12 8 15 +340 17 9 12 +335 0 14 10 +356 47 12 16 +374 50 9 14 +350 99 13 15 +361 126 11 12 +380 32 12 14 +391 82 10 13 +225 84 11 15 +15 196 6 16 +0 1106 58 133 +0 868 28 96 +85 872 69 87 +153 873 45 71 +824 1229 51 111 +878 1151 97 122 +948 858 60 89 
+469 656 117 138 +51 784 52 57 +151 680 47 58 +215 668 27 43 +327 642 38 64 +429 669 45 65 +593 637 29 36 +544 604 31 34 +499 587 18 23 +10 569 25 49 +34 570 17 36 +59 563 15 27 +92 567 36 54 +197 561 26 36 +173 529 18 30 +298 566 23 29 +383 562 28 33 +656 586 19 32 +606 513 15 18 +66 507 21 31 +298 435 20 24 +190 423 22 20 +63 625 20 37 +17 522 15 23 +22 20 10 14 +91 14 12 15 +102 0 9 8 +73 18 10 12 +49 11 10 11 +111 39 14 15 +110 73 13 14 +40 130 11 15 +12 76 12 16 +0 83 4 12 +0 138 7 24 +188 275 7 11 +209 318 9 13 +75 339 10 14 +98 241 10 14 +23 351 10 30 +33 392 17 21 +92 392 19 31 +107 386 18 20 +133 385 13 23 +205 400 16 15 +250 406 16 30 +677 640 16 22 +698 601 19 20 +579 621 26 28 +674 542 11 21 +716 549 16 28 +718 726 47 74 +771 696 22 31 +815 808 30 43 +928 692 39 43 +200 132 12 17 +262 160 11 16 +323 157 13 13 +350 142 11 13 +445 90 9 14 +484 69 9 12 +439 74 7 9 +409 115 12 14 +301 149 9 14 +750 116 10 11 +752 132 7 11 +757 15 9 9 +776 9 8 8 +894 491 11 17 +1003 494 12 19 +737 514 16 21 +879 500 14 14 +668 514 13 17 +778 383 16 15 +808 420 19 18 +846 456 12 14 +879 404 15 16 +863 353 18 19 +914 387 14 16 +936 396 11 19 +939 350 11 14 +963 453 14 18 +970 420 11 18 +1006 421 9 13 +1015 439 9 18 +1008 364 10 14 +1007 341 16 18 +820 365 11 22 +923 369 17 19 +913 444 11 17 +779 280 12 14 +799 262 13 14 +832 329 17 17 +883 302 11 16 +922 312 14 14 +944 279 13 14 +926 284 12 15 +976 298 10 16 +1010 303 13 16 +987 275 12 14 +810 299 11 14 +858 279 8 18 +866 260 10 15 +889 240 13 13 +924 234 13 15 +946 263 6 10 +968 234 7 16 +860 238 8 10 +833 230 10 10 +872 225 11 12 +961 204 11 13 +997 198 8 14 +1006 234 11 13 +815 191 9 13 +895 176 9 15 +813 167 10 11 +862 171 8 8 +942 136 11 13 +918 189 13 13 +706 112 11 13 +741 148 11 11 +690 142 9 12 +996 136 6 11 +967 24 7 8 +984 52 10 10 +748 83 8 11 +946 89 9 12 +944 3 8 11 +889 16 6 10 +979 15 7 10 +999 11 6 11 +975 39 10 11 +972 52 7 11 +847 427 11 14 +891 376 10 12 +891 269 9 15 +867 631 12 19 +902 689 13 22 +921 652 20 23 +988 605 12 30 +647 144 10 10 +688 66 6 8 +609 124 8 11 +632 108 6 9 +653 105 7 8 +623 81 8 11 +632 51 9 14 +657 41 9 13 +619 40 7 12 +623 22 7 11 +635 23 9 10 +641 5 6 9 +668 44 6 9 +674 96 11 13 +703 89 7 12 +712 36 12 13 +684 35 10 15 +712 73 10 11 +405 64 5 11 +455 63 6 10 +467 52 9 12 +465 33 10 13 +444 17 10 14 +434 18 8 13 +474 11 10 12 +493 42 7 10 +476 28 7 13 +522 38 8 10 +554 30 6 12 +564 7 6 10 +551 55 10 16 +503 96 8 12 +517 100 9 13 +537 116 8 13 +549 114 10 14 +522 11 7 14 +393 2 7 10 +413 19 7 7 +393 24 7 10 +508 46 8 9 +481 56 9 13 +571 53 7 10 +588 93 12 14 +605 54 10 13 +683 5 6 9 +550 82 11 12 +608 97 9 10 +580 2 5 10 +608 3 6 13 +592 28 8 9 +590 36 5 10 +559 23 8 11 +573 21 7 12 +578 25 11 13 +775 87 6 11 +720 92 8 11 +729 39 8 10 +540 104 8 11 +522 90 9 14 +507 67 9 13 +564 140 8 14 +181 5 8 10 +173 40 8 9 +214 3 11 11 +155 51 13 13 +145 44 8 12 +134 45 7 12 +427 32 6 10 +501 11 6 9 +513 6 6 7 +528 3 6 9 +144 84 10 13 +834 242 10 11 +841 257 10 10 +820 236 8 13 +821 258 6 12 +804 142 11 11 +842 148 9 9 +820 121 11 12 +836 124 8 10 +840 542 21 26 +820 18 8 10 +795 30 7 8 +876 9 9 11 +904 31 8 9 +917 40 9 10 +932 13 7 10 +920 15 6 11 +963 38 8 11 +980 80 9 14 +953 75 8 10 +848 48 9 11 +860 72 9 11 +847 65 8 12 +828 66 7 9 +885 68 9 12 +915 70 9 12 +955 110 11 11 +976 109 6 9 +974 129 8 11 +969 120 8 11 +963 127 9 14 +932 113 11 12 +914 94 8 9 +902 89 9 11 +875 89 10 13 +878 128 9 10 +916 154 8 11 +907 144 12 12 +912 130 10 9 +941 169 11 13 +925 172 11 11 +989 181 8 11 +1004 120 7 10 +800 128 9 12 +785 
121 9 10 +908 57 10 8 +919 52 7 9 +893 49 6 11 +885 40 5 9 +764 85 8 12 +739 91 8 13 +740 60 7 12 +751 57 10 11 +833 48 8 11 +845 24 9 12 +884 154 8 12 +738 569 28 40 +819 541 20 21 +818 502 15 22 +839 514 10 16 +984 667 27 34 +1006 666 13 18 +997 600 15 20 +917 602 13 20 +913 544 15 26 +980 528 15 26 +93 89 8 14 +147 19 10 11 +130 13 7 9 +246 31 12 13 +133 130 12 13 +163 121 10 16 +49 225 17 21 +45 200 18 18 +86 204 10 15 +96 207 7 10 +77 236 12 19 +56 269 14 15 +27 294 19 22 +41 324 21 23 +140 213 14 14 +164 220 12 13 +205 199 12 19 +159 236 7 13 +174 243 13 15 +143 263 15 19 +132 273 12 19 +91 281 9 14 +139 323 11 19 +223 225 12 15 +211 227 9 10 +205 238 7 9 +196 307 13 16 +221 308 9 11 +120 356 18 21 +255 314 11 17 +217 357 19 18 +264 349 16 20 +283 367 15 14 +297 358 13 18 +271 230 11 12 +294 283 10 14 +271 285 9 12 +# 28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_1018.jpg +380 194 251 275 +# 28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_663.jpg +90 129 826 1024 +# 28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_557.jpg +402 184 98 120 +# 28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_770.jpg +554 244 206 224 +# 28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_487.jpg +463 210 39 43 +1 332 19 48 +45 446 40 58 +130 444 38 47 +0 460 33 47 +57 387 37 47 +170 508 41 49 +671 272 39 53 +163 5 22 35 +207 12 24 33 +161 144 40 45 +213 110 33 41 +108 270 37 44 +159 273 33 45 +50 113 34 54 +35 85 38 44 +147 231 27 37 +220 194 32 39 +326 64 35 43 +817 274 43 51 +824 492 35 51 +914 530 41 55 +798 4 34 37 +604 12 25 21 +544 14 24 37 +838 344 36 45 +797 399 31 49 +859 427 41 45 +663 490 41 59 +546 506 43 46 +620 531 30 48 +746 562 35 43 +815 565 47 59 +726 595 31 30 +427 477 41 51 +188 390 34 38 +990 337 33 42 +1002 415 22 57 +992 114 32 38 +195 311 37 40 +425 205 26 30 +604 182 34 47 +665 153 34 45 +735 236 40 45 +964 490 36 47 +912 452 33 45 +882 306 37 50 +954 352 41 45 +966 281 39 51 +874 171 36 38 +938 150 37 40 +953 213 40 44 +861 242 35 44 +262 555 42 51 +338 559 38 36 +406 582 37 43 +457 510 41 55 +549 567 44 51 +488 384 44 45 +511 437 34 43 +593 445 43 40 +568 340 33 49 +590 382 42 51 +618 317 34 41 +785 84 34 42 +675 12 29 38 +733 284 35 46 +709 323 39 49 +557 241 29 43 +454 289 38 44 +479 256 33 44 +418 259 30 46 +787 215 42 50 +427 66 31 39 +471 35 27 43 +411 21 30 43 +620 94 33 42 +670 81 28 40 +713 108 32 40 +343 1 34 36 +48 0 25 10 +849 0 26 23 +893 0 31 22 +# 28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_198.jpg +325 203 80 123 +736 309 75 112 +837 309 83 109 +# 28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_130.jpg +64 28 860 660 +# 28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_144.jpg +117 236 665 826 +# 28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_835.jpg +231 308 166 213 +648 363 135 189 +# 28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_165.jpg +122 98 122 162 +268 128 86 120 +514 108 118 172 +692 164 118 192 +# 28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_126.jpg +310 256 74 86 +458 206 88 100 +416 374 72 80 +# 28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_711.jpg +436 132 132 178 +# 28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_22.jpg +470 242 170 244 +# 28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_782.jpg +406 88 186 232 +608 68 116 216 +# 28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_792.jpg +378 128 146 190 +# 28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_880.jpg +424 98 148 206 +# 28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_265.jpg +92 168 270 352 +500 84 218 344 +# 28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_282.jpg +292 184 466 652 +# 28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_2.jpg +220 320 124 150 +622 216 146 186 +# 
28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_267.jpg +347 332 18 21 +292 267 11 18 +327 277 13 18 +358 258 18 21 +354 228 18 22 +404 205 17 22 +419 245 21 25 +444 226 20 27 +439 387 17 21 +504 346 18 25 +503 325 17 19 +563 372 15 19 +595 403 17 23 +633 346 18 25 +625 282 18 24 +669 257 15 19 +681 242 14 18 +632 197 21 20 +339 195 20 18 +458 197 17 17 +314 219 16 21 +623 328 8 12 +544 219 14 14 +# 28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_723.jpg +284 50 446 472 +# 28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_507.jpg +154 144 82 102 +380 110 68 94 +588 112 64 108 +796 124 76 108 +# 28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_697.jpg +509 349 187 133 +# 29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_221.jpg +227 221 35 59 +263 228 25 38 +288 235 18 27 +380 221 24 46 +441 268 23 54 +523 192 13 21 +502 210 7 8 +494 203 6 8 +541 194 6 8 +559 204 6 7 +567 197 5 7 +575 197 6 8 +716 196 8 10 +734 192 9 11 +757 195 5 7 +768 194 7 9 +784 183 6 7 +813 216 6 7 +759 217 8 13 +160 239 14 21 +# 29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_463.jpg +543 290 157 216 +234 583 154 213 +# 29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_363.jpg +581 167 68 72 +507 119 60 68 +# 29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_374.jpg +690 130 29 35 +675 182 20 24 +471 89 11 18 +391 119 22 26 +416 287 51 35 +452 501 40 22 +# 29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_311.jpg +684 409 22 18 +829 426 10 11 +447 366 12 14 +446 396 14 17 +300 381 14 15 +296 374 11 11 +234 380 18 20 +203 380 14 15 +47 374 15 17 +120 371 12 13 +0 362 9 15 +# 29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_158.jpg +593 391 48 60 +549 499 44 59 +687 170 11 13 +445 167 9 12 +575 705 73 83 +# 29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_316.jpg +883 103 85 91 +635 192 109 134 +552 113 87 101 +288 128 77 90 +166 166 104 112 +28 141 75 78 +939 3 66 66 +827 12 84 86 +708 0 62 54 +759 2 53 56 +63 0 36 37 +5 63 60 63 +95 3 60 60 +158 24 52 53 +202 27 44 47 +256 20 73 79 +327 26 55 59 +407 5 46 45 +500 0 75 59 +564 0 44 62 +654 0 38 42 +485 25 35 40 +# 29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_211.jpg +570 138 108 154 +# 29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_130.jpg +106 268 48 56 +378 165 38 56 +528 244 37 40 +239 245 33 32 +324 207 12 19 +681 492 37 54 +614 420 43 66 +470 199 25 38 +# 29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_10.jpg +457 293 18 23 +511 285 11 13 +558 280 14 16 +576 336 22 29 +525 375 24 30 +482 365 23 32 +464 330 20 26 +422 353 23 30 +659 366 23 32 +627 350 23 25 +525 319 16 21 +596 290 12 17 +484 297 14 21 +525 276 11 13 +# 29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_222.jpg +42 32 106 200 +264 148 104 150 +532 276 52 62 +656 202 72 118 +732 194 78 166 +# 29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_251.jpg +108 204 63 66 +223 195 47 56 +50 318 59 59 +49 572 72 73 +265 471 64 80 +378 487 72 67 +212 261 68 75 +316 329 55 71 +375 277 51 54 +374 203 52 50 +406 116 53 53 +501 224 67 81 +563 367 56 61 +639 167 52 76 +902 216 66 69 +900 96 41 63 +748 506 69 76 +733 295 67 87 +658 269 58 75 +510 141 46 63 +484 476 59 69 +# 29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_524.jpg +428 67 42 45 +212 36 34 43 +185 159 46 48 +128 237 55 33 +418 317 43 40 +562 312 41 36 +612 359 45 44 +677 543 25 57 +171 450 47 47 +916 61 
8 10 +# 29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_161.jpg +572 166 113 147 +842 86 93 104 +521 155 75 94 +368 183 24 37 +# 29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_432.jpg +894 146 86 108 +598 2 72 52 +# 29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_84.jpg +638 276 164 126 +540 432 112 144 +304 420 150 120 +408 68 122 150 +566 140 132 126 +284 234 142 112 +# 29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_632.jpg +798 261 78 108 +532 288 74 96 +356 301 55 89 +272 348 50 86 +90 290 54 72 +# 29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_941.jpg +570 190 122 148 +308 228 100 102 +# 29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_358.jpg +939 422 24 25 +886 402 20 19 +844 411 21 23 +801 407 18 24 +771 392 20 20 +712 371 20 24 +672 391 19 21 +625 379 18 22 +575 393 16 20 +544 381 18 20 +534 420 16 19 +497 424 18 20 +490 384 16 19 +473 435 17 18 +456 431 14 13 +433 404 17 14 +430 431 11 14 +400 426 14 13 +389 423 14 16 +361 425 18 18 +339 445 18 18 +310 440 15 18 +283 439 17 18 +248 435 17 18 +268 366 17 19 +# 29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_312.jpg +492 62 98 124 +# 29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_590.jpg +382 97 108 125 +396 27 62 68 +158 23 43 48 +# 29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_66.jpg +15 135 27 48 +109 129 26 47 +322 192 26 51 +321 106 38 50 +189 86 32 48 +397 105 35 46 +447 170 36 45 +647 127 32 48 +698 197 28 57 +473 125 32 45 +818 177 20 40 +173 149 22 42 +0 164 14 50 +# 29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_451.jpg +255 357 445 585 +# 29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_74.jpg +600 272 204 208 +294 296 168 170 +# 29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_250.jpg +667 79 102 119 +72 168 21 25 +133 186 16 21 +# 29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_626.jpg +771 339 107 138 +984 255 40 91 +968 164 56 64 +863 152 25 39 +576 150 57 84 +510 164 32 38 +314 167 36 44 +156 175 71 95 +88 180 33 46 +0 398 124 152 +# 29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_21.jpg +9 72 28 39 +75 63 35 45 +114 57 37 45 +72 127 75 100 +207 107 53 62 +316 94 53 73 +452 56 43 59 +459 149 109 134 +433 35 24 37 +404 51 24 34 +740 206 130 169 +616 56 54 71 +579 16 29 33 +729 38 33 44 +765 62 51 66 +877 13 34 47 +921 36 42 53 +946 66 50 65 +151 82 52 65 +466 21 29 35 +499 48 30 27 +581 51 30 45 +502 79 50 73 +806 19 27 36 +665 28 26 36 +726 13 36 31 +985 36 34 34 +# 29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_585.jpg +462 56 77 78 +113 168 43 51 +# 29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_132.jpg +32 12 188 192 +470 80 160 192 +734 158 142 186 +# 29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_761.jpg +650 56 124 166 +# 29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_822.jpg +589 270 79 94 +468 54 80 103 +336 265 79 95 +200 266 15 22 +# 29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_902.jpg +432 113 111 144 +286 281 60 77 +714 308 62 84 +885 353 48 58 +971 377 39 47 +# 29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_42.jpg +242 228 193 248 +185 164 26 32 +563 264 18 29 +518 267 16 27 +226 192 31 40 +442 194 22 38 +# 
29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_491.jpg +613 232 24 29 +498 213 24 30 +419 238 24 29 +# 29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_494.jpg +293 322 64 63 +345 244 57 57 +463 157 33 35 +383 163 34 39 +439 158 22 32 +353 166 21 31 +543 145 25 32 +721 137 28 35 +782 128 33 38 +820 132 24 32 +608 199 25 33 +281 28 43 49 +655 147 20 33 +628 152 24 36 +561 161 26 45 +# 29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_81.jpg +80 255 42 48 +189 172 39 51 +784 135 108 141 +696 302 34 46 +# 29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_146.jpg +196 116 136 164 +598 156 110 130 +754 302 152 166 +# 29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_380.jpg +408 214 142 148 +# 29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_208.jpg +487 136 46 73 +379 66 50 69 +520 77 63 35 +# 29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_436.jpg +344 176 130 162 +# 29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_900.jpg +280 106 212 272 +# 29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_525.jpg +356 98 80 98 +196 26 64 86 +608 156 182 234 +# 29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_489.jpg +820 63 59 69 +617 146 86 101 +439 240 119 135 +518 17 43 50 +587 2 24 42 +287 22 29 43 +317 12 24 37 +105 20 49 58 +405 70 37 43 +18 178 58 107 +# 29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_126.jpg +12 12 395 491 +623 10 374 448 +# 29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_684.jpg +456 42 47 62 +211 66 49 55 +787 478 7 9 +988 474 7 6 +955 457 6 7 +294 512 7 9 +322 520 4 6 +# 29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_477.jpg +876 297 65 81 +875 212 47 58 +848 141 40 54 +832 103 35 47 +726 80 39 43 +432 103 50 62 +870 369 54 84 +54 303 43 103 +# 29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_624.jpg +798 253 7 10 +760 261 8 13 +557 285 18 24 +508 251 6 9 +145 148 47 58 +406 254 6 7 +343 248 6 8 +284 256 6 8 +252 253 6 10 +101 264 7 8 +61 260 5 7 +40 271 7 9 +959 251 8 9 +# 29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_310.jpg +100 212 64 76 +284 188 54 84 +468 198 60 70 +654 188 58 76 +834 218 60 68 +758 188 90 112 +542 154 114 116 +358 206 114 138 +# 29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_506.jpg +356 104 310 418 +# 29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_148.jpg +220 158 182 230 +# 3--Riot/3_Riot_Riot_3_521.jpg +48 236 78 80 +152 234 72 76 +266 202 72 74 +374 242 86 102 +480 304 98 96 +554 178 128 152 +872 170 140 184 +# 3--Riot/3_Riot_Riot_3_604.jpg +76 132 144 158 +240 78 130 174 +704 62 112 180 +492 232 186 304 +# 3--Riot/3_Riot_Riot_3_522.jpg +409 212 27 39 +492 156 14 22 +414 151 12 18 +442 151 9 21 +184 200 25 35 +118 167 23 26 +218 212 21 36 +8 149 17 29 +374 154 9 15 +222 136 9 11 +271 178 7 11 +622 213 33 34 +879 203 31 35 +453 154 10 14 +# 3--Riot/3_Riot_Riot_3_415.jpg +290 237 18 27 +308 281 21 28 +445 224 16 30 +518 270 22 25 +394 268 20 30 +141 211 22 29 +555 263 11 21 +749 258 16 28 +845 419 23 32 +979 360 15 30 +799 240 16 27 +907 216 15 27 +872 396 24 20 +715 236 17 26 +658 246 15 24 +677 461 15 16 +659 304 20 27 +# 3--Riot/3_Riot_Riot_3_318.jpg +190 188 160 168 +718 80 146 156 +86 312 122 154 +410 92 82 100 +# 3--Riot/3_Riot_Riot_3_199.jpg +856 255 14 27 +428 316 29 37 +192 230 26 34 
+84 157 18 24 +115 118 15 22 +93 92 9 14 +51 65 6 8 +25 57 5 5 +136 65 4 9 +166 65 6 8 +252 74 7 7 +276 76 5 7 +111 264 33 52 +68 281 36 68 +478 274 22 34 +425 269 20 30 +331 181 17 19 +394 194 19 25 +344 221 14 21 +372 107 8 13 +389 103 10 13 +463 98 7 9 +527 185 14 22 +608 170 10 17 +637 164 6 12 +663 171 10 19 +476 146 14 17 +495 106 8 13 +537 140 14 16 +585 143 9 10 +554 138 12 13 +647 144 10 12 +608 135 8 11 +614 150 11 13 +547 165 10 16 +716 185 13 18 +713 166 13 17 +771 181 13 20 +819 188 13 21 +897 190 13 21 +924 193 14 20 +872 191 11 18 +857 205 12 18 +955 183 15 18 +847 176 12 17 +882 165 11 16 +972 196 12 21 +824 168 10 12 +812 169 10 15 +791 173 16 22 +844 156 9 16 +897 222 17 16 +786 227 10 18 +1011 187 12 23 +979 330 26 42 +757 173 11 18 +# 3--Riot/3_Riot_Riot_3_488.jpg +476 126 94 110 +790 62 90 130 +128 62 98 136 +# 3--Riot/3_Riot_Riot_3_354.jpg +393 240 23 54 +406 161 23 32 +498 176 22 34 +239 151 19 27 +536 103 12 16 +620 99 14 18 +701 104 13 21 +565 138 11 24 +916 109 18 23 +# 3--Riot/3_Riot_Riot_3_101.jpg +561 232 47 61 +726 333 55 65 +891 226 57 65 +792 67 46 57 +426 292 50 53 +167 72 54 34 +# 3--Riot/3_Riot_Riot_3_666.jpg +522 124 156 202 +314 92 148 210 +# 3--Riot/3_Riot_Riot_3_263.jpg +547 174 18 20 +437 163 17 23 +421 164 14 23 +389 163 14 23 +344 140 18 24 +276 141 13 20 +350 172 11 17 +866 223 20 27 +859 213 12 17 +927 213 12 16 +189 124 31 25 +477 167 21 25 +208 142 15 20 +7 131 22 26 +# 3--Riot/3_Riot_Riot_3_506.jpg +186 163 86 108 +446 254 35 47 +# 3--Riot/3_Riot_Riot_3_772.jpg +496 138 168 244 +# 3--Riot/3_Riot_Riot_3_186.jpg +208 37 67 59 +791 139 20 24 +388 44 43 52 +549 35 40 55 +613 0 37 26 +# 3--Riot/3_Riot_Riot_3_542.jpg +393 245 9 11 +368 255 11 16 +356 235 6 9 +382 241 5 8 +298 254 11 14 +256 264 15 19 +577 265 7 14 +595 259 9 13 +542 256 7 10 +576 374 11 11 +144 184 7 11 +128 192 7 7 +103 190 7 10 +76 185 9 12 +20 189 7 10 +54 285 12 23 +72 267 14 23 +104 259 15 17 +143 258 12 18 +124 257 14 19 +48 259 11 14 +5 266 8 21 +14 253 13 15 +864 158 6 7 +964 140 7 11 +900 272 8 10 +945 276 9 12 +838 273 12 16 +830 264 8 12 +805 275 11 17 +794 261 8 10 +786 264 8 11 +769 265 9 11 +760 262 8 9 +745 273 9 10 +698 278 8 12 +723 258 9 14 +708 257 10 11 +689 260 9 11 +658 274 9 8 +671 259 9 11 +638 232 9 11 +626 229 4 7 +632 267 6 11 +614 259 8 12 +928 323 11 26 +1012 270 8 10 +953 265 11 18 +891 264 7 10 +331 212 5 5 +317 212 4 5 +304 209 3 4 +182 190 7 8 +203 189 8 9 +176 259 15 24 +229 257 18 20 +209 244 15 24 +357 266 15 18 +197 341 24 29 +424 394 18 22 +456 398 18 35 +318 274 10 19 +543 279 9 11 +493 270 7 13 +478 273 10 16 +459 260 11 17 +477 250 9 10 +509 252 8 9 +550 230 5 7 +595 226 7 6 +495 238 5 5 +419 268 11 15 +401 276 8 10 +420 249 9 16 +441 246 9 15 +392 232 9 9 +# 3--Riot/3_Riot_Riot_3_436.jpg +260 52 142 208 +# 3--Riot/3_Riot_Riot_3_306.jpg +263 142 63 65 +479 191 57 60 +659 169 71 71 +746 81 63 85 +924 67 59 79 +145 151 22 23 +472 134 35 54 +324 130 16 25 +238 189 29 43 +289 235 35 72 +841 130 12 17 +# 3--Riot/3_Riot_Riot_3_716.jpg +104 66 88 144 +352 138 86 120 +736 80 82 130 +# 3--Riot/3_Riot_Riot_3_790.jpg +207 277 26 30 +245 256 15 19 +262 240 16 19 +328 260 13 16 +431 269 14 16 +511 260 18 20 +485 263 10 14 +812 267 19 24 +786 251 17 20 +929 275 14 15 +987 274 15 17 +345 410 39 54 +# 3--Riot/3_Riot_Riot_3_689.jpg +620 120 124 168 +320 240 112 128 +184 82 120 118 +# 3--Riot/3_Riot_Riot_3_710.jpg +491 177 56 64 +616 71 59 73 +29 184 28 32 +0 193 26 35 +292 82 37 54 +743 12 33 66 +153 199 16 19 +# 3--Riot/3_Riot_Riot_3_393.jpg +727 177 11 16 
+672 159 10 18 +442 131 37 45 +542 116 21 31 +787 153 14 18 +814 164 11 15 +853 173 12 16 +168 156 10 14 +223 130 14 19 +344 152 10 16 +330 125 11 22 +125 143 14 16 +195 147 6 9 +# 3--Riot/3_Riot_Riot_3_725.jpg +387 278 25 34 +389 232 21 27 +635 217 42 41 +262 237 22 26 +279 216 19 22 +306 222 21 25 +297 250 19 23 +324 242 17 24 +410 234 17 23 +418 267 20 29 +461 240 23 30 +519 281 21 28 +816 247 23 33 +728 262 22 30 +736 230 18 24 +865 146 12 18 +902 156 10 14 +940 148 13 16 +542 122 16 21 +687 241 20 21 +# 3--Riot/3_Riot_Riot_3_123.jpg +592 478 68 58 +# 3--Riot/3_Riot_Riot_3_963.jpg +237 80 525 680 +# 3--Riot/3_Riot_Riot_3_993.jpg +280 50 106 142 +544 28 92 138 +556 346 102 150 +250 358 112 170 +# 3--Riot/3_Riot_Riot_3_958.jpg +864 17 34 47 +925 77 20 31 +782 41 23 28 +824 49 19 24 +773 44 16 26 +706 28 27 37 +666 54 19 26 +522 44 24 36 +500 32 22 30 +607 47 15 22 +14 9 37 43 +109 33 24 30 +404 23 34 53 +337 33 19 30 +372 32 25 32 +237 11 24 35 +315 37 18 27 +306 28 16 27 +206 27 24 25 +137 4 25 33 +71 2 22 31 +53 24 25 33 +565 122 61 58 +183 27 21 25 +399 19 25 34 +950 62 29 35 +972 20 21 29 +# 3--Riot/3_Riot_Riot_3_1037.jpg +70 152 78 76 +104 204 68 61 +300 154 31 47 +602 181 29 36 +680 135 30 46 +717 155 26 26 +770 173 29 38 +56 493 52 67 +672 410 46 59 +9 710 84 93 +973 211 34 41 +320 449 8 12 +332 445 8 10 +279 453 9 13 +420 453 11 14 +448 454 8 9 +434 445 9 11 +359 899 205 125 +# 3--Riot/3_Riot_Riot_3_273.jpg +174 97 45 58 +400 99 39 52 +306 235 26 35 +238 323 28 34 +349 336 20 30 +379 323 16 22 +103 419 37 53 +462 384 45 55 +65 173 19 22 +# 3--Riot/3_Riot_Riot_3_750.jpg +526 124 336 402 +# 3--Riot/3_Riot_Riot_3_166.jpg +451 345 45 53 +# 3--Riot/3_Riot_Riot_3_405.jpg +300 232 228 276 +62 490 192 224 +694 2 210 204 +# 3--Riot/3_Riot_Riot_3_137.jpg +489 320 38 58 +661 360 42 58 +921 351 60 65 +976 320 20 35 +924 312 29 35 +789 318 17 28 +777 315 14 25 +547 302 11 13 +528 310 9 13 +461 306 18 27 +994 277 9 9 +979 284 7 10 +962 274 7 7 +901 328 15 21 +749 305 9 15 +761 288 9 15 +672 300 15 17 +588 289 7 13 +784 298 22 26 +809 296 10 11 +277 365 36 44 +337 385 49 66 +402 335 28 41 +349 324 18 25 +110 409 28 46 +141 404 20 44 +60 386 39 40 +233 319 11 14 +338 330 9 13 +373 316 10 12 +18 311 9 9 +25 328 10 12 +26 354 14 25 +152 332 8 11 +154 346 13 17 +144 349 12 13 +134 340 8 10 +966 309 10 14 +947 279 7 9 +950 288 7 13 +1009 288 8 10 +265 382 22 34 +190 316 8 7 +0 329 6 13 +573 298 6 8 +566 295 5 6 +717 313 10 16 +259 341 19 22 +252 329 16 23 +814 341 22 37 +748 292 11 14 +654 348 34 50 +# 3--Riot/3_Riot_Riot_3_26.jpg +424 374 31 40 +717 386 32 48 +674 354 36 32 +790 341 23 29 +756 430 14 31 +853 385 19 43 +359 224 25 33 +470 213 22 27 +561 235 25 35 +677 229 24 31 +649 212 21 30 +660 62 21 32 +540 97 19 25 +508 73 22 29 +467 75 19 29 +358 69 25 35 +297 50 22 31 +246 91 27 31 +193 43 19 30 +204 210 30 36 +253 224 26 42 +285 212 17 27 +429 222 19 23 +234 350 32 53 +257 331 27 34 +277 354 27 41 +322 355 28 42 +416 347 31 42 +338 419 24 42 +216 442 11 26 +93 356 26 36 +55 197 28 30 +51 246 31 33 +22 104 15 21 +849 32 19 32 +894 49 20 26 +1003 44 14 21 +939 160 20 38 +989 188 25 35 +790 219 22 28 +903 160 21 29 +763 41 17 23 +818 8 19 28 +941 3 20 20 +976 22 15 20 +645 0 23 20 +791 67 22 27 +861 345 27 33 +866 412 23 39 +940 406 22 44 +955 371 27 35 +958 414 24 46 +130 434 21 24 +726 357 23 35 +644 344 19 34 +778 393 27 32 +981 410 11 37 +782 367 28 36 +# 3--Riot/3_Riot_Riot_3_184.jpg +608 177 20 25 +691 169 20 30 +729 173 19 23 +753 160 23 33 +916 152 35 44 +968 171 31 39 +68 66 45 63 +212 
84 32 57 +127 164 29 40 +# 3--Riot/3_Riot_Riot_3_438.jpg +836 209 42 39 +653 145 17 22 +451 151 18 23 +216 163 15 29 +273 189 8 12 +295 194 6 10 +# 3--Riot/3_Riot_Riot_3_765.jpg +695 340 106 146 +100 134 195 286 +# 3--Riot/3_Riot_Riot_3_480.jpg +164 56 106 168 +502 140 96 152 +750 242 76 92 +# 3--Riot/3_Riot_Riot_3_322.jpg +243 14 120 156 +# 3--Riot/3_Riot_Riot_3_106.jpg +670 176 194 212 +# 30--Surgeons/30_Surgeons_Surgeons_30_107.jpg +263 372 238 205 +673 314 212 222 +# 30--Surgeons/30_Surgeons_Surgeons_30_491.jpg +244 74 250 274 +530 120 338 236 +# 30--Surgeons/30_Surgeons_Surgeons_30_862.jpg +441 186 195 267 +# 30--Surgeons/30_Surgeons_Surgeons_30_819.jpg +357 21 54 77 +465 23 45 63 +579 39 52 73 +# 30--Surgeons/30_Surgeons_Surgeons_30_256.jpg +670 466 116 136 +690 200 166 100 +436 34 122 120 +268 256 150 88 +322 434 146 142 +# 30--Surgeons/30_Surgeons_Surgeons_30_708.jpg +340 226 88 138 +668 194 108 158 +# 30--Surgeons/30_Surgeons_Surgeons_30_533.jpg +495 257 51 67 +# 30--Surgeons/30_Surgeons_Surgeons_30_988.jpg +324 168 535 766 +# 30--Surgeons/30_Surgeons_Surgeons_30_77.jpg +394 252 164 240 +112 108 260 184 +694 12 262 192 +766 252 168 178 +# 30--Surgeons/30_Surgeons_Surgeons_30_63.jpg +226 196 130 148 +448 48 172 180 +# 30--Surgeons/30_Surgeons_Surgeons_30_95.jpg +365 200 53 67 +561 155 49 61 +593 191 58 88 +758 227 54 103 +# 30--Surgeons/30_Surgeons_Surgeons_30_554.jpg +766 214 44 56 +847 226 53 64 +477 219 47 63 +336 252 64 66 +202 224 70 98 +643 335 67 95 +# 30--Surgeons/30_Surgeons_Surgeons_30_264.jpg +201 63 144 234 +483 246 123 183 +730 117 132 198 +# 30--Surgeons/30_Surgeons_Surgeons_30_8.jpg +306 182 58 74 +412 166 68 100 +588 188 62 76 +706 136 54 82 +# 30--Surgeons/30_Surgeons_Surgeons_30_749.jpg +358 32 92 102 +# 30--Surgeons/30_Surgeons_Surgeons_30_932.jpg +379 137 277 373 +# 30--Surgeons/30_Surgeons_Surgeons_30_115.jpg +79 256 43 57 +199 278 60 61 +311 277 70 90 +477 307 71 90 +560 207 86 81 +847 257 77 150 +# 30--Surgeons/30_Surgeons_Surgeons_30_40.jpg +396 136 174 226 +# 30--Surgeons/30_Surgeons_Surgeons_30_43.jpg +660 268 230 238 +304 96 242 272 +8 2 214 250 +# 30--Surgeons/30_Surgeons_Surgeons_30_696.jpg +490 187 213 310 +# 30--Surgeons/30_Surgeons_Surgeons_30_746.jpg +152 40 110 188 +632 68 134 150 +854 164 90 178 +# 30--Surgeons/30_Surgeons_Surgeons_30_397.jpg +200 170 102 122 +476 128 100 140 +680 186 104 114 +# 30--Surgeons/30_Surgeons_Surgeons_30_914.jpg +215 147 512 693 +# 30--Surgeons/30_Surgeons_Surgeons_30_525.jpg +438 80 84 106 +620 98 58 80 +218 156 92 182 +# 30--Surgeons/30_Surgeons_Surgeons_30_482.jpg +356 350 408 509 +# 30--Surgeons/30_Surgeons_Surgeons_30_122.jpg +257 268 25 46 +279 305 33 40 +522 288 40 45 +738 274 38 39 +852 432 32 30 +# 30--Surgeons/30_Surgeons_Surgeons_30_555.jpg +138 37 55 68 +228 119 48 69 +321 46 49 64 +421 36 51 66 +537 48 58 75 +# 30--Surgeons/30_Surgeons_Surgeons_30_486.jpg +328 141 397 538 +# 30--Surgeons/30_Surgeons_Surgeons_30_552.jpg +214 257 27 44 +384 224 32 37 +228 325 34 22 +# 30--Surgeons/30_Surgeons_Surgeons_30_705.jpg +121 382 27 41 +545 323 23 35 +# 30--Surgeons/30_Surgeons_Surgeons_30_490.jpg +290 333 496 734 +# 30--Surgeons/30_Surgeons_Surgeons_30_343.jpg +806 82 78 100 +694 32 72 88 +540 44 54 64 +334 0 62 84 +186 142 78 108 +# 30--Surgeons/30_Surgeons_Surgeons_30_722.jpg +124 97 46 59 +168 27 54 76 +455 139 49 53 +507 39 55 74 +810 91 56 74 +856 29 65 87 +# 30--Surgeons/30_Surgeons_Surgeons_30_861.jpg +309 261 456 580 +# 30--Surgeons/30_Surgeons_Surgeons_30_840.jpg +316 328 355 493 +# 
30--Surgeons/30_Surgeons_Surgeons_30_778.jpg +193 62 39 53 +320 92 46 60 +677 149 50 71 +785 166 47 65 +# 30--Surgeons/30_Surgeons_Surgeons_30_979.jpg +326 206 433 613 +# 30--Surgeons/30_Surgeons_Surgeons_30_160.jpg +516 170 62 94 +790 250 58 60 +298 114 52 110 +# 30--Surgeons/30_Surgeons_Surgeons_30_911.jpg +574 408 148 236 +# 30--Surgeons/30_Surgeons_Surgeons_30_823.jpg +448 132 88 138 +662 140 86 100 +946 246 74 86 +# 30--Surgeons/30_Surgeons_Surgeons_30_865.jpg +682 4 118 120 +# 31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_932.jpg +472 288 100 146 +# 31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_304.jpg +396 158 236 332 +680 110 256 320 +86 120 216 282 +# 31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_667.jpg +280 105 72 130 +704 127 82 97 +# 31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_769.jpg +430 76 142 180 +# 31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_740.jpg +408 100 88 112 +# 31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_162.jpg +453 333 243 320 +# 31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_21.jpg +316 96 164 230 +# 31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_93.jpg +146 218 112 148 +588 74 118 162 +# 31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_613.jpg +826 216 54 64 +# 31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_484.jpg +334 160 116 166 +570 56 122 188 +# 31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_339.jpg +400 42 128 196 +576 80 124 176 +# 31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_818.jpg +436 173 77 105 +183 129 111 145 +144 136 30 38 +721 148 68 70 +576 72 73 86 +644 95 57 68 +774 66 59 67 +665 52 45 51 +# 31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_788.jpg +457 0 62 62 +717 218 30 84 +249 186 33 64 +169 195 41 67 +107 229 32 62 +902 132 27 37 +# 31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_227.jpg +260 90 148 192 +619 213 145 204 +# 31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_43.jpg +300 50 108 158 +658 26 114 172 +# 31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_200.jpg +158 60 124 164 +678 50 120 162 +# 31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_327.jpg +453 344 112 171 +304 349 99 141 +# 31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_215.jpg +304 154 120 176 +582 60 136 192 +# 31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_212.jpg +582 162 124 102 +# 31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_34.jpg +420 57 168 267 +# 31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_230.jpg +386 206 122 116 +# 31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_927.jpg +420 222 237 300 +# 31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_118.jpg +366 84 170 210 +# 31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_225.jpg +206 86 132 152 +714 60 120 176 +# 31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_220.jpg +242 74 114 166 +678 74 116 158 +# 31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_465.jpg +272 236 122 176 +658 102 130 172 +# 31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_276.jpg +199 259 564 805 +# 31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_742.jpg +491 229 41 49 +# 31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_420.jpg +41 221 20 27 +122 297 16 24 +147 267 14 19 +186 292 14 26 +194 290 21 23 +239 316 20 26 +280 320 8 21 +334 301 20 30 +374 302 21 26 +409 236 18 26 +419 220 17 26 +465 183 29 42 +492 221 21 24 +537 207 18 27 +535 
232 21 26 +549 219 25 38 +583 225 28 28 +651 223 29 32 +691 243 27 32 +695 211 25 28 +869 314 22 25 +890 313 20 30 +916 214 28 38 +1020 240 4 30 +# 31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_572.jpg +663 117 145 194 +311 65 149 197 +# 31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_726.jpg +288 94 142 180 +700 66 136 202 +# 31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_214.jpg +168 44 116 154 +334 84 120 134 +828 58 126 172 +# 31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_858.jpg +392 57 126 168 +# 31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_410.jpg +464 80 122 176 +# 31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_683.jpg +552 92 132 150 +# 31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_847.jpg +385 95 240 296 +# 31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_685.jpg +524 62 80 108 +828 58 84 110 +150 50 78 104 +# 31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_722.jpg +469 72 155 213 +# 31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_267.jpg +458 122 150 198 +# 31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_188.jpg +571 344 96 144 +304 229 99 125 +# 31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_195.jpg +183 57 147 198 +617 91 143 206 +# 31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_517.jpg +354 112 124 180 +# 31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_842.jpg +343 134 175 247 +743 574 175 244 +# 31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_176.jpg +116 52 254 316 +# 31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_111.jpg +192 128 156 190 +690 76 124 182 +# 31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_358.jpg +802 185 84 65 +500 290 67 93 +310 284 60 70 +175 318 36 57 +136 260 19 25 +683 368 24 27 +609 384 21 33 +# 31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_888.jpg +293 217 179 239 +# 31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_373.jpg +80 6 302 328 +# 31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_720.jpg +574 24 200 260 +# 31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_351.jpg +871 79 47 62 +299 176 141 175 +144 149 18 37 +844 159 10 20 +160 155 22 29 +# 31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_915.jpg +466 186 114 176 +# 32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_812.jpg +388 72 88 130 +# 32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_529.jpg +481 268 65 102 +# 32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_932.jpg +277 89 76 108 +957 783 53 67 +99 781 66 76 +70 850 72 62 +80 910 75 84 +246 790 50 63 +276 859 71 95 +357 832 61 67 +369 772 48 60 +520 808 53 76 +645 930 55 66 +685 775 53 65 +809 796 65 86 +860 783 46 67 +# 32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_987.jpg +766 52 54 80 +612 58 56 80 +270 212 60 78 +# 32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_101.jpg +202 26 98 126 +576 110 92 142 +776 512 104 144 +# 32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_723.jpg +299 83 51 71 +432 143 49 93 +169 0 57 40 +6 202 42 86 +782 49 39 77 +936 134 47 90 +498 185 40 84 +# 32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_135.jpg +316 87 327 442 +# 32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_357.jpg +206 188 64 88 +718 404 82 112 +# 32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_658.jpg +490 410 15 17 +552 425 16 14 +618 442 17 17 +680 449 16 18 +# 32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_944.jpg +424 356 74 108 +# 
32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_624.jpg +255 337 109 162 +# 32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_42.jpg +381 234 168 237 +# 32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_692.jpg +202 66 112 154 +440 132 70 118 +686 128 54 88 +862 154 66 82 +# 32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_530.jpg +614 164 118 154 +262 110 110 146 +# 32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_44.jpg +513 263 221 328 +# 32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_870.jpg +483 305 72 109 +# 32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_494.jpg +286 181 463 642 +# 32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_169.jpg +390 84 230 340 +# 32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_786.jpg +477 522 167 233 +# 32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_468.jpg +352 86 104 166 +# 32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_788.jpg +452 180 162 236 +# 32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_594.jpg +656 160 62 82 +# 32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_516.jpg +45 24 76 94 +226 132 29 27 +257 110 34 46 +297 118 44 61 +368 147 17 23 +418 122 31 39 +518 101 40 48 +678 160 17 24 +728 145 25 24 +757 97 42 54 +845 120 29 30 +878 144 35 43 +960 53 64 84 +279 320 48 32 +343 318 47 31 +479 350 39 22 +574 363 45 27 +672 329 48 34 +763 338 59 37 +591 126 31 29 +# 32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_1039.jpg +580 311 208 275 +# 32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_134.jpg +465 333 96 138 +# 32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_512.jpg +424 75 93 139 +# 32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_209.jpg +736 136 80 98 +# 32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_1038.jpg +431 180 119 132 +# 32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_462.jpg +440 178 84 106 +# 32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_860.jpg +352 330 364 542 +# 32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_400.jpg +697 182 47 52 +# 32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_443.jpg +516 24 404 662 +# 32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_170.jpg +474 576 134 104 +# 32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_110.jpg +698 55 94 100 +490 333 42 39 +169 242 17 20 +204 139 18 14 +107 193 19 15 +455 12 20 26 +281 152 20 10 +# 32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_262.jpg +401 401 142 192 +# 32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_26.jpg +202 257 36 38 +# 32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_204.jpg +396 232 130 226 +# 32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_408.jpg +281 309 91 130 +443 128 77 111 +712 329 82 105 +528 723 96 128 +# 32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_116.jpg +210 72 86 114 +# 32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_738.jpg +285 324 123 168 +# 32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_68.jpg +405 258 323 426 +# 32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_434.jpg +448 152 234 329 +# 32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_90.jpg +380 226 121 186 +# 32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_595.jpg +397 182 39 45 +# 32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_566.jpg +658 354 60 84 +# 33--Running/33_Running_Running_33_547.jpg +117 44 57 54 +255 87 49 47 +363 107 48 49 +452 54 51 52 +523 112 51 58 +622 141 47 48 +743 69 55 56 +938 177 13 14 +7 138 5 7 +# 
33--Running/33_Running_Running_33_332.jpg +592 38 142 216 +# 33--Running/33_Running_Running_33_17.jpg +477 208 47 73 +# 33--Running/33_Running_Running_33_107.jpg +458 160 35 50 +598 189 32 46 +744 167 35 50 +# 33--Running/33_Running_Running_33_760.jpg +422 341 107 138 +# 33--Running/33_Running_Running_33_490.jpg +614 178 74 86 +# 33--Running/33_Running_Running_33_786.jpg +283 228 13 20 +128 376 27 35 +75 381 17 28 +505 134 31 28 +703 132 29 23 +847 139 6 6 +900 123 14 16 +922 135 7 8 +952 115 14 16 +57 240 4 8 +113 248 4 8 +138 248 5 8 +520 238 30 35 +707 249 19 15 +889 341 12 18 +943 354 14 15 +975 343 10 16 +450 339 9 12 +514 343 7 9 +550 338 9 13 +314 374 17 21 +50 467 11 14 +90 459 13 14 +141 463 14 15 +248 458 8 8 +343 466 9 12 +502 465 35 36 +732 462 30 37 +916 448 22 35 +706 338 29 48 +335 255 15 19 +# 33--Running/33_Running_Running_33_119.jpg +287 164 51 65 +555 145 47 74 +894 183 58 70 +# 33--Running/33_Running_Running_33_771.jpg +419 332 116 148 +332 132 106 142 +# 33--Running/33_Running_Running_33_316.jpg +855 111 15 25 +829 111 9 18 +894 120 13 14 +915 132 13 18 +856 197 47 68 +758 146 51 70 +423 148 37 48 +680 65 10 11 +369 139 18 14 +220 163 45 62 +267 163 33 56 +326 173 22 33 +354 168 38 58 +402 180 44 62 +527 222 38 46 +628 181 42 62 +413 106 13 19 +430 121 14 22 +472 132 9 13 +573 129 15 21 +542 123 22 23 +52 111 8 17 +100 159 20 17 +184 131 14 22 +215 101 15 21 +289 101 15 19 +325 103 17 19 +153 187 41 49 +603 124 7 10 +789 123 14 16 +666 131 12 19 +693 150 19 29 +718 138 14 15 +479 231 33 36 +# 33--Running/33_Running_Running_33_569.jpg +492 110 396 538 +# 33--Running/33_Running_Running_33_341.jpg +510 249 70 107 +# 33--Running/33_Running_Running_33_286.jpg +481 292 48 70 +# 33--Running/33_Running_Running_33_891.jpg +418 122 158 198 +# 33--Running/33_Running_Running_33_475.jpg +617 149 44 69 +# 33--Running/33_Running_Running_33_577.jpg +475 192 333 267 +213 267 187 163 +101 237 131 117 +# 33--Running/33_Running_Running_33_209.jpg +596 144 114 142 +# 33--Running/33_Running_Running_33_35.jpg +638 278 98 161 +# 33--Running/33_Running_Running_33_538.jpg +696 42 44 51 +# 33--Running/33_Running_Running_33_747.jpg +764 182 11 14 +# 33--Running/33_Running_Running_33_517.jpg +89 95 71 72 +337 141 45 61 +429 71 38 56 +505 141 45 62 +556 116 36 40 +694 139 51 62 +872 120 60 74 +# 33--Running/33_Running_Running_33_44.jpg +612 76 50 72 +753 39 55 78 +# 33--Running/33_Running_Running_33_586.jpg +78 127 23 37 +375 63 77 89 +600 175 17 24 +329 148 24 25 +# 33--Running/33_Running_Running_33_266.jpg +1 163 37 53 +41 166 24 34 +49 130 38 43 +184 175 41 47 +127 200 35 36 +69 229 49 33 +93 270 49 50 +239 217 43 62 +287 203 35 45 +252 155 37 42 +391 135 35 40 +428 137 32 35 +480 147 25 41 +509 171 45 49 +485 244 46 65 +379 216 44 41 +362 257 45 66 +609 127 44 50 +663 195 48 43 +792 87 44 58 +886 139 39 38 +914 146 11 1 +748 216 40 43 +810 208 47 40 +876 193 43 60 +769 270 47 59 +650 333 46 24 +923 99 29 52 +328 167 30 36 +157 148 24 37 +989 212 32 45 +# 33--Running/33_Running_Running_33_411.jpg +503 18 43 68 +# 33--Running/33_Running_Running_33_203.jpg +365 96 42 56 +# 34--Baseball/34_Baseball_Baseball_34_756.jpg +724 518 52 67 +# 34--Baseball/34_Baseball_Baseball_34_560.jpg +579 133 24 32 +# 34--Baseball/34_Baseball_Baseball_34_608.jpg +315 354 348 483 +# 34--Baseball/34_Baseball_Baseball_34_350.jpg +590 332 17 19 +604 273 18 22 +653 274 18 20 +234 118 12 15 +218 95 10 14 +189 136 11 16 +171 125 11 14 +120 140 11 13 +86 114 12 15 +70 104 11 13 +11 138 10 15 +20 102 10 15 +18 7 9 13 +28 25 10 
14 +66 11 10 14 +97 0 9 11 +119 9 10 15 +133 23 10 17 +172 7 12 14 +184 32 11 15 +442 139 11 15 +541 134 10 15 +424 110 13 19 +495 130 12 15 +485 122 11 13 +482 93 11 15 +552 158 11 14 +609 155 9 15 +593 129 12 15 +610 94 12 14 +675 153 11 16 +661 134 10 13 +676 117 11 15 +678 101 10 15 +524 87 13 14 +801 150 11 15 +859 152 11 14 +938 154 12 11 +742 134 12 17 +839 118 11 14 +896 119 10 13 +962 118 10 15 +783 97 11 13 +740 83 12 15 +859 92 9 13 +907 96 11 15 +973 101 11 15 +360 31 9 12 +408 29 10 13 +394 9 12 15 +345 6 10 11 +439 37 12 14 +452 11 10 15 +507 28 10 15 +562 29 10 14 +598 31 11 16 +551 6 11 14 +613 30 12 14 +683 22 11 15 +665 5 10 12 +759 20 11 14 +736 9 11 14 +799 24 9 16 +800 5 10 14 +855 14 11 14 +998 58 10 13 +895 29 10 14 +910 5 10 16 +955 27 10 15 +# 34--Baseball/34_Baseball_Baseball_34_600.jpg +392 94 114 139 +# 34--Baseball/34_Baseball_Baseball_34_667.jpg +485 171 108 165 +# 34--Baseball/34_Baseball_Baseball_34_585.jpg +658 88 146 220 +# 34--Baseball/34_Baseball_Baseball_34_391.jpg +58 260 23 32 +# 34--Baseball/34_Baseball_Baseball_34_436.jpg +394 136 68 78 +480 544 62 74 +# 34--Baseball/34_Baseball_Baseball_34_867.jpg +406 78 292 408 +# 34--Baseball/34_Baseball_Baseball_34_164.jpg +161 381 24 29 +294 357 16 18 +750 310 16 18 +820 281 14 16 +# 34--Baseball/34_Baseball_Baseball_34_895.jpg +248 325 453 408 +# 34--Baseball/34_Baseball_Baseball_34_828.jpg +308 206 78 150 +442 118 98 134 +552 94 88 130 +# 34--Baseball/34_Baseball_Baseball_34_886.jpg +736 78 108 164 +# 34--Baseball/34_Baseball_Baseball_34_580.jpg +433 67 177 258 +# 34--Baseball/34_Baseball_Baseball_34_171.jpg +420 82 94 98 +# 34--Baseball/34_Baseball_Baseball_34_143.jpg +202 304 13 16 +66 141 7 9 +674 211 7 11 +171 71 3 5 +868 58 5 7 +# 34--Baseball/34_Baseball_Baseball_34_66.jpg +481 143 75 88 +320 201 43 57 +877 294 40 49 +# 34--Baseball/34_Baseball_Baseball_34_356.jpg +575 245 135 168 +# 34--Baseball/34_Baseball_Baseball_34_127.jpg +551 241 29 45 +# 34--Baseball/34_Baseball_Baseball_34_829.jpg +348 529 201 396 +# 34--Baseball/34_Baseball_Baseball_34_622.jpg +670 284 54 80 +888 208 56 88 +# 34--Baseball/34_Baseball_Baseball_34_73.jpg +393 126 35 46 +# 34--Baseball/34_Baseball_Baseball_34_16.jpg +344 82 65 62 +# 35--Basketball/35_Basketball_basketballgame_ball_35_124.jpg +180 33 119 190 +410 15 111 152 +589 35 137 167 +880 25 114 167 +622 273 119 167 +314 271 126 167 +# 35--Basketball/35_Basketball_playingbasketball_35_36.jpg +818 44 74 104 +# 35--Basketball/35_Basketball_playingbasketball_35_134.jpg +314 326 94 114 +# 35--Basketball/35_Basketball_Basketball_35_635.jpg +814 304 35 39 +753 316 32 36 +697 343 31 37 +621 306 34 39 +599 265 34 40 +544 293 29 35 +459 278 29 34 +485 326 27 36 +422 320 28 35 +358 334 30 33 +282 310 32 35 +276 274 32 35 +211 318 29 28 +370 415 27 29 +447 420 33 39 +570 451 31 38 +510 474 30 37 +310 453 28 35 +# 35--Basketball/35_Basketball_basketballgame_ball_35_391.jpg +208 48 86 120 +370 90 86 112 +534 64 88 126 +702 76 92 132 +# 35--Basketball/35_Basketball_playingbasketball_35_730.jpg +419 164 336 422 +# 35--Basketball/35_Basketball_basketballgame_ball_35_662.jpg +62 41 27 34 +36 117 24 28 +64 101 38 60 +111 66 31 38 +113 111 31 40 +208 146 39 59 +228 122 33 43 +287 136 34 39 +243 66 36 41 +305 96 34 41 +378 69 32 39 +361 140 35 37 +393 189 26 38 +411 146 31 38 +429 13 30 36 +480 76 54 80 +492 0 27 18 +558 36 24 31 +595 43 53 72 +680 125 56 58 +659 3 27 31 +683 48 28 39 +708 80 34 42 +737 123 37 68 +760 3 26 32 +805 44 31 32 +798 120 52 74 +841 141 27 45 +0 175 32 37 +170 1 31 
34 +282 1 28 30 +0 0 18 28 +# 35--Basketball/35_Basketball_playingbasketball_35_350.jpg +488 148 156 184 +210 142 132 202 +# 35--Basketball/35_Basketball_basketballgame_ball_35_133.jpg +332 74 176 240 +736 62 198 320 +118 42 162 206 +700 48 120 188 +# 35--Basketball/35_Basketball_playingbasketball_35_585.jpg +627 188 194 329 +240 218 218 283 +# 35--Basketball/35_Basketball_Basketball_35_185.jpg +295 437 60 63 +712 475 52 92 +454 637 53 57 +607 720 20 30 +# 35--Basketball/35_Basketball_basketballgame_ball_35_858.jpg +566 222 92 102 +778 506 98 38 +# 35--Basketball/35_Basketball_playingbasketball_35_431.jpg +446 147 46 68 +# 35--Basketball/35_Basketball_playingbasketball_35_794.jpg +750 88 44 59 +831 138 33 48 +555 123 33 42 +512 136 30 39 +424 177 21 27 +440 198 20 30 +389 145 20 24 +380 172 19 23 +284 179 29 31 +321 164 23 29 +265 150 18 25 +242 181 20 32 +97 159 31 35 +0 169 24 44 +189 166 14 22 +149 17 17 27 +379 194 18 26 +68 202 18 23 +# 35--Basketball/35_Basketball_basketballgame_ball_35_565.jpg +119 389 8 8 +534 292 9 8 +550 278 8 9 +511 257 8 8 +701 213 12 12 +663 220 10 11 +751 190 10 11 +553 434 14 19 +14 392 7 9 +14 427 8 9 +95 397 7 10 +101 411 9 10 +189 414 9 12 +946 156 13 14 +978 146 12 15 +1005 143 15 16 +1006 195 14 19 +879 237 14 16 +908 234 14 16 +913 253 14 18 +937 235 18 21 +976 197 13 16 +954 246 14 19 +975 263 18 25 +962 281 20 26 +898 292 21 24 +882 354 20 26 +951 353 19 28 +926 389 11 29 +827 384 20 26 +867 431 25 34 +876 464 18 22 +50 407 6 8 +56 402 6 7 +67 401 5 5 +74 404 11 14 +112 398 6 9 +131 388 5 6 +115 426 6 9 +122 427 4 8 +99 444 13 21 +127 430 9 13 +137 449 13 20 +170 460 19 16 +195 456 14 19 +223 449 12 22 +234 457 14 17 +254 458 22 22 +277 461 17 22 +158 391 8 12 +194 431 11 12 +203 414 11 12 +241 398 9 11 +316 424 10 14 +385 411 11 17 +389 397 10 13 +383 394 11 12 +332 374 10 14 +337 359 13 15 +350 351 8 11 +377 349 8 11 +362 370 11 10 +375 339 6 8 +364 333 7 10 +378 327 7 8 +347 332 7 9 +324 335 8 8 +313 380 10 10 +305 371 6 9 +287 345 8 8 +243 381 9 13 +232 379 7 8 +390 313 8 9 +392 278 6 9 +216 345 5 4 +224 358 7 7 +434 304 7 10 +451 291 8 8 +483 256 8 9 +493 262 8 8 +484 295 7 9 +497 284 8 12 +506 287 9 11 +499 299 10 11 +417 337 14 13 +429 327 5 8 +431 319 5 7 +448 322 8 11 +467 317 11 13 +442 370 13 14 +402 415 15 17 +443 415 12 17 +471 384 14 14 +489 399 14 19 +499 367 12 18 +494 358 9 10 +499 343 8 10 +514 360 15 12 +519 339 11 13 +540 328 12 15 +567 313 12 14 +557 343 11 11 +567 339 11 14 +579 246 9 9 +638 227 9 9 +629 269 11 15 +615 298 11 14 +616 230 9 10 +615 283 13 12 +660 257 11 13 +671 265 12 12 +684 253 7 12 +661 278 12 11 +661 293 12 13 +659 310 13 13 +646 327 10 17 +614 346 12 19 +649 350 12 16 +689 308 11 16 +707 313 12 19 +722 294 10 14 +714 251 11 15 +704 250 8 11 +763 250 10 14 +785 233 11 16 +800 259 13 19 +814 273 16 25 +777 266 12 16 +732 310 15 22 +792 317 16 17 +798 336 17 21 +697 351 15 19 +701 389 23 27 +779 385 22 29 +800 428 22 29 +786 471 24 32 +146 398 7 8 +152 368 6 7 +165 369 7 8 +198 387 9 11 +199 353 7 9 +211 349 5 8 +236 365 8 7 +182 366 8 8 +275 369 7 9 +291 370 9 10 +260 360 7 8 +306 353 7 7 +316 351 7 9 +316 366 9 11 +244 341 7 7 +249 331 7 7 +264 339 6 7 +304 342 6 7 +279 305 5 6 +300 299 5 6 +288 301 5 6 +306 316 5 6 +280 327 6 6 +332 310 7 8 +333 292 7 8 +337 347 8 9 +371 310 7 9 +366 286 7 7 +309 300 7 7 +298 354 6 7 +298 335 6 8 +271 320 6 7 +516 434 16 20 +532 447 17 25 +579 424 16 23 +591 474 19 25 +607 430 17 21 +700 486 24 20 +552 242 7 9 +573 244 8 8 +591 236 7 8 +602 238 7 8 +724 282 11 13 +785 186 9 
11 +828 228 10 14 +847 178 10 12 +868 218 13 16 +878 171 12 14 +914 162 14 15 +841 278 13 15 +858 272 12 14 +888 250 12 16 +858 251 13 18 +115 379 5 6 +106 388 7 9 +139 372 6 8 +412 274 5 6 +443 268 6 8 +406 294 6 7 +0 422 6 9 +3 396 6 9 +# 35--Basketball/35_Basketball_playingbasketball_35_78.jpg +429 478 64 78 +637 497 64 75 +176 307 62 74 +177 1031 74 71 +187 569 88 76 +576 761 65 70 +# 35--Basketball/35_Basketball_playingbasketball_35_476.jpg +314 12 96 152 +590 82 100 154 +# 35--Basketball/35_Basketball_Basketball_35_180.jpg +684 165 28 38 +413 397 34 48 +610 355 33 45 +219 175 32 40 +283 187 33 43 +336 189 26 39 +385 193 28 38 +441 178 27 38 +476 216 28 45 +529 155 27 39 +602 166 30 38 +# 35--Basketball/35_Basketball_playingbasketball_35_127.jpg +170 8 17 19 +320 57 26 37 +424 87 13 17 +497 117 34 40 +534 172 35 42 +599 195 46 54 +640 37 29 41 +762 15 16 25 +852 5 23 25 +889 66 21 32 +15 66 15 19 +91 20 15 19 +403 6 22 17 +# 35--Basketball/35_Basketball_Basketball_35_209.jpg +51 244 7 10 +66 271 9 10 +171 190 20 28 +10 263 11 12 +50 220 7 10 +86 240 7 9 +419 110 32 42 +500 135 32 47 +898 181 20 26 +613 236 8 12 +834 263 9 13 +321 250 6 8 +18 234 6 9 +183 249 6 9 +176 276 6 9 +75 192 5 6 +23 205 4 6 +73 183 4 5 +22 184 5 6 +98 210 5 6 +359 272 5 7 +290 225 4 5 +336 215 3 5 +300 203 4 5 +278 214 4 5 +277 199 4 5 +743 270 7 9 +761 240 6 9 +117 269 7 8 +714 265 7 10 +86 267 7 9 +665 262 8 11 +127 273 7 10 +152 270 5 7 +110 239 6 8 +79 236 5 6 +138 238 4 5 +19 210 7 8 +79 228 5 7 +76 210 5 7 +109 214 6 7 +134 230 6 7 +153 229 7 8 +162 240 5 7 +147 260 6 7 +112 253 6 7 +174 239 6 7 +184 235 7 9 +164 215 5 6 +152 199 5 6 +127 200 5 6 +49 189 5 8 +117 198 4 6 +289 269 6 8 +339 267 6 8 +316 226 4 7 +263 206 5 5 +302 214 4 6 +333 200 4 6 +267 228 5 6 +979 224 13 16 +928 183 6 8 +952 191 6 7 +999 168 6 7 +1009 168 6 8 +842 225 7 8 +840 194 7 7 +968 220 8 11 +1019 213 5 8 +730 239 5 5 +675 240 7 10 +667 231 6 5 +714 235 6 8 +722 239 6 8 +987 168 6 9 +959 172 6 7 +954 146 6 7 +930 159 6 7 +865 184 5 5 +899 149 6 6 +# 35--Basketball/35_Basketball_playingbasketball_35_682.jpg +551 168 245 327 +# 35--Basketball/35_Basketball_playingbasketball_35_523.jpg +156 54 106 136 +# 35--Basketball/35_Basketball_playingbasketball_35_362.jpg +622 375 18 20 +537 406 23 26 +492 230 23 22 +705 465 13 14 +888 428 16 20 +18 523 11 14 +46 523 10 13 +98 528 8 11 +74 527 8 9 +117 528 6 9 +142 524 10 10 +172 524 7 9 +149 506 6 9 +97 486 5 8 +26 497 6 5 +40 504 9 10 +243 526 9 10 +186 505 6 7 +268 523 6 7 +281 518 6 8 +299 519 6 8 +317 521 8 9 +292 526 8 9 +373 529 6 9 +432 529 5 7 +51 466 6 7 +71 474 5 8 +92 475 7 8 +113 475 6 7 +137 487 6 6 +1016 478 7 9 +206 488 12 14 +791 496 7 7 +# 35--Basketball/35_Basketball_playingbasketball_35_582.jpg +679 108 40 60 +513 138 49 44 +427 123 36 48 +# 35--Basketball/35_Basketball_Basketball_35_529.jpg +266 212 29 38 +376 180 28 37 +440 165 28 42 +504 161 32 45 +564 176 29 43 +639 174 30 43 +697 214 29 40 +766 238 28 37 +# 35--Basketball/35_Basketball_playingbasketball_35_612.jpg +357 141 147 183 +544 444 162 231 +# 35--Basketball/35_Basketball_basketballgame_ball_35_937.jpg +714 134 106 148 +544 88 108 154 +426 162 90 132 +212 186 96 128 +# 35--Basketball/35_Basketball_basketballgame_ball_35_290.jpg +472 174 36 51 +409 252 43 58 +57 202 21 27 +4 318 13 18 +53 286 6 8 +35 268 7 10 +9 268 10 12 +9 258 8 10 +8 243 6 9 +36 235 6 8 +40 224 6 7 +6 225 5 8 +15 211 6 10 +13 202 6 6 +23 204 7 9 +38 196 5 7 +27 176 5 8 +2 192 7 8 +22 192 6 7 +60 235 7 10 +81 191 6 7 +89 198 5 8 +104 213 5 8 
+111 225 5 6 +112 234 7 8 +125 286 7 10 +138 302 8 12 +153 309 6 11 +144 327 10 14 +108 206 6 7 +132 190 5 7 +114 200 8 9 +127 213 6 7 +123 225 5 7 +131 233 5 8 +150 243 7 9 +159 228 6 6 +181 238 6 7 +164 242 5 7 +164 253 8 8 +183 256 6 8 +121 275 7 7 +124 253 6 9 +147 267 6 10 +154 281 6 8 +161 280 6 7 +161 296 7 11 +171 291 5 7 +197 223 5 7 +186 225 5 6 +197 183 5 9 +152 181 5 7 +132 208 5 9 +144 210 6 6 +4 151 3 8 +24 154 4 6 +56 157 4 8 +69 170 4 6 +83 172 3 5 +109 175 3 6 +122 166 4 8 +130 168 4 7 +113 157 5 7 +88 140 5 7 +71 144 5 6 +61 138 5 6 +141 172 4 4 +121 136 2 6 +208 232 5 6 +201 250 6 4 +179 249 7 6 +215 239 5 9 +228 237 6 7 +229 249 5 7 +197 259 15 20 +228 281 6 7 +271 294 7 8 +260 261 5 8 +241 260 5 8 +273 252 5 8 +296 240 5 9 +264 222 4 6 +285 239 4 7 +463 324 6 9 +470 318 7 9 +444 329 7 9 +392 256 4 7 +365 238 5 6 +353 239 4 6 +395 241 4 6 +579 330 8 10 +638 117 34 48 +907 135 31 43 +766 230 14 22 +608 339 6 11 +627 329 5 8 +625 347 5 9 +618 353 6 9 +610 357 5 9 +# 35--Basketball/35_Basketball_playingbasketball_35_199.jpg +586 202 260 228 +# 35--Basketball/35_Basketball_playingbasketball_35_495.jpg +545 402 88 78 +217 460 15 21 +290 451 13 21 +347 475 15 21 +361 424 14 17 +203 432 14 21 +124 442 14 21 +48 454 16 18 +61 408 15 16 +145 438 12 18 +184 459 10 16 +2 448 12 16 +1 420 9 16 +705 474 16 24 +680 438 13 18 +654 481 14 22 +781 489 23 16 +857 484 12 13 +848 507 14 18 +894 510 16 22 +167 633 18 22 +108 629 17 20 +135 672 17 23 +232 637 17 25 +260 604 17 24 +287 580 15 20 +305 630 17 23 +207 608 15 22 +220 586 14 21 +167 568 17 24 +104 573 15 20 +44 568 19 22 +285 672 17 23 +288 704 17 21 +243 707 19 20 +175 706 17 24 +225 743 20 27 +286 741 20 23 +342 758 60 68 +263 773 20 28 +288 815 18 26 +342 737 16 23 +350 715 16 21 +180 923 23 36 +203 968 26 29 +172 1116 26 35 +235 1157 22 32 +250 1102 25 32 +0 779 97 75 +522 153 15 21 +630 160 13 20 +699 183 13 19 +606 125 18 20 +608 63 15 22 +798 83 15 14 +767 452 17 16 +1004 447 14 17 +710 622 16 22 +740 599 15 20 +788 645 15 19 +818 657 16 21 +830 630 15 19 +844 604 13 19 +874 629 14 21 +870 659 14 21 +919 673 14 17 +942 630 19 22 +934 603 16 20 +992 609 15 21 +981 639 14 16 +988 694 16 21 +934 695 16 22 +802 726 17 21 +726 657 16 21 +690 692 14 21 +696 717 18 23 +592 712 19 23 +588 685 17 23 +692 752 17 25 +739 754 17 21 +793 757 18 19 +728 812 19 26 +733 789 17 24 +676 822 19 25 +660 856 19 24 +720 850 22 25 +770 859 19 22 +782 817 20 26 +835 796 16 21 +861 764 16 20 +906 816 19 23 +944 792 18 21 +935 730 14 20 +961 739 17 22 +867 724 20 24 +997 757 18 24 +951 830 16 21 +974 844 15 15 +946 864 17 19 +971 897 18 24 +1008 898 16 24 +1015 832 9 23 +737 1058 24 35 +740 991 23 32 +669 986 21 30 +685 959 22 29 +1002 1095 22 31 +766 896 18 25 +721 905 19 19 +652 798 23 28 +883 690 15 21 +701 31 16 23 +867 91 12 18 +678 86 13 20 +832 73 12 17 +237 478 17 22 +1007 1192 17 39 +# 35--Basketball/35_Basketball_basketballgame_ball_35_197.jpg +44 95 62 86 +247 97 60 70 +260 186 60 79 +447 157 58 74 +629 153 56 82 +701 168 49 78 +720 206 66 95 +821 272 67 77 +800 15 54 72 +25 37 49 59 +2 0 53 31 +# 35--Basketball/35_Basketball_playingbasketball_35_556.jpg +374 507 189 257 +# 35--Basketball/35_Basketball_basketballgame_ball_35_256.jpg +228 71 53 67 +358 153 33 44 +366 65 35 41 +467 162 46 57 +527 57 66 93 +676 266 29 48 +721 240 38 55 +706 201 28 39 +780 109 30 41 +854 196 28 42 +957 52 25 36 +927 117 23 34 +963 170 30 45 +761 355 48 57 +754 324 45 49 +867 325 45 55 +886 369 42 54 +936 354 44 59 +988 340 36 58 +# 
35--Basketball/35_Basketball_playingbasketball_35_823.jpg +452 85 111 128 +# 35--Basketball/35_Basketball_Basketball_35_754.jpg +498 109 19 21 +385 119 21 17 +196 334 9 15 +924 295 14 15 +485 24 14 10 +627 16 12 13 +238 227 14 10 +# 35--Basketball/35_Basketball_basketballgame_ball_35_82.jpg +437 192 51 64 +284 257 49 56 +990 1 34 78 +801 408 21 28 +770 409 23 26 +704 310 22 30 +650 407 19 24 +605 414 16 21 +566 415 18 22 +634 379 13 16 +787 382 16 20 +785 329 14 21 +740 329 13 17 +647 321 16 18 +755 276 13 18 +803 275 10 13 +741 272 11 14 +734 304 15 17 +583 408 11 14 +620 401 13 16 +387 421 15 21 +381 405 16 16 +111 371 15 19 +32 375 13 18 +51 368 13 18 +77 403 10 14 +107 298 13 18 +88 317 15 17 +10 289 14 18 +32 288 10 14 +43 264 11 11 +154 305 11 16 +207 295 12 18 +210 404 15 19 +11 389 17 17 +47 221 14 17 +29 207 13 16 +78 207 10 16 +104 226 9 13 +114 212 13 16 +142 230 13 16 +172 229 12 13 +185 250 11 14 +171 271 12 16 +139 267 12 17 +206 264 9 11 +240 241 12 16 +252 228 9 14 +249 188 10 14 +197 184 10 14 +154 203 13 16 +294 194 12 16 +342 222 13 18 +87 179 13 14 +48 177 13 15 +36 321 14 18 +104 151 10 14 +115 166 10 13 +158 170 11 11 +193 158 11 15 +220 148 11 15 +252 169 11 14 +260 142 11 16 +239 121 13 15 +235 104 13 14 +237 84 13 13 +201 96 13 12 +192 121 13 14 +153 109 10 13 +166 99 13 14 +123 107 12 13 +108 127 13 14 +130 73 12 13 +39 160 12 14 +63 135 10 14 +34 138 11 13 +29 118 12 13 +44 94 11 15 +52 71 10 14 +89 74 13 16 +100 34 11 13 +57 41 11 14 +5 82 11 14 +14 62 11 14 +28 22 12 14 +141 47 9 14 +198 53 10 13 +161 44 11 14 +266 6 10 14 +225 23 11 14 +188 3 8 11 +244 55 10 14 +227 66 9 11 +87 94 11 15 +64 110 11 14 +0 142 9 18 +654 250 11 16 +706 257 13 14 +726 234 10 14 +763 221 13 15 +688 218 13 16 +726 214 12 17 +723 196 11 15 +642 195 12 17 +682 194 12 16 +611 216 12 18 +646 216 13 15 +638 234 10 12 +593 230 10 13 +605 194 11 14 +615 174 11 13 +555 185 12 14 +513 189 13 14 +516 175 13 15 +437 155 13 14 +411 192 11 15 +396 212 11 15 +420 222 12 15 +400 155 13 14 +392 136 12 13 +437 137 11 14 +496 120 13 14 +466 115 11 14 +499 96 10 13 +538 124 9 13 +570 122 10 13 +629 149 14 16 +568 174 13 13 +673 152 14 17 +662 139 14 17 +699 134 13 16 +634 122 11 13 +601 145 12 16 +733 142 13 14 +684 100 12 14 +755 120 10 13 +778 113 12 15 +800 95 13 16 +726 96 11 13 +769 135 13 15 +842 109 12 15 +798 211 13 14 +813 235 13 16 +779 231 11 16 +750 36 10 12 +782 28 13 15 +631 38 12 13 +590 14 9 13 +536 35 12 13 +617 6 12 14 +403 121 12 16 +426 121 11 16 +397 100 12 16 +605 125 11 14 +366 21 9 11 +380 7 8 11 +# 35--Basketball/35_Basketball_playingbasketball_35_606.jpg +948 384 11 15 +891 394 9 11 +838 405 7 10 +807 414 7 8 +782 416 6 7 +914 443 7 7 +982 440 9 8 +852 431 6 9 +697 422 6 6 +657 424 5 6 +618 425 5 6 +576 424 5 6 +541 426 5 4 +499 425 5 6 +433 425 5 7 +460 425 5 6 +382 193 22 26 +195 206 28 28 +25 382 17 19 +77 389 18 17 +242 404 11 14 +363 427 6 7 +822 428 5 8 +292 450 5 8 +241 443 5 6 +398 441 4 5 +416 442 4 4 +# 35--Basketball/35_Basketball_playingbasketball_35_651.jpg +716 342 90 126 +638 344 82 118 +176 374 76 112 +236 394 116 114 +# 35--Basketball/35_Basketball_playingbasketball_35_209.jpg +372 193 413 519 +# 35--Basketball/35_Basketball_playingbasketball_35_156.jpg +402 438 104 80 +480 332 56 74 +812 380 80 100 +# 35--Basketball/35_Basketball_playingbasketball_35_644.jpg +334 192 186 229 +# 35--Basketball/35_Basketball_playingbasketball_35_73.jpg +488 368 56 66 +796 376 54 70 +# 35--Basketball/35_Basketball_basketballgame_ball_35_80.jpg +890 68 45 55 +810 76 51 53 +492 
102 104 119 +382 21 40 50 +293 15 40 46 +166 1 45 56 +258 134 47 64 +359 106 44 55 +167 235 36 50 +65 212 45 51 +92 186 36 49 +138 155 31 37 +34 113 35 47 +4 161 43 53 +1 231 29 62 +2 361 41 57 +429 199 33 45 +904 375 38 50 +690 206 28 41 +973 299 27 40 +# 35--Basketball/35_Basketball_playingbasketball_35_276.jpg +512 194 118 88 +438 58 62 96 +# 35--Basketball/35_Basketball_Basketball_35_361.jpg +73 163 25 34 +164 128 24 32 +165 248 26 38 +263 126 25 33 +300 251 24 34 +363 129 26 34 +405 248 26 32 +477 116 26 36 +515 244 29 35 +597 121 24 33 +653 254 24 32 +698 116 27 35 +777 242 26 32 +825 115 27 33 +870 238 29 35 +909 130 30 43 +# 35--Basketball/35_Basketball_playingbasketball_35_764.jpg +492 247 28 43 +303 190 30 44 +# 35--Basketball/35_Basketball_Basketball_35_664.jpg +752 247 16 18 +646 285 21 17 +790 303 16 14 +675 326 21 15 +885 381 10 10 +889 393 9 12 +38 327 6 11 +46 324 8 10 +22 319 9 10 +137 357 5 9 +121 337 5 7 +153 331 5 8 +101 308 6 8 +# 35--Basketball/35_Basketball_basketballgame_ball_35_64.jpg +870 59 22 34 +953 186 71 113 +904 173 61 90 +692 223 73 87 +806 186 39 45 +590 216 69 87 +476 227 64 80 +294 240 65 81 +126 295 57 69 +32 246 59 65 +99 200 43 54 +189 216 32 33 +241 248 31 40 +722 96 39 32 +655 108 34 39 +593 78 27 35 +491 157 34 39 +523 92 26 32 +440 110 29 37 +397 149 25 31 +414 113 22 30 +466 65 21 28 +536 19 24 30 +389 69 24 30 +340 154 29 31 +234 193 30 34 +271 128 27 31 +245 167 26 31 +148 141 28 30 +217 102 24 35 +138 110 24 30 +96 130 25 34 +180 72 25 34 +30 150 30 34 +415 17 20 19 +449 0 25 32 +624 10 25 25 +208 10 18 22 +157 3 18 25 +267 6 17 24 +296 0 23 19 +92 42 22 28 +23 76 21 28 +46 42 20 24 +565 198 32 41 +296 95 28 31 +55 194 22 27 +0 292 42 71 +336 0 20 20 +# 35--Basketball/35_Basketball_basketballgame_ball_35_412.jpg +660 156 70 98 +854 202 64 102 +62 184 80 102 +# 35--Basketball/35_Basketball_Basketball_35_737.jpg +653 234 38 92 +693 251 53 79 +847 19 89 111 +769 18 81 109 +395 101 8 9 +433 102 6 10 +56 37 71 83 +230 314 26 36 +202 339 30 25 +391 264 6 11 +957 435 18 22 +939 468 20 21 +# 35--Basketball/35_Basketball_playingbasketball_35_566.jpg +384 158 108 156 +582 46 114 156 +# 35--Basketball/35_Basketball_basketballgame_ball_35_208.jpg +250 218 132 174 +606 218 108 152 +618 4 98 84 +# 35--Basketball/35_Basketball_playingbasketball_35_377.jpg +562 659 56 56 +# 35--Basketball/35_Basketball_basketballgame_ball_35_287.jpg +68 304 5 7 +85 308 5 8 +66 324 7 9 +87 319 5 6 +81 330 6 6 +72 319 5 7 +4 296 6 9 +1 324 6 11 +21 353 7 10 +66 336 6 11 +62 358 7 10 +69 370 8 10 +55 366 8 12 +73 392 9 9 +59 385 9 14 +157 409 7 14 +201 399 7 11 +246 401 8 12 +186 441 11 12 +217 446 10 13 +239 429 11 16 +258 436 11 13 +276 425 10 14 +298 429 9 14 +306 414 6 8 +312 304 5 7 +325 313 5 8 +282 302 5 7 +311 324 5 7 +297 326 5 6 +261 321 4 7 +228 324 6 7 +291 340 7 7 +320 357 5 9 +333 349 5 7 +312 339 7 9 +312 438 7 12 +339 300 4 6 +353 298 5 5 +396 307 4 7 +416 305 4 7 +433 303 5 7 +437 325 6 7 +427 329 5 7 +433 342 6 7 +403 326 5 7 +401 315 5 6 +390 325 5 9 +382 318 4 6 +352 332 6 7 +373 336 5 6 +385 351 6 7 +410 345 6 6 +445 363 5 9 +425 380 7 9 +430 365 5 8 +432 397 6 10 +423 355 10 22 +398 361 6 8 +386 372 6 7 +367 346 5 10 +362 356 7 7 +344 347 6 7 +344 357 6 8 +379 384 5 8 +374 394 8 8 +355 384 6 7 +341 369 7 8 +337 377 7 7 +519 297 4 5 +506 306 5 6 +479 295 5 6 +453 306 4 6 +450 331 6 9 +445 306 5 8 +648 294 4 6 +650 312 4 7 +634 294 5 6 +631 301 4 7 +612 296 4 7 +602 305 4 6 +606 296 3 5 +591 294 5 6 +572 295 4 5 +563 299 6 8 +574 313 5 8 +587 320 5 6 +614 318 
4 7 +621 319 3 6 +624 330 5 7 +625 318 3 6 +609 326 4 7 +610 409 9 12 +716 358 4 8 +700 356 4 6 +661 375 7 10 +688 388 6 9 +707 387 7 9 +702 402 6 9 +671 403 7 8 +696 414 8 11 +651 403 7 10 +663 413 8 10 +635 410 8 10 +649 450 9 15 +659 431 9 12 +686 443 9 12 +775 291 4 7 +766 309 4 6 +749 309 4 7 +720 324 6 9 +698 325 5 8 +686 339 6 9 +707 345 6 8 +729 342 5 9 +736 335 5 7 +751 333 5 8 +777 355 6 7 +749 350 5 7 +760 364 6 8 +765 372 8 10 +736 388 7 11 +770 387 10 15 +725 416 8 11 +725 401 7 10 +785 394 8 10 +821 406 6 8 +813 421 9 10 +798 427 8 12 +803 453 10 15 +789 305 5 7 +789 296 4 6 +807 307 6 6 +806 317 5 7 +771 334 6 7 +797 348 6 9 +804 336 6 8 +823 367 6 7 +831 292 4 7 +831 304 6 8 +853 305 5 6 +855 299 3 4 +873 294 5 7 +878 306 4 5 +893 304 4 8 +917 305 5 6 +919 317 5 6 +898 325 5 7 +894 317 4 5 +845 344 6 8 +870 364 8 9 +871 349 4 6 +889 356 6 8 +895 390 8 11 +943 294 5 7 +957 293 4 7 +972 296 3 7 +936 303 5 8 +920 328 6 6 +909 310 4 6 +922 336 6 8 +938 337 5 7 +966 333 5 11 +965 326 6 7 +986 326 6 7 +989 332 7 8 +986 347 5 9 +970 360 4 5 +951 345 4 8 +943 350 5 5 +949 357 6 9 +922 353 7 10 +928 369 7 10 +1002 366 6 10 +895 343 6 7 +930 381 6 8 +954 382 6 9 +979 384 6 8 +975 377 5 7 +1007 381 7 11 +953 398 7 9 +931 400 10 14 +994 406 9 11 +950 423 9 13 +1001 426 7 10 +972 451 9 14 +990 375 5 9 +890 447 11 15 +901 440 8 10 +875 355 7 8 +897 380 8 10 +793 361 8 10 +822 334 5 8 +760 391 10 16 +206 462 10 12 +575 335 5 6 +282 326 4 6 +# 35--Basketball/35_Basketball_playingbasketball_35_491.jpg +337 122 227 307 +# 35--Basketball/35_Basketball_basketballgame_ball_35_389.jpg +505 227 35 64 +62 582 6 11 +28 595 7 13 +16 586 4 9 +18 611 7 14 +85 618 8 15 +90 578 6 13 +137 588 6 12 +158 590 6 11 +153 622 11 14 +223 610 8 14 +185 596 5 9 +139 569 5 10 +113 572 6 12 +102 572 5 9 +112 556 5 8 +264 601 6 11 +256 625 10 14 +322 613 9 17 +288 606 7 12 +315 596 5 10 +22 561 7 14 +54 568 6 10 +66 560 6 11 +351 606 6 11 +344 564 7 12 +311 572 9 12 +303 559 6 12 +235 555 5 11 +237 619 6 12 +203 569 6 10 +208 556 5 10 +415 614 6 12 +503 584 7 14 +548 613 7 11 +545 630 8 9 +567 618 10 21 +625 587 14 27 +464 565 5 8 +471 580 7 16 +680 598 6 11 +685 622 7 13 +703 629 12 9 +148 574 6 11 +# 35--Basketball/35_Basketball_Basketball_35_712.jpg +959 253 20 24 +907 242 24 26 +844 215 24 30 +784 233 22 25 +718 256 23 28 +655 253 23 29 +583 243 25 26 +523 256 24 26 +431 255 22 24 +365 239 23 27 +296 237 23 26 +235 239 23 23 +164 221 24 28 +101 228 24 25 +40 249 20 25 +# 35--Basketball/35_Basketball_playingbasketball_35_588.jpg +593 152 66 89 +613 477 106 208 +# 35--Basketball/35_Basketball_playingbasketball_35_3.jpg +281 16 71 104 +513 101 50 62 +812 114 75 90 +# 35--Basketball/35_Basketball_playingbasketball_35_279.jpg +386 1005 275 123 +# 35--Basketball/35_Basketball_basketballgame_ball_35_904.jpg +262 122 51 70 +321 116 58 54 +449 118 55 60 +530 147 65 48 +551 117 52 61 +# 35--Basketball/35_Basketball_playingbasketball_35_782.jpg +116 12 63 83 +227 9 58 72 +285 39 65 95 +422 23 60 90 +762 5 74 72 +470 72 63 90 +114 331 86 118 +521 276 90 116 +507 322 107 146 +927 403 79 114 +215 575 97 120 +# 35--Basketball/35_Basketball_basketballgame_ball_35_393.jpg +418 299 12 18 +664 291 10 19 +677 286 11 16 +683 296 9 14 +713 289 12 19 +702 300 13 18 +780 290 13 16 +856 282 15 20 +118 227 9 9 +110 237 7 8 +96 230 7 9 +70 226 8 9 +71 240 8 9 +78 251 9 11 +65 268 8 9 +80 281 7 11 +126 313 9 11 +95 314 8 9 +49 252 8 9 +20 240 8 9 +36 256 7 8 +47 282 8 11 +15 271 8 10 +11 287 8 9 +15 299 8 12 +3 327 9 13 +24 328 9 11 +52 
333 10 11 +83 333 9 11 +110 333 8 11 +132 330 8 11 +166 292 12 17 +# 35--Basketball/35_Basketball_Basketball_35_684.jpg +890 32 120 202 +# 35--Basketball/35_Basketball_Basketball_35_549.jpg +724 378 48 53 +685 378 40 55 +# 35--Basketball/35_Basketball_playingbasketball_35_632.jpg +326 520 49 31 +130 446 16 20 +459 336 35 22 +911 483 15 16 +# 35--Basketball/35_Basketball_basketballgame_ball_35_446.jpg +466 307 120 179 +# 35--Basketball/35_Basketball_playingbasketball_35_283.jpg +646 354 171 96 +# 35--Basketball/35_Basketball_Basketball_35_449.jpg +79 409 11 15 +288 388 12 17 +493 243 22 33 +563 425 8 11 +855 397 11 15 +905 416 9 11 +982 409 8 12 +# 35--Basketball/35_Basketball_Basketball_35_327.jpg +187 182 18 27 +251 170 19 26 +324 184 18 26 +384 177 22 29 +435 163 20 28 +465 177 22 34 +412 234 54 74 +491 177 17 24 +538 153 28 40 +599 160 18 31 +637 171 18 27 +664 153 29 34 +737 153 23 35 +763 143 29 40 +790 169 30 42 +828 149 27 35 +915 139 37 59 +970 181 28 42 +101 164 16 28 +508 194 18 21 +981 159 25 32 +563 160 16 28 +83 162 25 40 +24 176 25 33 +# 35--Basketball/35_Basketball_basketballgame_ball_35_827.jpg +372 387 222 327 +715 892 258 399 +# 35--Basketball/35_Basketball_playingbasketball_35_113.jpg +477 169 51 55 +577 125 44 47 +# 35--Basketball/35_Basketball_basketballgame_ball_35_513.jpg +616 149 169 235 +227 86 191 244 +# 35--Basketball/35_Basketball_playingbasketball_35_11.jpg +481 139 87 139 +# 35--Basketball/35_Basketball_basketballgame_ball_35_681.jpg +452 20 92 126 +# 35--Basketball/35_Basketball_playingbasketball_35_248.jpg +172 894 24 25 +286 857 12 13 +145 859 16 19 +154 959 30 35 +396 833 31 40 +493 930 29 32 +651 864 20 20 +816 863 19 17 +624 905 41 50 +27 895 31 38 +# 35--Basketball/35_Basketball_basketballgame_ball_35_736.jpg +388 128 70 136 +480 186 62 96 +716 142 72 128 +# 35--Basketball/35_Basketball_basketballgame_ball_35_153.jpg +419 225 167 219 +709 290 151 204 +89 207 133 225 +86 15 126 123 +# 35--Basketball/35_Basketball_playingbasketball_35_19.jpg +115 299 30 57 +520 250 33 26 +# 35--Basketball/35_Basketball_basketballgame_ball_35_542.jpg +320 257 38 55 +551 252 43 57 +86 256 44 55 +# 35--Basketball/35_Basketball_playingbasketball_35_795.jpg +83 36 23 29 +137 110 20 30 +220 73 24 30 +198 128 11 15 +39 16 10 15 +28 67 5 9 +37 67 6 8 +316 93 4 6 +325 90 3 5 +338 90 3 5 +317 81 4 4 +411 89 4 4 +408 80 3 4 +366 82 7 10 +393 48 8 7 +557 53 42 61 +845 86 41 59 +53 268 64 88 +402 395 23 35 +9 458 26 26 +609 519 12 16 +483 499 15 19 +575 558 6 8 +442 539 5 7 +665 553 6 9 +682 562 7 8 +692 551 6 8 +770 552 7 10 +785 551 7 9 +793 521 6 9 +784 518 6 9 +752 526 6 8 +726 522 5 7 +715 525 7 9 +701 522 6 9 +684 524 6 8 +912 437 20 33 +851 595 12 16 +937 572 10 14 +984 533 9 12 +575 533 6 9 +595 499 5 6 +622 498 5 7 +341 35 6 11 +# 35--Basketball/35_Basketball_playingbasketball_35_252.jpg +560 3 34 39 +599 64 48 60 +469 77 59 67 +104 19 44 55 +452 12 43 57 +# 35--Basketball/35_Basketball_basketballgame_ball_35_423.jpg +392 98 298 440 +# 35--Basketball/35_Basketball_playingbasketball_35_195.jpg +1012 399 11 13 +210 275 15 27 +55 404 9 10 +114 410 10 9 +152 404 9 12 +303 406 9 9 +461 395 15 16 +505 387 11 11 +359 401 7 10 +432 403 7 10 +538 404 8 10 +583 385 15 17 +737 401 8 8 +775 405 8 9 +780 391 9 9 +711 400 9 10 +971 387 12 18 +557 399 14 17 +617 426 9 11 +253 405 4 7 +135 416 10 12 +344 397 8 8 +290 435 9 11 +389 431 6 8 +# 35--Basketball/35_Basketball_playingbasketball_35_555.jpg +382 10 86 112 +526 50 100 102 +# 35--Basketball/35_Basketball_Basketball_35_653.jpg +872 33 
55 73 +734 48 57 70 +599 28 59 68 +460 76 56 69 +369 54 58 74 +268 49 56 68 +99 38 53 71 +90 276 59 69 +209 314 46 69 +380 301 53 63 +536 299 58 69 +653 276 54 64 +813 283 51 66 +921 278 61 76 +# 35--Basketball/35_Basketball_Basketball_35_107.jpg +41 199 75 117 +419 58 128 186 +619 450 24 35 +609 518 36 56 +691 547 48 40 +931 443 33 39 +851 386 26 35 +892 381 27 31 +949 373 26 33 +992 483 32 82 +743 481 27 42 +947 489 41 69 +667 446 30 37 +858 354 20 25 +818 410 22 24 +748 379 18 22 +788 348 15 25 +734 350 19 26 +712 360 15 23 +853 301 21 30 +816 336 26 30 +1007 343 16 29 +1010 383 14 26 +1011 431 13 40 +727 232 20 25 +838 217 20 27 +845 135 17 22 +917 122 17 23 +881 92 15 20 +818 93 15 21 +786 162 14 23 +690 200 19 27 +615 110 16 20 +311 166 15 17 +263 170 13 18 +231 119 16 20 +291 123 14 18 +336 125 15 18 +202 329 16 22 +165 288 16 24 +94 161 21 17 +229 358 19 25 +682 509 37 49 +320 66 14 20 +221 70 14 17 +273 71 12 16 +167 72 14 15 +780 226 18 21 +741 196 17 23 +798 187 16 22 +795 387 22 29 +775 99 17 18 +683 81 13 17 +867 218 17 22 +957 357 18 20 +365 88 14 18 +210 103 13 17 +320 99 13 15 +198 45 12 17 +258 34 12 16 +293 42 10 14 +339 31 12 16 +389 32 11 16 +146 45 10 13 +630 82 12 16 +746 80 11 14 +746 47 13 16 +749 12 11 13 +788 12 13 15 +675 18 14 16 +923 362 22 27 +772 406 17 21 +982 155 16 23 +658 254 17 19 +270 5 15 17 +297 14 14 17 +# 35--Basketball/35_Basketball_playingbasketball_35_251.jpg +432 99 84 180 +# 35--Basketball/35_Basketball_basketballgame_ball_35_276.jpg +17 0 58 53 +236 0 56 48 +18 125 40 65 +67 184 58 62 +277 160 56 81 +492 119 55 77 +432 50 48 45 +671 120 66 89 +961 34 52 75 +940 151 61 76 +# 35--Basketball/35_Basketball_playingbasketball_35_619.jpg +199 282 16 20 +82 278 12 14 +781 281 14 20 +609 285 14 14 +576 260 17 26 +449 274 22 24 +526 178 21 28 +# 35--Basketball/35_Basketball_Basketball_35_304.jpg +706 250 25 39 +765 269 33 36 +861 342 60 69 +# 35--Basketball/35_Basketball_Basketball_35_457.jpg +938 320 15 17 +606 151 36 39 +410 336 7 12 +380 327 8 15 +304 364 6 9 +16 393 9 11 +69 385 9 10 +50 383 9 8 +40 360 8 7 +42 351 8 8 +94 381 7 8 +126 388 8 10 +171 387 8 6 +136 387 7 7 +127 373 7 5 +125 381 6 7 +159 374 5 6 +213 386 5 7 +194 371 6 7 +213 360 5 6 +248 393 6 7 +282 377 5 8 +269 364 6 7 +234 351 5 7 +81 347 7 8 +# 35--Basketball/35_Basketball_basketballgame_ball_35_341.jpg +240 240 105 175 +501 190 125 163 +653 25 125 183 +# 35--Basketball/35_Basketball_playingbasketball_35_405.jpg +576 308 16 14 +826 295 16 25 +358 312 18 16 +343 316 13 17 +979 453 45 190 +# 35--Basketball/35_Basketball_basketballgame_ball_35_375.jpg +83 567 7 16 +871 518 9 18 +994 535 11 19 +1017 636 7 18 +960 493 6 12 +102 562 9 17 +987 481 11 18 +994 471 10 11 +19 77 5 8 +42 77 5 7 +66 78 5 6 +84 79 6 7 +104 84 3 5 +123 83 5 6 +122 96 4 5 +125 112 4 5 +106 112 5 7 +106 91 5 7 +86 98 5 5 +82 108 6 7 +69 98 4 5 +56 95 4 5 +31 94 4 6 +9 94 5 5 +30 107 5 6 +7 159 5 6 +22 162 6 7 +53 140 6 6 +69 142 5 6 +34 161 6 7 +37 174 6 5 +75 169 7 6 +93 157 7 8 +111 158 6 8 +119 172 7 8 +101 168 5 6 +86 167 6 7 +58 165 6 7 +97 130 5 7 +81 131 5 6 +63 124 5 6 +1 116 4 9 +0 133 6 9 +155 129 5 5 +156 141 7 7 +163 152 6 7 +163 163 5 8 +147 161 7 9 +134 159 5 9 +142 176 5 7 +155 178 7 7 +132 144 6 8 +137 99 4 5 +143 81 5 7 +160 83 5 7 +27 64 5 6 +55 63 4 5 +54 49 6 6 +36 49 4 5 +89 51 5 7 +79 63 6 8 +96 65 5 6 +110 64 6 8 +112 51 5 7 +131 54 5 6 +134 67 5 6 +145 53 4 6 +148 66 5 6 +161 56 5 6 +182 69 6 7 +179 58 5 5 +165 68 4 6 +174 132 6 7 +180 149 6 8 +179 164 6 7 +123 3 4 7 +102 4 4 6 +127 16 4 6 
+108 16 5 7 +119 33 4 6 +104 33 5 6 +86 31 5 6 +68 33 4 6 +52 32 4 6 +55 18 4 5 +65 6 4 5 +93 18 4 6 +77 8 5 10 +150 5 5 5 +161 8 5 5 +156 18 4 7 +158 31 5 7 +174 35 5 7 +137 35 5 7 +141 18 4 7 +259 8 4 5 +246 6 4 6 +230 7 3 6 +215 5 5 7 +196 5 4 6 +182 5 4 6 +191 18 4 7 +210 20 4 5 +229 24 6 7 +245 18 5 6 +177 21 4 6 +188 35 5 7 +207 38 4 5 +221 39 6 6 +206 56 5 6 +254 60 5 6 +256 73 5 7 +256 89 5 7 +224 82 7 8 +204 96 5 8 +235 70 6 7 +235 81 5 9 +350 10 4 5 +358 13 4 5 +373 15 4 6 +392 13 5 7 +378 27 4 5 +360 29 5 5 +350 30 4 5 +335 26 4 7 +322 24 4 5 +301 23 4 5 +280 37 5 7 +300 36 5 7 +314 40 5 6 +352 43 5 6 +341 43 3 5 +329 44 4 5 +382 45 4 5 +391 45 6 7 +406 47 5 7 +395 65 5 7 +380 60 5 7 +363 61 5 8 +351 64 4 7 +331 59 4 7 +293 58 6 7 +313 81 4 7 +331 78 6 8 +354 77 5 7 +372 82 6 8 +404 96 6 9 +385 80 4 6 +382 115 4 8 +369 90 5 12 +362 115 6 7 +348 109 6 8 +335 108 6 10 +350 128 7 8 +339 131 5 8 +320 126 6 10 +308 126 7 8 +291 124 4 7 +315 108 5 7 +297 106 6 7 +280 73 8 10 +289 91 7 9 +269 75 5 7 +272 92 6 6 +270 105 5 7 +239 119 6 9 +244 138 6 7 +263 143 6 7 +283 140 6 7 +299 142 5 7 +221 118 5 7 +199 119 6 6 +197 134 6 7 +214 133 5 8 +224 138 5 6 +194 148 5 7 +214 154 7 7 +230 153 7 9 +250 158 8 8 +273 159 5 7 +289 162 6 8 +291 177 5 7 +272 177 8 9 +256 175 6 8 +238 173 7 8 +219 173 6 7 +202 168 7 8 +187 168 6 7 +173 175 6 7 +196 185 7 9 +221 185 5 7 +238 187 6 8 +251 186 4 7 +256 198 5 7 +279 192 7 9 +303 193 5 9 +328 6 8 8 +409 0 6 5 +409 15 4 5 +440 6 4 6 +446 4 4 6 +454 4 5 6 +470 4 3 6 +463 15 5 6 +445 17 4 5 +422 0 5 4 +463 29 5 6 +442 31 4 4 +422 32 4 5 +412 42 5 7 +427 48 3 3 +451 44 4 6 +473 48 7 6 +448 67 7 7 +416 65 6 8 +337 145 6 7 +363 145 6 10 +396 151 7 11 +412 155 6 7 +428 159 5 7 +451 157 5 8 +468 160 8 11 +473 140 6 6 +478 105 6 9 +448 142 6 8 +427 136 6 8 +414 135 6 9 +430 128 6 10 +429 119 8 11 +458 120 7 8 +456 100 7 9 +435 69 5 7 +477 89 6 6 +534 6 5 6 +523 6 6 7 +522 18 5 6 +505 4 4 4 +493 4 4 5 +500 18 5 7 +301 75 6 8 +272 60 5 6 +321 95 5 6 +275 123 5 6 +259 122 6 7 +324 145 5 6 +428 16 4 5 +487 3 4 5 +485 14 5 6 +496 29 5 6 +475 35 4 6 +495 50 4 5 +415 82 4 6 +428 81 5 6 +498 88 3 5 +510 73 5 7 +496 105 6 8 +27 124 6 8 +26 143 4 9 +32 142 4 7 +633 9 4 6 +615 8 4 5 +592 7 6 7 +577 18 5 6 +593 19 6 7 +615 26 5 8 +622 26 3 5 +626 43 4 5 +612 39 3 6 +561 39 5 6 +578 41 4 5 +593 56 6 7 +604 61 6 7 +627 60 5 6 +622 78 5 7 +597 78 6 7 +581 77 6 7 +561 75 5 6 +567 55 5 7 +554 53 5 7 +534 50 6 7 +526 72 6 8 +542 72 6 8 +518 89 4 6 +541 88 5 9 +556 91 5 8 +576 91 6 9 +574 113 5 7 +559 107 5 9 +543 107 5 7 +518 104 6 9 +570 129 6 7 +557 127 6 7 +531 126 6 8 +506 128 6 8 +490 128 5 6 +318 156 4 5 +312 165 5 6 +318 196 7 8 +360 166 8 9 +379 178 7 8 +397 175 7 7 +414 176 7 8 +410 195 7 10 +378 195 8 7 +345 185 7 9 +320 182 7 9 +312 179 6 8 +330 234 7 5 +639 76 6 8 +661 79 5 8 +598 96 5 7 +613 96 5 7 +635 101 6 6 +651 100 5 6 +638 118 5 9 +612 118 5 7 +594 112 6 9 +592 134 7 8 +610 136 7 8 +635 133 9 8 +659 136 8 9 +651 161 7 8 +625 155 6 9 +612 152 7 9 +594 152 6 8 +574 150 6 9 +568 168 6 10 +584 180 5 7 +609 176 6 8 +625 178 7 9 +650 180 5 9 +606 193 7 8 +559 187 6 8 +390 96 4 5 +441 179 5 8 +459 180 5 8 +493 141 5 7 +507 150 5 8 +527 151 6 7 +541 149 6 8 +549 167 7 8 +518 164 7 9 +501 168 6 8 +483 160 6 9 +575 193 6 9 +535 184 6 9 +521 185 5 8 +494 186 6 8 +480 181 7 9 +526 205 5 9 +543 207 6 8 +568 213 7 9 +587 215 6 9 +558 223 7 8 +558 235 8 9 +523 227 6 9 +501 219 8 10 +434 200 8 9 +451 212 8 12 +420 196 6 7 +478 124 5 7 +469 201 9 8 +623 203 5 7 +614 216 7 9 +629 
216 7 10 +611 232 8 8 +636 233 7 10 +646 242 6 7 +657 221 6 8 +682 203 5 10 +696 205 6 7 +674 221 7 12 +685 236 7 11 +705 243 7 11 +540 261 5 10 +587 266 6 12 +550 257 8 9 +525 259 7 11 +592 229 8 9 +645 6 4 5 +671 9 5 7 +679 9 4 6 +699 16 4 4 +682 27 5 5 +679 33 5 7 +679 42 5 7 +672 41 5 7 +660 30 5 7 +643 25 5 8 +647 57 6 8 +661 62 5 5 +682 66 5 7 +702 60 5 8 +703 43 5 7 +724 45 5 6 +746 41 6 9 +741 25 5 6 +729 30 4 6 +703 24 5 7 +720 9 5 7 +740 11 5 7 +731 63 5 8 +749 65 7 7 +738 85 5 7 +717 84 5 7 +701 83 5 7 +679 84 6 7 +738 100 6 10 +720 99 8 9 +696 105 5 6 +715 127 5 8 +695 120 6 8 +677 117 7 8 +680 101 7 8 +757 18 5 5 +755 30 7 7 +783 12 4 7 +801 13 5 8 +818 13 5 8 +840 13 5 6 +803 33 6 11 +781 26 6 8 +778 41 8 9 +766 44 7 10 +763 70 6 9 +763 85 6 8 +831 98 6 7 +853 95 6 9 +851 116 5 8 +831 119 5 7 +928 4 7 8 +948 5 6 8 +979 3 5 8 +1007 17 5 7 +998 39 6 6 +976 24 5 7 +960 21 4 6 +939 18 5 6 +912 18 4 6 +1000 58 5 7 +975 58 6 8 +966 41 4 7 +955 35 6 8 +942 37 5 10 +915 38 5 7 +890 41 5 7 +878 53 6 7 +899 59 5 6 +921 58 6 8 +926 73 6 8 +947 58 5 8 +948 79 6 8 +968 82 6 7 +999 83 5 6 +1017 80 6 11 +1001 103 6 10 +978 104 6 8 +959 100 6 8 +934 101 6 10 +903 100 5 7 +906 79 5 6 +882 74 5 8 +869 69 6 12 +885 100 5 6 +878 118 6 9 +937 122 6 6 +973 122 6 8 +1020 129 2 10 +964 146 6 9 +947 139 6 8 +915 137 7 11 +892 138 5 8 +988 128 5 9 +918 118 6 9 +1018 169 5 9 +995 170 7 8 +972 185 7 9 +964 167 6 10 +940 165 6 9 +952 187 6 9 +935 208 5 10 +917 189 6 7 +908 206 6 8 +917 228 6 9 +948 239 6 10 +896 181 8 11 +892 158 6 8 +861 136 7 8 +863 157 6 8 +845 151 8 9 +822 153 8 7 +817 175 7 8 +840 176 6 8 +864 183 6 7 +861 199 7 9 +882 211 6 8 +865 224 7 9 +886 236 6 7 +832 216 7 9 +823 203 5 9 +800 226 5 7 +775 215 7 9 +775 144 7 12 +806 189 8 10 +1011 47 7 12 +987 84 9 6 +916 161 6 7 +1018 196 5 10 +1007 195 5 9 +1015 260 7 12 +1006 277 8 12 +845 267 8 7 +875 271 7 7 +725 185 7 10 +752 164 8 8 +754 211 6 8 +749 226 8 10 +809 266 8 9 +750 260 6 7 +177 113 6 6 +243 104 6 6 +260 107 5 7 +208 129 4 7 +184 180 7 7 +# 35--Basketball/35_Basketball_basketballgame_ball_35_479.jpg +139 291 5 6 +133 286 4 6 +130 284 3 4 +138 296 4 5 +138 320 4 5 +123 334 4 6 +114 287 4 6 +103 283 3 5 +92 280 4 4 +101 295 4 5 +103 332 4 6 +96 335 5 5 +85 342 6 5 +80 306 4 6 +87 294 3 5 +80 289 4 6 +62 310 3 4 +46 340 5 9 +23 338 4 6 +22 313 4 6 +40 305 4 5 +62 300 5 6 +56 310 4 6 +61 336 6 9 +92 344 4 7 +39 322 4 5 +28 367 4 6 +22 373 5 8 +22 404 4 6 +43 384 5 7 +54 389 5 7 +64 391 4 6 +51 409 5 7 +70 402 4 6 +73 412 6 6 +85 415 6 8 +28 295 3 4 +20 282 3 6 +21 296 2 5 +57 293 4 5 +16 340 5 7 +27 343 4 6 +126 426 7 9 +139 438 5 9 +134 439 5 8 +111 437 6 8 +76 441 6 9 +102 451 5 10 +84 447 4 8 +91 479 8 11 +94 468 6 7 +61 434 7 9 +49 430 6 8 +35 426 6 6 +22 446 7 6 +36 443 5 7 +26 461 5 9 +144 487 5 8 +92 518 10 14 +139 531 8 13 +42 379 5 5 +32 377 5 4 +10 393 5 5 +9 398 5 7 +8 410 5 7 +4 422 3 6 +2 407 4 6 +7 420 4 6 +103 423 5 9 +7 450 6 9 +102 472 8 9 +103 622 19 31 +87 575 7 15 +221 486 6 10 +251 471 4 7 +269 475 5 8 +279 487 4 9 +175 453 6 6 +164 449 5 8 +195 466 5 6 +196 478 5 6 +323 554 8 11 +304 583 11 14 +481 584 11 12 +628 600 5 10 +816 665 9 16 +951 645 8 12 +982 638 9 11 +179 229 3 3 +189 239 2 4 +216 281 3 4 +174 274 4 4 +160 253 3 4 +155 231 3 4 +144 232 3 4 +177 247 4 4 +171 255 4 5 +168 294 5 6 +249 227 4 5 +240 228 3 4 +225 229 2 3 +260 279 4 6 +268 270 3 4 +251 288 4 4 +371 339 4 5 +421 366 5 8 +503 360 4 7 +477 357 4 5 +539 351 4 6 +558 350 3 6 +570 369 5 7 +626 377 4 7 +268 289 2 5 +238 292 4 5 +225 302 4 5 
+221 310 5 5 +246 328 3 5 +237 305 5 7 +255 303 4 4 +273 300 4 4 +255 309 4 5 +285 300 3 5 +290 306 3 5 +299 304 4 5 +314 304 4 5 +319 295 4 5 +308 289 4 6 +268 303 3 6 +203 280 5 7 +256 261 6 4 +272 249 5 6 +253 270 3 5 +239 310 5 5 +324 256 3 5 +335 255 4 8 +311 255 3 6 +337 280 4 5 +312 279 3 4 +348 253 3 5 +370 264 3 4 +374 290 5 5 +357 290 2 4 +323 299 3 6 +320 279 4 6 +386 274 4 5 +398 272 4 6 +397 291 4 7 +390 290 5 5 +354 235 3 5 +336 244 2 3 +324 247 2 3 +349 277 3 4 +401 265 4 5 +399 255 3 4 +394 249 3 4 +425 238 3 4 +416 248 3 4 +408 248 3 5 +413 239 2 4 +399 240 3 4 +440 285 3 5 +430 294 3 4 +430 305 3 5 +444 309 3 5 +455 311 4 5 +462 311 3 5 +477 315 3 4 +489 316 5 6 +482 299 4 5 +473 299 4 4 +468 286 4 5 +479 288 4 4 +456 295 4 5 +501 319 4 6 +466 317 4 4 +533 313 3 6 +511 322 3 5 +531 323 3 5 +500 292 4 6 +491 290 4 6 +534 273 3 4 +519 272 3 4 +497 270 3 4 +484 271 2 3 +474 270 3 4 +447 267 3 4 +454 277 3 4 +505 261 3 5 +521 262 3 4 +530 264 3 4 +515 283 4 5 +493 279 3 5 +548 323 4 5 +558 327 4 5 +570 328 4 6 +584 330 3 6 +586 297 4 6 +575 297 4 5 +563 298 3 4 +549 295 4 6 +558 283 4 5 +546 308 4 4 +581 267 3 4 +594 264 4 5 +544 262 3 4 +587 285 2 3 +596 290 2 4 +606 289 2 3 +606 305 3 3 +600 312 4 5 +594 310 4 5 +705 268 2 4 +695 268 4 4 +703 277 3 4 +695 277 3 4 +680 277 4 5 +673 280 4 5 +666 283 4 5 +657 285 3 3 +643 282 3 5 +635 280 4 6 +602 271 3 4 +611 282 2 3 +648 294 3 6 +661 297 3 4 +666 298 4 4 +677 307 3 5 +709 314 3 5 +716 328 3 6 +706 327 4 6 +698 312 4 5 +690 327 5 6 +683 325 4 5 +657 306 5 4 +665 333 4 5 +646 341 4 5 +646 332 4 5 +628 341 4 6 +620 336 4 6 +615 327 4 5 +602 340 4 5 +601 334 3 4 +603 327 3 5 +593 324 6 5 +575 357 6 4 +657 360 5 6 +626 292 6 8 +639 297 7 8 +623 317 4 5 +639 318 3 5 +580 308 4 6 +468 258 3 4 +460 248 3 5 +464 240 2 4 +381 281 3 7 +416 217 3 4 +459 267 3 5 +569 306 3 4 +580 279 2 5 +526 252 3 4 +537 253 3 4 +548 254 3 3 +559 257 2 3 +560 246 3 5 +545 244 2 3 +497 229 2 3 +517 230 2 4 +515 248 4 6 +510 269 2 5 +500 250 3 5 +565 222 3 6 +561 232 3 4 +522 220 4 4 +507 211 3 4 +576 224 2 4 +588 225 2 5 +494 262 3 4 +474 248 3 4 +422 226 3 4 +765 237 2 4 +755 238 3 3 +729 243 4 5 +748 249 3 4 +732 259 3 4 +791 249 3 4 +829 253 4 4 +843 255 4 5 +835 268 4 6 +818 277 4 5 +822 266 3 5 +798 263 3 6 +785 263 4 5 +774 265 4 5 +782 273 3 5 +773 275 4 5 +756 272 3 5 +738 281 5 5 +750 285 3 5 +789 274 3 5 +807 274 5 5 +837 280 4 4 +845 295 4 5 +831 291 4 5 +795 286 6 7 +741 294 4 5 +730 301 4 6 +749 306 3 6 +774 298 4 5 +775 289 3 5 +792 299 3 4 +805 300 4 5 +816 296 5 7 +831 306 4 5 +814 315 6 9 +797 313 4 6 +801 327 3 6 +811 328 5 7 +844 318 4 5 +775 309 3 4 +763 305 5 8 +735 318 4 5 +743 319 4 6 +756 321 4 5 +751 335 4 5 +781 337 3 5 +785 321 3 8 +774 324 3 6 +795 340 3 4 +768 337 4 5 +731 307 4 5 +722 304 4 5 +710 301 5 5 +719 296 4 6 +720 319 3 4 +705 342 4 5 +713 341 5 7 +725 350 4 4 +751 351 4 5 +754 352 4 7 +766 361 5 7 +721 333 4 5 +638 260 3 4 +644 258 5 5 +608 257 2 4 +601 275 3 5 +628 261 4 4 +603 248 4 6 +655 216 4 7 +634 236 3 5 +671 246 3 5 +688 242 3 5 +696 240 3 5 +707 242 3 5 +357 278 5 7 +333 290 3 4 +310 261 4 5 +358 262 4 6 +364 235 4 6 +350 243 5 6 +502 310 4 5 +523 312 4 6 +531 302 3 4 +561 321 4 5 +570 319 4 5 +567 285 3 5 +558 307 3 5 +853 256 4 5 +792 219 3 3 +782 221 3 4 +819 228 5 4 +832 229 4 4 +846 212 3 3 +621 241 3 6 +620 260 3 5 +645 250 3 4 +650 228 3 4 +585 207 4 7 +665 210 4 5 +577 207 4 4 +596 207 3 4 +609 216 2 4 +588 198 2 4 +629 199 3 4 +609 190 3 4 +626 190 4 6 +607 197 4 6 +599 219 4 5 +765 285 4 6 +797 
365 5 6 +799 354 4 6 +789 354 4 7 +812 337 5 7 +827 342 5 7 +786 364 4 6 +810 372 5 5 +831 367 7 8 +814 359 4 6 +957 238 5 7 +947 232 2 4 +934 224 4 5 +918 226 4 5 +908 245 3 6 +906 233 4 9 +930 244 6 6 +916 247 6 8 +945 249 4 6 +933 276 7 7 +919 273 5 8 +903 273 4 5 +924 286 6 7 +886 246 4 4 +881 257 3 5 +922 262 4 5 +934 263 4 5 +949 265 3 6 +959 258 7 6 +957 317 5 8 +941 327 5 6 +926 342 4 6 +918 355 5 5 +905 357 5 8 +891 353 6 8 +903 259 6 6 +896 259 3 5 +894 224 4 4 +864 236 3 4 +868 257 4 5 +847 269 4 5 +862 269 4 6 +877 271 4 5 +889 274 3 5 +904 298 4 6 +898 287 5 7 +871 283 4 5 +889 286 4 6 +860 283 4 6 +886 297 5 6 +890 309 5 7 +869 306 5 6 +859 321 4 6 +877 323 4 7 +889 325 3 4 +914 303 5 5 +854 339 4 4 +845 347 5 5 +860 304 5 6 +844 303 6 10 +908 380 6 8 +896 377 5 7 +863 338 5 6 +886 366 4 8 +876 364 4 6 +843 371 7 8 +841 364 6 11 +861 373 6 7 +856 362 6 6 +976 241 4 5 +965 252 4 5 +1000 230 7 7 +997 242 7 9 +1009 286 5 6 +967 307 3 5 +1013 296 6 8 +859 291 4 9 +830 318 4 6 +973 253 7 8 +1017 312 5 7 +1009 304 7 7 +1008 336 6 9 +1006 324 3 5 +988 338 5 6 +991 359 5 5 +1006 358 4 6 +972 338 5 6 +958 346 4 7 +987 368 4 6 +945 346 5 7 +937 351 2 4 +936 368 4 4 +996 339 6 7 +875 376 5 7 +936 378 7 8 +957 389 8 11 +1007 393 7 11 +988 243 3 5 +1014 218 5 7 +1011 208 5 6 +995 207 4 7 +985 220 3 5 +970 219 4 6 +956 204 4 5 +945 205 4 4 +969 197 3 5 +932 214 4 5 +971 370 5 5 +992 405 6 7 +847 235 3 4 +892 235 4 5 +875 241 4 4 +906 215 3 4 +897 215 4 5 +883 211 4 5 +868 212 4 4 +908 203 3 5 +895 201 4 7 +894 193 4 5 +882 221 4 5 +922 215 4 5 +904 191 4 5 +905 226 4 6 +966 205 5 5 +980 185 3 4 +967 185 3 3 +995 187 4 4 +955 185 4 5 +944 181 4 5 +966 175 4 5 +976 176 4 4 +998 178 2 3 +943 195 3 4 +906 166 4 5 +916 192 4 6 +894 170 4 6 +894 181 3 6 +918 174 3 4 +932 185 4 4 +928 173 2 5 +955 165 4 4 +940 162 4 5 +930 165 3 4 +915 154 4 6 +440 220 3 5 +429 216 4 6 +421 215 5 5 +437 198 4 6 +456 196 3 3 +883 201 3 7 +875 195 4 6 +868 198 4 6 +842 187 4 6 +854 212 4 7 +859 196 5 8 +829 190 5 7 +851 168 4 5 +846 167 4 6 +712 256 4 5 +697 255 3 4 +759 224 4 6 +735 222 5 5 +746 223 4 4 +736 235 3 3 +729 236 4 3 +717 227 5 8 +683 211 4 4 +676 208 6 6 +664 220 3 3 +687 230 3 5 +696 223 3 3 +702 213 3 4 +696 202 3 5 +688 201 2 4 +754 196 4 4 +717 214 3 7 +724 215 3 6 +736 215 3 5 +765 225 4 6 +760 207 3 4 +745 195 3 6 +705 194 3 6 +711 192 4 4 +830 221 4 5 +819 199 3 5 +810 198 3 5 +792 208 3 5 +794 188 3 4 +794 199 3 3 +488 205 2 4 +519 211 3 4 +508 231 2 4 +982 206 5 6 +988 167 4 6 +1000 142 4 5 +973 135 3 4 +988 155 4 7 +952 134 3 5 +938 136 4 4 +992 184 2 4 +1003 186 5 10 +1009 158 5 5 +1007 143 4 8 +# 35--Basketball/35_Basketball_Basketball_35_158.jpg +198 462 120 174 +955 565 60 84 +# 35--Basketball/35_Basketball_basketballgame_ball_35_254.jpg +627 312 92 78 +312 237 61 106 +12 143 80 92 +# 35--Basketball/35_Basketball_playingbasketball_35_219.jpg +468 968 61 69 +580 871 30 37 +684 927 23 34 +702 1053 66 83 +815 959 68 94 +924 871 68 88 +246 683 96 113 +829 877 22 26 +759 870 28 35 +# 35--Basketball/35_Basketball_basketballgame_ball_35_460.jpg +444 427 213 288 +# 35--Basketball/35_Basketball_basketballgame_ball_35_709.jpg +767 996 35 45 +749 971 22 26 +782 974 35 42 +865 978 21 26 +882 1012 30 38 +894 978 31 40 +937 983 27 31 +960 972 23 23 +994 978 29 35 +1003 1002 21 36 +993 930 22 28 +972 909 18 24 +930 908 23 25 +774 923 24 29 +851 918 20 32 +908 955 23 28 +904 925 25 30 +851 896 23 29 +868 863 18 24 +913 867 20 26 +972 861 24 28 +973 834 19 25 +998 816 19 25 +981 781 26 29 +976 759 20 24 
+991 749 23 23 +983 720 23 23 +979 700 18 20 +1000 680 18 25 +942 652 19 25 +949 684 18 25 +897 675 20 24 +836 653 18 18 +804 668 16 20 +841 677 17 20 +790 649 20 18 +776 672 18 20 +771 692 21 20 +739 701 18 23 +758 725 17 20 +758 742 18 20 +779 715 19 25 +824 697 19 24 +824 721 25 28 +888 707 18 23 +880 737 20 23 +923 734 20 21 +925 766 19 21 +921 807 24 28 +923 786 20 19 +904 837 22 23 +860 847 18 19 +859 808 20 29 +877 810 21 30 +789 836 24 24 +795 808 23 25 +801 778 24 24 +832 773 18 25 +865 771 20 19 +874 787 18 22 +796 867 22 25 +893 641 21 25 +971 607 18 23 +972 579 15 21 +928 587 17 22 +880 583 14 17 +851 621 17 19 +791 620 19 19 +810 607 14 16 +807 558 15 14 +813 541 14 15 +673 567 24 22 +715 569 17 21 +748 493 19 21 +717 479 17 17 +695 462 16 18 +667 427 11 12 +684 434 13 15 +703 454 14 16 +729 468 11 15 +738 452 11 15 +728 430 10 11 +695 406 13 15 +766 476 12 14 +788 482 14 19 +803 482 11 12 +827 491 13 17 +793 518 12 13 +875 504 12 15 +857 470 12 14 +838 482 11 15 +868 492 12 15 +887 470 11 15 +909 489 10 18 +926 477 12 17 +956 508 15 16 +976 459 10 15 +988 492 12 17 +1003 476 11 17 +848 450 12 11 +829 444 9 12 +810 457 12 12 +790 440 12 14 +801 432 12 15 +773 461 12 13 +812 415 12 16 +866 443 11 16 +890 451 11 12 +933 443 13 19 +954 482 13 16 +310 388 15 15 +331 371 17 19 +364 363 14 13 +369 383 14 16 +390 402 11 13 +410 409 15 19 +354 420 14 14 +337 394 12 19 +350 386 13 16 +314 422 11 15 +322 439 10 12 +341 441 12 14 +376 434 11 16 +400 423 11 14 +401 449 14 17 +423 477 14 19 +346 490 19 21 +320 484 13 16 +304 555 17 20 +408 550 21 24 +520 401 76 69 +11 560 15 15 +22 586 17 16 +14 622 18 20 +49 635 18 24 +78 567 18 22 +98 587 18 24 +108 646 15 18 +129 623 18 19 +143 607 16 20 +161 592 16 19 +178 563 21 23 +240 552 18 21 +197 556 19 22 +217 603 19 22 +179 616 19 20 +243 627 20 29 +254 615 15 16 +291 622 16 23 +306 598 18 21 +259 579 19 25 +223 584 20 22 +16 461 17 21 +39 473 15 16 +63 466 15 18 +109 468 15 15 +109 441 16 18 +150 444 13 16 +190 436 13 16 +169 432 11 12 +233 455 15 17 +267 445 18 13 +262 467 15 14 +298 433 15 16 +276 482 15 17 +58 594 19 22 +51 619 20 22 +361 602 17 19 +390 580 17 22 +419 599 17 23 +393 629 16 22 +368 645 20 25 +328 644 21 23 +26 774 22 29 +50 804 26 27 +27 829 27 29 +17 858 22 26 +3 888 19 27 +3 933 24 29 +3 966 23 29 +5 1003 36 46 +34 988 29 38 +65 985 29 42 +53 962 27 25 +91 978 25 31 +120 1000 27 33 +137 959 28 36 +98 931 25 29 +83 856 24 31 +105 828 24 29 +135 822 30 33 +173 939 21 33 +180 983 37 43 +212 973 23 30 +165 989 29 40 +219 1009 40 42 +249 928 23 33 +268 958 27 25 +268 987 26 36 +288 997 37 38 +348 966 29 36 +79 667 16 20 +148 674 17 22 +193 663 17 22 +182 689 22 24 +152 706 23 29 +186 711 25 25 +182 735 25 25 +227 685 19 26 +261 664 19 18 +294 688 21 23 +266 705 20 23 +238 736 21 25 +216 761 25 23 +263 775 20 31 +273 758 23 23 +300 732 21 25 +320 705 21 26 +356 691 14 18 +360 722 18 23 +352 752 22 29 +320 778 25 30 +304 805 23 27 +352 831 23 29 +372 820 20 23 +386 787 26 27 +413 756 23 27 +399 690 16 19 +437 703 20 25 +392 865 27 33 +496 994 25 36 +506 937 23 30 +133 505 22 23 +632 595 89 74 +184 481 14 17 +64 433 14 18 +44 394 15 17 +43 426 13 16 +42 456 11 14 +75 409 11 13 +88 397 11 13 +108 409 15 17 +127 424 12 16 +134 400 12 15 +67 376 13 16 +71 360 12 16 +55 353 11 14 +48 321 13 15 +76 336 10 15 +86 353 11 15 +108 361 12 16 +106 338 13 15 +148 355 12 17 +164 376 13 17 +188 384 13 16 +206 376 15 15 +220 433 14 18 +230 414 15 19 +251 402 15 16 +290 378 13 15 +281 355 13 17 +233 384 10 13 +250 325 11 15 +171 358 14 16 +206 
332 12 14 +222 344 12 18 +174 327 11 15 +246 359 12 17 +763 416 14 19 +757 390 12 14 +852 402 12 16 +947 432 12 14 +420 647 20 22 +374 667 18 24 +356 556 16 17 +0 810 15 25 +# 35--Basketball/35_Basketball_playingbasketball_35_732.jpg +645 256 36 48 +# 35--Basketball/35_Basketball_playingbasketball_35_13.jpg +443 160 98 122 +# 35--Basketball/35_Basketball_playingbasketball_35_433.jpg +529 120 117 201 +# 35--Basketball/35_Basketball_Basketball_35_393.jpg +752 170 72 68 +188 6 198 228 +# 35--Basketball/35_Basketball_basketballgame_ball_35_178.jpg +86 395 25 32 +195 470 21 25 +266 506 15 18 +303 484 17 26 +567 264 32 22 +455 383 29 27 +13 658 7 9 +24 676 7 8 +31 656 8 9 +47 667 7 8 +42 660 4 7 +30 640 5 6 +20 646 5 5 +39 646 4 6 +50 647 4 6 +48 655 5 5 +55 644 3 6 +60 646 4 5 +66 647 3 5 +72 645 4 5 +58 659 6 9 +68 662 6 9 +75 666 4 7 +86 668 4 8 +120 675 5 8 +126 676 4 7 +151 674 7 7 +146 681 6 6 +228 688 3 6 +241 690 4 7 +235 691 5 5 +294 686 3 4 +313 701 4 4 +290 699 3 6 +328 694 3 6 +341 695 3 5 +355 694 4 5 +370 694 3 4 +372 704 4 5 +382 695 4 5 +390 703 5 5 +397 697 3 4 +409 697 3 5 +420 693 3 5 +435 683 3 5 +434 695 3 4 +426 683 3 4 +478 691 3 6 +463 694 3 4 +406 690 4 6 +396 690 3 5 +483 685 3 4 +490 683 4 5 +501 681 4 6 +505 680 3 4 +491 678 3 4 +535 697 4 6 +542 698 4 4 +569 694 3 6 +583 694 4 5 +592 696 3 7 +605 696 3 5 +603 683 3 4 +551 687 4 5 +601 704 4 5 +618 693 4 6 +653 697 3 5 +662 696 3 4 +633 695 4 5 +621 670 3 5 +719 689 4 5 +743 691 5 6 +750 681 4 6 +759 703 5 6 +761 692 4 6 +787 689 4 5 +794 686 4 5 +798 685 5 5 +809 685 3 4 +811 681 3 6 +818 686 3 5 +825 682 3 5 +833 680 4 5 +832 687 5 6 +843 670 5 7 +759 666 2 4 +748 667 3 4 +768 664 3 4 +784 665 2 3 +762 677 5 8 +791 673 4 6 +861 656 3 5 +855 678 5 6 +867 680 5 5 +881 675 6 8 +863 676 4 5 +887 677 6 6 +886 671 4 5 +916 673 6 7 +905 675 5 7 +900 674 4 7 +897 675 4 6 +894 676 4 5 +932 668 6 8 +924 664 3 5 +917 658 4 5 +899 666 5 5 +928 657 3 5 +934 659 3 4 +945 668 6 6 +952 663 5 7 +961 667 6 7 +962 653 6 7 +970 663 7 8 +1015 666 6 5 +1013 677 7 7 +986 661 5 7 +976 666 6 7 +981 663 5 7 +977 655 4 6 +1009 646 5 6 +995 645 5 6 +988 652 3 6 +985 644 4 4 +975 646 3 5 +970 650 4 5 +983 635 3 4 +690 387 25 28 +839 190 27 36 +# 35--Basketball/35_Basketball_playingbasketball_35_366.jpg +410 324 40 45 +# 35--Basketball/35_Basketball_playingbasketball_35_65.jpg +681 234 11 11 +744 380 14 18 +850 416 16 16 +907 354 16 22 +918 376 24 32 +968 405 22 27 +1005 416 14 20 +800 425 14 16 +621 391 16 22 +452 349 22 23 +529 311 18 25 +227 428 23 36 +717 416 11 14 +# 35--Basketball/35_Basketball_basketballgame_ball_35_309.jpg +396 70 82 110 +214 6 84 58 +136 30 68 116 +766 42 180 258 +# 35--Basketball/35_Basketball_playingbasketball_35_636.jpg +340 48 232 308 +866 366 90 106 +# 35--Basketball/35_Basketball_basketballgame_ball_35_192.jpg +743 125 130 168 +555 608 132 161 +# 35--Basketball/35_Basketball_playingbasketball_35_511.jpg +550 294 23 18 +# 35--Basketball/35_Basketball_basketballgame_ball_35_216.jpg +708 406 9 9 +640 420 7 9 +481 207 19 19 +370 170 21 22 +1012 434 7 11 +963 467 4 7 +933 465 5 8 +294 394 10 12 +196 428 7 10 +124 457 6 10 +47 450 8 10 +158 432 5 8 +89 435 5 7 +105 420 4 8 +66 417 8 11 +46 409 5 6 +26 396 6 8 +95 410 5 8 +140 403 5 7 +4 373 4 7 +6 359 5 8 +94 375 5 7 +41 392 6 8 +12 390 6 6 +69 354 5 8 +118 431 6 8 +77 436 4 5 +72 464 5 8 +212 441 5 9 +232 421 4 7 +250 446 6 7 +242 441 5 6 +26 428 6 8 +6 408 4 6 +158 390 4 6 +177 420 4 8 +# 35--Basketball/35_Basketball_basketballgame_ball_35_998.jpg +46 200 23 50 +65 
285 34 38 +121 288 37 48 +45 394 40 50 +336 95 57 79 +351 320 40 54 +320 409 32 47 +680 63 52 75 +817 317 41 56 +910 281 46 68 +1000 312 23 63 +839 218 27 40 +447 145 27 37 +570 152 29 35 +502 337 39 41 +826 100 33 37 +114 119 23 24 +0 316 24 35 +513 50 27 33 +588 53 27 40 +270 88 23 32 +177 121 28 27 +909 210 38 37 +866 303 31 40 +476 229 25 32 +392 183 33 36 +2 242 13 40 +# 35--Basketball/35_Basketball_basketballgame_ball_35_689.jpg +617 71 11 15 +657 71 8 12 +370 78 7 12 +323 99 11 15 +352 104 13 14 +388 113 9 10 +393 82 11 9 +423 89 9 11 +445 83 9 12 +491 86 11 15 +520 90 11 17 +551 91 12 15 +575 94 12 15 +624 103 11 14 +653 104 12 16 +581 133 9 13 +548 131 10 13 +517 131 8 13 +479 117 11 14 +452 115 9 16 +411 109 12 14 +343 140 11 12 +341 170 12 23 +382 141 8 14 +411 143 9 12 +437 146 9 12 +475 145 11 12 +557 163 10 12 +392 189 12 15 +421 175 10 14 +447 181 11 14 +421 202 11 14 +449 202 10 15 +469 223 11 12 +491 188 8 14 +514 190 11 15 +524 210 11 12 +550 194 12 15 +582 193 9 15 +590 216 14 17 +552 216 15 14 +632 201 9 15 +660 169 12 15 +653 201 12 17 +665 226 12 16 +595 15 8 14 +677 27 8 10 +698 19 13 18 +742 18 9 15 +707 57 9 16 +684 78 8 15 +699 174 11 15 +803 85 11 18 +767 112 11 16 +771 144 13 21 +808 151 10 18 +804 203 9 15 +766 209 12 14 +723 185 10 13 +206 255 15 21 +317 317 16 22 +549 305 13 19 +624 317 15 19 +672 368 16 22 +62 17 9 9 +84 20 8 9 +20 38 9 10 +50 41 9 11 +78 44 7 10 +34 66 11 13 +61 70 9 11 +20 95 9 10 +20 124 10 12 +56 100 8 12 +872 197 10 18 +881 174 10 14 +872 126 10 21 +831 96 8 14 +822 64 10 14 +894 78 8 13 +911 99 9 13 +937 70 10 16 +930 41 11 11 +928 11 9 11 +952 18 10 11 +969 38 10 16 +976 79 11 16 +985 107 11 19 +937 109 14 21 +944 139 11 17 +986 142 14 15 +966 226 13 13 +952 274 18 19 +99 45 9 12 +116 23 9 11 +129 55 8 10 +119 73 9 10 +146 91 9 10 +99 134 9 15 +96 153 11 14 +123 195 8 13 +138 138 9 15 +177 112 10 14 +154 50 9 11 +147 17 7 13 +188 32 8 9 +212 86 8 12 +204 119 8 11 +238 91 10 15 +220 119 9 13 +206 5 9 11 +247 62 9 10 +271 48 8 10 +270 65 8 12 +271 98 8 12 +294 67 10 15 +296 99 9 13 +284 131 9 14 +235 156 11 15 +185 155 7 12 +271 166 11 11 +315 132 9 13 +310 40 9 14 +298 11 10 15 +50 128 9 12 +29 145 11 14 +68 149 10 14 +74 129 10 14 +82 104 9 13 +230 4 8 12 +173 4 8 6 +352 17 8 12 +371 16 8 13 +395 25 8 12 +436 25 9 12 +461 25 10 12 +520 4 7 11 +552 11 9 13 +614 12 8 9 +653 19 8 13 +333 53 9 10 +334 70 8 9 +360 56 7 9 +391 51 11 12 +427 51 8 14 +463 55 8 12 +486 54 9 14 +500 36 8 13 +515 33 11 12 +542 48 9 11 +578 42 8 13 +649 44 10 12 +519 65 8 14 +542 69 10 12 +582 64 11 15 +241 125 11 15 +475 3 9 12 +484 1 10 12 +675 47 10 12 +602 233 12 12 +893 45 9 10 +770 186 10 12 +726 144 11 16 +615 132 12 14 +926 168 12 16 +960 190 11 13 +939 210 10 16 +690 240 13 14 +4 19 9 10 +5 72 11 11 +995 22 11 12 +# 35--Basketball/35_Basketball_playingbasketball_35_91.jpg +272 378 334 397 +# 35--Basketball/35_Basketball_playingbasketball_35_449.jpg +339 153 99 135 +# 35--Basketball/35_Basketball_basketballgame_ball_35_50.jpg +409 208 23 31 +276 170 24 30 +588 129 32 44 +777 336 40 39 +949 271 29 40 +213 280 38 51 +970 104 6 8 +943 105 6 8 +962 105 5 8 +922 97 6 8 +937 97 6 8 +891 96 5 8 +879 96 5 8 +997 106 8 10 +# 35--Basketball/35_Basketball_playingbasketball_35_135.jpg +498 269 31 36 +196 245 35 52 +# 35--Basketball/35_Basketball_playingbasketball_35_674.jpg +634 45 61 80 +370 154 66 72 +183 139 51 55 +152 68 36 42 +214 77 25 32 +269 83 28 40 +289 115 31 39 +357 94 26 30 +90 8 31 36 +175 15 27 37 +405 20 27 34 +351 8 24 30 +450 47 25 30 +478 103 25 23 
+561 104 29 37 +505 29 23 34 +450 19 26 31 +553 32 28 32 +603 22 23 33 +859 259 28 36 +598 274 34 44 +646 259 36 47 +950 245 23 33 +999 246 23 32 +817 158 26 37 +803 127 31 40 +876 160 26 37 +901 73 24 34 +803 50 24 31 +789 89 23 28 +742 40 25 37 +981 31 43 78 +16 235 33 39 +0 89 27 47 +977 120 25 30 +926 81 26 32 +481 153 28 31 +844 135 20 31 +578 77 25 26 +269 3 20 32 +# 35--Basketball/35_Basketball_playingbasketball_35_876.jpg +461 110 82 118 +980 522 21 27 +956 506 20 25 +929 521 19 24 +923 491 16 23 +929 452 19 22 +964 449 15 22 +1010 506 14 27 +1008 477 16 28 +886 515 24 28 +856 487 20 25 +896 460 21 27 +865 449 18 25 +849 424 17 22 +882 404 20 23 +932 415 15 20 +954 399 15 24 +990 433 19 22 +1001 416 16 22 +996 380 14 21 +826 476 16 23 +837 460 15 21 +821 449 16 22 +800 506 25 29 +771 501 22 29 +730 525 25 30 +709 503 23 29 +656 502 24 31 +791 493 14 21 +772 470 19 24 +622 405 19 25 +206 528 22 28 +262 531 23 28 +340 490 21 21 +284 494 18 22 +209 488 20 25 +267 476 17 17 +158 483 18 24 +134 465 19 23 +102 477 18 22 +89 468 18 22 +955 316 17 20 +967 345 16 21 +943 342 15 19 +886 319 20 25 +907 266 16 20 +865 265 13 18 +843 314 17 19 +811 289 15 17 +816 259 16 18 +755 334 16 19 +779 321 13 18 +729 318 15 20 +701 280 14 20 +773 281 14 19 +750 270 12 15 +770 262 12 17 +770 249 14 15 +715 252 14 20 +664 256 13 20 +682 227 16 22 +724 229 15 18 +778 205 14 19 +769 182 13 16 +724 181 15 17 +677 182 16 18 +634 181 13 18 +585 176 14 20 +628 248 12 15 +946 222 14 20 +957 199 13 18 +982 220 12 17 +998 177 13 18 +966 182 15 16 +955 150 13 14 +1006 363 13 16 +150 155 12 16 +190 158 12 17 +231 180 13 15 +256 203 12 15 +235 227 15 18 +209 243 14 14 +259 266 11 14 +299 232 11 17 +285 175 14 20 +280 139 13 17 +328 151 15 17 +393 162 14 16 +431 156 14 14 +336 72 12 14 +288 81 13 15 +243 80 13 16 +192 67 13 16 +299 195 14 19 +65 115 74 96 +596 74 12 16 +546 66 12 16 +422 14 15 16 +296 571 24 33 +360 552 12 17 +# 35--Basketball/35_Basketball_Basketball_35_801.jpg +642 280 14 15 +586 347 15 16 +520 320 14 16 +919 254 15 18 +# 35--Basketball/35_Basketball_playingbasketball_35_2.jpg +820 517 84 99 +447 405 66 90 +# 35--Basketball/35_Basketball_playingbasketball_35_417.jpg +888 164 28 34 +926 117 20 27 +961 52 27 41 +744 86 30 34 +367 22 44 46 +526 36 29 31 +248 60 27 29 +309 55 28 39 +225 133 31 35 +184 51 23 28 +52 65 28 37 +159 65 27 36 +349 240 35 47 +278 262 35 43 +210 250 32 47 +443 35 28 39 +# 35--Basketball/35_Basketball_Basketball_35_579.jpg +903 96 51 64 +492 116 23 36 +733 299 47 54 +204 216 23 25 +80 96 28 29 +411 343 24 27 +478 572 22 28 +266 529 23 31 +77 599 25 29 +# 35--Basketball/35_Basketball_basketballgame_ball_35_429.jpg +422 210 252 335 +# 35--Basketball/35_Basketball_basketballgame_ball_35_478.jpg +325 470 12 12 +127 411 70 102 +310 281 7 10 +405 261 8 11 +332 261 7 9 +342 248 7 8 +399 393 8 10 +722 320 10 12 +802 337 11 14 +860 276 7 11 +# 35--Basketball/35_Basketball_Basketball_35_458.jpg +245 35 40 64 +492 25 43 72 +743 31 40 56 +# 35--Basketball/35_Basketball_playingbasketball_35_818.jpg +423 89 97 141 +# 35--Basketball/35_Basketball_Basketball_35_791.jpg +770 42 84 120 +# 35--Basketball/35_Basketball_basketballgame_ball_35_201.jpg +13 29 72 153 +83 131 105 137 +115 236 107 153 +638 359 126 177 +284 381 118 169 +507 212 102 142 +729 166 99 123 +383 145 99 142 +611 11 107 129 +936 209 86 137 +834 11 99 102 +21 359 107 201 +# 36--Football/36_Football_americanfootball_ball_36_81.jpg +170 72 60 59 +374 123 57 48 +910 7 57 52 +567 220 35 50 +700 91 31 37 +474 45 52 53 +342 38 38 40 +# 
36--Football/36_Football_americanfootball_ball_36_234.jpg +335 91 101 148 +# 36--Football/36_Football_americanfootball_ball_36_162.jpg +402 272 196 284 +# 36--Football/36_Football_americanfootball_ball_36_257.jpg +71 222 75 95 +429 193 63 81 +292 179 49 65 +100 166 55 65 +629 132 54 72 +864 152 83 123 +503 205 35 53 +387 191 29 48 +780 209 44 61 +62 185 19 27 +# 36--Football/36_Football_americanfootball_ball_36_301.jpg +454 116 187 235 +# 36--Football/36_Football_americanfootball_ball_36_279.jpg +337 881 84 148 +# 36--Football/36_Football_americanfootball_ball_36_510.jpg +566 144 76 78 +191 56 48 73 +# 36--Football/36_Football_americanfootball_ball_36_16.jpg +540 88 132 152 +292 266 128 160 +# 36--Football/36_Football_americanfootball_ball_36_396.jpg +252 200 126 166 +# 36--Football/36_Football_Football_36_157.jpg +230 174 18 22 +95 240 23 28 +409 55 22 22 +240 152 16 18 +427 266 20 25 +50 131 17 25 +121 113 15 17 +144 98 17 22 +180 96 18 28 +248 117 17 23 +152 152 20 22 +157 177 20 25 +186 185 21 25 +78 148 16 22 +1 55 8 31 +566 133 16 17 +610 143 18 25 +584 162 17 20 +544 194 17 22 +362 116 15 22 +152 28 15 15 +102 174 18 17 +44 188 20 22 +820 18 13 19 +900 26 18 24 +409 125 19 19 +765 147 20 26 +812 107 18 19 +869 84 23 28 +905 106 17 23 +897 86 24 24 +983 246 22 27 +642 0 18 18 +669 35 18 20 +701 17 14 17 +583 192 19 22 +504 253 19 23 +1 188 15 20 +75 200 19 21 +90 138 18 21 +433 219 15 26 +484 207 17 25 +115 203 19 21 +357 177 16 20 +338 105 18 21 +91 108 18 21 +199 5 20 17 +273 44 20 22 +579 86 21 24 +615 74 20 24 +963 140 18 22 +906 215 24 31 +839 190 16 23 +384 367 32 45 +410 141 19 23 +441 165 17 20 +479 122 22 27 +425 96 20 25 +454 104 17 19 +382 129 18 22 +392 81 19 24 +96 10 18 20 +608 244 28 32 +504 347 34 39 +483 357 25 31 +11 170 16 23 +345 134 20 24 +845 45 21 27 +337 41 20 24 +144 269 19 27 +237 250 26 33 +905 170 20 25 +614 333 39 47 +570 332 33 47 +335 179 18 24 +337 164 16 21 +703 144 19 21 +15 119 18 23 +856 4 17 19 +970 23 20 24 +935 165 17 19 +975 159 17 25 +981 200 16 24 +667 173 19 24 +635 173 24 19 +274 185 19 21 +246 188 20 25 +229 174 18 22 +567 104 17 25 +459 33 26 30 +516 54 14 24 +493 8 16 21 +112 138 20 25 +615 42 20 22 +947 97 17 23 +535 76 18 27 +490 88 17 23 +348 5 20 22 +417 1 22 19 +96 243 23 28 +42 258 22 25 +851 168 17 20 +510 176 17 24 +522 113 20 23 +832 108 16 22 +285 136 18 24 +218 163 16 22 +787 174 22 23 +232 77 21 29 +877 174 20 25 +752 189 21 21 +739 217 26 32 +661 257 22 22 +631 196 24 24 +434 348 21 32 +163 204 21 25 +301 159 20 26 +783 112 18 25 +789 58 14 20 +319 82 23 24 +542 30 18 23 +581 8 17 24 +546 109 18 24 +392 197 37 42 +788 232 29 40 +516 141 18 27 +303 115 18 24 +732 55 18 20 +755 118 21 24 +37 17 25 24 +294 89 19 23 +890 124 17 22 +298 211 23 26 +302 254 24 29 +227 128 19 23 +271 166 18 21 +250 172 19 18 +253 154 16 19 +32 154 19 24 +0 146 16 21 +957 101 18 21 +589 139 19 24 +# 36--Football/36_Football_americanfootball_ball_36_526.jpg +624 160 108 144 +# 36--Football/36_Football_americanfootball_ball_36_132.jpg +112 208 50 47 +530 19 39 41 +793 79 37 37 +376 244 50 39 +780 225 49 45 +690 130 20 21 +# 36--Football/36_Football_americanfootball_ball_36_358.jpg +318 174 56 80 +400 330 56 78 +# 36--Football/36_Football_Football_36_62.jpg +313 114 40 56 +440 190 41 61 +773 140 48 60 +807 114 38 48 +# 36--Football/36_Football_americanfootball_ball_36_853.jpg +212 96 100 148 +# 36--Football/36_Football_americanfootball_ball_36_25.jpg +356 124 58 94 +574 86 66 98 +# 36--Football/36_Football_americanfootball_ball_36_615.jpg +200 104 25 47 
+385 140 17 37 +454 155 15 25 +84 136 13 18 +532 186 18 23 +645 165 11 18 +741 138 14 22 +669 174 13 18 +862 153 13 18 +923 102 19 32 +522 152 11 24 +# 36--Football/36_Football_americanfootball_ball_36_487.jpg +920 212 8 12 +915 234 8 12 +939 226 10 11 +969 226 7 10 +534 197 10 14 +547 197 9 13 +961 177 10 11 +85 181 10 12 +843 212 8 12 +860 149 8 11 +850 134 10 12 +863 108 8 11 +743 174 9 11 +953 29 8 9 +341 88 9 12 +721 234 8 13 +358 273 10 11 +861 78 9 13 +840 46 7 11 +772 35 9 10 +811 38 7 10 +577 42 8 8 +590 34 8 9 +606 46 8 10 +628 9 7 10 +562 12 8 8 +599 88 9 7 +320 71 7 10 +320 185 11 13 +355 182 12 13 +13 146 10 13 +901 93 8 10 +925 98 8 9 +909 127 8 11 +950 120 10 11 +844 90 9 10 +758 180 7 11 +767 195 9 11 +790 203 8 11 +786 159 8 11 +792 174 7 11 +826 168 7 11 +741 280 14 14 +793 275 13 14 +644 283 14 14 +844 3 9 13 +931 14 7 9 +908 13 9 9 +884 16 8 8 +556 225 9 9 +282 144 11 14 +327 38 9 12 +400 135 9 10 +402 97 8 12 +227 78 9 10 +58 101 10 14 +413 234 10 13 +417 255 11 14 +129 142 10 10 +310 12 10 17 +294 159 9 14 +253 159 11 12 +230 151 11 13 +228 136 10 13 +261 119 9 11 +250 108 8 12 +225 94 6 11 +944 174 9 9 +943 144 9 13 +378 97 7 10 +354 79 7 9 +382 69 8 11 +331 15 8 10 +940 55 9 10 +505 42 7 11 +383 47 8 10 +584 220 12 15 +572 221 7 12 +623 185 10 12 +610 246 9 12 +572 252 8 12 +633 225 8 14 +422 109 9 14 +454 116 10 12 +452 130 10 12 +797 93 10 11 +496 129 9 11 +238 223 11 10 +10 38 10 10 +28 38 8 9 +31 23 10 12 +391 180 11 11 +417 182 10 10 +432 187 9 15 +277 162 10 13 +236 24 9 11 +189 17 7 12 +197 7 7 11 +308 92 8 14 +824 212 10 13 +798 222 9 10 +393 233 8 14 +439 29 8 11 +450 39 8 9 +454 33 6 9 +481 38 7 8 +455 61 8 9 +444 83 8 9 +463 88 8 9 +566 102 8 13 +391 91 9 9 +507 100 8 10 +466 2 9 8 +487 203 10 10 +460 185 7 10 +515 198 10 14 +421 72 9 11 +538 113 10 11 +769 88 9 11 +723 88 8 10 +763 77 7 8 +797 54 8 10 +777 61 8 10 +915 73 9 8 +947 89 8 9 +930 122 6 7 +918 169 9 10 +159 90 10 13 +767 112 10 13 +775 0 8 9 +107 14 9 9 +89 12 12 11 +897 197 10 12 +283 7 10 12 +884 45 10 11 +596 6 10 11 +532 269 14 14 +797 330 18 23 +811 331 18 31 +59 303 13 21 +48 256 11 18 +83 256 10 15 +88 225 13 14 +71 194 11 17 +101 200 10 15 +103 174 10 16 +58 155 10 17 +42 134 10 16 +33 102 9 16 +148 168 9 12 +143 248 9 12 +101 241 11 14 +169 236 11 19 +69 225 8 12 +24 169 10 16 +28 233 12 18 +44 120 8 14 +42 92 9 14 +545 293 13 17 +552 95 8 9 +296 183 10 15 +294 258 8 14 +322 255 10 16 +337 281 11 11 +297 291 14 17 +356 297 15 17 +969 14 8 10 +731 12 8 8 +64 89 10 16 +95 92 10 13 +113 86 10 18 +88 104 11 15 +628 74 8 11 +645 89 8 9 +702 78 8 11 +661 32 10 11 +708 14 7 11 +698 35 8 11 +736 73 9 11 +642 47 9 11 +870 44 8 12 +29 13 7 9 +54 13 8 11 +67 27 8 11 +80 18 9 14 +84 33 9 12 +62 40 10 12 +83 68 9 12 +115 52 9 13 +131 47 9 13 +60 54 10 13 +8 217 10 14 +6 259 9 13 +140 34 9 11 +185 35 7 11 +192 34 9 11 +176 48 10 12 +159 64 7 9 +142 66 10 11 +139 105 11 13 +351 49 8 8 +204 78 9 9 +184 65 8 10 +843 175 10 9 +874 177 11 13 +874 234 9 11 +899 228 9 12 +884 204 10 11 +274 106 10 11 +248 84 8 10 +258 76 7 7 +976 146 10 13 +978 127 9 11 +977 102 9 14 +775 244 10 13 +728 199 8 14 +689 184 9 10 +699 187 10 13 +656 180 12 15 +651 150 11 11 +615 149 10 11 +635 153 9 10 +598 142 9 8 +621 135 10 10 +634 134 9 11 +628 100 8 9 +651 117 9 11 +663 114 9 10 +698 119 9 13 +681 111 10 11 +137 284 18 17 +924 273 15 13 +461 290 17 17 +517 281 14 18 +79 138 9 12 +62 141 11 15 +102 158 10 12 +890 25 9 8 +917 59 7 10 +1002 31 8 10 +1005 102 8 10 +528 246 10 10 +378 254 10 12 +365 257 10 13 +369 239 10 13 
+397 252 8 14 +988 229 8 11 +978 176 7 9 +1004 173 7 14 +1013 150 9 12 +670 81 8 9 +921 22 7 7 +211 121 9 12 +102 35 11 11 +126 35 10 12 +163 16 8 11 +286 80 11 15 +302 104 8 14 +13 69 10 15 +9 109 10 11 +17 91 10 13 +47 57 7 14 +467 134 8 13 +672 62 7 9 +678 98 7 10 +167 118 12 17 +325 108 10 12 +204 232 11 15 +196 174 7 13 +226 253 11 15 +234 235 11 13 +279 199 9 16 +507 72 9 12 +534 83 8 9 +496 42 6 10 +200 65 11 8 +633 206 11 13 +529 23 7 10 +523 67 7 9 +552 51 5 9 +522 41 7 11 +501 4 7 9 +552 34 6 10 +511 4 7 12 +540 16 8 9 +175 99 10 14 +658 208 10 12 +676 239 8 13 +731 245 9 11 +749 248 10 13 +760 249 6 11 +495 18 8 10 +705 97 8 11 +652 78 9 9 +689 76 8 11 +715 130 8 9 +728 115 6 9 +266 36 9 15 +224 3 8 11 +318 53 8 10 +304 55 6 9 +943 106 9 11 +993 104 8 11 +233 92 8 11 +372 102 7 12 +876 217 10 11 +922 183 9 14 +886 165 9 11 +880 127 7 9 +363 4 10 8 +394 15 7 9 +424 32 9 9 +266 160 9 13 +113 104 11 15 +40 165 12 18 +557 27 13 14 +737 123 10 13 +838 25 9 13 +786 34 11 12 +832 91 5 8 +882 62 5 7 +939 96 5 9 +833 185 7 9 +810 161 6 7 +913 116 8 8 +763 147 9 11 +754 264 8 10 +16 244 15 17 +119 255 9 14 +162 35 11 10 +0 216 5 12 +20 306 12 19 +1010 241 11 13 +585 273 14 19 +# 36--Football/36_Football_americanfootball_ball_36_693.jpg +493 300 50 72 +604 481 71 46 +465 575 68 63 +270 127 78 65 +552 164 71 73 +# 36--Football/36_Football_Football_36_138.jpg +133 57 24 35 +284 80 30 38 +747 131 23 31 +878 86 29 39 +839 261 22 22 +326 200 8 25 +406 214 21 34 +538 230 17 29 +520 203 13 16 +471 210 10 16 +560 202 11 18 +495 314 21 26 +644 218 22 27 +684 197 9 16 +980 75 11 16 +387 190 7 9 +863 78 11 13 +424 213 11 14 +634 303 22 22 +452 191 14 14 +922 93 10 14 +# 36--Football/36_Football_americanfootball_ball_36_114.jpg +725 105 69 82 +217 32 33 47 +602 20 33 44 +437 13 42 41 +43 27 38 42 +914 31 26 48 +# 36--Football/36_Football_americanfootball_ball_36_126.jpg +60 139 31 48 +340 164 31 46 +782 115 32 48 +523 300 33 46 +329 304 40 39 +675 94 35 43 +699 140 42 44 +641 224 42 42 +# 36--Football/36_Football_americanfootball_ball_36_265.jpg +368 91 222 280 +# 36--Football/36_Football_americanfootball_ball_36_456.jpg +524 78 148 184 +# 36--Football/36_Football_Football_36_80.jpg +191 232 28 31 +597 177 21 30 +746 297 34 37 +845 197 26 44 +# 36--Football/36_Football_Football_36_108.jpg +134 127 13 17 +273 119 14 21 +532 69 15 22 +# 36--Football/36_Football_americanfootball_ball_36_38.jpg +370 22 64 78 +518 52 60 74 +714 72 78 82 +# 36--Football/36_Football_americanfootball_ball_36_327.jpg +762 428 92 82 +# 36--Football/36_Football_Football_36_110.jpg +78 159 32 40 +183 156 32 40 +131 314 34 49 +222 309 35 51 +289 158 34 49 +398 151 34 44 +383 303 35 50 +507 144 31 43 +506 330 32 47 +603 144 32 38 +637 297 35 42 +715 123 31 41 +814 159 33 44 +779 273 38 45 +# 36--Football/36_Football_americanfootball_ball_36_373.jpg +634 70 84 84 +402 92 64 100 +# 36--Football/36_Football_americanfootball_ball_36_631.jpg +418 126 162 190 +# 36--Football/36_Football_Football_36_202.jpg +561 325 48 56 +358 239 48 45 +394 331 40 43 +480 195 41 56 +630 185 53 54 +713 194 38 43 +773 149 41 61 +# 36--Football/36_Football_americanfootball_ball_36_647.jpg +411 236 149 122 +# 36--Football/36_Football_americanfootball_ball_36_321.jpg +510 158 102 182 +# 36--Football/36_Football_americanfootball_ball_36_681.jpg +429 153 143 163 +# 36--Football/36_Football_Football_36_23.jpg +228 60 54 74 +378 96 62 70 +604 44 52 82 +738 52 52 78 +# 36--Football/36_Football_americanfootball_ball_36_273.jpg +322 226 74 74 +# 
36--Football/36_Football_Football_36_194.jpg +69 191 11 16 +405 148 10 13 +456 190 12 17 +421 220 15 23 +453 255 18 34 +555 302 19 26 +826 240 18 21 +991 139 9 13 +# 36--Football/36_Football_americanfootball_ball_36_6.jpg +26 155 12 14 +127 103 33 43 +253 169 12 15 +300 174 14 18 +357 171 12 15 +354 270 16 17 +252 275 15 15 +407 157 16 20 +399 194 14 17 +377 217 13 19 +444 202 17 17 +449 178 14 11 +495 173 10 14 +493 197 12 12 +566 123 13 18 +613 164 13 16 +596 215 14 18 +575 275 12 13 +611 255 15 16 +648 234 11 15 +652 161 15 17 +537 209 27 36 +477 239 35 41 +938 154 14 17 +997 152 11 13 +970 259 13 15 +808 124 34 40 +861 156 30 30 +555 300 16 11 +1018 207 6 20 +0 228 11 19 +# 36--Football/36_Football_americanfootball_ball_36_27.jpg +188 194 56 90 +656 32 74 98 +# 36--Football/36_Football_americanfootball_ball_36_111.jpg +466 142 130 151 +# 36--Football/36_Football_americanfootball_ball_36_1021.jpg +206 306 108 118 +432 192 80 92 +# 37--Soccer/37_Soccer_soccer_ball_37_113.jpg +514 144 132 174 +706 78 75 93 +# 37--Soccer/37_Soccer_Soccer_37_263.jpg +687 144 35 48 +# 37--Soccer/37_Soccer_soccer_ball_37_150.jpg +350 42 64 94 +# 37--Soccer/37_Soccer_Soccer_37_394.jpg +584 124 20 27 +517 136 15 18 +902 92 25 32 +131 148 14 20 +0 164 17 23 +# 37--Soccer/37_Soccer_soccer_ball_37_994.jpg +242 68 80 110 +# 37--Soccer/37_Soccer_Soccer_37_3.jpg +316 144 52 80 +844 96 58 86 +# 37--Soccer/37_Soccer_soccer_ball_37_720.jpg +858 184 30 36 +711 258 21 30 +516 337 16 22 +581 348 15 20 +448 29 45 61 +121 104 44 58 +# 37--Soccer/37_Soccer_soccer_ball_37_114.jpg +125 149 27 34 +219 174 28 37 +162 258 28 36 +73 255 29 33 +240 259 29 37 +292 163 26 37 +365 159 28 40 +325 246 32 36 +412 242 30 38 +135 366 36 44 +264 362 33 42 +384 348 33 44 +506 344 31 45 +442 152 27 36 +520 152 27 34 +501 233 28 36 +610 153 28 39 +675 158 27 31 +740 157 28 38 +683 257 29 39 +602 272 30 39 +642 355 35 43 +831 161 30 36 +774 255 28 35 +892 250 30 44 +758 363 34 44 +# 37--Soccer/37_Soccer_Soccer_37_170.jpg +945 61 67 85 +743 74 44 54 +619 85 77 95 +733 0 16 17 +603 2 24 31 +467 223 73 78 +# 37--Soccer/37_Soccer_soccer_ball_37_643.jpg +326 86 62 78 +742 42 58 74 +# 37--Soccer/37_Soccer_soccer_ball_37_32.jpg +370 130 58 78 +# 37--Soccer/37_Soccer_Soccer_37_469.jpg +310 125 57 76 +649 54 57 78 +# 37--Soccer/37_Soccer_soccer_ball_37_583.jpg +280 61 6 8 +331 39 5 9 +324 30 5 9 +333 19 5 8 +266 30 5 8 +280 17 6 8 +271 39 6 8 +309 48 6 8 +295 47 5 7 +13 103 8 9 +100 49 5 8 +99 36 6 8 +83 46 6 8 +75 24 6 8 +110 28 5 6 +81 67 7 9 +46 75 6 9 +59 50 6 7 +44 50 6 7 +656 66 5 8 +663 52 6 9 +682 51 6 8 +630 52 5 8 +646 55 6 7 +267 96 44 51 +183 70 45 61 +139 99 7 9 +156 63 5 5 +155 87 6 9 +133 87 6 8 +121 98 7 9 +122 46 6 9 +138 48 7 9 +795 98 40 53 +796 252 44 55 +866 140 7 8 +961 102 6 8 +945 99 5 8 +905 98 5 7 +893 86 8 9 +561 102 43 51 +666 85 40 55 +623 92 6 8 +668 254 44 57 +525 270 44 56 +404 77 45 60 +414 260 44 60 +285 287 45 58 +355 88 6 9 +372 82 5 8 +390 82 6 7 +329 80 5 8 +350 79 6 8 +379 60 6 8 +313 89 5 7 +309 69 6 8 +95 102 7 8 +65 79 5 7 +24 81 6 6 +4 74 7 10 +38 42 4 6 +16 38 6 7 +0 37 3 7 +38 26 5 7 +37 18 4 6 +10 18 5 7 +57 26 5 7 +50 23 4 6 +70 15 7 9 +60 39 5 6 +103 3 4 6 +109 17 6 8 +94 22 5 6 +113 5 5 6 +130 8 5 7 +146 8 5 8 +137 24 6 8 +123 35 5 7 +112 58 6 6 +91 70 5 8 +68 92 7 8 +46 96 7 7 +36 91 5 8 +0 88 7 9 +159 38 5 8 +157 24 6 7 +172 4 5 8 +185 6 6 6 +178 28 5 6 +238 62 6 6 +164 50 7 9 +181 50 5 5 +180 44 4 4 +197 39 7 7 +246 71 5 8 +297 56 6 8 +290 41 6 7 +303 18 5 7 +276 7 6 7 +324 112 7 6 +454 132 7 8 +327 70 
6 9 +322 59 6 7 +333 49 5 7 +341 59 4 6 +359 60 6 8 +365 71 6 7 +353 52 5 6 +354 39 6 8 +345 32 6 6 +320 19 4 6 +292 12 5 7 +252 0 5 6 +259 9 4 7 +334 8 6 7 +315 8 5 7 +353 9 5 7 +359 21 6 8 +369 27 5 8 +377 21 5 6 +367 19 7 8 +369 0 6 6 +310 42 5 6 +389 70 6 8 +397 62 6 8 +393 28 6 8 +459 95 6 6 +492 84 6 8 +411 47 5 6 +436 63 5 4 +439 32 7 6 +432 22 6 7 +412 20 5 6 +426 13 5 6 +442 8 5 8 +400 2 6 7 +455 18 6 8 +464 40 6 8 +481 36 6 8 +472 26 4 6 +482 7 6 7 +492 2 7 9 +474 1 5 5 +495 39 5 6 +493 24 4 6 +497 16 5 7 +483 15 5 5 +464 23 4 6 +503 33 5 7 +516 30 6 9 +524 40 5 6 +520 22 7 7 +512 18 5 6 +515 2 4 5 +522 12 5 7 +536 3 5 7 +517 66 5 7 +542 63 6 9 +544 41 5 8 +537 31 6 6 +542 16 5 6 +547 4 6 8 +518 99 6 7 +537 93 7 8 +547 86 6 9 +542 82 6 8 +534 88 5 6 +547 52 5 7 +556 63 5 6 +632 87 5 7 +613 113 6 7 +640 103 5 4 +616 68 6 6 +636 65 5 6 +598 59 6 7 +606 53 6 8 +590 52 4 7 +569 53 4 5 +559 42 5 7 +583 40 4 7 +600 41 6 8 +611 31 5 9 +550 25 5 7 +568 23 6 7 +577 15 6 7 +560 8 7 9 +572 3 5 8 +586 22 5 7 +595 13 5 6 +605 23 5 7 +611 6 4 6 +624 26 6 5 +640 24 5 6 +629 33 6 6 +637 44 5 6 +648 33 6 7 +655 40 5 7 +667 33 5 7 +660 25 7 9 +652 9 7 9 +628 0 5 6 +630 11 5 9 +618 43 5 6 +676 40 5 8 +685 34 5 7 +674 23 5 6 +668 14 6 7 +701 33 5 8 +694 29 4 5 +702 54 6 8 +711 64 6 10 +711 45 5 7 +726 108 6 7 +748 103 7 8 +736 94 7 8 +766 116 6 7 +718 107 6 7 +739 74 5 7 +733 66 5 7 +760 95 5 7 +767 57 5 6 +779 56 5 8 +785 58 6 6 +794 44 6 7 +778 41 5 6 +754 37 7 7 +745 26 4 5 +762 24 4 5 +799 22 4 6 +787 0 4 3 +769 0 5 3 +809 54 5 8 +818 67 6 7 +840 83 4 8 +834 94 5 8 +850 99 5 7 +856 85 6 8 +864 78 5 5 +871 66 6 7 +873 98 6 9 +860 52 5 7 +871 52 5 6 +831 37 4 6 +812 34 5 7 +841 21 4 5 +823 18 5 6 +856 23 5 5 +875 19 5 6 +875 39 6 7 +853 67 5 7 +863 62 5 5 +853 54 4 4 +916 6 5 8 +937 12 5 7 +950 5 6 11 +893 38 5 6 +910 38 5 6 +881 76 5 7 +909 68 5 7 +924 48 5 7 +917 77 4 7 +930 66 5 7 +936 80 6 6 +911 87 6 9 +895 115 5 5 +892 101 5 8 +953 78 6 8 +938 61 4 6 +952 64 7 7 +954 41 5 6 +968 42 4 7 +984 57 7 9 +1011 32 4 7 +996 49 5 7 +1006 60 5 8 +1014 50 5 9 +1000 69 5 7 +969 87 5 9 +983 90 6 7 +1005 100 5 8 +1009 82 6 7 +989 101 7 8 +980 99 6 8 +955 95 5 8 +927 102 5 8 +885 47 7 10 +807 17 5 7 +# 37--Soccer/37_Soccer_soccer_ball_37_345.jpg +141 315 18 27 +1001 190 15 21 +969 180 16 23 +818 167 15 20 +715 164 15 19 +384 93 11 14 +365 179 15 21 +427 182 15 21 +555 166 16 22 +490 171 16 21 +622 179 15 21 +# 37--Soccer/37_Soccer_soccer_ball_37_233.jpg +857 390 13 16 +622 200 47 57 +414 264 30 41 +# 37--Soccer/37_Soccer_soccer_ball_37_803.jpg +342 131 409 589 +# 37--Soccer/37_Soccer_soccer_ball_37_238.jpg +616 295 27 36 +757 287 27 34 +413 251 26 28 +65 142 23 28 +# 37--Soccer/37_Soccer_soccer_ball_37_867.jpg +687 108 46 67 +508 59 49 73 +518 219 50 72 +900 231 41 51 +385 206 33 50 +252 238 44 67 +85 287 29 46 +# 37--Soccer/37_Soccer_soccer_ball_37_926.jpg +509 139 7 9 +938 234 7 9 +508 295 7 9 +# 37--Soccer/37_Soccer_soccer_ball_37_841.jpg +417 340 92 139 +693 663 101 112 +# 37--Soccer/37_Soccer_soccer_ball_37_1001.jpg +344 190 242 239 +# 37--Soccer/37_Soccer_soccer_ball_37_506.jpg +692 254 29 39 +608 261 33 45 +405 241 29 41 +457 236 30 37 +# 37--Soccer/37_Soccer_soccer_ball_37_698.jpg +682 454 50 71 +889 449 49 64 +499 473 69 104 +296 445 126 157 +43 391 78 97 +# 37--Soccer/37_Soccer_Soccer_37_415.jpg +520 30 68 82 +192 417 49 85 +# 37--Soccer/37_Soccer_soccer_ball_37_483.jpg +98 152 40 57 +205 167 31 39 +495 88 46 60 +401 120 44 44 +684 146 36 45 +761 144 27 36 +853 232 13 17 +912 80 37 41 +# 
37--Soccer/37_Soccer_soccer_ball_37_254.jpg +622 205 61 49 +728 51 53 79 +# 37--Soccer/37_Soccer_soccer_ball_37_1011.jpg +840 279 28 35 +776 320 38 52 +481 1 297 456 +324 0 100 134 +401 227 79 106 +430 0 44 63 +# 37--Soccer/37_Soccer_soccer_ball_37_832.jpg +408 46 62 85 +553 114 39 54 +# 37--Soccer/37_Soccer_soccer_ball_37_512.jpg +504 68 60 102 +# 37--Soccer/37_Soccer_Soccer_37_655.jpg +23 84 28 43 +358 16 34 55 +485 30 26 44 +615 126 19 32 +736 125 26 40 +572 186 10 13 +# 37--Soccer/37_Soccer_Soccer_37_74.jpg +276 44 62 78 +666 66 60 86 +# 37--Soccer/37_Soccer_soccer_ball_37_886.jpg +368 116 228 302 +# 37--Soccer/37_Soccer_soccer_ball_37_74.jpg +540 164 240 291 +# 37--Soccer/37_Soccer_Soccer_37_618.jpg +10 304 74 90 +242 322 74 94 +488 334 84 114 +822 360 72 116 +662 334 68 98 +460 290 66 86 +# 37--Soccer/37_Soccer_soccer_ball_37_479.jpg +482 46 84 74 +718 300 86 52 +786 672 116 66 +# 37--Soccer/37_Soccer_Soccer_37_52.jpg +381 447 63 87 +# 37--Soccer/37_Soccer_soccer_ball_37_171.jpg +840 231 18 24 +730 204 19 23 +# 37--Soccer/37_Soccer_soccer_ball_37_269.jpg +911 128 27 36 +897 231 28 35 +832 143 26 34 +831 240 30 38 +768 154 24 31 +740 234 30 37 +664 226 27 39 +688 144 25 34 +620 133 27 37 +595 246 30 41 +547 138 26 37 +509 240 32 38 +489 122 28 36 +411 267 30 36 +405 125 26 35 +335 132 26 37 +326 230 29 46 +100 133 29 38 +174 123 28 37 +164 251 34 41 +241 247 30 37 +266 136 31 39 +# 37--Soccer/37_Soccer_Soccer_37_114.jpg +189 63 43 61 +462 178 49 76 +# 37--Soccer/37_Soccer_Soccer_37_565.jpg +287 101 66 69 +# 37--Soccer/37_Soccer_soccer_ball_37_341.jpg +359 266 34 27 +378 179 13 13 +# 37--Soccer/37_Soccer_soccer_ball_37_281.jpg +0 0 0 0 +# 37--Soccer/37_Soccer_soccer_ball_37_685.jpg +578 62 66 96 +# 37--Soccer/37_Soccer_soccer_ball_37_28.jpg +369 141 36 65 +381 99 40 59 +616 40 43 54 +726 38 34 49 +# 37--Soccer/37_Soccer_soccer_ball_37_88.jpg +374 174 60 54 +516 168 66 64 +643 30 20 28 +302 9 20 29 +85 31 23 31 +193 21 18 22 +938 9 13 17 +# 37--Soccer/37_Soccer_soccer_ball_37_8.jpg +242 140 52 68 +662 80 58 70 +# 37--Soccer/37_Soccer_soccer_ball_37_60.jpg +512 231 48 78 +852 483 27 37 +228 447 34 44 +# 37--Soccer/37_Soccer_soccer_ball_37_818.jpg +562 102 62 76 +646 8 74 98 +60 78 124 156 +# 37--Soccer/37_Soccer_soccer_ball_37_692.jpg +460 266 9 11 +578 264 10 13 +571 258 8 10 +618 261 10 14 +658 262 9 13 +80 272 10 13 +74 256 8 10 +459 210 6 7 +# 37--Soccer/37_Soccer_Soccer_37_50.jpg +530 143 56 65 +659 188 55 78 +# 37--Soccer/37_Soccer_Soccer_37_651.jpg +6 217 25 39 +101 224 42 42 +304 218 25 40 +498 307 25 41 +904 294 28 41 +# 37--Soccer/37_Soccer_soccer_ball_37_851.jpg +124 235 57 83 +227 106 52 67 +378 277 58 76 +273 238 57 74 +494 96 45 57 +365 72 47 66 +530 273 53 78 +644 141 47 62 +724 203 51 70 +774 128 45 57 +634 246 52 73 +843 199 56 72 +826 95 42 54 +866 120 46 62 +# 37--Soccer/37_Soccer_Soccer_37_393.jpg +163 162 67 86 +328 200 93 113 +806 100 89 122 +578 35 53 65 +# 37--Soccer/37_Soccer_soccer_ball_37_815.jpg +298 184 130 190 +686 228 152 194 +# 37--Soccer/37_Soccer_soccer_ball_37_907.jpg +911 172 46 56 +887 275 36 45 +776 319 36 53 +674 87 36 51 +540 81 41 59 +917 64 35 48 +1001 77 23 44 +424 111 38 47 +333 116 35 39 +258 70 32 51 +306 281 36 49 +192 272 35 52 +148 307 33 40 +124 175 39 53 +13 295 36 46 +22 181 38 60 +168 50 34 48 +43 60 36 50 +725 0 23 21 +# 38--Tennis/38_Tennis_Tennis_38_507.jpg +298 275 33 38 +560 258 47 62 +751 182 24 30 +727 245 25 27 +# 38--Tennis/38_Tennis_Tennis_38_558.jpg +480 182 171 197 +# 38--Tennis/38_Tennis_Tennis_38_40.jpg +478 76 62 74 +# 
38--Tennis/38_Tennis_Tennis_38_142.jpg +481 134 31 38 +# 38--Tennis/38_Tennis_Tennis_38_81.jpg +703 167 172 188 +# 38--Tennis/38_Tennis_Tennis_38_323.jpg +294 85 112 152 +# 38--Tennis/38_Tennis_Tennis_38_497.jpg +370 147 62 88 +# 38--Tennis/38_Tennis_Tennis_38_592.jpg +267 147 42 59 +# 38--Tennis/38_Tennis_Tennis_38_717.jpg +196 366 678 791 +# 38--Tennis/38_Tennis_Tennis_38_300.jpg +272 84 152 204 +# 38--Tennis/38_Tennis_Tennis_38_131.jpg +220 98 190 264 +# 38--Tennis/38_Tennis_Tennis_38_683.jpg +797 154 54 69 +182 156 49 68 +337 150 58 80 +429 151 51 67 +467 221 57 75 +590 154 51 78 +647 179 61 77 +397 429 7 9 +# 38--Tennis/38_Tennis_Tennis_38_332.jpg +290 38 106 72 +# 38--Tennis/38_Tennis_Tennis_38_23.jpg +684 247 28 32 +# 38--Tennis/38_Tennis_Tennis_38_531.jpg +730 472 66 88 +# 38--Tennis/38_Tennis_Tennis_38_604.jpg +672 70 114 114 +# 38--Tennis/38_Tennis_Tennis_38_535.jpg +145 305 21 29 +171 276 22 27 +241 283 19 29 +193 340 20 25 +232 318 21 29 +292 292 19 22 +359 312 19 28 +397 322 19 24 +467 264 21 28 +418 316 20 31 +479 320 21 23 +509 316 23 27 +139 415 21 27 +134 456 21 28 +206 432 22 28 +247 420 20 28 +323 382 20 28 +289 460 20 29 +330 449 22 27 +370 405 22 28 +377 450 22 31 +436 414 20 25 +462 422 23 31 +514 421 23 31 +431 447 25 30 +504 470 24 32 +364 514 23 30 +579 301 21 35 +642 314 18 26 +713 319 20 26 +777 306 22 29 +863 326 19 25 +550 467 21 27 +596 422 19 31 +646 422 22 28 +656 460 19 30 +708 416 23 32 +761 427 22 28 +818 474 21 26 +893 487 22 29 +46 449 21 29 +209 301 17 26 +601 296 20 25 +# 38--Tennis/38_Tennis_Tennis_38_94.jpg +468 64 80 102 +# 38--Tennis/38_Tennis_Tennis_38_580.jpg +287 234 25 41 +321 238 22 37 +347 241 23 41 +407 243 28 40 +453 236 26 39 +497 244 23 40 +524 248 24 41 +560 227 28 40 +600 249 27 40 +625 242 24 40 +674 246 26 39 +32 369 18 38 +651 235 18 33 +# 38--Tennis/38_Tennis_Tennis_38_485.jpg +380 110 164 168 +# 38--Tennis/38_Tennis_Tennis_38_319.jpg +590 94 222 312 +# 38--Tennis/38_Tennis_Tennis_38_371.jpg +528 212 22 31 +566 197 25 26 +609 220 28 30 +651 199 26 28 +695 191 25 30 +731 190 22 27 +786 194 24 37 +817 192 19 27 +880 189 23 31 +764 283 28 41 +666 275 28 39 +567 287 31 37 +128 201 26 33 +172 192 27 33 +231 198 26 33 +277 201 27 34 +324 205 22 29 +378 209 25 35 +412 204 23 32 +444 213 25 34 +492 201 23 31 +343 296 29 36 +438 300 29 37 +549 369 33 46 +475 366 32 42 +# 38--Tennis/38_Tennis_Tennis_38_232.jpg +805 333 14 17 +733 253 9 12 +1014 257 9 13 +164 223 8 13 +726 69 6 10 +686 66 7 8 +648 71 7 8 +610 64 8 9 +634 38 6 10 +636 18 8 9 +673 21 7 10 +703 39 7 9 +732 38 7 10 +760 69 7 9 +769 43 7 10 +709 19 7 12 +779 19 6 10 +855 67 6 9 +893 63 7 10 +1008 61 8 10 +1015 40 7 11 +980 41 7 9 +944 42 7 9 +899 37 8 11 +870 36 7 10 +875 11 7 10 +911 18 9 11 +948 14 8 10 +984 11 8 10 +# 38--Tennis/38_Tennis_Tennis_38_420.jpg +638 50 52 74 +# 38--Tennis/38_Tennis_Tennis_38_182.jpg +252 100 168 234 +# 38--Tennis/38_Tennis_Tennis_38_758.jpg +44 230 268 374 +584 162 258 380 +# 38--Tennis/38_Tennis_Tennis_38_501.jpg +146 588 86 102 +216 412 58 82 +564 90 118 150 +894 460 68 100 +# 38--Tennis/38_Tennis_Tennis_38_230.jpg +304 182 158 164 +# 38--Tennis/38_Tennis_Tennis_38_240.jpg +714 156 170 242 +# 38--Tennis/38_Tennis_Tennis_38_666.jpg +382 190 104 104 +# 38--Tennis/38_Tennis_Tennis_38_18.jpg +290 94 60 70 +# 38--Tennis/38_Tennis_Tennis_38_128.jpg +532 108 134 152 +# 38--Tennis/38_Tennis_Tennis_38_692.jpg +775 494 20 28 +482 229 48 68 +227 344 48 58 +745 508 11 13 +641 119 292 352 +# 38--Tennis/38_Tennis_Tennis_38_452.jpg +46 114 37 51 +185 127 39 52 
+282 79 37 51 +393 63 33 51 +510 55 34 52 +614 60 38 49 +722 79 36 50 +807 99 38 51 +913 112 33 52 +# 38--Tennis/38_Tennis_Tennis_38_754.jpg +236 106 168 218 +# 39--Ice_Skating/39_Ice_Skating_iceskiing_39_901.jpg +661 413 29 34 +622 410 37 34 +301 139 35 54 +459 341 13 22 +570 250 14 24 +662 332 11 15 +706 331 10 13 +801 266 13 18 +963 311 10 16 +691 327 13 15 +918 304 25 22 +906 565 21 18 +878 573 9 10 +# 39--Ice_Skating/39_Ice_Skating_iceskiing_39_855.jpg +548 327 31 57 +# 39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_200.jpg +522 180 78 96 +# 39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_417.jpg +257 246 51 75 +321 245 61 80 +247 190 10 14 +372 165 15 17 +479 165 14 21 +503 283 66 87 +614 201 74 105 +864 177 14 19 +979 165 9 10 +# 39--Ice_Skating/39_Ice_Skating_iceskiing_39_121.jpg +196 36 128 168 +670 58 100 130 +# 39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_176.jpg +425 346 49 108 +694 359 64 120 +322 193 14 17 +687 176 11 18 +# 39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_103.jpg +719 239 26 38 +415 187 30 42 +443 169 22 27 +336 168 17 20 +181 183 16 23 +227 213 17 20 +533 179 13 23 +# 39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_825.jpg +498 156 62 100 +# 39--Ice_Skating/39_Ice_Skating_iceskiing_39_583.jpg +784 148 27 33 +696 202 10 13 +304 88 20 29 +352 183 20 25 +445 134 32 46 +460 224 45 33 +395 97 16 25 +1026 474 -2 23 +384 97 11 15 +521 114 27 40 +499 137 21 22 +325 86 18 25 +303 173 17 22 +274 74 13 13 +329 72 13 13 +652 198 10 11 +661 210 10 9 +676 206 9 13 +668 201 8 10 +599 177 9 12 +627 173 9 11 +626 137 5 7 +619 164 5 6 +639 212 7 9 +418 108 19 25 +359 94 16 17 +612 188 10 11 +653 178 10 11 +634 220 20 17 +550 127 9 12 +593 112 12 13 +665 182 6 11 +# 39--Ice_Skating/39_Ice_Skating_iceskiing_39_276.jpg +509 229 22 27 +# 39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_388.jpg +33 299 68 95 +198 308 77 92 +240 340 104 121 +376 314 74 98 +447 305 115 151 +627 364 83 89 +778 349 71 107 +888 334 65 92 +# 39--Ice_Skating/39_Ice_Skating_iceskiing_39_541.jpg +466 287 118 109 +631 188 21 18 +593 142 25 24 +# 39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_487.jpg +506 253 54 66 +# 39--Ice_Skating/39_Ice_Skating_iceskiing_39_416.jpg +402 272 66 88 +# 39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_992.jpg +910 21 41 61 +746 2 55 56 +838 132 48 59 +422 20 50 69 +283 45 47 55 +117 48 52 61 +221 169 42 56 +384 144 49 53 +612 272 51 55 +805 251 46 60 +939 247 47 63 +459 259 55 59 +315 268 46 58 +168 281 55 55 +385 384 44 55 +341 486 46 59 +280 429 41 56 +22 381 58 54 +998 128 26 58 +993 459 31 61 +955 580 56 64 +824 479 51 59 +728 368 49 59 +666 482 57 64 +779 608 56 62 +533 723 76 91 +729 731 52 76 +899 707 52 67 +298 848 58 72 +159 812 50 62 +284 684 70 76 +12 635 54 65 +214 623 47 64 +567 376 50 57 +88 154 47 56 +323 371 47 50 +# 39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_438.jpg +336 246 96 144 +745 363 117 144 +622 318 69 78 +# 39--Ice_Skating/39_Ice_Skating_iceskiing_39_349.jpg +644 314 28 44 +814 287 54 60 +# 39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_793.jpg +786 371 17 28 +511 554 296 341 +844 985 26 36 +146 642 101 115 +571 1076 40 36 +945 831 19 18 +# 39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_1026.jpg +278 130 80 104 +452 226 68 92 +654 74 70 78 +# 39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_252.jpg +186 190 41 41 +307 179 18 18 +323 175 23 28 +376 182 22 28 +417 167 20 29 +474 183 26 30 +514 177 25 28 +627 197 18 20 +719 181 31 38 +822 164 23 30 +# 39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_794.jpg +566 940 16 20 +700 951 20 19 +739 1014 18 19 +369 945 16 19 
+329 957 14 19 +343 1015 23 28 +931 984 37 47 +204 1159 35 44 +57 1185 35 42 +51 1044 26 35 +50 984 25 26 +633 974 12 19 +45 926 13 15 +4 940 11 25 +5 982 9 27 +992 934 18 18 +# 39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_1047.jpg +245 113 57 69 +748 144 55 75 +458 163 21 25 +531 131 19 26 +602 131 19 22 +590 195 21 29 +528 175 21 27 +499 198 20 31 +726 81 18 28 +813 73 25 34 +832 145 22 33 +877 175 27 31 +940 73 20 27 +882 40 19 26 +882 11 17 23 +807 28 18 22 +672 1 17 23 +738 3 21 28 +944 6 16 23 +7 17 14 18 +59 25 19 23 +123 23 18 21 +180 21 20 24 +243 20 19 25 +307 13 20 28 +1011 80 13 26 +966 192 27 31 +935 146 24 30 +# 39--Ice_Skating/39_Ice_Skating_iceskiing_39_529.jpg +714 256 51 56 +398 122 119 89 +# 39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_1029.jpg +109 248 11 13 +139 249 9 11 +207 253 10 14 +236 272 11 13 +268 257 11 13 +297 255 11 13 +351 255 11 14 +326 260 10 12 +392 256 10 14 +422 253 10 13 +417 271 10 13 +463 254 10 13 +490 245 10 13 +524 242 10 13 +611 235 9 12 +653 232 11 15 +695 233 10 16 +741 223 11 16 +753 223 16 21 +819 243 10 11 +927 213 11 16 +182 248 8 12 +559 247 11 8 +668 237 6 7 +441 271 7 9 +473 251 5 7 +710 225 17 23 +# 39--Ice_Skating/39_Ice_Skating_iceskiing_39_495.jpg +454 72 60 66 +# 39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_382.jpg +660 46 58 78 +# 39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_661.jpg +18 294 8 14 +76 234 8 12 +96 325 11 15 +23 342 9 12 +195 259 11 14 +222 267 11 13 +251 253 8 9 +161 300 10 12 +414 426 19 22 +443 515 22 9 +808 361 16 19 +443 313 7 12 +475 303 6 12 +508 261 8 6 +224 196 4 7 +294 197 5 7 +423 194 5 7 +395 200 5 6 +372 204 6 8 +350 203 5 6 +343 189 4 6 +350 186 3 5 +357 188 4 6 +390 187 4 6 +372 185 5 7 +435 197 4 5 +467 185 5 7 +792 221 7 9 +794 208 5 8 +590 228 4 6 +1 373 14 17 +454 206 5 5 +865 198 7 7 +855 210 8 9 +571 213 7 7 +513 212 5 8 +616 169 6 8 +686 180 6 6 +550 179 5 7 +# 39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_362.jpg +228 485 75 102 +731 554 72 120 +# 39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_616.jpg +351 219 168 168 +# 39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_163.jpg +263 134 48 70 +533 460 57 72 +# 39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_504.jpg +409 158 32 46 +620 231 28 33 +627 133 7 10 +102 179 11 17 +174 213 34 46 +351 151 6 8 +282 132 5 8 +691 134 6 11 +941 155 14 19 +985 170 12 11 +# 39--Ice_Skating/39_Ice_Skating_iceskiing_39_348.jpg +903 631 12 15 +966 634 13 15 +609 291 20 37 +# 39--Ice_Skating/39_Ice_Skating_iceskiing_39_272.jpg +142 114 118 156 +352 166 100 148 +572 284 82 92 +720 308 96 112 +902 278 120 128 +# 39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_156.jpg +566 44 65 99 +318 47 39 48 +715 16 45 50 +456 90 51 95 +# 39--Ice_Skating/39_Ice_Skating_iceskiing_39_908.jpg +394 186 60 50 +# 39--Ice_Skating/39_Ice_Skating_iceskiing_39_342.jpg +286 143 39 36 +876 648 17 19 +808 699 15 18 +762 711 12 17 +740 695 12 17 +673 670 16 18 +551 699 14 18 +309 712 15 18 +# 39--Ice_Skating/39_Ice_Skating_iceskiing_39_668.jpg +609 403 28 34 +# 39--Ice_Skating/39_Ice_Skating_iceskiing_39_1000.jpg +370 69 38 39 +# 39--Ice_Skating/39_Ice_Skating_iceskiing_39_777.jpg +690 234 56 80 +# 39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_275.jpg +584 100 54 88 +# 39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_611.jpg +338 56 78 106 +# 39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_87.jpg +456 284 88 116 +703 49 45 73 +894 42 43 61 +435 36 44 42 +# 39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_119.jpg +372 142 56 62 +556 180 52 54 +# 39--Ice_Skating/39_Ice_Skating_iceskiing_39_765.jpg +466 442 
40 49 +# 39--Ice_Skating/39_Ice_Skating_iceskiing_39_696.jpg +610 249 24 32 +364 215 23 26 +# 39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_389.jpg +826 364 8 11 +804 368 6 8 +771 365 10 11 +741 365 7 10 +672 364 8 11 +640 365 7 10 +609 362 7 9 +572 361 7 9 +545 364 7 8 +511 360 8 10 +464 370 7 11 +406 376 9 10 +367 385 9 9 +323 381 8 10 +275 387 7 10 +234 380 9 12 +51 367 9 15 +# 39--Ice_Skating/39_Ice_Skating_iceskiing_39_354.jpg +354 444 6 7 +402 441 6 7 +466 479 8 7 +# 39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_682.jpg +275 154 114 114 +# 39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_495.jpg +270 212 104 157 +# 39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_203.jpg +520 104 52 84 +720 52 58 88 +# 39--Ice_Skating/39_Ice_Skating_iceskiing_39_595.jpg +576 226 68 92 +# 39--Ice_Skating/39_Ice_Skating_iceskiing_39_943.jpg +761 387 71 83 +573 815 84 111 +416 186 77 104 +151 844 83 103 +110 291 34 83 +# 39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_486.jpg +453 301 168 197 +# 39--Ice_Skating/39_Ice_Skating_iceskiing_39_819.jpg +284 264 76 102 +# 39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_463.jpg +421 704 6 8 +410 687 6 9 +405 705 5 6 +513 702 8 11 +560 704 5 9 +585 710 7 9 +613 704 6 8 +700 710 7 10 +650 743 10 9 +639 691 7 8 +753 674 5 9 +728 675 4 8 +714 670 5 8 +682 679 5 7 +716 702 5 11 +919 691 5 5 +901 736 10 15 +838 741 10 13 +819 769 14 13 +911 759 12 17 +964 773 10 15 +802 750 9 12 +958 730 8 10 +# 39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_568.jpg +105 127 23 25 +180 164 10 16 +281 115 5 8 +251 116 5 8 +361 115 30 45 +702 120 14 20 +910 121 13 17 +922 316 26 49 +# 39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_458.jpg +710 262 107 115 +# 39--Ice_Skating/39_Ice_Skating_iceskiing_39_869.jpg +417 321 36 26 +# 39--Ice_Skating/39_Ice_Skating_iceskiing_39_941.jpg +741 500 76 99 +# 39--Ice_Skating/39_Ice_Skating_iceskiing_39_778.jpg +687 304 21 33 +561 340 17 17 +# 39--Ice_Skating/39_Ice_Skating_iceskiing_39_310.jpg +112 221 57 58 +183 156 40 47 +426 226 46 51 +714 364 47 49 +490 256 47 71 +# 39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_270.jpg +560 539 114 117 +# 39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_283.jpg +476 204 58 90 +626 38 68 90 +# 39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_169.jpg +334 828 32 48 +200 849 9 14 +268 850 9 12 +404 878 7 7 +813 852 5 10 +820 856 6 8 +746 844 6 9 +386 860 6 6 +# 39--Ice_Skating/39_Ice_Skating_iceskiing_39_359.jpg +570 345 15 21 +# 39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_44.jpg +406 109 87 125 +# 39--Ice_Skating/39_Ice_Skating_iceskiing_39_249.jpg +507 87 44 50 +# 39--Ice_Skating/39_Ice_Skating_iceskiing_39_591.jpg +827 224 11 14 +# 39--Ice_Skating/39_Ice_Skating_iceskiing_39_81.jpg +24 137 56 52 +495 148 26 33 +# 39--Ice_Skating/39_Ice_Skating_iceskiing_39_751.jpg +416 296 62 64 +# 39--Ice_Skating/39_Ice_Skating_iceskiing_39_817.jpg +402 366 68 93 +# 39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_546.jpg +38 280 8 10 +83 285 7 12 +216 248 64 72 +296 295 9 14 +387 285 54 70 +466 299 8 9 +499 297 6 11 +534 282 14 21 +574 288 13 25 +645 316 19 28 +# 39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_440.jpg +444 210 57 81 +492 706 69 111 +# 39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_344.jpg +368 141 90 116 +563 64 107 120 +# 39--Ice_Skating/39_Ice_Skating_iceskiing_39_77.jpg +598 124 54 60 +# 39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_658.jpg +451 409 6 12 +45 429 7 9 +85 417 7 10 +112 418 10 10 +54 400 5 7 +4 389 6 9 +187 414 8 12 +169 415 7 10 +194 444 9 10 +207 409 6 7 +277 408 7 8 +784 400 8 8 +504 410 6 7 +# 
39--Ice_Skating/39_Ice_Skating_iceskiing_39_351.jpg +760 100 88 106 +256 68 64 112 +194 100 72 124 +# 39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_875.jpg +766 304 78 76 +704 192 56 66 +472 282 58 70 +# 39--Ice_Skating/39_Ice_Skating_iceskiing_39_138.jpg +20 110 39 46 +90 330 45 37 +283 211 78 71 +388 336 35 40 +553 214 95 77 +787 372 55 77 +257 244 14 19 +2 316 26 38 +49 272 28 48 +29 271 17 20 +# 4--Dancing/4_Dancing_Dancing_4_1000.jpg +316 240 400 608 +# 4--Dancing/4_Dancing_Dancing_4_1043.jpg +81 450 35 30 +653 141 48 35 +676 228 1 3 +# 4--Dancing/4_Dancing_Dancing_4_84.jpg +134 175 46 56 +51 69 21 29 +150 79 17 23 +222 73 18 24 +273 136 19 22 +341 157 41 49 +479 139 35 42 +452 139 17 25 +534 195 35 54 +701 160 42 53 +851 108 40 48 +606 85 16 22 +733 69 22 25 +792 62 18 25 +85 148 18 23 +764 130 22 25 +676 0 16 15 +716 0 14 11 +940 207 19 30 +# 4--Dancing/4_Dancing_Dancing_4_327.jpg +432 279 57 117 +# 4--Dancing/4_Dancing_Dancing_4_1036.jpg +502 88 92 178 +636 74 110 198 +# 4--Dancing/4_Dancing_Dancing_4_41.jpg +547 102 44 76 +601 111 48 62 +21 402 21 27 +96 403 22 26 +150 398 19 24 +196 391 19 24 +256 385 15 22 +39 383 17 22 +131 378 14 21 +57 364 16 20 +103 353 17 21 +176 371 17 20 +228 370 16 20 +307 320 19 24 +# 4--Dancing/4_Dancing_Dancing_4_384.jpg +468 130 128 186 +# 4--Dancing/4_Dancing_Dancing_4_422.jpg +310 386 90 120 +390 388 78 114 +488 72 96 136 +588 84 88 118 +# 4--Dancing/4_Dancing_Dancing_4_289.jpg +494 104 68 82 +# 4--Dancing/4_Dancing_Dancing_4_156.jpg +450 116 68 73 +529 51 65 93 +904 20 61 63 +150 9 43 50 +669 7 42 56 +215 5 40 48 +306 0 21 22 +# 4--Dancing/4_Dancing_Dancing_4_715.jpg +411 109 41 61 +658 123 43 60 +488 469 38 46 +417 445 30 34 +295 467 34 36 +146 454 33 37 +74 457 39 43 +626 452 34 42 +# 4--Dancing/4_Dancing_Dancing_4_194.jpg +789 220 40 62 +584 473 54 45 +# 4--Dancing/4_Dancing_Dancing_4_718.jpg +462 162 96 141 +# 4--Dancing/4_Dancing_Dancing_4_319.jpg +83 58 53 65 +0 13 29 38 +213 160 38 41 +342 45 33 39 +442 137 29 38 +410 74 29 38 +486 59 33 40 +746 517 92 46 +552 35 34 41 +596 0 34 39 +622 136 46 59 +731 56 34 42 +964 84 38 41 +211 24 22 29 +193 55 29 36 +150 63 32 39 +198 92 26 33 +257 81 26 35 +275 47 22 30 +301 40 34 40 +321 9 24 30 +328 246 36 40 +446 55 35 34 +429 46 27 41 +656 51 25 37 +688 62 29 36 +709 41 32 44 +956 26 29 41 +# 4--Dancing/4_Dancing_Dancing_4_253.jpg +350 57 56 97 +483 344 40 58 +657 120 59 83 +# 4--Dancing/4_Dancing_Dancing_4_489.jpg +174 236 51 68 +278 239 44 63 +129 116 39 55 +188 71 41 64 +296 63 39 53 +254 132 41 56 +403 124 41 58 +347 81 32 49 +422 56 39 56 +481 99 41 61 +531 57 39 57 +395 238 44 57 +541 225 40 58 +602 209 48 64 +707 281 42 59 +778 237 47 62 +617 73 39 55 +668 99 45 61 +686 25 45 56 +760 39 30 42 +749 101 42 56 +826 101 46 61 +806 18 35 47 +886 99 37 47 +908 41 38 51 +871 21 20 22 +938 11 19 24 +981 19 17 19 +900 1 18 24 +914 132 41 54 +62 33 17 22 +22 15 17 25 +108 24 17 20 +110 50 13 18 +164 24 16 19 +151 13 14 17 +83 19 15 19 +51 22 14 17 +851 3 15 18 +# 4--Dancing/4_Dancing_Dancing_4_224.jpg +309 59 88 133 +541 52 83 140 +# 4--Dancing/4_Dancing_Dancing_4_162.jpg +218 170 246 278 +# 4--Dancing/4_Dancing_Dancing_4_514.jpg +384 108 64 112 +514 24 86 124 +# 4--Dancing/4_Dancing_Dancing_4_57.jpg +559 137 32 53 +592 199 37 69 +733 157 24 34 +846 160 28 44 +817 162 18 31 +781 190 18 29 +947 199 19 26 +893 163 20 29 +772 168 24 32 +1005 182 19 33 +969 129 20 28 +# 4--Dancing/4_Dancing_Dancing_4_915.jpg +465 135 214 276 +# 4--Dancing/4_Dancing_Dancing_4_240.jpg +684 119 56 79 +839 95 56 82 +109 152 59 
81 +255 187 54 79 +400 219 52 65 +530 163 59 79 +# 4--Dancing/4_Dancing_Dancing_4_228.jpg +444 115 41 55 +273 156 39 52 +607 100 48 59 +843 37 54 67 +# 4--Dancing/4_Dancing_Dancing_4_517.jpg +390 110 270 394 +512 36 272 364 +# 4--Dancing/4_Dancing_Dancing_4_922.jpg +335 180 324 483 +# 4--Dancing/4_Dancing_Dancing_4_21.jpg +724 182 74 122 +# 4--Dancing/4_Dancing_Dancing_4_97.jpg +245 306 19 30 +298 280 22 34 +380 326 18 28 +342 326 16 22 +535 320 17 27 +475 348 17 21 +# 4--Dancing/4_Dancing_Dancing_4_769.jpg +399 123 48 63 +461 237 41 52 +121 66 20 29 +909 150 22 36 +799 177 24 32 +# 4--Dancing/4_Dancing_Dancing_4_189.jpg +422 22 70 122 +590 16 72 98 +# 4--Dancing/4_Dancing_Dancing_4_885.jpg +384 132 174 315 +# 4--Dancing/4_Dancing_Dancing_4_960.jpg +398 94 216 268 +# 4--Dancing/4_Dancing_Dancing_4_375.jpg +224 158 66 98 +548 266 108 104 +# 4--Dancing/4_Dancing_Dancing_4_813.jpg +326 140 94 148 +482 46 124 170 +# 4--Dancing/4_Dancing_Dancing_4_1026.jpg +102 94 114 166 +300 146 88 148 +472 146 94 130 +600 86 98 138 +792 104 118 160 +890 134 74 128 +# 4--Dancing/4_Dancing_Dancing_4_854.jpg +202 210 98 140 +364 252 82 116 +548 308 90 110 +750 202 102 156 +# 4--Dancing/4_Dancing_Dancing_4_983.jpg +306 150 256 296 +860 40 150 270 +# 4--Dancing/4_Dancing_Dancing_4_878.jpg +466 173 102 160 +# 4--Dancing/4_Dancing_Dancing_4_53.jpg +294 204 70 78 +100 208 82 116 +404 242 66 82 +576 280 60 92 +# 4--Dancing/4_Dancing_Dancing_4_124.jpg +483 719 71 107 +574 684 81 142 +# 4--Dancing/4_Dancing_Dancing_4_494.jpg +345 159 234 291 +# 4--Dancing/4_Dancing_Dancing_4_378.jpg +150 71 88 116 +315 115 53 102 +675 48 81 100 +779 89 69 92 +620 399 20 40 +529 532 28 35 +# 4--Dancing/4_Dancing_Dancing_4_1029.jpg +336 42 80 138 +560 61 86 151 +243 84 55 67 +# 4--Dancing/4_Dancing_Dancing_4_1028.jpg +322 137 238 358 +# 40--Gymnastics/40_Gymnastics_Gymnastics_40_460.jpg +495 163 92 143 +# 40--Gymnastics/40_Gymnastics_Gymnastics_40_521.jpg +244 104 62 87 +317 425 64 76 +490 313 68 93 +641 90 61 67 +# 40--Gymnastics/40_Gymnastics_Gymnastics_40_57.jpg +530 161 167 261 +# 40--Gymnastics/40_Gymnastics_Gymnastics_40_1022.jpg +526 316 88 118 +# 40--Gymnastics/40_Gymnastics_Gymnastics_40_161.jpg +584 194 70 126 +# 40--Gymnastics/40_Gymnastics_Gymnastics_40_980.jpg +585 96 173 359 +# 40--Gymnastics/40_Gymnastics_Gymnastics_40_361.jpg +514 466 92 124 +# 40--Gymnastics/40_Gymnastics_Gymnastics_40_771.jpg +394 192 110 142 +# 40--Gymnastics/40_Gymnastics_Gymnastics_40_668.jpg +870 210 92 100 +652 200 78 82 +504 186 76 112 +378 176 108 126 +286 192 106 146 +192 62 114 128 +118 254 150 156 +# 40--Gymnastics/40_Gymnastics_Gymnastics_40_869.jpg +802 472 72 115 +594 539 67 92 +500 275 67 95 +302 267 65 90 +175 472 77 87 +345 834 75 92 +614 817 75 102 +417 547 75 97 +709 290 65 100 +# 40--Gymnastics/40_Gymnastics_Gymnastics_40_740.jpg +404 769 216 329 +# 40--Gymnastics/40_Gymnastics_Gymnastics_40_273.jpg +147 69 29 37 +234 66 25 35 +318 73 28 33 +406 79 29 35 +488 65 27 36 +581 76 29 35 +673 76 27 33 +100 113 33 44 +199 131 33 37 +299 132 34 40 +390 124 33 39 +489 128 33 37 +592 130 32 36 +694 130 33 37 +755 73 27 32 +793 131 33 35 +895 132 33 37 +182 234 35 40 +374 240 37 37 +488 264 35 42 +549 279 35 40 +670 253 37 43 +825 233 39 46 +823 61 30 37 +# 40--Gymnastics/40_Gymnastics_Gymnastics_40_1035.jpg +454 96 90 134 +# 40--Gymnastics/40_Gymnastics_Gymnastics_40_950.jpg +108 85 69 71 +339 48 65 76 +486 79 55 68 +643 81 61 74 +862 52 67 84 +957 24 33 38 +1009 30 15 33 +978 9 21 29 +# 40--Gymnastics/40_Gymnastics_Gymnastics_40_642.jpg +85 
282 46 55 +208 274 40 53 +265 344 38 44 +337 319 42 48 +297 296 16 24 +415 277 48 53 +528 297 45 47 +625 292 50 56 +785 299 47 58 +866 252 56 60 +# 40--Gymnastics/40_Gymnastics_Gymnastics_40_197.jpg +490 471 125 178 +# 40--Gymnastics/40_Gymnastics_Gymnastics_40_698.jpg +526 471 39 51 +627 272 49 28 +580 275 45 45 +252 46 41 49 +358 77 35 39 +295 121 55 40 +583 46 36 36 +779 92 45 37 +864 43 33 34 +# 40--Gymnastics/40_Gymnastics_Gymnastics_40_255.jpg +562 376 68 36 +# 40--Gymnastics/40_Gymnastics_Gymnastics_40_646.jpg +958 69 18 20 +999 79 12 13 +1002 43 9 15 +177 84 12 14 +120 302 48 45 +261 260 45 51 +379 278 49 58 +393 155 39 48 +549 198 45 57 +503 312 47 56 +592 308 49 52 +713 168 42 52 +838 165 49 55 +770 320 54 59 +915 306 60 61 +935 85 11 12 +276 66 7 9 +369 71 7 10 +443 54 4 7 +488 47 7 7 +894 84 11 9 +184 72 8 6 +# 40--Gymnastics/40_Gymnastics_Gymnastics_40_596.jpg +564 158 154 156 +# 40--Gymnastics/40_Gymnastics_Gymnastics_40_171.jpg +494 320 81 123 +# 40--Gymnastics/40_Gymnastics_Gymnastics_40_593.jpg +841 262 41 41 +940 302 35 47 +95 166 16 21 +167 140 17 23 +113 120 13 19 +98 43 18 20 +31 47 17 26 +22 109 17 38 +231 255 20 25 +238 189 15 22 +946 256 19 22 +867 194 17 22 +931 178 14 18 +557 136 21 24 +910 30 17 17 +946 119 18 18 +710 101 20 22 +0 274 18 27 +17 217 15 22 +141 236 19 23 +37 252 39 42 +69 297 37 40 +170 266 42 49 +304 189 40 56 +422 212 34 45 +543 189 41 46 +629 210 39 47 +752 203 38 44 +# 40--Gymnastics/40_Gymnastics_Gymnastics_40_845.jpg +390 114 74 104 +# 40--Gymnastics/40_Gymnastics_Gymnastics_40_627.jpg +649 1066 201 207 +# 40--Gymnastics/40_Gymnastics_Gymnastics_40_894.jpg +416 168 125 189 +# 40--Gymnastics/40_Gymnastics_Gymnastics_40_488.jpg +452 147 135 186 +# 40--Gymnastics/40_Gymnastics_Gymnastics_40_260.jpg +59 537 31 34 +198 532 27 38 +359 282 37 59 +451 222 40 45 +549 606 9 12 +604 657 5 7 +635 667 7 7 +680 629 7 11 +647 630 8 12 +1001 595 23 37 +907 561 25 31 +767 541 25 29 +800 550 20 22 +856 557 13 14 +942 631 18 24 +951 586 12 12 +119 642 11 12 +# 40--Gymnastics/40_Gymnastics_Gymnastics_40_285.jpg +782 420 96 56 +# 40--Gymnastics/40_Gymnastics_Gymnastics_40_47.jpg +586 66 72 102 +# 40--Gymnastics/40_Gymnastics_Gymnastics_40_492.jpg +360 153 38 48 +474 156 34 45 +557 133 39 45 +654 125 38 51 +40 5 16 23 +171 17 15 24 +213 22 19 28 +270 19 16 29 +309 74 19 32 +313 53 13 19 +311 11 21 29 +339 0 16 22 +358 1 16 21 +381 1 20 23 +412 0 21 19 +452 0 20 20 +333 94 12 22 +362 80 19 25 +380 78 16 24 +401 69 18 27 +447 113 16 22 +469 81 18 25 +487 66 19 26 +508 59 23 27 +548 50 25 28 +586 84 17 22 +601 79 13 27 +427 83 16 16 +# 40--Gymnastics/40_Gymnastics_Gymnastics_40_48.jpg +29 421 13 21 +813 175 28 19 +# 40--Gymnastics/40_Gymnastics_Gymnastics_40_762.jpg +580 441 123 183 +# 40--Gymnastics/40_Gymnastics_Gymnastics_40_887.jpg +95 151 46 53 +189 89 40 47 +314 75 45 55 +431 83 46 53 +550 68 47 52 +686 49 46 49 +746 101 48 57 +839 65 47 46 +874 106 47 62 +626 134 43 51 +482 139 50 55 +356 150 49 52 +214 138 45 54 +104 281 48 58 +250 280 48 58 +383 292 50 58 +522 278 51 58 +660 280 58 54 +809 273 49 56 +137 400 52 66 +330 406 52 60 +522 389 59 60 +760 380 54 60 +# 40--Gymnastics/40_Gymnastics_Gymnastics_40_776.jpg +620 599 96 120 +322 861 87 130 +# 40--Gymnastics/40_Gymnastics_Gymnastics_40_175.jpg +196 76 84 66 +556 204 74 78 +# 40--Gymnastics/40_Gymnastics_Gymnastics_40_659.jpg +286 196 31 38 +233 316 35 41 +355 318 37 46 +412 189 31 40 +484 221 35 40 +565 166 34 41 +675 186 34 44 +454 353 41 46 +615 332 41 49 +755 297 38 47 +413 481 45 56 +# 
40--Gymnastics/40_Gymnastics_Gymnastics_40_920.jpg +122 156 66 80 +256 170 66 80 +394 168 58 88 +484 172 62 80 +564 154 82 78 +744 128 78 92 +# 40--Gymnastics/40_Gymnastics_Gymnastics_40_749.jpg +834 282 134 78 +# 40--Gymnastics/40_Gymnastics_Gymnastics_40_138.jpg +530 90 90 100 +# 40--Gymnastics/40_Gymnastics_Gymnastics_40_401.jpg +406 418 146 203 +# 40--Gymnastics/40_Gymnastics_Gymnastics_40_945.jpg +840 177 46 58 +# 40--Gymnastics/40_Gymnastics_Gymnastics_40_242.jpg +486 460 90 52 +# 40--Gymnastics/40_Gymnastics_Gymnastics_40_274.jpg +128 126 43 52 +237 121 45 52 +370 130 46 47 +497 111 41 48 +631 126 41 46 +768 142 43 55 +874 166 44 52 +57 339 4 5 +# 40--Gymnastics/40_Gymnastics_Gymnastics_40_24.jpg +680 294 82 56 +# 40--Gymnastics/40_Gymnastics_Gymnastics_40_364.jpg +547 1228 141 105 +# 40--Gymnastics/40_Gymnastics_Gymnastics_40_389.jpg +488 509 156 213 +# 40--Gymnastics/40_Gymnastics_Gymnastics_40_783.jpg +456 356 72 108 +# 40--Gymnastics/40_Gymnastics_Gymnastics_40_609.jpg +490 366 104 154 +# 40--Gymnastics/40_Gymnastics_Gymnastics_40_612.jpg +310 210 96 92 +# 40--Gymnastics/40_Gymnastics_Gymnastics_40_911.jpg +488 368 80 88 +# 40--Gymnastics/40_Gymnastics_Gymnastics_40_891.jpg +70 308 32 35 +121 301 31 36 +185 312 30 35 +260 282 30 35 +323 296 28 35 +380 288 29 36 +450 306 29 32 +523 306 29 33 +581 323 25 31 +669 283 30 34 +760 334 31 33 +696 337 28 34 +678 398 30 33 +762 414 31 34 +607 394 29 37 +546 381 30 35 +477 388 30 31 +922 363 33 36 +874 354 32 37 +842 348 28 32 +824 336 23 33 +387 486 28 31 +458 480 27 33 +521 483 30 36 +607 488 31 35 +678 492 31 35 +735 503 34 35 +# 40--Gymnastics/40_Gymnastics_Gymnastics_40_422.jpg +396 328 64 44 +# 40--Gymnastics/40_Gymnastics_Gymnastics_40_108.jpg +484 915 94 70 +# 40--Gymnastics/40_Gymnastics_Gymnastics_40_727.jpg +322 126 15 14 +81 481 25 30 +218 547 37 30 +694 565 37 30 +869 440 21 24 +666 175 18 17 +594 77 14 12 +# 40--Gymnastics/40_Gymnastics_Gymnastics_40_156.jpg +354 153 72 87 +459 420 69 84 +465 658 66 90 +# 40--Gymnastics/40_Gymnastics_Gymnastics_40_1043.jpg +344 430 16 19 +380 431 16 22 +551 483 19 22 +554 421 17 19 +784 412 18 18 +882 430 18 20 +956 413 20 22 +839 464 18 12 +714 540 16 22 +527 317 18 10 +# 40--Gymnastics/40_Gymnastics_Gymnastics_40_115.jpg +224 180 56 100 +# 40--Gymnastics/40_Gymnastics_Gymnastics_40_484.jpg +498 270 70 104 +# 40--Gymnastics/40_Gymnastics_Gymnastics_40_331.jpg +853 294 15 19 +915 295 18 22 +930 254 17 20 +923 416 14 17 +955 409 15 23 +977 413 14 20 +1003 412 17 21 +716 284 16 17 +745 175 19 23 +442 305 20 27 +370 317 19 20 +27 264 17 22 +697 365 20 26 +726 362 18 20 +501 92 57 61 +786 413 15 18 +774 369 19 21 +855 407 16 17 +742 302 16 22 +# 40--Gymnastics/40_Gymnastics_Gymnastics_40_420.jpg +363 219 81 87 +390 276 60 90 +411 333 63 90 +598 402 81 120 +760 462 90 123 +474 396 78 105 +321 207 63 102 +168 174 57 87 +246 195 60 87 +# 40--Gymnastics/40_Gymnastics_Gymnastics_40_1044.jpg +262 98 240 376 +# 40--Gymnastics/40_Gymnastics_Gymnastics_40_580.jpg +162 276 158 90 +# 40--Gymnastics/40_Gymnastics_Gymnastics_40_566.jpg +464 156 58 84 +# 40--Gymnastics/40_Gymnastics_Gymnastics_40_638.jpg +453 325 30 37 +872 330 6 8 +971 322 11 12 +# 40--Gymnastics/40_Gymnastics_Gymnastics_40_805.jpg +477 755 176 241 +# 41--Swimming/41_Swimming_Swimming_41_580.jpg +458 122 160 220 +# 41--Swimming/41_Swimming_Swimmer_41_35.jpg +686 286 274 190 +# 41--Swimming/41_Swimming_Swimmer_41_308.jpg +432 352 84 82 +# 41--Swimming/41_Swimming_Swimmer_41_369.jpg +262 190 132 202 +# 
41--Swimming/41_Swimming_Swimmer_41_935.jpg +440 10 380 498 +# 41--Swimming/41_Swimming_Swimmer_41_449.jpg +648 282 52 56 +# 41--Swimming/41_Swimming_Swimmer_41_170.jpg +362 238 350 484 +# 41--Swimming/41_Swimming_Swimming_41_379.jpg +538 332 164 162 +# 41--Swimming/41_Swimming_Swimming_41_641.jpg +472 16 228 262 +# 41--Swimming/41_Swimming_Swimmer_41_831.jpg +458 132 177 228 +# 41--Swimming/41_Swimming_Swimmer_41_883.jpg +570 302 60 82 +# 41--Swimming/41_Swimming_Swimmer_41_538.jpg +328 126 338 382 +# 41--Swimming/41_Swimming_Swimming_41_283.jpg +728 208 92 148 +# 41--Swimming/41_Swimming_Swimming_41_271.jpg +620 280 350 350 +# 41--Swimming/41_Swimming_Swimmer_41_288.jpg +168 230 242 152 +# 41--Swimming/41_Swimming_Swimmer_41_68.jpg +292 178 98 56 +# 41--Swimming/41_Swimming_Swimmer_41_701.jpg +156 307 714 870 +# 41--Swimming/41_Swimming_Swimmer_41_976.jpg +448 296 133 176 +# 41--Swimming/41_Swimming_Swimmer_41_610.jpg +462 102 168 228 +# 41--Swimming/41_Swimming_Swimming_41_74.jpg +416 170 120 172 +# 41--Swimming/41_Swimming_Swimmer_41_772.jpg +341 100 414 666 +315 1120 31 50 +561 1102 33 58 +12 638 49 43 +71 681 48 41 +# 41--Swimming/41_Swimming_Swimmer_41_56.jpg +102 160 92 58 +# 41--Swimming/41_Swimming_Swimmer_41_773.jpg +120 78 408 540 +# 41--Swimming/41_Swimming_Swimming_41_161.jpg +416 184 288 398 +# 41--Swimming/41_Swimming_Swimming_41_521.jpg +336 258 116 96 +# 41--Swimming/41_Swimming_Swimming_41_535.jpg +552 344 174 132 +# 41--Swimming/41_Swimming_Swimming_41_73.jpg +132 220 128 134 +# 41--Swimming/41_Swimming_Swimming_41_412.jpg +472 333 24 26 +70 238 27 31 +925 342 22 17 +# 41--Swimming/41_Swimming_Swimming_41_243.jpg +63 81 182 140 +743 251 55 57 +905 247 57 71 +252 644 23 27 +256 605 23 27 +111 728 65 69 +421 752 33 46 +468 751 37 52 +509 779 38 61 +550 811 34 47 +423 900 29 63 +625 872 41 57 +737 896 47 63 +84 670 15 29 +# 41--Swimming/41_Swimming_Swimmer_41_843.jpg +183 364 15 16 +258 372 13 17 +418 387 15 20 +774 64 87 120 +# 41--Swimming/41_Swimming_Swimmer_41_792.jpg +348 483 228 252 +# 41--Swimming/41_Swimming_Swimmer_41_358.jpg +412 208 270 344 +# 41--Swimming/41_Swimming_Swimmer_41_148.jpg +180 346 210 150 +# 41--Swimming/41_Swimming_Swimming_41_52.jpg +194 252 244 172 +# 41--Swimming/41_Swimming_Swimmer_41_483.jpg +400 92 222 328 +# 41--Swimming/41_Swimming_Swimmer_41_262.jpg +520 205 475 571 +# 41--Swimming/41_Swimming_Swimming_41_26.jpg +286 394 86 70 +# 41--Swimming/41_Swimming_Swimming_41_106.jpg +550 312 126 140 +# 41--Swimming/41_Swimming_Swimmer_41_718.jpg +87 324 708 831 +# 41--Swimming/41_Swimming_Swimmer_41_704.jpg +90 348 126 184 +184 344 98 176 +366 248 98 158 +470 194 76 132 +540 228 56 100 +542 90 80 130 +652 90 74 126 +732 94 74 120 +764 42 78 112 +# 41--Swimming/41_Swimming_Swimming_41_275.jpg +442 346 146 186 +# 41--Swimming/41_Swimming_Swimmer_41_232.jpg +532 290 172 266 +# 41--Swimming/41_Swimming_Swimmer_41_401.jpg +432 133 160 231 +858 920 130 222 +# 41--Swimming/41_Swimming_Swimmer_41_564.jpg +192 252 94 104 +# 41--Swimming/41_Swimming_Swimmer_41_19.jpg +446 278 156 196 +# 41--Swimming/41_Swimming_Swimmer_41_26.jpg +554 164 260 244 +# 41--Swimming/41_Swimming_Swimmer_41_399.jpg +382 228 216 256 +# 41--Swimming/41_Swimming_Swimmer_41_488.jpg +460 234 114 180 +# 41--Swimming/41_Swimming_Swimming_41_730.jpg +352 380 118 52 +520 342 116 62 +640 410 100 56 +# 41--Swimming/41_Swimming_Swimmer_41_1001.jpg +204 370 20 29 +# 41--Swimming/41_Swimming_Swimmer_41_1028.jpg +354 162 248 270 +# 41--Swimming/41_Swimming_Swimming_41_238.jpg +196 488 80 116 
+272 92 62 74 +872 72 74 110 +708 534 52 52 +# 41--Swimming/41_Swimming_Swimmer_41_688.jpg +528 104 228 268 +898 370 122 158 +# 41--Swimming/41_Swimming_Swimming_41_472.jpg +118 126 434 542 +# 41--Swimming/41_Swimming_Swimming_41_822.jpg +581 457 15 17 +643 435 14 20 +727 448 11 15 +730 525 15 19 +531 387 6 16 +873 562 20 26 +193 738 32 30 +299 698 30 43 +882 745 22 23 +# 41--Swimming/41_Swimming_Swimmer_41_943.jpg +225 405 511 671 +# 41--Swimming/41_Swimming_Swimmer_41_376.jpg +452 108 208 260 +# 41--Swimming/41_Swimming_Swimmer_41_471.jpg +467 117 126 157 +# 41--Swimming/41_Swimming_Swimmer_41_607.jpg +226 105 5 14 +395 83 122 185 +673 68 7 8 +832 55 8 9 +517 208 15 16 +# 41--Swimming/41_Swimming_Swimming_41_172.jpg +190 19 23 10 +274 18 27 29 +266 63 40 25 +505 131 32 48 +# 41--Swimming/41_Swimming_Swimmer_41_275.jpg +230 154 564 566 +# 41--Swimming/41_Swimming_Swimmer_41_113.jpg +78 498 139 160 +# 41--Swimming/41_Swimming_Swimmer_41_885.jpg +376 66 318 338 +# 41--Swimming/41_Swimming_Swimmer_41_931.jpg +386 172 170 216 +# 41--Swimming/41_Swimming_Swimmer_41_55.jpg +440 180 168 224 +# 41--Swimming/41_Swimming_Swimming_41_380.jpg +258 150 342 312 +# 41--Swimming/41_Swimming_Swimmer_41_927.jpg +516 140 138 174 +# 41--Swimming/41_Swimming_Swimming_41_699.jpg +65 367 67 80 +128 305 63 61 +233 323 60 75 +431 232 53 66 +387 306 55 67 +552 205 65 85 +746 312 56 66 +784 262 57 54 +922 269 55 70 +# 41--Swimming/41_Swimming_Swimmer_41_507.jpg +300 271 339 474 +# 41--Swimming/41_Swimming_Swimmer_41_755.jpg +879 201 16 18 +863 238 16 16 +871 263 17 20 +939 402 20 21 +872 296 15 19 +908 339 18 22 +# 41--Swimming/41_Swimming_Swimmer_41_440.jpg +414 122 190 282 +# 41--Swimming/41_Swimming_Swimmer_41_659.jpg +512 69 162 246 +# 41--Swimming/41_Swimming_Swimmer_41_711.jpg +301 208 217 292 +# 41--Swimming/41_Swimming_Swimming_41_714.jpg +492 246 112 154 +# 41--Swimming/41_Swimming_Swimmer_41_1002.jpg +435 138 219 333 +# 41--Swimming/41_Swimming_Swimming_41_466.jpg +460 176 90 128 +# 41--Swimming/41_Swimming_Swimmer_41_380.jpg +564 107 12 12 +604 109 11 14 +584 173 17 20 +590 208 15 16 +465 226 19 19 +396 286 17 23 +# 41--Swimming/41_Swimming_Swimmer_41_293.jpg +564 180 70 130 +# 41--Swimming/41_Swimming_Swimming_41_128.jpg +462 206 84 112 +# 41--Swimming/41_Swimming_Swimming_41_240.jpg +422 416 180 224 +416 156 114 156 +# 41--Swimming/41_Swimming_Swimmer_41_43.jpg +792 236 96 200 +# 42--Car_Racing/42_Car_Racing_Nascar_42_922.jpg +248 196 56 72 +350 126 84 94 +532 68 98 110 +# 42--Car_Racing/42_Car_Racing_Car_Racing_42_939.jpg +749 185 43 47 +# 42--Car_Racing/42_Car_Racing_Car_Racing_42_600.jpg +81 104 16 22 +191 144 9 12 +250 114 16 15 +301 151 9 10 +333 129 15 18 +358 147 11 14 +604 166 23 40 +# 42--Car_Racing/42_Car_Racing_Car_Racing_42_906.jpg +706 71 24 35 +754 79 29 38 +841 109 29 37 +193 100 32 40 +237 83 26 31 +293 99 31 38 +331 114 28 35 +392 148 26 26 +416 92 31 36 +498 75 29 41 +566 73 28 31 +587 104 27 39 +651 75 27 36 +# 42--Car_Racing/42_Car_Racing_Nascar_42_661.jpg +821 254 29 43 +# 42--Car_Racing/42_Car_Racing_Nascar_42_482.jpg +393 183 303 405 +# 42--Car_Racing/42_Car_Racing_Car_Racing_42_1045.jpg +392 248 154 224 +646 278 138 182 +# 42--Car_Racing/42_Car_Racing_Car_Racing_42_602.jpg +959 231 13 18 +823 173 8 9 +813 181 9 9 +85 176 7 10 +0 258 10 29 +136 178 5 6 +774 183 7 10 +302 173 3 8 +# 42--Car_Racing/42_Car_Racing_Nascar_42_440.jpg +0 171 12 21 +66 160 17 20 +156 171 12 19 +166 170 13 20 +599 175 23 25 +657 139 23 24 +696 163 23 28 +730 149 18 33 +804 158 14 20 +866 169 13 17 +# 
42--Car_Racing/42_Car_Racing_Nascar_42_442.jpg +882 793 21 25 +789 816 18 27 +723 816 21 24 +637 810 21 26 +# 42--Car_Racing/42_Car_Racing_Nascar_42_462.jpg +717 335 30 35 +970 303 35 45 +895 392 14 17 +# 42--Car_Racing/42_Car_Racing_Nascar_42_911.jpg +510 168 278 322 +# 42--Car_Racing/42_Car_Racing_Nascar_42_828.jpg +175 118 17 27 +297 129 10 17 +329 67 53 71 +459 82 53 64 +534 78 51 61 +757 123 20 26 +# 42--Car_Racing/42_Car_Racing_Car_Racing_42_263.jpg +30 132 67 89 +400 183 42 58 +606 177 28 48 +801 126 42 53 +958 194 43 51 +# 42--Car_Racing/42_Car_Racing_Nascar_42_468.jpg +590 4 190 236 +# 42--Car_Racing/42_Car_Racing_Nascar_42_900.jpg +368 6 422 524 +# 42--Car_Racing/42_Car_Racing_Car_Racing_42_743.jpg +606 87 19 27 +753 100 5 9 +704 113 5 11 +125 121 14 17 +97 117 12 13 +# 42--Car_Racing/42_Car_Racing_Nascar_42_650.jpg +908 174 9 21 +982 192 6 15 +689 195 18 23 +654 219 14 11 +854 207 9 11 +628 219 17 18 +782 143 13 28 +268 164 27 34 +151 193 26 30 +382 160 31 41 +108 130 43 61 +# 42--Car_Racing/42_Car_Racing_Car_Racing_42_857.jpg +102 124 18 23 +186 136 19 24 +159 142 8 8 +# 42--Car_Racing/42_Car_Racing_Nascar_42_823.jpg +1 278 8 15 +146 262 8 9 +128 264 6 5 +179 339 16 15 +193 284 6 9 +254 256 8 12 +378 261 7 12 +413 230 18 29 +894 239 17 17 +991 222 16 20 +849 247 7 9 +635 243 5 7 +662 244 8 9 +345 256 8 12 +116 263 6 9 +704 300 16 20 +# 43--Row_Boat/43_Row_Boat_Canoe_43_842.jpg +485 86 48 65 +771 216 40 65 +# 43--Row_Boat/43_Row_Boat_Canoe_43_757.jpg +517 205 45 55 +# 43--Row_Boat/43_Row_Boat_Rowboat_43_688.jpg +484 337 31 37 +# 43--Row_Boat/43_Row_Boat_Rowboat_43_797.jpg +482 368 15 25 +# 43--Row_Boat/43_Row_Boat_Canoe_43_881.jpg +262 284 626 384 +# 43--Row_Boat/43_Row_Boat_Canoe_43_784.jpg +206 474 28 23 +222 437 24 25 +349 462 29 31 +400 423 20 27 +657 444 23 25 +# 43--Row_Boat/43_Row_Boat_Rowboat_43_563.jpg +614 103 54 61 +# 43--Row_Boat/43_Row_Boat_Canoe_43_1048.jpg +209 218 57 72 +334 237 56 78 +437 283 53 67 +567 248 51 78 +667 268 52 69 +760 231 58 69 +# 43--Row_Boat/43_Row_Boat_Rowboat_43_341.jpg +331 198 10 19 +428 112 12 21 +# 43--Row_Boat/43_Row_Boat_Rowboat_43_907.jpg +189 157 26 27 +407 147 22 26 +499 147 19 28 +800 159 24 27 +# 43--Row_Boat/43_Row_Boat_Canoe_43_125.jpg +196 166 37 53 +422 212 35 47 +596 229 41 50 +# 43--Row_Boat/43_Row_Boat_Canoe_43_726.jpg +192 347 17 19 +485 334 22 37 +# 43--Row_Boat/43_Row_Boat_Rowboat_43_301.jpg +289 277 28 38 +512 296 29 37 +502 259 26 29 +716 260 34 37 +# 43--Row_Boat/43_Row_Boat_Canoe_43_547.jpg +295 148 47 51 +777 125 40 50 +# 43--Row_Boat/43_Row_Boat_Canoe_43_942.jpg +408 154 4 5 +603 148 4 4 +635 147 4 4 +177 477 28 28 +263 385 13 13 +370 455 25 24 +369 516 13 29 +465 436 22 26 +403 254 3 4 +251 260 4 4 +263 261 2 4 +276 260 3 3 +999 396 15 14 +909 462 26 28 +837 388 11 10 +758 362 8 8 +# 43--Row_Boat/43_Row_Boat_Rowboat_43_106.jpg +298 98 12 17 +312 94 14 20 +# 43--Row_Boat/43_Row_Boat_Rowboat_43_13.jpg +470 341 28 38 +# 43--Row_Boat/43_Row_Boat_Rowboat_43_758.jpg +370 198 14 17 +513 299 36 40 +# 43--Row_Boat/43_Row_Boat_Canoe_43_956.jpg +356 158 58 58 +# 43--Row_Boat/43_Row_Boat_Rowboat_43_839.jpg +541 252 62 114 +# 43--Row_Boat/43_Row_Boat_Canoe_43_940.jpg +120 398 25 34 +0 278 9 32 +247 239 25 38 +310 190 29 33 +398 201 28 43 +515 212 25 33 +677 297 27 40 +757 296 29 41 +529 427 30 41 +# 43--Row_Boat/43_Row_Boat_Canoe_43_81.jpg +278 121 43 49 +# 43--Row_Boat/43_Row_Boat_Canoe_43_51.jpg +681 221 37 41 +# 43--Row_Boat/43_Row_Boat_Canoe_43_251.jpg +460 251 19 22 +602 183 16 21 +# 
43--Row_Boat/43_Row_Boat_Canoe_43_538.jpg +485 402 14 16 +711 393 14 17 +# 43--Row_Boat/43_Row_Boat_Canoe_43_458.jpg +747 423 10 12 +774 433 11 7 +793 431 9 10 +819 429 11 12 +849 429 11 11 +886 429 12 13 +293 333 5 5 +310 331 5 5 +282 330 3 4 +277 323 4 5 +333 344 5 4 +325 335 5 6 +363 342 3 5 +378 343 6 6 +408 377 6 7 +405 353 5 6 +417 355 5 6 +430 335 4 7 +461 398 7 7 +455 373 5 6 +449 393 7 7 +432 389 6 7 +415 387 6 6 +394 378 5 8 +382 377 5 6 +374 376 6 7 +366 377 5 5 +486 383 7 6 +477 378 5 7 +463 374 4 6 +456 381 5 6 +509 375 7 6 +525 390 6 9 +489 396 6 9 +508 403 7 7 +542 407 7 7 +526 406 6 7 +532 403 5 9 +542 395 7 7 +553 398 6 7 +561 398 6 7 +561 412 7 7 +581 410 8 11 +580 400 7 8 +598 403 7 9 +601 412 7 10 +612 402 9 10 +624 418 7 11 +639 412 8 10 +658 419 7 10 +663 409 7 10 +686 419 9 10 +710 423 10 12 +# 43--Row_Boat/43_Row_Boat_Canoe_43_93.jpg +631 252 22 25 +# 43--Row_Boat/43_Row_Boat_Canoe_43_438.jpg +423 346 22 33 +758 349 26 31 +# 43--Row_Boat/43_Row_Boat_Canoe_43_325.jpg +135 389 17 18 +182 383 15 23 +251 396 14 14 +320 403 11 17 +385 386 14 19 +443 403 18 20 +493 384 18 17 +569 405 16 19 +600 404 16 17 +691 396 21 21 +796 419 15 20 +916 423 13 17 +989 366 17 19 +# 43--Row_Boat/43_Row_Boat_Canoe_43_133.jpg +498 102 60 88 +# 43--Row_Boat/43_Row_Boat_Canoe_43_234.jpg +343 222 21 35 +786 215 34 52 +# 43--Row_Boat/43_Row_Boat_Canoe_43_227.jpg +455 220 37 49 +643 229 38 49 +# 43--Row_Boat/43_Row_Boat_Canoe_43_341.jpg +403 592 59 99 +# 43--Row_Boat/43_Row_Boat_Canoe_43_1047.jpg +474 22 108 166 +# 43--Row_Boat/43_Row_Boat_Rowboat_43_1.jpg +292 184 27 50 +470 157 19 38 +# 43--Row_Boat/43_Row_Boat_Canoe_43_372.jpg +255 294 26 35 +# 43--Row_Boat/43_Row_Boat_Rowboat_43_1024.jpg +548 148 105 153 +# 43--Row_Boat/43_Row_Boat_Canoe_43_429.jpg +343 318 21 24 +# 43--Row_Boat/43_Row_Boat_Rowboat_43_500.jpg +447 290 28 37 +# 43--Row_Boat/43_Row_Boat_Canoe_43_276.jpg +511 213 21 33 +# 43--Row_Boat/43_Row_Boat_Rowboat_43_287.jpg +532 177 29 39 +# 43--Row_Boat/43_Row_Boat_Canoe_43_565.jpg +485 315 93 149 +# 43--Row_Boat/43_Row_Boat_Rowboat_43_717.jpg +238 376 36 34 +313 348 30 24 +# 44--Aerobics/44_Aerobics_Aerobics_44_337.jpg +276 311 94 147 +568 359 102 145 +# 44--Aerobics/44_Aerobics_Aerobics_44_659.jpg +519 196 29 41 +# 44--Aerobics/44_Aerobics_Aerobics_44_916.jpg +546 580 53 47 +702 301 47 57 +792 204 40 48 +619 198 42 53 +489 187 40 50 +498 63 38 60 +521 315 44 58 +250 306 47 58 +372 196 42 56 +346 178 35 55 +131 211 44 46 +# 44--Aerobics/44_Aerobics_Aerobics_44_407.jpg +186 208 68 88 +# 44--Aerobics/44_Aerobics_Aerobics_44_370.jpg +830 0 41 24 +943 32 49 67 +672 66 63 84 +581 7 43 49 +304 145 75 97 +249 12 44 50 +175 59 59 71 +# 44--Aerobics/44_Aerobics_Aerobics_44_809.jpg +491 211 34 45 +833 194 32 42 +176 250 33 46 +# 44--Aerobics/44_Aerobics_Aerobics_44_3.jpg +812 133 20 33 +709 152 19 28 +568 131 23 42 +392 168 19 33 +467 182 16 26 +157 168 24 42 +# 44--Aerobics/44_Aerobics_Aerobics_44_433.jpg +656 275 80 97 +468 271 48 58 +136 336 77 104 +170 207 23 33 +721 229 25 31 +588 239 24 39 +974 181 19 28 +# 44--Aerobics/44_Aerobics_Aerobics_44_240.jpg +788 879 99 72 +672 619 93 72 +120 405 91 81 +# 44--Aerobics/44_Aerobics_Aerobics_44_852.jpg +945 203 20 22 +907 192 17 18 +846 196 14 17 +700 262 41 45 +685 239 26 35 +676 227 20 22 +630 221 16 20 +618 212 15 19 +486 231 23 27 +493 202 16 19 +438 253 29 33 +387 275 37 42 +327 223 19 23 +349 213 16 18 +268 240 18 29 +237 262 26 28 +478 224 14 18 +190 238 16 17 +169 243 17 20 +119 292 30 39 +92 257 22 25 +223 224 15 18 +0 242 13 18 +# 
44--Aerobics/44_Aerobics_Aerobics_44_1032.jpg +368 255 284 443 +# 44--Aerobics/44_Aerobics_Aerobics_44_597.jpg +783 286 40 45 +963 319 29 32 +725 330 22 31 +567 316 21 31 +416 319 43 55 +326 344 24 30 +154 344 13 15 +106 347 19 28 +1 316 51 47 +# 44--Aerobics/44_Aerobics_Aerobics_44_216.jpg +836 20 18 21 +793 81 17 18 +728 67 16 32 +739 96 14 18 +679 132 12 18 +322 340 15 14 +314 365 12 15 +234 393 16 14 +224 413 13 13 +123 482 12 11 +79 472 15 17 +575 137 16 33 +568 172 13 22 +153 384 21 33 +# 44--Aerobics/44_Aerobics_Aerobics_44_76.jpg +82 108 76 72 +354 208 54 60 +518 190 60 66 +706 54 96 108 +# 44--Aerobics/44_Aerobics_Aerobics_44_173.jpg +640 144 142 178 +216 160 160 228 +# 44--Aerobics/44_Aerobics_Aerobics_44_17.jpg +828 346 51 61 +596 298 52 41 +477 246 47 30 +486 123 36 36 +378 65 27 38 +381 253 47 40 +207 189 37 38 +159 147 35 38 +275 319 53 63 +# 44--Aerobics/44_Aerobics_Aerobics_44_194.jpg +462 140 12 15 +635 279 16 11 +474 484 41 21 +459 375 24 13 +428 314 21 17 +15 473 31 17 +108 368 22 12 +241 303 21 10 +569 248 19 9 +580 215 16 8 +424 154 12 13 +# 44--Aerobics/44_Aerobics_Aerobics_44_549.jpg +799 40 47 64 +444 50 61 93 +246 119 39 43 +560 318 18 25 +679 295 16 24 +346 163 29 33 +# 44--Aerobics/44_Aerobics_Aerobics_44_610.jpg +378 111 96 120 +# 44--Aerobics/44_Aerobics_Aerobics_44_755.jpg +467 251 176 200 +# 44--Aerobics/44_Aerobics_Aerobics_44_184.jpg +522 54 72 134 +# 44--Aerobics/44_Aerobics_Aerobics_44_237.jpg +558 124 164 208 +344 206 114 156 +206 240 106 126 +# 44--Aerobics/44_Aerobics_Aerobics_44_379.jpg +520 390 102 135 +408 375 84 105 +# 44--Aerobics/44_Aerobics_Aerobics_44_794.jpg +242 98 134 184 +530 152 106 122 +# 44--Aerobics/44_Aerobics_Aerobics_44_343.jpg +943 237 14 16 +927 158 13 18 +898 200 20 30 +797 179 13 14 +761 173 18 21 +664 166 8 11 +590 174 29 43 +688 169 10 14 +387 161 22 26 +288 179 38 53 +# 44--Aerobics/44_Aerobics_Aerobics_44_430.jpg +776 143 49 59 +564 107 68 91 +392 112 38 53 +124 91 63 81 +# 44--Aerobics/44_Aerobics_Aerobics_44_332.jpg +937 314 13 21 +988 302 14 17 +832 302 16 22 +869 303 12 13 +932 301 16 11 +769 294 11 15 +750 305 14 20 +707 312 18 24 +655 300 10 13 +548 308 13 19 +427 307 26 33 +447 301 17 27 +385 304 12 18 +397 298 12 17 +314 300 10 15 +299 304 11 16 +536 287 10 12 +244 286 16 22 +210 301 15 16 +181 306 23 26 +124 302 13 15 +75 301 10 14 +20 294 13 16 +867 289 9 13 +# 44--Aerobics/44_Aerobics_Aerobics_44_936.jpg +736 416 67 83 +533 432 69 80 +432 411 75 96 +283 531 80 88 +387 821 69 104 +544 827 72 112 +# 44--Aerobics/44_Aerobics_Aerobics_44_246.jpg +792 1027 35 49 +242 1176 22 33 +319 1217 11 14 +102 1204 13 19 +10 1201 12 17 +79 1211 8 11 +405 1208 8 11 +186 1211 8 12 +777 648 53 67 +657 639 62 75 +271 599 60 81 +636 199 12 16 +883 150 8 11 +392 200 10 15 +341 220 14 27 +# 44--Aerobics/44_Aerobics_Aerobics_44_231.jpg +436 104 134 170 +# 44--Aerobics/44_Aerobics_Aerobics_44_585.jpg +821 210 35 51 +528 186 31 37 +178 206 28 42 +291 192 31 59 +# 44--Aerobics/44_Aerobics_Aerobics_44_400.jpg +213 168 152 219 +# 44--Aerobics/44_Aerobics_Aerobics_44_652.jpg +260 46 90 102 +718 70 72 98 +# 44--Aerobics/44_Aerobics_Aerobics_44_127.jpg +786 251 34 55 +716 245 23 32 +612 286 20 30 +93 273 21 47 +201 286 13 32 +661 264 14 23 +# 44--Aerobics/44_Aerobics_Aerobics_44_71.jpg +552 84 88 110 +# 44--Aerobics/44_Aerobics_Aerobics_44_640.jpg +929 422 27 42 +873 207 22 34 +627 232 22 28 +649 420 26 43 +378 415 25 34 +413 221 22 33 +133 240 22 36 +93 445 24 35 +# 44--Aerobics/44_Aerobics_Aerobics_44_339.jpg +666 168 25 30 +813 51 42 60 +518 
140 28 41 +366 137 38 53 +254 161 27 39 +130 39 61 77 +# 44--Aerobics/44_Aerobics_Aerobics_44_742.jpg +776 258 24 38 +702 242 29 31 +594 259 24 32 +463 135 34 54 +341 218 22 35 +273 244 27 43 +164 194 26 35 +# 44--Aerobics/44_Aerobics_Aerobics_44_443.jpg +381 226 111 176 +# 44--Aerobics/44_Aerobics_Aerobics_44_769.jpg +1 857 195 273 +270 470 59 72 +176 538 42 49 +491 581 34 43 +584 542 41 54 +# 44--Aerobics/44_Aerobics_Aerobics_44_650.jpg +180 98 412 500 +# 44--Aerobics/44_Aerobics_Aerobics_44_66.jpg +904 231 29 38 +781 199 48 63 +676 199 37 38 +256 139 66 98 +435 234 36 49 +474 260 25 28 +187 224 27 33 +90 254 17 20 +# 44--Aerobics/44_Aerobics_Aerobics_44_937.jpg +310 166 248 338 +# 44--Aerobics/44_Aerobics_Aerobics_44_707.jpg +172 448 130 82 +# 44--Aerobics/44_Aerobics_Aerobics_44_329.jpg +326 74 16 18 +874 292 15 27 +943 252 13 20 +682 321 20 28 +680 250 11 17 +# 44--Aerobics/44_Aerobics_Aerobics_44_578.jpg +462 219 129 84 +# 44--Aerobics/44_Aerobics_Aerobics_44_167.jpg +884 313 64 87 +925 243 22 30 +959 261 15 17 +1007 273 17 14 +792 273 39 49 +844 233 17 21 +882 263 11 14 +770 272 24 34 +748 247 15 17 +726 268 14 16 +684 268 19 29 +611 310 26 30 +575 249 16 18 +882 486 142 191 +375 281 30 40 +273 303 69 99 +379 265 19 24 +177 281 23 26 +# 44--Aerobics/44_Aerobics_Aerobics_44_96.jpg +944 506 38 67 +962 343 41 46 +847 336 12 20 +616 356 17 30 +611 321 16 20 +515 351 19 18 +441 439 39 49 +453 376 20 38 +429 434 27 22 +189 139 19 32 +# 44--Aerobics/44_Aerobics_Aerobics_44_120.jpg +482 118 88 108 +# 44--Aerobics/44_Aerobics_Aerobics_44_35.jpg +463 687 144 199 +714 527 95 132 +843 331 77 98 +561 319 71 80 +104 362 74 92 +120 233 52 83 +509 251 67 86 +# 44--Aerobics/44_Aerobics_Aerobics_44_688.jpg +272 272 221 341 +722 462 79 166 +794 356 127 217 +562 341 66 139 +# 44--Aerobics/44_Aerobics_Aerobics_44_629.jpg +715 285 50 65 +826 247 48 62 +552 257 49 72 +353 264 47 64 +322 93 38 57 +# 44--Aerobics/44_Aerobics_Aerobics_44_919.jpg +565 74 45 77 +914 54 25 38 +255 63 31 42 +110 85 31 55 +# 44--Aerobics/44_Aerobics_Aerobics_44_583.jpg +361 214 346 446 +# 44--Aerobics/44_Aerobics_Aerobics_44_762.jpg +228 250 116 150 +364 150 120 148 +614 84 124 146 +# 45--Balloonist/45_Balloonist_Balloonist_45_615.jpg +94 114 84 102 +248 166 120 164 +488 164 70 60 +746 162 86 104 +# 45--Balloonist/45_Balloonist_Balloonist_45_186.jpg +456 200 195 330 +# 45--Balloonist/45_Balloonist_Balloonist_45_531.jpg +344 144 58 60 +# 45--Balloonist/45_Balloonist_Balloonist_45_692.jpg +814 734 36 34 +683 741 10 13 +782 717 8 11 +657 716 7 10 +701 720 12 15 +906 724 7 12 +948 721 11 15 +933 716 8 10 +# 45--Balloonist/45_Balloonist_Balloonist_45_277.jpg +462 583 85 132 +# 45--Balloonist/45_Balloonist_Balloonist_45_838.jpg +33 223 20 25 +126 368 37 50 +180 245 32 41 +183 307 36 40 +190 383 31 40 +233 380 43 54 +641 346 29 36 +490 280 25 40 +458 273 27 40 +462 222 30 37 +539 234 25 39 +590 238 22 30 +652 250 27 32 +690 227 26 31 +711 337 31 42 +726 368 38 50 +906 360 36 39 +831 351 38 44 +788 312 29 41 +839 302 31 38 +817 238 29 35 +764 279 26 33 +731 239 22 25 +276 332 32 40 +279 299 30 36 +277 211 27 32 +353 392 32 39 +407 343 31 43 +460 378 32 50 +491 349 31 39 +526 382 31 41 +565 369 27 35 +608 376 39 52 +# 45--Balloonist/45_Balloonist_Balloonist_45_211.jpg +474 793 72 99 +# 45--Balloonist/45_Balloonist_Balloonist_45_217.jpg +562 338 304 398 +# 45--Balloonist/45_Balloonist_Balloonist_45_369.jpg +304 326 58 62 +244 192 58 104 +770 224 64 102 +# 45--Balloonist/45_Balloonist_Balloonist_45_416.jpg +487 1328 31 49 +403 590 80 82 +# 
45--Balloonist/45_Balloonist_Balloonist_45_207.jpg +467 295 233 339 +# 45--Balloonist/45_Balloonist_Balloonist_45_402.jpg +600 246 56 76 +553 203 54 66 +469 87 59 73 +351 218 50 67 +280 253 44 56 +# 45--Balloonist/45_Balloonist_Balloonist_45_1028.jpg +320 636 19 26 +834 518 16 29 +# 45--Balloonist/45_Balloonist_Balloonist_45_936.jpg +150 148 330 486 +# 45--Balloonist/45_Balloonist_Balloonist_45_160.jpg +398 316 159 229 +# 45--Balloonist/45_Balloonist_Balloonist_45_107.jpg +465 350 11 13 +560 350 14 14 +557 332 12 15 +# 45--Balloonist/45_Balloonist_Balloonist_45_685.jpg +399 334 39 48 +# 45--Balloonist/45_Balloonist_Balloonist_45_225.jpg +440 120 100 226 +544 176 162 216 +46 2 140 152 +# 45--Balloonist/45_Balloonist_Balloonist_45_434.jpg +578 80 166 236 +# 45--Balloonist/45_Balloonist_Balloonist_45_142.jpg +157 307 168 200 +# 45--Balloonist/45_Balloonist_Balloonist_45_86.jpg +454 364 106 136 +728 294 74 86 +# 45--Balloonist/45_Balloonist_Balloonist_45_518.jpg +496 75 87 100 +412 100 72 95 +431 351 66 74 +983 157 37 45 +798 179 22 37 +627 180 52 63 +77 32 36 88 +# 45--Balloonist/45_Balloonist_Balloonist_45_733.jpg +264 24 240 308 +618 164 184 240 +# 45--Balloonist/45_Balloonist_Balloonist_45_974.jpg +860 67 43 57 +410 241 219 298 +# 45--Balloonist/45_Balloonist_Balloonist_45_508.jpg +282 220 78 102 +474 270 66 80 +548 224 78 102 +656 272 68 80 +720 366 76 100 +# 45--Balloonist/45_Balloonist_Balloonist_45_118.jpg +204 285 58 93 +334 208 73 93 +449 265 53 76 +526 203 57 68 +716 88 73 88 +844 138 45 62 +154 24 11 18 +367 337 51 85 +436 218 44 55 +504 99 11 14 +# 45--Balloonist/45_Balloonist_Balloonist_45_769.jpg +358 446 54 48 +# 45--Balloonist/45_Balloonist_Balloonist_45_939.jpg +398 62 186 254 +# 45--Balloonist/45_Balloonist_Balloonist_45_550.jpg +83 308 47 49 +# 45--Balloonist/45_Balloonist_Balloonist_45_149.jpg +470 275 218 356 +# 45--Balloonist/45_Balloonist_Balloonist_45_273.jpg +103 315 77 82 +124 166 52 76 +169 206 34 48 +377 157 32 38 +466 162 37 41 +429 40 31 38 +623 190 54 68 +706 199 67 86 +720 295 115 157 +820 123 14 13 +866 101 15 25 +746 98 8 20 +199 117 20 19 +377 109 20 22 +# 45--Balloonist/45_Balloonist_Balloonist_45_134.jpg +338 64 52 76 +630 180 60 82 +# 45--Balloonist/45_Balloonist_Balloonist_45_857.jpg +494 356 19 28 +610 352 22 31 +# 46--Jockey/46_Jockey_Jockey_46_569.jpg +199 216 9 11 +245 216 17 16 +272 141 10 15 +846 182 11 11 +411 212 7 10 +394 216 6 8 +782 37 5 6 +812 41 6 7 +62 1 8 7 +509 6 7 9 +525 9 8 9 +659 27 7 8 +686 26 5 8 +630 19 7 8 +605 12 6 8 +463 0 7 8 +277 150 41 58 +440 124 44 55 +155 85 33 76 +865 129 49 51 +700 18 38 50 +410 136 12 12 +384 133 12 13 +365 136 11 13 +403 177 10 12 +318 128 12 17 +320 213 18 17 +189 132 13 14 +213 144 9 10 +227 139 12 18 +395 148 10 11 +# 46--Jockey/46_Jockey_Jockey_46_172.jpg +832 160 110 154 +# 46--Jockey/46_Jockey_Jockey_46_537.jpg +486 36 106 164 +# 46--Jockey/46_Jockey_Jockey_46_166.jpg +419 187 91 120 +# 46--Jockey/46_Jockey_Jockey_46_44.jpg +398 549 40 30 +459 114 38 36 +780 400 24 26 +# 46--Jockey/46_Jockey_Jockey_46_497.jpg +485 199 229 325 +# 46--Jockey/46_Jockey_Jockey_46_823.jpg +410 8 266 321 +# 46--Jockey/46_Jockey_Jockey_46_106.jpg +422 130 86 140 +# 46--Jockey/46_Jockey_Jockey_46_130.jpg +562 78 68 92 +# 46--Jockey/46_Jockey_Jockey_46_923.jpg +628 495 69 84 +# 46--Jockey/46_Jockey_Jockey_46_308.jpg +708 48 62 78 +188 128 52 84 +# 46--Jockey/46_Jockey_Jockey_46_188.jpg +346 59 49 66 +453 114 46 59 +# 46--Jockey/46_Jockey_Jockey_46_909.jpg +272 181 17 20 +354 195 14 16 +482 184 8 20 +487 154 9 15 +506 152 6 
8 +576 137 6 9 +613 145 5 7 +642 189 13 19 +649 246 17 24 +506 287 31 38 +212 573 47 72 +496 551 55 70 +454 406 49 58 +908 275 26 32 +770 207 18 21 +784 185 16 19 +735 176 12 19 +693 180 14 23 +736 267 18 21 +915 128 11 10 +532 162 11 13 +544 153 9 12 +551 141 6 7 +0 191 12 39 +650 287 17 19 +# 46--Jockey/46_Jockey_Jockey_46_79.jpg +323 237 46 61 +390 245 39 58 +413 217 38 58 +423 169 42 57 +459 149 31 47 +665 22 32 47 +618 30 31 41 +594 43 36 49 +539 41 43 50 +552 78 38 57 +678 0 30 20 +189 401 30 48 +539 104 18 43 +# 46--Jockey/46_Jockey_Jockey_46_933.jpg +220 142 116 178 +422 130 136 172 +650 130 120 170 +# 46--Jockey/46_Jockey_Jockey_46_444.jpg +98 349 37 46 +216 336 37 43 +342 335 41 49 +484 331 38 52 +664 361 42 48 +857 345 43 52 +814 138 33 40 +710 148 40 45 +615 137 35 49 +521 150 38 45 +408 169 37 42 +315 145 35 45 +218 139 31 39 +143 215 3 4 +# 46--Jockey/46_Jockey_Jockey_46_508.jpg +489 186 49 55 +# 46--Jockey/46_Jockey_Jockey_46_728.jpg +370 208 186 240 +# 46--Jockey/46_Jockey_Jockey_46_393.jpg +154 30 78 100 +# 46--Jockey/46_Jockey_Jockey_46_652.jpg +278 226 92 136 +# 46--Jockey/46_Jockey_Jockey_46_202.jpg +486 66 54 76 +# 46--Jockey/46_Jockey_Jockey_46_259.jpg +555 28 141 215 +# 46--Jockey/46_Jockey_Jockey_46_51.jpg +536 76 104 152 +# 46--Jockey/46_Jockey_Jockey_46_352.jpg +87 148 48 57 +167 236 49 61 +300 251 45 59 +207 140 43 53 +221 46 40 51 +337 42 36 49 +420 74 36 45 +337 121 40 54 +436 246 43 61 +495 181 36 48 +521 71 32 45 +611 65 35 48 +631 144 39 54 +558 242 43 54 +678 232 43 59 +694 68 34 47 +791 55 37 48 +779 135 40 52 +902 131 43 56 +809 226 47 58 +# 46--Jockey/46_Jockey_Jockey_46_254.jpg +240 96 160 254 +# 46--Jockey/46_Jockey_Jockey_46_718.jpg +419 86 33 43 +# 46--Jockey/46_Jockey_Jockey_46_409.jpg +430 104 70 104 +# 46--Jockey/46_Jockey_Jockey_46_758.jpg +363 147 227 296 +# 46--Jockey/46_Jockey_Jockey_46_779.jpg +935 117 41 54 +537 129 51 54 +994 117 29 47 +790 214 32 35 +# 46--Jockey/46_Jockey_Jockey_46_76.jpg +654 230 58 84 +560 236 56 82 +414 250 58 82 +300 246 58 90 +# 46--Jockey/46_Jockey_Jockey_46_54.jpg +270 237 50 64 +506 236 59 60 +756 365 71 57 +# 47--Matador_Bullfighter/47_Matador_Bullfighter_Matador_Bullfighter_47_468.jpg +883 25 39 56 +1001 0 23 35 +# 47--Matador_Bullfighter/47_Matador_Bullfighter_Matador_Bullfighter_47_195.jpg +288 8 51 57 +895 241 68 92 +756 97 62 75 +702 109 49 78 +570 115 54 83 +# 47--Matador_Bullfighter/47_Matador_Bullfighter_Matador_Bullfighter_47_575.jpg +590 48 74 92 +# 47--Matador_Bullfighter/47_Matador_Bullfighter_Matador_Bullfighter_47_254.jpg +444 84 148 174 +# 47--Matador_Bullfighter/47_Matador_Bullfighter_Matador_Bullfighter_47_715.jpg +404 470 93 87 +# 47--Matador_Bullfighter/47_Matador_Bullfighter_matadorbullfighting_47_746.jpg +512 263 55 52 +# 47--Matador_Bullfighter/47_Matador_Bullfighter_Matador_Bullfighter_47_617.jpg +171 11 540 684 +# 47--Matador_Bullfighter/47_Matador_Bullfighter_Matador_Bullfighter_47_912.jpg +846 351 36 55 +729 308 37 50 +833 226 30 37 +777 128 29 32 +847 122 29 38 +880 116 34 44 +908 62 35 40 +916 1 33 39 +841 57 28 37 +859 0 24 27 +817 0 25 21 +776 8 25 31 +713 122 31 35 +592 169 22 27 +629 115 22 29 +694 114 20 28 +750 112 18 30 +732 65 22 30 +684 59 21 25 +601 101 21 26 +595 51 21 20 +628 19 21 24 +769 115 23 33 +687 29 20 20 +739 10 18 26 +677 8 14 21 +657 9 15 20 +496 108 22 22 +547 58 18 24 +568 4 16 21 +433 26 20 27 +478 20 17 21 +443 4 15 19 +514 20 16 18 +250 178 19 35 +371 98 20 24 +326 95 20 27 +380 37 19 22 +385 16 16 20 +357 5 13 16 +323 60 17 18 +276 3 14 18 +301 0 11 9 +428 
2 11 12 +36 22 17 20 +59 15 18 19 +92 36 18 21 +85 0 13 14 +107 0 17 13 +120 21 15 17 +179 46 19 20 +156 16 18 23 +181 2 15 16 +216 28 11 16 +191 36 11 14 +236 22 16 20 +226 11 14 20 +206 0 11 12 +344 0 9 12 +94 309 31 54 +# 47--Matador_Bullfighter/47_Matador_Bullfighter_Matador_Bullfighter_47_785.jpg +405 217 217 220 +# 47--Matador_Bullfighter/47_Matador_Bullfighter_Matador_Bullfighter_47_588.jpg +613 117 49 47 +# 47--Matador_Bullfighter/47_Matador_Bullfighter_matadorbullfighting_47_845.jpg +547 267 390 517 +# 47--Matador_Bullfighter/47_Matador_Bullfighter_Matador_Bullfighter_47_193.jpg +314 135 28 36 +# 47--Matador_Bullfighter/47_Matador_Bullfighter_matadorbullfighting_47_561.jpg +214 8 60 50 +# 47--Matador_Bullfighter/47_Matador_Bullfighter_matadorbullfighting_47_42.jpg +215 94 51 64 +525 0 28 20 +82 0 21 25 +# 47--Matador_Bullfighter/47_Matador_Bullfighter_matadorbullfighting_47_405.jpg +834 286 54 82 +# 47--Matador_Bullfighter/47_Matador_Bullfighter_Matador_Bullfighter_47_385.jpg +335 33 99 142 +630 109 92 113 +# 47--Matador_Bullfighter/47_Matador_Bullfighter_matadorbullfighting_47_812.jpg +486 336 75 102 +# 47--Matador_Bullfighter/47_Matador_Bullfighter_matadorbullfighting_47_300.jpg +926 363 31 38 +818 341 32 32 +839 96 27 36 +225 541 33 35 +101 469 25 38 +9 0 20 30 +0 94 13 36 +16 156 17 32 +55 255 15 35 +# 47--Matador_Bullfighter/47_Matador_Bullfighter_matadorbullfighting_47_641.jpg +382 273 217 239 +# 47--Matador_Bullfighter/47_Matador_Bullfighter_matadorbullfighting_47_777.jpg +582 126 214 240 +# 47--Matador_Bullfighter/47_Matador_Bullfighter_Matador_Bullfighter_47_703.jpg +470 130 128 150 +# 47--Matador_Bullfighter/47_Matador_Bullfighter_matadorbullfighting_47_660.jpg +488 230 112 118 +218 788 126 108 +# 47--Matador_Bullfighter/47_Matador_Bullfighter_Matador_Bullfighter_47_432.jpg +579 163 53 56 +# 47--Matador_Bullfighter/47_Matador_Bullfighter_matadorbullfighting_47_72.jpg +749 4 25 29 +687 3 25 22 +634 5 24 33 +571 10 23 25 +508 11 26 34 +537 11 20 31 +974 14 22 26 +918 5 20 32 +327 64 52 62 +228 0 26 26 +225 23 22 26 +# 47--Matador_Bullfighter/47_Matador_Bullfighter_Matador_Bullfighter_47_636.jpg +92 106 94 60 +# 47--Matador_Bullfighter/47_Matador_Bullfighter_Matador_Bullfighter_47_610.jpg +432 336 156 116 +# 47--Matador_Bullfighter/47_Matador_Bullfighter_matadorbullfighting_47_443.jpg +1010 200 14 25 +998 258 15 21 +950 258 17 19 +975 196 14 19 +900 200 16 25 +927 192 14 19 +882 202 18 21 +859 165 12 20 +834 266 13 16 +811 209 20 16 +863 208 14 21 +779 210 10 13 +779 227 13 20 +759 226 10 13 +706 261 10 14 +749 266 9 12 +687 237 14 14 +707 235 11 11 +583 201 13 18 +421 219 16 21 +412 197 13 18 +370 213 14 19 +356 219 13 17 +350 198 14 21 +319 201 14 19 +196 253 18 25 +68 191 15 19 +# 47--Matador_Bullfighter/47_Matador_Bullfighter_matadorbullfighting_47_207.jpg +1002 474 19 22 +921 479 15 20 +884 482 20 25 +859 489 16 16 +718 490 17 21 +744 424 15 22 +602 465 17 28 +578 400 20 25 +471 425 16 23 +404 507 40 54 +380 420 22 27 +493 480 14 18 +63 339 22 36 +244 418 20 28 +# 47--Matador_Bullfighter/47_Matador_Bullfighter_Matador_Bullfighter_47_583.jpg +373 194 270 289 +# 47--Matador_Bullfighter/47_Matador_Bullfighter_Matador_Bullfighter_47_827.jpg +708 32 68 90 +# 47--Matador_Bullfighter/47_Matador_Bullfighter_Matador_Bullfighter_47_179.jpg +376 56 92 104 +# 47--Matador_Bullfighter/47_Matador_Bullfighter_matadorbullfighting_47_236.jpg +989 95 22 26 +943 47 19 22 +973 28 13 15 +1005 24 11 14 +828 129 24 25 +855 156 15 19 +794 114 19 27 +726 183 18 22 +716 108 21 27 +750 48 14 18 
+680 45 14 18 +673 187 26 38 +697 180 16 16 +758 172 18 24 +601 192 23 22 +588 132 19 23 +523 107 23 27 +546 200 20 25 +389 277 94 104 +455 185 22 30 +484 189 17 26 +436 123 17 28 +417 128 17 27 +387 135 19 23 +386 189 24 29 +350 194 20 24 +265 66 18 25 +277 119 16 24 +266 206 19 23 +160 178 16 21 +189 127 15 24 +154 132 16 26 +85 105 19 27 +122 178 18 23 +110 195 20 23 +94 184 15 17 +65 196 20 24 +33 191 19 24 +# 47--Matador_Bullfighter/47_Matador_Bullfighter_matadorbullfighting_47_511.jpg +997 537 14 16 +413 258 7 9 +637 296 8 13 +765 275 8 12 +858 188 8 12 +978 219 6 8 +974 170 5 6 +960 172 6 8 +933 192 9 10 +910 195 10 9 +949 153 7 9 +939 142 7 10 +927 142 6 8 +972 137 5 8 +1004 149 8 10 +956 110 6 11 +963 117 7 9 +1000 123 8 10 +33 165 6 7 +34 195 5 6 +10 153 6 8 +53 187 8 9 +32 134 10 14 +32 115 5 7 +24 137 6 8 +57 140 8 13 +70 108 6 10 +73 98 10 12 +67 81 9 12 +0 112 8 14 +7 75 6 10 +6 108 7 10 +33 70 6 9 +15 61 8 13 +73 65 6 9 +30 52 8 9 +21 33 6 7 +50 34 7 9 +7 25 6 9 +63 16 6 9 +84 33 7 10 +35 16 6 9 +2 14 8 11 +12 37 5 6 +62 3 5 7 +92 1 5 7 +89 24 8 7 +101 19 8 12 +116 26 7 10 +128 23 7 13 +117 9 7 9 +248 165 7 7 +255 150 6 7 +241 132 6 7 +227 135 5 7 +245 144 7 10 +67 176 11 11 +87 141 7 11 +103 137 7 9 +98 129 7 8 +171 160 10 10 +147 128 8 12 +198 112 5 8 +217 101 6 8 +222 104 5 8 +125 80 6 8 +136 70 10 11 +161 101 5 6 +142 88 6 9 +135 109 5 7 +125 107 8 13 +95 108 8 11 +91 81 8 8 +123 65 7 9 +103 75 7 9 +101 37 5 8 +101 63 6 6 +154 67 5 8 +162 76 6 8 +267 112 7 10 +309 113 7 9 +256 89 7 8 +245 93 5 8 +230 97 4 7 +310 129 6 8 +144 11 6 10 +166 24 6 7 +160 0 9 11 +181 33 6 7 +176 59 6 9 +185 83 8 11 +199 3 6 8 +218 0 5 7 +210 50 6 8 +216 28 5 8 +215 77 7 8 +204 82 10 13 +237 68 5 8 +247 37 6 9 +267 70 6 8 +264 60 6 8 +231 27 5 7 +222 38 6 8 +212 39 4 5 +272 35 6 8 +251 21 6 7 +280 58 6 7 +298 52 7 10 +283 15 5 8 +323 73 7 9 +335 33 6 8 +335 19 7 10 +325 21 5 8 +353 5 8 9 +356 20 5 7 +368 6 5 6 +362 34 5 6 +356 62 5 6 +382 80 9 10 +358 83 6 7 +384 52 5 7 +390 9 6 7 +391 36 7 10 +336 75 7 8 +342 106 5 8 +348 106 6 9 +344 121 7 9 +320 126 6 7 +366 111 8 11 +367 97 6 9 +373 118 7 9 +332 142 7 11 +383 123 6 11 +388 122 7 10 +420 128 13 15 +409 84 6 8 +395 90 6 8 +398 118 6 8 +405 121 5 8 +399 142 7 9 +430 141 10 11 +405 105 6 8 +437 87 5 7 +410 122 6 8 +424 95 7 11 +443 123 6 6 +6 198 4 8 +49 188 5 7 +18 191 10 6 +1 197 5 8 +2 166 6 10 +17 146 6 9 +43 163 6 8 +56 182 6 7 +51 168 6 9 +55 163 6 9 +79 178 5 6 +123 173 4 6 +98 170 5 7 +139 131 6 9 +89 110 6 8 +86 96 7 9 +9 67 5 7 +9 74 6 9 +14 44 7 9 +0 43 7 12 +92 73 6 8 +114 42 7 7 +81 7 4 7 +53 20 7 10 +69 8 6 9 +110 0 5 6 +92 53 7 9 +198 14 7 10 +199 35 4 6 +252 9 5 8 +269 9 6 9 +306 77 6 7 +317 86 7 10 +281 72 6 8 +318 109 6 9 +368 143 8 7 +367 123 6 7 +385 105 5 6 +457 114 6 9 +449 121 9 8 +461 100 6 7 +538 120 7 10 +520 107 6 8 +526 119 9 11 +566 125 9 11 +548 102 6 7 +556 93 7 10 +588 116 6 11 +579 128 7 8 +569 137 9 12 +586 137 5 9 +534 81 6 7 +514 81 5 7 +512 105 6 6 +430 20 5 7 +428 3 6 9 +414 6 9 12 +400 4 5 9 +442 5 5 7 +457 15 7 9 +449 46 8 10 +445 62 5 7 +445 24 5 7 +471 48 6 7 +427 51 6 7 +431 60 7 7 +482 84 6 8 +474 67 6 9 +467 72 5 5 +476 33 6 8 +484 21 8 13 +479 14 5 8 +485 65 4 7 +499 62 5 9 +527 70 5 6 +518 51 4 8 +532 32 6 10 +508 16 6 9 +497 8 6 10 +558 6 7 9 +522 36 5 8 +573 17 7 10 +554 68 6 7 +586 70 6 9 +579 74 5 9 +554 59 5 6 +543 51 6 7 +542 68 5 7 +566 37 5 7 +604 49 5 9 +613 85 7 11 +596 74 7 8 +618 39 8 10 +591 23 6 8 +599 9 5 8 +587 0 13 15 +613 23 5 5 +628 43 5 9 +657 51 6 8 +666 89 7 9 +619 97 8 11 
+670 9 7 10 +675 3 7 9 +894 129 6 11 +918 111 7 9 +903 111 5 8 +871 121 9 8 +859 99 7 8 +885 97 6 9 +920 92 6 9 +991 157 8 10 +991 98 7 8 +941 106 9 12 +963 96 6 9 +957 76 6 8 +906 83 5 9 +907 54 6 9 +941 52 9 12 +938 38 7 9 +963 62 6 6 +967 40 6 8 +906 28 8 11 +935 18 5 5 +975 29 7 9 +928 7 6 8 +881 49 8 8 +873 77 7 9 +886 32 7 8 +896 6 7 9 +633 123 5 7 +599 108 9 11 +603 124 7 9 +665 122 6 9 +685 136 8 8 +633 105 6 9 +722 120 4 6 +714 130 5 7 +710 143 5 9 +730 142 8 9 +697 141 6 6 +704 157 6 9 +716 93 8 10 +728 88 9 12 +707 96 5 7 +699 116 8 8 +708 115 5 7 +780 147 6 7 +791 145 6 9 +759 159 7 9 +747 162 7 9 +733 159 6 9 +720 154 6 10 +751 127 6 9 +760 126 7 11 +761 111 8 9 +772 128 6 9 +794 127 7 11 +823 112 7 10 +798 107 6 8 +770 102 6 9 +810 91 6 8 +791 89 6 8 +822 98 7 8 +811 109 6 10 +615 66 6 9 +623 8 4 6 +623 16 7 9 +629 66 7 10 +693 91 7 9 +665 36 6 6 +654 84 7 9 +701 46 6 8 +686 45 6 9 +684 31 4 8 +697 23 6 11 +712 23 7 11 +723 33 8 9 +726 47 6 8 +716 12 5 9 +730 5 5 7 +727 21 5 8 +692 12 5 7 +697 15 6 7 +684 20 7 8 +718 66 4 6 +764 86 7 9 +718 80 7 8 +747 67 5 8 +728 68 6 6 +756 92 7 6 +778 86 8 8 +775 63 6 10 +763 4 5 7 +769 29 5 9 +787 33 5 8 +776 40 5 8 +783 57 5 7 +804 38 5 7 +805 59 7 8 +789 17 6 10 +823 40 6 9 +824 26 6 6 +808 23 8 10 +803 8 5 8 +800 1 7 7 +799 27 5 6 +778 10 6 9 +854 73 6 10 +840 71 6 9 +856 28 8 9 +810 5 7 9 +828 11 5 6 +22 585 9 21 +498 34 8 9 +678 62 6 8 +863 47 8 7 +828 54 7 8 +149 43 9 11 +57 52 10 10 +112 103 5 9 +280 126 8 11 +935 79 8 8 +# 47--Matador_Bullfighter/47_Matador_Bullfighter_Matador_Bullfighter_47_354.jpg +438 90 60 84 +# 47--Matador_Bullfighter/47_Matador_Bullfighter_Matador_Bullfighter_47_631.jpg +727 84 40 69 +# 47--Matador_Bullfighter/47_Matador_Bullfighter_Matador_Bullfighter_47_761.jpg +1010 125 14 43 +885 115 26 37 +703 128 28 30 +691 42 32 39 +595 45 23 32 +555 17 26 29 +471 138 27 38 +472 29 25 41 +349 113 30 48 +346 40 27 36 +197 120 26 38 +182 38 26 39 +236 29 30 41 +74 34 29 38 +38 120 28 33 +285 554 26 39 +# 47--Matador_Bullfighter/47_Matador_Bullfighter_matadorbullfighting_47_177.jpg +225 83 19 26 +# 47--Matador_Bullfighter/47_Matador_Bullfighter_matadorbullfighting_47_38.jpg +628 91 46 45 +# 47--Matador_Bullfighter/47_Matador_Bullfighter_Matador_Bullfighter_47_874.jpg +278 198 148 262 +650 208 128 224 +# 47--Matador_Bullfighter/47_Matador_Bullfighter_matadorbullfighting_47_491.jpg +982 197 15 17 +916 135 14 19 +949 64 11 14 +892 138 13 16 +842 130 16 20 +867 140 15 22 +749 192 17 23 +405 192 15 15 +347 195 10 13 +316 196 14 17 +231 184 10 15 +197 200 14 16 +183 154 14 20 +97 185 18 23 +87 133 15 19 +35 193 14 13 +7 151 12 17 +# 47--Matador_Bullfighter/47_Matador_Bullfighter_matadorbullfighting_47_240.jpg +759 106 70 101 +648 65 36 40 +535 48 40 51 +960 110 37 54 +81 91 36 40 +221 117 39 43 +# 47--Matador_Bullfighter/47_Matador_Bullfighter_matadorbullfighting_47_657.jpg +132 194 84 94 +268 102 96 128 +# 47--Matador_Bullfighter/47_Matador_Bullfighter_matadorbullfighting_47_779.jpg +803 116 37 52 +787 254 37 51 +784 391 34 47 +151 523 8 10 +# 47--Matador_Bullfighter/47_Matador_Bullfighter_Matador_Bullfighter_47_196.jpg +450 124 54 66 +# 47--Matador_Bullfighter/47_Matador_Bullfighter_matadorbullfighting_47_645.jpg +672 121 49 63 +593 117 51 62 +527 148 47 60 +391 129 51 63 +# 47--Matador_Bullfighter/47_Matador_Bullfighter_matadorbullfighting_47_936.jpg +656 16 86 112 +# 47--Matador_Bullfighter/47_Matador_Bullfighter_matadorbullfighting_47_266.jpg +252 100 62 72 +# 
47--Matador_Bullfighter/47_Matador_Bullfighter_Matador_Bullfighter_47_338.jpg +350 36 316 422 +# 47--Matador_Bullfighter/47_Matador_Bullfighter_Matador_Bullfighter_47_782.jpg +506 130 126 218 +# 47--Matador_Bullfighter/47_Matador_Bullfighter_matadorbullfighting_47_354.jpg +760 414 80 84 +# 47--Matador_Bullfighter/47_Matador_Bullfighter_Matador_Bullfighter_47_171.jpg +114 192 198 116 +# 47--Matador_Bullfighter/47_Matador_Bullfighter_Matador_Bullfighter_47_566.jpg +690 278 60 62 +# 47--Matador_Bullfighter/47_Matador_Bullfighter_matadorbullfighting_47_19.jpg +564 40 58 80 +# 47--Matador_Bullfighter/47_Matador_Bullfighter_matadorbullfighting_47_731.jpg +27 26 9 12 +59 22 9 13 +44 6 9 15 +24 1 9 12 +9 13 8 10 +0 7 8 16 +61 8 10 9 +84 23 10 11 +61 49 11 11 +70 64 12 14 +91 0 10 12 +82 7 7 8 +11 71 8 12 +0 80 6 12 +15 82 9 13 +29 78 10 10 +52 83 11 14 +67 77 11 14 +83 82 11 12 +68 96 11 11 +34 97 10 13 +18 97 10 11 +119 215 13 13 +82 212 12 13 +30 209 13 19 +14 168 11 15 +57 172 10 14 +28 154 11 13 +19 135 12 14 +1 149 14 16 +10 117 11 11 +40 127 10 13 +64 116 11 14 +84 121 13 17 +81 144 11 15 +73 150 12 13 +85 99 10 14 +100 90 9 11 +106 79 10 14 +117 81 7 11 +143 77 9 13 +158 79 11 16 +126 103 10 13 +109 106 10 15 +103 122 11 12 +105 137 11 16 +123 145 11 13 +115 162 10 18 +165 159 10 13 +157 143 10 13 +144 134 11 15 +140 115 10 14 +149 110 9 12 +154 98 10 12 +169 101 10 14 +163 124 12 13 +113 10 11 13 +101 4 11 14 +130 7 8 11 +141 11 10 12 +165 7 9 13 +147 0 13 13 +180 3 9 11 +191 9 11 12 +204 8 9 10 +184 71 8 10 +170 86 9 11 +170 74 11 11 +181 89 12 16 +204 80 9 14 +211 72 9 12 +210 100 9 16 +185 114 9 14 +217 2 8 10 +231 7 9 9 +221 0 10 8 +250 7 10 9 +243 6 8 10 +261 5 9 12 +274 10 9 11 +288 11 9 12 +303 2 9 13 +311 19 9 13 +312 1 7 11 +324 4 9 13 +340 6 10 14 +363 14 9 11 +373 12 8 11 +377 1 9 10 +247 55 9 12 +226 45 8 9 +263 69 9 12 +250 83 10 13 +225 69 11 15 +226 88 11 15 +265 102 10 13 +231 102 11 15 +226 117 10 12 +205 119 11 14 +196 161 11 13 +190 220 10 11 +190 175 12 15 +236 156 10 13 +213 157 8 6 +190 140 11 12 +178 129 11 14 +232 138 10 14 +247 130 11 11 +262 134 12 14 +104 177 10 13 +423 169 20 24 +469 179 12 13 +461 147 12 15 +449 141 13 14 +402 135 13 12 +439 131 11 10 +389 150 9 10 +362 166 11 12 +413 156 10 12 +370 156 11 13 +360 116 10 15 +279 143 11 17 +301 134 12 16 +296 121 8 9 +282 107 9 9 +309 117 8 10 +338 119 10 13 +348 109 10 13 +345 99 8 9 +305 91 10 16 +323 86 10 14 +341 86 8 10 +330 110 11 11 +307 80 10 13 +287 86 10 13 +272 57 8 11 +227 59 7 8 +321 74 8 10 +255 118 8 7 +404 112 11 15 +377 99 8 11 +390 89 10 13 +378 83 9 10 +353 76 10 11 +365 73 9 9 +396 61 8 14 +405 78 9 10 +401 89 9 9 +406 99 12 12 +417 101 8 13 +429 104 11 14 +428 86 8 11 +415 77 8 7 +444 66 10 12 +435 58 9 14 +446 89 7 10 +463 97 7 11 +465 82 9 12 +453 118 11 14 +453 106 10 11 +477 87 10 11 +476 69 10 12 +387 34 8 10 +403 36 10 10 +390 4 9 10 +402 1 10 10 +419 4 11 12 +435 3 9 12 +447 3 9 15 +463 5 8 11 +454 5 7 10 +472 1 7 10 +483 6 7 10 +492 9 9 11 +483 0 9 6 +501 4 9 11 +512 8 11 13 +481 47 10 11 +494 64 10 12 +500 43 10 9 +507 55 8 12 +514 71 10 14 +500 90 12 13 +494 100 12 11 +484 113 10 10 +478 97 10 11 +495 118 10 11 +502 135 13 19 +521 122 11 18 +526 154 9 12 +546 113 10 14 +535 134 11 12 +544 77 8 11 +538 57 9 11 +544 65 8 10 +524 62 11 14 +534 4 12 14 +555 13 9 13 +565 21 9 11 +581 7 7 9 +584 0 9 11 +626 9 8 9 +617 23 8 11 +632 22 8 10 +642 29 9 10 +612 48 8 9 +547 47 7 10 +569 59 11 14 +556 66 9 10 +584 53 7 8 +599 60 9 10 +638 41 10 10 +653 45 10 13 +649 5 9 10 +637 52 10 14 +626 53 
10 12 +630 70 12 14 +583 69 8 10 +576 79 9 13 +587 84 10 14 +558 80 10 17 +566 105 8 12 +595 103 9 10 +609 93 10 11 +622 97 11 11 +643 93 9 12 +633 87 10 9 +624 109 12 11 +655 64 8 11 +578 24 9 8 +640 166 13 16 +556 177 13 15 +565 125 10 10 +573 139 26 28 +602 142 11 14 +613 151 10 10 +603 117 11 14 +623 131 10 14 +645 120 11 12 +671 111 9 11 +679 115 12 12 +666 90 10 14 +679 90 10 11 +656 92 9 12 +692 90 8 12 +711 111 12 14 +644 151 12 14 +560 209 7 8 +530 183 9 11 +545 216 13 16 +539 140 10 14 +796 190 10 13 +769 183 12 12 +725 171 14 16 +746 164 10 13 +719 133 12 15 +765 124 10 16 +780 139 11 10 +759 145 11 11 +761 166 10 11 +726 104 10 10 +705 92 8 9 +714 98 8 10 +766 100 8 11 +761 114 10 14 +751 118 8 13 +800 115 13 17 +804 93 10 13 +756 89 11 13 +747 87 9 13 +713 77 11 12 +703 78 10 11 +685 61 11 14 +691 71 8 10 +659 77 7 12 +665 65 8 10 +665 44 10 9 +662 25 9 10 +679 12 9 12 +689 2 9 11 +693 25 10 12 +693 37 9 11 +702 51 8 9 +711 41 9 11 +719 49 8 11 +717 18 10 12 +701 14 8 11 +712 4 9 10 +723 2 10 11 +732 13 11 12 +723 14 9 11 +746 10 10 10 +679 57 10 13 +716 64 10 12 +720 61 12 14 +725 73 9 12 +741 59 10 11 +732 47 10 18 +734 25 11 15 +743 33 11 13 +749 46 12 13 +754 64 12 14 +758 72 10 12 +748 82 9 10 +767 56 9 12 +755 17 11 16 +767 28 9 15 +899 194 17 20 +843 181 13 18 +818 157 12 15 +831 131 13 14 +832 112 12 13 +881 128 11 15 +886 147 13 12 +814 185 8 10 +852 160 8 10 +910 171 13 14 +905 135 12 14 +871 110 11 17 +888 102 12 14 +913 109 11 14 +868 92 11 13 +850 97 10 12 +838 88 12 10 +906 87 11 13 +783 78 10 14 +828 71 6 8 +816 79 10 13 +808 73 7 10 +818 60 8 11 +831 53 10 13 +802 57 10 13 +792 64 10 13 +778 67 8 10 +779 44 10 12 +778 39 7 9 +786 31 8 11 +794 46 10 13 +787 17 9 11 +776 12 9 10 +763 3 9 16 +804 12 8 11 +802 3 9 10 +814 14 10 11 +824 28 11 14 +819 39 11 12 +838 31 8 10 +835 17 10 16 +829 9 8 9 +842 4 10 10 +850 53 8 8 +857 59 9 12 +838 65 9 12 +844 60 11 9 +862 79 7 8 +849 73 8 12 +1002 192 15 16 +929 169 14 17 +973 138 12 12 +997 131 14 14 +947 114 14 14 +984 104 12 12 +984 119 13 15 +1004 113 18 20 +932 201 12 14 +936 154 14 17 +969 92 10 12 +980 94 8 13 +990 79 12 15 +985 67 10 12 +963 74 10 10 +921 87 12 18 +945 80 10 15 +921 69 8 10 +900 75 9 11 +870 63 9 10 +878 50 10 12 +889 50 10 12 +906 59 11 11 +915 41 11 11 +926 47 10 14 +944 50 10 13 +947 40 10 10 +972 46 10 14 +977 37 10 10 +970 31 9 11 +949 16 10 11 +976 11 9 8 +963 3 9 11 +942 1 10 8 +911 13 8 11 +929 7 7 8 +881 35 8 8 +862 17 9 10 +855 7 10 16 +877 6 9 12 +889 11 10 10 +866 37 10 12 +960 60 11 14 +946 62 10 15 +939 25 9 11 +862 0 8 6 +889 0 10 8 +604 22 10 11 +535 107 10 13 +830 197 9 12 +1005 6 11 14 +1013 86 11 12 +# 47--Matador_Bullfighter/47_Matador_Bullfighter_matadorbullfighting_47_778.jpg +420 270 166 172 +# 47--Matador_Bullfighter/47_Matador_Bullfighter_matadorbullfighting_47_152.jpg +778 274 28 48 +994 27 19 27 +683 44 25 29 +626 3 24 30 +194 213 30 46 +469 0 20 21 +424 0 18 20 +310 7 20 25 +171 9 19 23 +117 11 16 19 +# 47--Matador_Bullfighter/47_Matador_Bullfighter_matadorbullfighting_47_572.jpg +205 193 35 48 +77 3 31 38 +233 27 25 31 +647 102 24 31 +890 96 23 35 +# 47--Matador_Bullfighter/47_Matador_Bullfighter_matadorbullfighting_47_837.jpg +246 143 213 322 +# 47--Matador_Bullfighter/47_Matador_Bullfighter_Matador_Bullfighter_47_567.jpg +430 119 182 222 +# 47--Matador_Bullfighter/47_Matador_Bullfighter_matadorbullfighting_47_536.jpg +902 342 15 19 +981 287 9 12 +944 256 9 11 +1002 281 10 14 +1016 293 8 14 +832 323 17 23 +777 287 20 28 +724 320 18 17 +704 193 8 13 +854 197 7 9 +665 235 
19 22 +624 264 19 29 +549 291 19 28 +477 318 18 27 +394 289 21 24 +333 289 24 25 +407 190 9 13 +448 199 9 12 +112 416 25 24 +2 394 20 28 +82 190 10 15 +2 191 11 13 +973 340 15 20 +911 324 12 16 +# 47--Matador_Bullfighter/47_Matador_Bullfighter_matadorbullfighting_47_710.jpg +516 356 56 52 +# 48--Parachutist_Paratrooper/48_Parachutist_Paratrooper_Parachutist_Paratrooper_48_122.jpg +406 156 117 161 +# 48--Parachutist_Paratrooper/48_Parachutist_Paratrooper_Parachutist_Paratrooper_48_439.jpg +622 153 136 184 +148 52 45 60 +# 48--Parachutist_Paratrooper/48_Parachutist_Paratrooper_Parachutist_Paratrooper_48_814.jpg +57 412 24 25 +156 409 26 28 +211 411 22 24 +251 402 24 28 +307 400 18 23 +378 209 231 279 +695 351 207 340 +# 48--Parachutist_Paratrooper/48_Parachutist_Paratrooper_Parachutist_Paratrooper_48_1015.jpg +980 310 42 38 +962 318 23 30 +966 197 21 20 +860 320 58 56 +849 329 23 39 +849 211 21 20 +781 213 17 20 +760 217 19 19 +705 227 20 17 +841 345 15 21 +218 404 32 36 +328 390 36 37 +426 366 23 27 +452 354 19 28 +465 363 35 38 +552 356 40 41 +630 352 35 38 +632 210 22 25 +600 248 14 16 +568 227 18 21 +546 240 20 23 +508 245 21 19 +455 224 25 25 +440 241 18 20 +344 212 25 29 +278 263 19 24 +238 274 20 23 +221 249 23 22 +676 345 44 51 +179 386 28 33 +94 134 42 52 +793 340 32 39 +922 321 25 37 +# 48--Parachutist_Paratrooper/48_Parachutist_Paratrooper_Parachutist_Paratrooper_48_321.jpg +125 22 35 52 +351 30 25 47 +895 29 35 48 +# 48--Parachutist_Paratrooper/48_Parachutist_Paratrooper_Parachutist_Paratrooper_48_678.jpg +489 268 18 26 +555 242 15 23 +580 320 16 27 +603 328 19 27 +623 330 22 33 +651 309 52 64 +335 334 43 54 +397 351 27 34 +427 336 21 30 +461 332 21 29 +253 288 74 122 +# 48--Parachutist_Paratrooper/48_Parachutist_Paratrooper_Parachutist_Paratrooper_48_75.jpg +379 90 43 52 +855 155 50 66 +# 48--Parachutist_Paratrooper/48_Parachutist_Paratrooper_Parachutist_Paratrooper_48_656.jpg +566 198 32 31 +616 357 22 41 +# 48--Parachutist_Paratrooper/48_Parachutist_Paratrooper_Parachutist_Paratrooper_48_205.jpg +508 224 96 118 +# 48--Parachutist_Paratrooper/48_Parachutist_Paratrooper_Parachutist_Paratrooper_48_695.jpg +368 398 368 428 +# 48--Parachutist_Paratrooper/48_Parachutist_Paratrooper_Parachutist_Paratrooper_48_892.jpg +188 265 114 161 +473 134 121 151 +658 191 121 175 +# 48--Parachutist_Paratrooper/48_Parachutist_Paratrooper_Parachutist_Paratrooper_48_72.jpg +806 298 13 13 +841 301 10 13 +864 300 13 17 +897 306 9 10 +903 316 19 18 +924 304 9 13 +935 299 13 16 +954 309 12 15 +974 313 13 11 +1011 307 10 11 +0 318 9 21 +15 312 25 22 +54 323 25 23 +51 306 15 16 +95 332 28 26 +133 330 17 18 +165 293 34 32 +239 299 11 10 +251 278 31 30 +334 284 25 27 +405 289 18 26 +425 305 8 10 +456 307 19 21 +482 300 18 20 +502 301 6 8 +667 314 6 8 +604 294 11 16 +630 296 11 14 +613 328 24 30 +705 287 14 16 +764 306 14 18 +789 302 11 15 +575 288 6 17 +541 289 10 18 +# 48--Parachutist_Paratrooper/48_Parachutist_Paratrooper_Parachutist_Paratrooper_48_652.jpg +346 90 125 173 +# 48--Parachutist_Paratrooper/48_Parachutist_Paratrooper_Parachutist_Paratrooper_48_841.jpg +171 668 259 298 +610 243 342 491 +# 48--Parachutist_Paratrooper/48_Parachutist_Paratrooper_Parachutist_Paratrooper_48_164.jpg +192 225 653 758 +# 48--Parachutist_Paratrooper/48_Parachutist_Paratrooper_Parachutist_Paratrooper_48_596.jpg +437 142 135 191 +# 48--Parachutist_Paratrooper/48_Parachutist_Paratrooper_Parachutist_Paratrooper_48_537.jpg +428 361 73 101 +111 295 28 83 +# 
48--Parachutist_Paratrooper/48_Parachutist_Paratrooper_Parachutist_Paratrooper_48_357.jpg +352 357 322 377 +# 48--Parachutist_Paratrooper/48_Parachutist_Paratrooper_Parachutist_Paratrooper_48_785.jpg +224 0 116 178 +# 49--Greeting/49_Greeting_peoplegreeting_49_59.jpg +254 30 64 128 +322 116 70 100 +512 128 76 88 +646 86 72 94 +766 76 74 108 +# 49--Greeting/49_Greeting_peoplegreeting_49_456.jpg +94 266 118 154 +98 168 92 136 +236 208 126 144 +382 318 110 154 +854 310 122 162 +# 49--Greeting/49_Greeting_peoplegreeting_49_203.jpg +396 36 304 406 +# 49--Greeting/49_Greeting_peoplegreeting_49_266.jpg +391 256 50 57 +637 264 41 51 +843 243 48 67 +251 94 54 61 +# 49--Greeting/49_Greeting_peoplegreeting_49_589.jpg +260 196 298 360 +# 49--Greeting/49_Greeting_peoplegreeting_49_759.jpg +5 14 1008 1224 +# 49--Greeting/49_Greeting_peoplegreeting_49_73.jpg +256 106 74 98 +482 88 72 88 +660 58 82 90 +# 49--Greeting/49_Greeting_peoplegreeting_49_56.jpg +582 64 43 61 +710 81 55 65 +803 62 46 56 +911 67 49 59 +439 85 96 116 +220 127 60 100 +# 49--Greeting/49_Greeting_peoplegreeting_49_564.jpg +682 416 140 170 +334 42 202 292 +# 49--Greeting/49_Greeting_peoplegreeting_49_920.jpg +689 204 111 142 +564 612 51 84 +15 681 37 50 +604 274 80 108 +464 297 87 109 +237 184 91 127 +# 49--Greeting/49_Greeting_peoplegreeting_49_787.jpg +222 68 128 204 +672 158 118 176 +# 49--Greeting/49_Greeting_peoplegreeting_49_353.jpg +249 434 14 20 +296 428 13 15 +378 412 10 12 +547 411 12 17 +# 49--Greeting/49_Greeting_peoplegreeting_49_140.jpg +819 62 62 96 +697 69 71 86 +499 110 66 82 +292 97 74 87 +184 50 74 96 +# 49--Greeting/49_Greeting_peoplegreeting_49_923.jpg +392 138 52 86 +494 128 56 70 +544 154 52 68 +600 274 54 78 +658 68 52 94 +# 49--Greeting/49_Greeting_peoplegreeting_49_656.jpg +426 251 107 138 +# 49--Greeting/49_Greeting_peoplegreeting_49_302.jpg +222 598 177 180 +378 369 333 444 +# 49--Greeting/49_Greeting_peoplegreeting_49_124.jpg +490 294 64 100 +# 49--Greeting/49_Greeting_peoplegreeting_49_894.jpg +714 255 49 60 +556 289 36 58 +483 246 49 61 +410 301 50 69 +98 265 38 51 +55 284 46 62 +770 379 61 72 +928 261 47 48 +904 271 41 50 +779 264 53 56 +1008 286 16 28 +38 285 53 72 +647 280 45 58 +566 319 45 56 +# 49--Greeting/49_Greeting_peoplegreeting_49_98.jpg +94 100 100 150 +442 218 74 118 +896 66 82 126 +# 49--Greeting/49_Greeting_peoplegreeting_49_948.jpg +264 2 154 234 +528 208 166 218 +# 49--Greeting/49_Greeting_peoplegreeting_49_218.jpg +391 635 68 78 +685 610 75 92 +195 759 37 48 +853 376 23 31 +581 319 26 35 +749 321 22 26 +0 911 27 50 +476 1182 75 43 +660 1159 53 66 +# 49--Greeting/49_Greeting_peoplegreeting_49_387.jpg +690 246 31 38 +636 243 28 41 +516 225 32 43 +253 279 16 18 +408 294 5 6 +375 289 3 5 +564 261 9 15 +# 49--Greeting/49_Greeting_peoplegreeting_49_991.jpg +396 194 10 12 +404 185 11 14 +828 151 12 12 +813 146 12 10 +# 49--Greeting/49_Greeting_peoplegreeting_49_486.jpg +406 116 146 228 +# 49--Greeting/49_Greeting_peoplegreeting_49_903.jpg +411 107 144 189 +# 49--Greeting/49_Greeting_peoplegreeting_49_896.jpg +312 444 145 136 +# 49--Greeting/49_Greeting_peoplegreeting_49_53.jpg +435 338 60 74 +666 213 46 68 +705 296 38 59 +800 404 57 73 +830 335 40 62 +851 289 45 60 +345 343 46 61 +331 228 51 60 +219 340 47 64 +172 286 43 58 +223 226 39 55 +85 394 59 70 +118 354 40 63 +567 178 44 51 +690 374 51 78 +770 679 69 79 +327 653 70 82 +348 609 55 76 +226 140 21 31 +499 282 47 69 +227 267 37 66 +# 49--Greeting/49_Greeting_peoplegreeting_49_207.jpg +244 158 49 60 +371 122 54 67 +# 
49--Greeting/49_Greeting_peoplegreeting_49_153.jpg +301 157 214 476 +536 262 386 587 +# 49--Greeting/49_Greeting_peoplegreeting_49_162.jpg +236 18 66 132 +290 106 74 100 +484 122 72 84 +674 68 70 92 +806 62 70 112 +# 49--Greeting/49_Greeting_peoplegreeting_49_344.jpg +217 244 90 142 +631 328 93 127 +# 49--Greeting/49_Greeting_peoplegreeting_49_890.jpg +502 122 146 106 +# 49--Greeting/49_Greeting_peoplegreeting_49_307.jpg +684 279 19 24 +717 281 15 26 +621 273 16 23 +871 309 26 28 +872 282 28 32 +270 237 22 29 +362 251 15 25 +176 273 29 36 +162 339 15 23 +# 49--Greeting/49_Greeting_peoplegreeting_49_337.jpg +368 304 238 302 +# 49--Greeting/49_Greeting_peoplegreeting_49_50.jpg +352 116 56 72 +332 206 62 82 +654 256 138 204 +# 49--Greeting/49_Greeting_peoplegreeting_49_943.jpg +353 447 57 73 +289 440 45 64 +191 433 53 63 +122 450 59 68 +343 76 20 26 +255 148 18 21 +207 141 17 18 +149 97 15 21 +# 49--Greeting/49_Greeting_peoplegreeting_49_48.jpg +722 129 57 68 +588 197 50 56 +530 152 42 79 +382 232 38 48 +293 221 33 44 +230 267 30 36 +195 279 23 36 +161 286 23 27 +0 74 154 300 +99 296 19 26 +27 343 83 98 +# 49--Greeting/49_Greeting_peoplegreeting_49_810.jpg +262 172 46 78 +293 195 46 61 +742 180 27 35 +806 173 25 31 +505 152 87 85 +241 38 42 41 +725 11 25 27 +# 49--Greeting/49_Greeting_peoplegreeting_49_192.jpg +274 152 68 110 +454 148 78 106 +802 230 68 94 +# 49--Greeting/49_Greeting_peoplegreeting_49_10.jpg +276 84 88 146 +720 94 96 158 +# 49--Greeting/49_Greeting_peoplegreeting_49_783.jpg +252 110 230 316 +# 5--Car_Accident/5_Car_Accident_Accident_5_234.jpg +189 486 18 23 +270 557 16 21 +304 484 21 21 +316 439 18 19 +30 573 21 22 +430 593 18 26 +422 512 18 19 +351 467 16 18 +427 425 15 22 +263 449 18 12 +422 406 18 18 +474 335 11 12 +337 352 12 12 +345 331 9 15 +659 489 18 22 +811 317 13 15 +786 301 10 12 +866 356 13 15 +797 353 12 15 +888 292 11 14 +803 260 8 11 +876 261 11 12 +808 238 9 13 +776 197 6 8 +783 210 8 9 +800 169 4 6 +774 173 6 7 +379 319 9 14 +448 189 6 8 +421 176 6 8 +432 173 6 6 +451 172 6 7 +519 185 6 7 +486 193 7 9 +520 159 5 6 +123 274 5 7 +773 140 5 8 +807 284 9 10 +546 161 5 7 +504 159 5 6 +483 169 6 7 +491 165 6 6 +539 149 6 6 +246 485 18 23 +306 402 14 18 +145 272 5 5 +418 321 10 10 +522 193 7 7 +534 163 5 5 +496 172 5 5 +553 153 5 6 +559 124 5 6 +# 5--Car_Accident/5_Car_Accident_Car_Crash_5_51.jpg +771 268 17 23 +834 242 16 22 +710 254 17 23 +# 5--Car_Accident/5_Car_Accident_Accident_5_448.jpg +889 407 18 25 +804 435 20 24 +928 562 21 27 +299 540 25 33 +53 438 17 22 +87 396 14 15 +192 450 14 21 +193 424 15 19 +232 377 12 15 +16 344 12 11 +136 349 14 16 +293 392 16 20 +296 431 13 19 +203 416 13 15 +215 410 12 11 +267 347 8 16 +865 234 10 13 +293 304 5 9 +200 308 7 11 +384 195 5 8 +369 200 5 7 +379 214 5 7 +450 223 6 8 +447 203 5 8 +408 210 6 7 +270 212 8 11 +261 224 5 8 +277 232 3 7 +310 244 6 8 +81 371 5 13 +776 185 5 9 +758 183 5 8 +799 192 5 6 +803 184 4 6 +790 188 4 5 +796 183 4 5 +817 190 5 4 +827 187 5 7 +853 194 4 7 +839 196 5 6 +843 200 6 6 +870 197 5 7 +931 198 5 8 +889 203 6 8 +879 208 5 7 +945 210 5 7 +905 205 5 7 +914 201 5 8 +865 206 6 8 +873 229 7 10 +881 232 9 10 +901 231 9 11 +831 207 4 9 +911 220 4 8 +984 204 4 6 +991 202 6 7 +999 200 5 7 +964 251 6 10 +980 319 11 17 +290 477 11 19 +360 298 7 9 +927 261 11 12 +982 238 11 14 +882 214 7 10 +921 228 10 12 +47 317 7 10 +66 299 8 12 +154 313 10 14 +315 262 8 11 +235 335 7 13 +271 264 7 8 +259 267 7 10 +223 267 8 8 +238 241 7 10 +197 219 5 7 +298 261 7 9 +221 229 5 6 +337 243 7 7 +357 249 7 10 +384 245 8 10 
+367 251 7 9 +401 239 7 9 +427 241 8 8 +447 242 7 9 +315 213 6 6 +389 209 7 8 +361 217 6 6 +177 286 8 12 +199 287 8 10 +340 268 7 9 +182 254 7 8 +201 239 7 7 +251 621 16 22 +699 404 12 23 +743 475 12 20 +160 242 6 11 +135 222 5 6 +95 271 5 7 +118 258 5 7 +151 259 6 11 +306 277 6 10 +266 287 7 10 +186 322 8 14 +205 354 10 16 +320 369 7 15 +230 293 7 12 +108 297 6 12 +154 193 7 8 +374 232 7 11 +255 206 5 7 +340 201 7 11 +297 215 5 6 +146 294 8 12 +222 318 4 14 +133 243 6 8 +249 255 8 9 +261 247 8 10 +233 226 6 8 +236 219 6 7 +409 197 7 9 +276 241 7 8 +285 223 8 8 +335 252 6 8 +# 5--Car_Accident/5_Car_Accident_Accident_5_633.jpg +99 88 60 91 +159 287 17 21 +241 281 18 25 +450 466 90 41 +386 301 12 14 +418 298 10 13 +729 323 8 10 +801 328 8 11 +815 320 11 12 +851 331 8 11 +# 5--Car_Accident/5_Car_Accident_Accident_5_869.jpg +373 173 242 328 +# 5--Car_Accident/5_Car_Accident_Accident_5_460.jpg +114 314 20 22 +184 315 9 12 +129 293 13 15 +237 312 10 14 +866 368 13 11 +919 354 9 10 +213 315 7 12 +271 316 10 12 +6 299 9 20 +155 309 10 16 +977 352 4 6 +317 320 8 12 +62 314 12 16 +# 5--Car_Accident/5_Car_Accident_Accident_5_243.jpg +391 114 11 13 +454 101 11 16 +478 107 11 12 +# 5--Car_Accident/5_Car_Accident_Accident_5_982.jpg +322 248 45 56 +475 231 42 51 +733 201 47 56 +# 5--Car_Accident/5_Car_Accident_Accident_5_77.jpg +21 28 15 15 +32 10 13 15 +2 0 13 15 +111 90 23 25 +78 63 23 39 +185 80 22 23 +230 69 22 27 +289 75 22 29 +376 63 23 27 +449 84 19 26 +439 55 22 29 +489 69 21 25 +540 80 20 22 +575 65 24 34 +589 44 19 30 +520 40 14 16 +392 0 12 10 +628 61 23 34 +653 59 22 35 +692 70 22 30 +717 67 22 31 +740 56 10 14 +681 30 8 9 +712 10 10 13 +739 27 9 12 +759 37 24 29 +794 22 11 14 +796 58 15 19 +812 57 23 28 +872 60 12 16 +881 49 10 10 +886 61 26 32 +907 58 21 27 +931 51 33 43 +975 45 23 30 +949 39 25 33 +768 23 8 8 +# 5--Car_Accident/5_Car_Accident_Accident_5_641.jpg +93 266 27 34 +633 280 7 9 +557 259 7 6 +897 304 13 9 +# 5--Car_Accident/5_Car_Accident_Accident_5_203.jpg +624 234 11 17 +786 289 21 31 +1009 344 15 21 +622 338 16 22 +# 5--Car_Accident/5_Car_Accident_Accident_5_735.jpg +975 340 16 20 +# 5--Car_Accident/5_Car_Accident_Car_Crash_5_94.jpg +263 350 33 46 +457 313 8 13 +517 292 13 15 +553 302 7 10 +612 301 6 8 +624 308 6 7 +629 302 5 7 +# 5--Car_Accident/5_Car_Accident_Car_Crash_5_574.jpg +779 152 30 35 +730 153 30 30 +413 153 12 15 +353 162 17 19 +131 174 13 14 +165 177 11 14 +# 5--Car_Accident/5_Car_Accident_Accident_5_202.jpg +758 85 14 18 +822 77 14 19 +898 84 14 17 +991 73 16 22 +710 90 8 8 +794 83 8 10 +743 90 18 22 +934 79 18 20 +# 5--Car_Accident/5_Car_Accident_Car_Crash_5_133.jpg +233 232 17 22 +# 5--Car_Accident/5_Car_Accident_Car_Crash_5_544.jpg +217 171 186 226 +498 52 207 276 +710 191 56 73 +62 232 59 71 +41 273 25 35 +433 176 45 80 +# 5--Car_Accident/5_Car_Accident_Accident_5_948.jpg +665 154 19 20 +# 5--Car_Accident/5_Car_Accident_Accident_5_177.jpg +301 212 19 20 +393 208 18 23 +595 131 17 20 +696 198 19 25 +748 187 15 20 +834 218 22 24 +811 279 22 27 +870 255 19 27 +892 217 20 25 +880 183 14 24 +907 243 17 24 +975 213 19 27 +990 190 15 19 +905 164 13 18 +860 346 20 37 +831 331 14 23 +639 234 14 24 +# 5--Car_Accident/5_Car_Accident_Car_Crash_5_492.jpg +892 51 34 44 +# 5--Car_Accident/5_Car_Accident_Accident_5_244.jpg +462 306 22 22 +493 210 21 24 +626 319 46 33 +649 207 20 27 +706 188 25 26 +798 195 23 29 +568 194 20 22 +841 201 27 41 +920 191 18 24 +998 184 26 58 +# 5--Car_Accident/5_Car_Accident_Car_Crash_5_279.jpg +342 4 28 34 +246 16 26 33 +# 
5--Car_Accident/5_Car_Accident_Accident_5_576.jpg +518 486 64 86 +# 5--Car_Accident/5_Car_Accident_Accident_5_510.jpg +71 237 33 62 +56 254 36 52 +197 290 36 50 +281 279 26 36 +440 244 24 35 +388 240 25 38 +323 286 27 36 +559 262 20 34 +508 553 43 57 +580 472 47 52 +699 267 22 30 +770 263 20 27 +250 258 19 22 +# 5--Car_Accident/5_Car_Accident_Car_Crash_5_868.jpg +302 200 58 78 +# 5--Car_Accident/5_Car_Accident_Accident_5_287.jpg +159 137 9 10 +849 36 17 21 +685 124 7 7 +208 172 7 7 +388 154 8 8 +346 141 5 8 +# 5--Car_Accident/5_Car_Accident_Accident_5_66.jpg +21 241 34 42 +264 141 6 8 +406 140 7 9 +459 145 5 9 +462 146 6 10 +490 148 6 9 +608 143 5 7 +672 148 6 8 +956 225 14 21 +# 5--Car_Accident/5_Car_Accident_Accident_5_340.jpg +39 223 27 30 +102 213 24 28 +116 221 17 23 +168 206 16 20 +196 235 22 27 +266 214 20 25 +344 220 15 21 +294 216 12 15 +7 204 7 8 +15 220 8 12 +0 209 5 11 +510 239 21 25 +555 101 15 17 +688 71 23 26 +842 114 27 32 +167 171 9 7 +228 166 8 8 +197 166 8 10 +394 160 8 10 +437 157 8 9 +168 159 7 8 +341 168 5 7 +949 100 32 35 +886 53 19 27 +985 39 25 27 +294 167 6 6 +764 124 20 22 +# 5--Car_Accident/5_Car_Accident_Accident_5_642.jpg +439 518 26 42 +493 621 14 17 +539 744 13 16 +583 745 12 17 +809 711 20 30 +710 766 14 16 +700 752 16 10 +59 671 14 17 +476 720 10 13 +# 5--Car_Accident/5_Car_Accident_Accident_5_34.jpg +525 70 17 20 +609 66 16 21 +691 52 19 24 +762 68 23 28 +95 80 12 12 +# 5--Car_Accident/5_Car_Accident_Car_Crash_5_610.jpg +679 152 70 77 +331 39 60 68 +# 5--Car_Accident/5_Car_Accident_Accident_5_925.jpg +742 316 158 248 +302 354 182 228 +# 5--Car_Accident/5_Car_Accident_Car_Crash_5_451.jpg +683 163 29 23 +322 147 7 9 +# 5--Car_Accident/5_Car_Accident_Accident_5_628.jpg +44 155 20 25 +78 220 21 27 +177 223 22 27 +155 315 25 30 +267 281 27 31 +231 236 23 27 +282 178 23 28 +327 217 22 29 +374 151 27 32 +380 212 24 28 +439 190 23 29 +583 28 21 37 +854 203 26 32 +915 243 18 24 +935 291 25 30 +373 69 30 34 +0 241 14 38 +91 352 25 29 +# 5--Car_Accident/5_Car_Accident_Car_Crash_5_365.jpg +274 156 39 45 +# 5--Car_Accident/5_Car_Accident_Car_Crash_5_644.jpg +56 176 252 330 +648 118 210 316 +# 5--Car_Accident/5_Car_Accident_Accident_5_388.jpg +67 136 22 22 +137 161 21 26 +215 267 16 23 +# 5--Car_Accident/5_Car_Accident_Accident_5_937.jpg +152 0 27 43 +407 0 27 33 +645 1 32 35 +730 130 33 48 +884 2 31 37 +120 71 38 31 +# 5--Car_Accident/5_Car_Accident_Accident_5_607.jpg +111 22 19 27 +213 0 16 16 +# 5--Car_Accident/5_Car_Accident_Car_Crash_5_457.jpg +284 108 94 158 +472 200 122 156 +# 5--Car_Accident/5_Car_Accident_Accident_5_796.jpg +64 110 72 106 +234 116 52 88 +408 158 58 96 +678 320 130 120 +# 5--Car_Accident/5_Car_Accident_Accident_5_515.jpg +465 221 27 30 +824 208 34 37 +922 475 41 49 +# 5--Car_Accident/5_Car_Accident_Accident_5_668.jpg +503 229 16 21 +570 226 15 22 +723 205 26 26 +882 237 17 23 +# 5--Car_Accident/5_Car_Accident_Car_Crash_5_475.jpg +329 251 24 30 +# 5--Car_Accident/5_Car_Accident_Car_Crash_5_38.jpg +0 229 23 41 +777 241 9 13 +831 235 11 15 +894 244 10 11 +913 234 11 14 +942 249 8 12 +318 115 67 56 +# 5--Car_Accident/5_Car_Accident_Accident_5_474.jpg +241 120 19 24 +348 139 16 29 +554 138 15 19 +593 147 18 27 +608 149 15 22 +621 143 20 23 +802 146 17 22 +178 107 19 30 +# 5--Car_Accident/5_Car_Accident_Car_Crash_5_773.jpg +630 444 13 16 +686 440 14 18 +551 430 13 17 +467 438 13 19 +396 432 14 16 +327 410 14 19 +# 5--Car_Accident/5_Car_Accident_Car_Crash_5_866.jpg +446 336 98 94 +350 82 68 52 +# 5--Car_Accident/5_Car_Accident_Accident_5_777.jpg +502 
151 18 22 +553 163 11 13 +636 154 15 17 +600 171 10 13 +630 174 13 16 +726 172 11 13 +760 151 14 17 +825 155 14 14 +795 152 10 17 +# 50--Celebration_Or_Party/50_Celebration_Or_Party_houseparty_50_19.jpg +133 373 44 53 +212 425 27 41 +518 395 40 50 +453 419 24 33 +405 435 30 40 +862 428 44 60 +654 445 29 44 +747 491 15 21 +# 50--Celebration_Or_Party/50_Celebration_Or_Party_houseparty_50_641.jpg +371 169 317 466 +# 50--Celebration_Or_Party/50_Celebration_Or_Party_houseparty_50_410.jpg +486 99 132 189 +# 50--Celebration_Or_Party/50_Celebration_Or_Party_houseparty_50_323.jpg +554 156 70 98 +676 166 82 98 +474 268 82 116 +# 50--Celebration_Or_Party/50_Celebration_Or_Party_houseparty_50_715.jpg +0 0 0 0 +# 50--Celebration_Or_Party/50_Celebration_Or_Party_birthdayparty_50_283.jpg +20 456 65 112 +120 463 59 69 +243 375 58 70 +185 114 34 56 +364 354 34 54 +437 270 29 36 +458 314 24 25 +466 354 32 39 +652 171 43 49 +593 308 41 43 +810 179 38 42 +888 296 31 38 +782 333 28 51 +767 340 28 47 +# 50--Celebration_Or_Party/50_Celebration_Or_Party_houseparty_50_33.jpg +252 98 72 108 +670 60 78 86 +# 50--Celebration_Or_Party/50_Celebration_Or_Party_houseparty_50_827.jpg +464 95 156 209 +247 247 129 171 +# 50--Celebration_Or_Party/50_Celebration_Or_Party_birthdayparty_50_374.jpg +136 348 70 86 +270 306 54 100 +434 326 56 82 +590 288 70 80 +736 310 58 86 +872 134 102 92 +# 50--Celebration_Or_Party/50_Celebration_Or_Party_houseparty_50_17.jpg +919 385 88 102 +723 362 68 84 +497 388 60 81 +381 395 70 78 +193 383 77 113 +28 397 94 117 +220 64 66 80 +355 161 54 65 +473 106 59 73 +646 137 51 73 +808 97 60 79 +21 71 11 13 +# 50--Celebration_Or_Party/50_Celebration_Or_Party_houseparty_50_609.jpg +214 355 67 71 +313 330 35 62 +288 251 44 50 +413 240 24 28 +143 262 43 44 +128 242 28 35 +93 225 17 36 +212 221 32 37 +333 210 15 22 +539 283 42 48 +600 261 29 37 +724 246 25 30 +801 218 20 22 +467 184 27 30 +890 178 20 22 +844 195 21 26 +773 190 17 26 +809 192 21 17 +714 194 12 16 +740 160 11 13 +835 176 11 11 +787 160 12 14 +873 175 9 14 +834 137 7 12 +821 170 10 12 +881 159 7 7 +895 134 7 15 +535 128 7 11 +543 134 6 10 +480 132 6 8 +490 166 7 11 +194 183 13 14 +175 171 11 13 +217 168 6 8 +85 357 22 43 +637 172 11 12 +678 198 17 23 +998 261 26 38 +873 286 38 38 +772 134 10 10 +851 129 10 14 +931 128 12 15 +996 124 13 15 +587 168 13 16 +613 169 8 13 +551 171 12 13 +542 207 33 31 +140 203 15 18 +190 219 14 22 +150 187 16 16 +226 192 23 33 +# 50--Celebration_Or_Party/50_Celebration_Or_Party_houseparty_50_303.jpg +721 1510 75 101 +481 318 132 167 +226 1510 78 102 +# 50--Celebration_Or_Party/50_Celebration_Or_Party_houseparty_50_647.jpg +323 215 348 511 +# 50--Celebration_Or_Party/50_Celebration_Or_Party_houseparty_50_752.jpg +428 36 110 158 +716 200 84 86 +592 108 72 80 +742 56 56 70 +# 50--Celebration_Or_Party/50_Celebration_Or_Party_houseparty_50_788.jpg +82 80 41 46 +246 111 39 48 +411 114 38 42 +544 138 29 39 +661 96 35 39 +761 148 34 41 +927 145 41 47 +# 50--Celebration_Or_Party/50_Celebration_Or_Party_houseparty_50_790.jpg +402 16 104 126 +664 148 124 130 +# 50--Celebration_Or_Party/50_Celebration_Or_Party_houseparty_50_493.jpg +493 460 38 48 +643 448 32 40 +# 50--Celebration_Or_Party/50_Celebration_Or_Party_houseparty_50_282.jpg +321 231 43 48 +164 249 53 58 +41 241 53 73 +423 151 37 49 +617 137 39 39 +645 191 48 69 +930 223 43 51 +# 50--Celebration_Or_Party/50_Celebration_Or_Party_houseparty_50_173.jpg +321 421 24 30 +587 164 36 45 +763 234 28 36 +707 295 27 33 +255 223 17 23 +404 276 31 31 +857 295 18 21 +# 
50--Celebration_Or_Party/50_Celebration_Or_Party_birthdayparty_50_170.jpg +697 102 41 43 +943 248 32 41 +# 50--Celebration_Or_Party/50_Celebration_Or_Party_birthdayparty_50_134.jpg +445 348 8 17 +421 346 8 16 +852 284 33 35 +799 300 10 8 +653 303 5 8 +724 297 9 11 +724 338 10 11 +632 342 8 12 +617 308 6 8 +599 333 9 13 +563 348 9 13 +509 337 6 8 +486 323 6 6 +553 314 6 8 +14 340 20 16 +83 355 15 17 +# 50--Celebration_Or_Party/50_Celebration_Or_Party_birthdayparty_50_449.jpg +334 374 130 140 +142 450 156 166 +444 232 108 128 +602 78 70 100 +832 34 100 118 +# 50--Celebration_Or_Party/50_Celebration_Or_Party_birthdayparty_50_396.jpg +196 123 120 157 +157 811 162 235 +711 817 106 137 +750 1049 90 81 +# 50--Celebration_Or_Party/50_Celebration_Or_Party_houseparty_50_654.jpg +456 244 212 210 +# 50--Celebration_Or_Party/50_Celebration_Or_Party_birthdayparty_50_75.jpg +401 201 95 104 +58 388 17 18 +114 379 30 33 +174 382 31 35 +239 383 27 36 +239 450 28 38 +184 470 15 21 +158 472 16 19 +127 467 14 17 +56 454 28 36 +62 529 18 23 +181 598 26 34 +117 606 26 30 +52 598 33 40 +# 50--Celebration_Or_Party/50_Celebration_Or_Party_houseparty_50_764.jpg +354 48 147 186 +# 50--Celebration_Or_Party/50_Celebration_Or_Party_birthdayparty_50_43.jpg +200 68 42 58 +298 105 40 55 +462 90 34 50 +203 262 48 63 +396 257 38 50 +518 249 40 52 +627 405 44 56 +489 427 43 72 +353 410 44 60 +715 88 34 49 +580 40 38 46 +754 233 38 58 +640 247 38 51 +772 409 44 57 +# 50--Celebration_Or_Party/50_Celebration_Or_Party_houseparty_50_345.jpg +317 146 176 233 +581 156 187 223 +957 104 59 112 +9 202 53 79 +895 116 46 62 +# 50--Celebration_Or_Party/50_Celebration_Or_Party_houseparty_50_645.jpg +635 366 19 27 +405 174 10 15 +622 347 15 24 +# 50--Celebration_Or_Party/50_Celebration_Or_Party_birthdayparty_50_249.jpg +149 268 61 71 +353 269 53 59 +572 230 53 70 +659 183 55 74 +806 184 59 76 +# 50--Celebration_Or_Party/50_Celebration_Or_Party_houseparty_50_189.jpg +358 176 110 68 +# 50--Celebration_Or_Party/50_Celebration_Or_Party_houseparty_50_326.jpg +61 305 112 142 +356 314 43 53 +825 328 112 146 +# 50--Celebration_Or_Party/50_Celebration_Or_Party_birthdayparty_50_479.jpg +358 546 100 148 +# 50--Celebration_Or_Party/50_Celebration_Or_Party_houseparty_50_595.jpg +146 128 134 192 +692 172 152 214 +# 50--Celebration_Or_Party/50_Celebration_Or_Party_birthdayparty_50_144.jpg +306 296 148 202 +# 50--Celebration_Or_Party/50_Celebration_Or_Party_houseparty_50_408.jpg +414 178 278 370 +682 122 264 358 +# 50--Celebration_Or_Party/50_Celebration_Or_Party_houseparty_50_432.jpg +157 99 9 11 +481 601 22 34 +737 607 9 11 +625 601 7 10 +673 608 6 8 +652 608 8 9 +12 828 14 14 +142 857 11 11 +351 873 3 7 +395 864 8 9 +478 871 6 8 +410 865 7 8 +511 843 35 41 +565 831 32 34 +629 830 30 36 +720 844 38 42 +183 101 9 9 +131 104 7 8 +106 101 7 8 +316 66 22 31 +355 78 17 24 +375 60 18 24 +417 69 24 29 +446 78 18 27 +494 66 14 18 +494 71 42 65 +568 91 21 30 +628 77 38 47 +716 41 58 76 +249 322 19 26 +335 289 30 39 +392 321 18 20 +468 322 28 33 +543 293 18 27 +618 282 20 26 +693 275 22 29 +836 326 9 17 +888 352 10 12 +985 322 13 15 +148 566 13 15 +129 574 7 13 +177 564 9 15 +250 613 27 39 +314 614 22 30 +402 604 24 30 +776 109 9 8 +477 588 18 24 +698 610 6 7 +475 861 7 8 +269 864 13 27 +# 50--Celebration_Or_Party/50_Celebration_Or_Party_birthdayparty_50_196.jpg +187 337 333 408 +# 50--Celebration_Or_Party/50_Celebration_Or_Party_houseparty_50_165.jpg +191 395 15 18 +298 386 48 61 +403 415 50 60 +491 402 14 19 +474 451 42 51 +522 397 39 52 +589 394 42 50 +490 359 
9 13 +# 50--Celebration_Or_Party/50_Celebration_Or_Party_houseparty_50_749.jpg +339 119 138 184 +# 50--Celebration_Or_Party/50_Celebration_Or_Party_houseparty_50_180.jpg +481 146 127 175 +# 50--Celebration_Or_Party/50_Celebration_Or_Party_birthdayparty_50_213.jpg +114 158 96 168 +200 324 102 134 +416 272 114 140 +596 220 96 158 +696 198 102 176 +938 438 66 152 +# 50--Celebration_Or_Party/50_Celebration_Or_Party_houseparty_50_649.jpg +412 133 175 261 +# 50--Celebration_Or_Party/50_Celebration_Or_Party_houseparty_50_720.jpg +371 81 128 192 +# 50--Celebration_Or_Party/50_Celebration_Or_Party_birthdayparty_50_464.jpg +274 416 138 156 +# 50--Celebration_Or_Party/50_Celebration_Or_Party_houseparty_50_488.jpg +316 98 240 278 +614 92 198 270 +# 50--Celebration_Or_Party/50_Celebration_Or_Party_houseparty_50_679.jpg +126 52 190 230 +314 154 216 308 +524 214 210 310 +730 98 222 338 +# 50--Celebration_Or_Party/50_Celebration_Or_Party_birthdayparty_50_18.jpg +133 403 27 40 +233 377 25 30 +335 366 25 29 +389 367 23 29 +468 350 15 31 +544 354 20 21 +610 345 21 26 +639 342 20 21 +718 335 18 25 +808 347 18 23 +849 376 22 29 +764 400 18 29 +927 378 17 25 +666 400 17 27 +561 430 15 26 +# 50--Celebration_Or_Party/50_Celebration_Or_Party_houseparty_50_735.jpg +200 27 379 552 +# 51--Dresses/51_Dresses_wearingdress_51_280.jpg +451 219 108 126 +765 242 60 67 +953 259 67 84 +648 381 48 52 +30 57 105 116 +846 422 32 43 +260 460 28 32 +# 51--Dresses/51_Dresses_wearingdress_51_692.jpg +167 292 115 176 +679 100 127 162 +# 51--Dresses/51_Dresses_wearingdress_51_1041.jpg +593 39 130 178 +# 51--Dresses/51_Dresses_wearingdress_51_789.jpg +367 81 123 175 +# 51--Dresses/51_Dresses_wearingdress_51_105.jpg +323 83 147 194 +566 266 134 191 +# 51--Dresses/51_Dresses_wearingdress_51_727.jpg +274 76 104 178 +602 40 64 82 +796 22 58 96 +# 51--Dresses/51_Dresses_wearingdress_51_869.jpg +416 93 144 224 +6 10 112 182 +# 51--Dresses/51_Dresses_wearingdress_51_606.jpg +254 216 98 139 +620 74 115 161 +# 51--Dresses/51_Dresses_wearingdress_51_815.jpg +231 153 285 336 +# 51--Dresses/51_Dresses_wearingdress_51_633.jpg +430 94 108 152 +# 51--Dresses/51_Dresses_wearingdress_51_221.jpg +146 100 88 114 +352 148 80 92 +732 54 58 90 +918 102 58 68 +# 51--Dresses/51_Dresses_wearingdress_51_599.jpg +196 64 148 209 +703 48 119 177 +831 3 96 109 +# 51--Dresses/51_Dresses_wearingdress_51_689.jpg +559 6 75 129 +462 153 117 159 +# 51--Dresses/51_Dresses_wearingdress_51_465.jpg +349 198 323 454 +# 51--Dresses/51_Dresses_wearingdress_51_150.jpg +190 64 82 112 +720 52 72 114 +# 51--Dresses/51_Dresses_wearingdress_51_610.jpg +122 26 62 90 +396 32 56 82 +628 30 56 78 +870 42 56 88 +# 51--Dresses/51_Dresses_wearingdress_51_13.jpg +288 204 277 348 +# 51--Dresses/51_Dresses_wearingdress_51_161.jpg +389 130 202 289 +# 51--Dresses/51_Dresses_wearingdress_51_7.jpg +775 22 71 83 +958 125 36 47 +532 60 34 47 +492 88 27 33 +293 21 63 82 +375 99 21 34 +200 84 18 23 +168 83 19 23 +57 64 19 29 +119 72 17 23 +8 59 19 28 +902 130 31 32 +# 51--Dresses/51_Dresses_wearingdress_51_306.jpg +419 141 104 141 +# 51--Dresses/51_Dresses_wearingdress_51_464.jpg +22 376 147 173 +392 214 267 353 +743 412 61 75 +# 51--Dresses/51_Dresses_wearingdress_51_492.jpg +262 120 175 259 +358 166 256 328 +741 379 78 90 +# 51--Dresses/51_Dresses_wearingdress_51_106.jpg +511 122 116 125 +# 51--Dresses/51_Dresses_wearingdress_51_685.jpg +896 27 51 75 +661 61 58 70 +428 45 55 71 +283 46 49 66 +93 48 53 64 +# 51--Dresses/51_Dresses_wearingdress_51_1031.jpg +440 112 168 236 +# 
51--Dresses/51_Dresses_wearingdress_51_904.jpg +978 68 46 86 +713 61 68 90 +628 75 70 96 +560 89 66 92 +220 57 71 89 +# 51--Dresses/51_Dresses_wearingdress_51_612.jpg +177 9 190 246 +# 51--Dresses/51_Dresses_wearingdress_51_536.jpg +450 91 118 163 +# 51--Dresses/51_Dresses_wearingdress_51_445.jpg +184 317 75 115 +344 307 85 101 +861 307 85 101 +# 51--Dresses/51_Dresses_wearingdress_51_830.jpg +567 32 56 76 +270 13 85 67 +935 40 27 33 +1003 25 20 31 +796 31 26 31 +# 51--Dresses/51_Dresses_wearingdress_51_386.jpg +357 203 384 540 +# 51--Dresses/51_Dresses_wearingdress_51_1012.jpg +519 173 112 141 +123 470 34 46 +86 499 31 51 +40 513 35 44 +0 544 43 55 +168 472 33 40 +231 430 29 38 +251 450 26 38 +# 51--Dresses/51_Dresses_wearingdress_51_377.jpg +200 810 112 202 +298 770 150 230 +306 308 390 562 +790 640 166 246 +2 564 104 328 +# 51--Dresses/51_Dresses_wearingdress_51_327.jpg +461 139 139 186 +# 51--Dresses/51_Dresses_wearingdress_51_388.jpg +598 308 188 238 +# 51--Dresses/51_Dresses_wearingdress_51_763.jpg +120 128 220 262 +546 170 256 288 +# 51--Dresses/51_Dresses_wearingdress_51_837.jpg +486 108 120 159 +# 51--Dresses/51_Dresses_wearingdress_51_183.jpg +74 186 102 170 +282 74 112 154 +466 68 86 126 +614 60 110 120 +876 112 120 150 +# 51--Dresses/51_Dresses_wearingdress_51_139.jpg +464 75 139 176 +# 51--Dresses/51_Dresses_wearingdress_51_348.jpg +480 109 122 163 +# 51--Dresses/51_Dresses_wearingdress_51_883.jpg +228 160 88 126 +660 68 112 148 +# 51--Dresses/51_Dresses_wearingdress_51_96.jpg +384 315 249 321 +# 51--Dresses/51_Dresses_wearingdress_51_340.jpg +564 348 69 90 +# 51--Dresses/51_Dresses_wearingdress_51_737.jpg +473 178 80 101 +763 383 57 78 +# 51--Dresses/51_Dresses_wearingdress_51_736.jpg +492 106 173 201 +# 51--Dresses/51_Dresses_wearingdress_51_339.jpg +669 304 99 157 +443 187 160 238 +# 51--Dresses/51_Dresses_wearingdress_51_691.jpg +202 67 80 114 +683 69 93 128 +# 51--Dresses/51_Dresses_wearingdress_51_398.jpg +378 201 396 501 +# 51--Dresses/51_Dresses_wearingdress_51_113.jpg +345 72 126 177 +# 51--Dresses/51_Dresses_wearingdress_51_654.jpg +869 46 43 48 +607 17 25 29 +311 33 25 22 +77 34 27 22 +76 457 44 50 +373 451 23 26 +648 438 21 23 +905 438 21 25 +# 51--Dresses/51_Dresses_wearingdress_51_748.jpg +472 160 123 147 +# 51--Dresses/51_Dresses_wearingdress_51_588.jpg +160 142 123 173 +654 93 108 148 +# 51--Dresses/51_Dresses_wearingdress_51_335.jpg +405 149 114 151 +601 214 42 50 +579 122 47 57 +727 75 32 47 +334 102 50 57 +212 100 58 63 +279 87 36 46 +395 76 26 30 +# 51--Dresses/51_Dresses_wearingdress_51_670.jpg +334 135 208 288 +# 51--Dresses/51_Dresses_wearingdress_51_549.jpg +96 28 72 106 +350 48 66 100 +606 38 68 106 +866 40 80 118 +# 51--Dresses/51_Dresses_wearingdress_51_226.jpg +296 268 218 281 +# 51--Dresses/51_Dresses_wearingdress_51_178.jpg +335 90 155 206 +# 51--Dresses/51_Dresses_wearingdress_51_17.jpg +628 84 216 250 +# 51--Dresses/51_Dresses_wearingdress_51_580.jpg +449 111 118 161 +# 51--Dresses/51_Dresses_wearingdress_51_233.jpg +550 87 134 199 +# 51--Dresses/51_Dresses_wearingdress_51_451.jpg +382 190 184 260 +# 51--Dresses/51_Dresses_wearingdress_51_414.jpg +697 97 79 101 +516 22 121 227 +425 63 58 71 +380 48 46 67 +335 120 61 80 +194 99 94 110 +134 0 55 40 +257 0 40 26 +79 120 61 76 +1 137 74 97 +12 70 62 81 +# 51--Dresses/51_Dresses_wearingdress_51_512.jpg +429 263 225 280 +# 51--Dresses/51_Dresses_wearingdress_51_77.jpg +408 243 26 24 +502 85 7 11 +# 51--Dresses/51_Dresses_wearingdress_51_268.jpg +711 83 54 90 +154 68 82 116 +333 120 51 64 +68 99 30 43 
+# 51--Dresses/51_Dresses_wearingdress_51_741.jpg +282 216 405 507 +# 51--Dresses/51_Dresses_wearingdress_51_94.jpg +224 16 118 158 +# 51--Dresses/51_Dresses_wearingdress_51_140.jpg +447 92 105 127 +615 150 25 30 +130 361 42 56 +186 299 38 52 +266 325 31 37 +# 51--Dresses/51_Dresses_wearingdress_51_672.jpg +154 69 130 167 +690 66 111 167 +594 204 64 82 +# 51--Dresses/51_Dresses_wearingdress_51_914.jpg +823 97 66 72 +630 84 30 38 +452 47 52 68 +126 60 65 75 +445 261 37 50 +448 470 25 48 +819 776 132 158 +548 768 80 93 +97 860 105 116 +# 51--Dresses/51_Dresses_wearingdress_51_739.jpg +436 123 136 172 +726 219 80 94 +908 278 35 42 +868 240 30 44 +579 297 45 58 +640 259 30 38 +682 184 33 36 +270 296 32 39 +225 242 27 33 +253 172 28 35 +360 227 23 37 +50 257 55 96 +996 256 28 35 +# 51--Dresses/51_Dresses_wearingdress_51_874.jpg +144 381 526 652 +# 51--Dresses/51_Dresses_wearingdress_51_1035.jpg +855 190 94 88 +576 283 95 91 +411 53 82 110 +170 245 112 119 +734 30 19 23 +598 41 12 19 +643 36 16 23 +263 100 19 20 +288 56 17 21 +325 54 20 16 +# 52--Photographers/52_Photographers_photographertakingphoto_52_721.jpg +293 345 407 504 +# 52--Photographers/52_Photographers_photographertakingphoto_52_359.jpg +558 74 58 78 +# 52--Photographers/52_Photographers_photographertakingphoto_52_30.jpg +288 244 100 114 +# 52--Photographers/52_Photographers_taketouristphotos_52_80.jpg +335 520 47 51 +871 263 7 9 +# 52--Photographers/52_Photographers_taketouristphotos_52_661.jpg +326 192 32 44 +# 52--Photographers/52_Photographers_photographertakingphoto_52_358.jpg +400 168 116 158 +# 52--Photographers/52_Photographers_photographertakingphoto_52_695.jpg +593 129 173 227 +170 164 72 98 +# 52--Photographers/52_Photographers_photographertakingphoto_52_125.jpg +820 128 74 96 +# 52--Photographers/52_Photographers_photographertakingphoto_52_228.jpg +473 143 31 42 +531 191 32 37 +594 161 30 43 +346 152 33 46 +389 168 30 40 +774 82 33 66 +# 52--Photographers/52_Photographers_taketouristphotos_52_97.jpg +246 286 184 158 +# 52--Photographers/52_Photographers_taketouristphotos_52_281.jpg +414 654 34 46 +617 585 44 60 +151 640 29 40 +# 52--Photographers/52_Photographers_photographertakingphoto_52_316.jpg +458 266 154 216 +92 112 124 162 +786 106 118 190 +# 52--Photographers/52_Photographers_photographertakingphoto_52_61.jpg +440 220 148 310 +# 52--Photographers/52_Photographers_photographertakingphoto_52_653.jpg +476 150 270 362 +# 52--Photographers/52_Photographers_taketouristphotos_52_328.jpg +266 118 130 238 +# 52--Photographers/52_Photographers_photographertakingphoto_52_666.jpg +515 204 370 512 +217 222 367 499 +# 52--Photographers/52_Photographers_photographertakingphoto_52_456.jpg +556 212 198 246 +# 52--Photographers/52_Photographers_photographertakingphoto_52_506.jpg +432 303 41 55 +# 52--Photographers/52_Photographers_taketouristphotos_52_487.jpg +635 144 48 59 +940 380 70 65 +1 343 32 51 +# 52--Photographers/52_Photographers_taketouristphotos_52_266.jpg +684 619 109 135 +610 457 52 80 +324 388 24 32 +240 413 29 39 +460 369 30 43 +380 339 7 20 +173 210 14 21 +278 338 11 19 +233 345 5 9 +346 380 15 20 +56 332 4 8 +444 381 20 26 +419 389 13 17 +412 342 9 12 +# 52--Photographers/52_Photographers_photographertakingphoto_52_780.jpg +754 290 76 90 +# 52--Photographers/52_Photographers_photographertakingphoto_52_76.jpg +638 132 82 80 +# 52--Photographers/52_Photographers_taketouristphotos_52_51.jpg +445 934 36 45 +# 52--Photographers/52_Photographers_taketouristphotos_52_3.jpg +267 52 80 108 +703 150 82 112 +# 
52--Photographers/52_Photographers_photographertakingphoto_52_90.jpg +190 152 254 300 +# 52--Photographers/52_Photographers_photographertakingphoto_52_416.jpg +520 108 162 206 +# 52--Photographers/52_Photographers_taketouristphotos_52_331.jpg +444 386 25 31 +482 358 29 31 +# 52--Photographers/52_Photographers_taketouristphotos_52_536.jpg +514 219 144 232 +# 52--Photographers/52_Photographers_photographertakingphoto_52_815.jpg +235 292 487 644 +# 52--Photographers/52_Photographers_taketouristphotos_52_141.jpg +118 392 66 76 +436 386 96 84 +# 52--Photographers/52_Photographers_photographertakingphoto_52_219.jpg +187 278 41 63 +301 341 42 62 +421 392 39 51 +565 353 41 55 +707 430 38 45 +661 283 43 56 +766 228 40 54 +# 52--Photographers/52_Photographers_taketouristphotos_52_659.jpg +250 130 100 144 +# 52--Photographers/52_Photographers_photographertakingphoto_52_84.jpg +300 122 172 272 +# 52--Photographers/52_Photographers_photographertakingphoto_52_743.jpg +690 168 124 180 +# 52--Photographers/52_Photographers_photographertakingphoto_52_113.jpg +391 159 169 254 +# 52--Photographers/52_Photographers_photographertakingphoto_52_263.jpg +627 262 31 35 +685 247 28 39 +753 283 26 37 +800 283 25 43 +353 295 22 34 +403 291 21 33 +132 296 25 31 +37 275 23 33 +62 388 21 26 +471 303 12 21 +316 308 14 17 +269 297 16 18 +# 52--Photographers/52_Photographers_taketouristphotos_52_86.jpg +404 338 102 132 +# 52--Photographers/52_Photographers_photographertakingphoto_52_315.jpg +170 166 86 130 +472 166 56 78 +688 98 90 136 +234 114 62 94 +# 52--Photographers/52_Photographers_photographertakingphoto_52_635.jpg +746 280 18 21 +798 285 15 22 +219 124 24 30 +259 122 26 35 +363 340 68 95 +123 333 81 90 +# 52--Photographers/52_Photographers_photographertakingphoto_52_759.jpg +163 85 665 939 +# 52--Photographers/52_Photographers_taketouristphotos_52_15.jpg +237 325 135 185 +769 398 138 221 +# 52--Photographers/52_Photographers_photographertakingphoto_52_578.jpg +66 144 80 110 +110 110 74 106 +276 164 76 100 +352 124 56 80 +498 250 64 82 +572 140 56 86 +758 170 52 74 +806 138 54 90 +948 168 62 70 +# 52--Photographers/52_Photographers_photographertakingphoto_52_776.jpg +327 240 510 566 +# 52--Photographers/52_Photographers_taketouristphotos_52_123.jpg +199 126 139 169 +626 196 69 126 +750 250 105 154 +# 52--Photographers/52_Photographers_photographertakingphoto_52_310.jpg +642 184 86 104 +# 52--Photographers/52_Photographers_photographertakingphoto_52_428.jpg +504 310 122 114 +# 52--Photographers/52_Photographers_taketouristphotos_52_288.jpg +68 209 13 15 +169 200 11 14 +268 219 13 15 +844 203 32 45 +883 30 24 39 +987 73 30 45 +767 10 30 38 +722 25 20 32 +1000 305 23 39 +830 385 35 51 +703 299 32 49 +# 52--Photographers/52_Photographers_photographertakingphoto_52_568.jpg +379 331 379 435 +# 52--Photographers/52_Photographers_taketouristphotos_52_159.jpg +288 785 36 49 +438 788 39 53 +584 788 35 49 +747 764 37 55 +# 52--Photographers/52_Photographers_photographertakingphoto_52_303.jpg +438 222 70 88 +# 52--Photographers/52_Photographers_photographertakingphoto_52_96.jpg +180 331 20 28 +# 52--Photographers/52_Photographers_taketouristphotos_52_208.jpg +242 86 136 185 +672 79 37 54 +820 59 38 63 +960 69 40 52 +# 52--Photographers/52_Photographers_photographertakingphoto_52_755.jpg +468 250 122 88 +# 52--Photographers/52_Photographers_photographertakingphoto_52_701.jpg +260 138 108 150 +288 286 100 112 +# 52--Photographers/52_Photographers_photographertakingphoto_52_809.jpg +666 340 282 336 +378 194 318 322 +18 114 350 470 
+# 52--Photographers/52_Photographers_photographertakingphoto_52_807.jpg +338 26 521 720 +# 52--Photographers/52_Photographers_photographertakingphoto_52_479.jpg +210 282 108 158 +300 96 106 144 +368 288 108 156 +504 198 156 168 +# 52--Photographers/52_Photographers_photographertakingphoto_52_130.jpg +388 348 16 31 +486 382 13 19 +654 346 9 11 +# 53--Raid/53_Raid_policeraid_53_47.jpg +253 100 14 21 +532 215 31 39 +# 53--Raid/53_Raid_policeraid_53_674.jpg +70 58 29 31 +164 92 21 28 +282 48 57 67 +500 62 43 52 +639 90 26 30 +844 64 31 39 +# 53--Raid/53_Raid_policeraid_53_770.jpg +200 58 48 72 +396 88 53 69 +941 179 6 6 +# 53--Raid/53_Raid_policeraid_53_171.jpg +442 56 116 144 +926 20 90 126 +# 53--Raid/53_Raid_policeraid_53_458.jpg +566 144 54 71 +310 235 33 48 +# 53--Raid/53_Raid_policeraid_53_574.jpg +136 34 84 114 +340 38 68 104 +# 53--Raid/53_Raid_policeraid_53_649.jpg +691 39 88 140 +167 45 80 136 +# 53--Raid/53_Raid_policeraid_53_445.jpg +211 194 45 59 +304 198 40 49 +364 132 56 72 +422 159 28 49 +116 164 46 62 +66 180 42 57 +615 142 47 61 +512 190 37 50 +764 156 47 55 +978 154 36 51 +820 129 54 67 +529 281 110 138 +# 53--Raid/53_Raid_policeraid_53_364.jpg +135 43 59 100 +624 94 31 39 +476 82 61 85 +759 58 51 69 +965 40 56 91 +# 53--Raid/53_Raid_policeraid_53_208.jpg +97 100 41 53 +448 77 50 73 +396 68 36 62 +353 68 47 69 +309 84 38 51 +697 64 48 63 +580 40 45 62 +892 53 48 73 +# 53--Raid/53_Raid_policeraid_53_6.jpg +381 242 23 31 +774 286 30 24 +841 357 17 33 +942 232 25 32 +972 234 23 34 +# 53--Raid/53_Raid_policeraid_53_280.jpg +504 124 60 100 +# 53--Raid/53_Raid_policeraid_53_854.jpg +621 281 41 50 +732 225 30 44 +833 383 39 50 +# 53--Raid/53_Raid_policeraid_53_686.jpg +164 226 74 112 +376 306 60 82 +792 140 80 112 +632 294 60 86 +# 53--Raid/53_Raid_policeraid_53_827.jpg +637 174 39 52 +563 180 36 48 +960 195 47 69 +861 186 38 54 +# 53--Raid/53_Raid_policeraid_53_438.jpg +244 32 376 516 +# 53--Raid/53_Raid_policeraid_53_178.jpg +613 123 59 89 +889 283 29 38 +# 53--Raid/53_Raid_policeraid_53_736.jpg +111 221 31 35 +188 223 32 35 +284 201 35 39 +490 185 43 51 +376 186 37 43 +619 316 39 46 +836 337 42 50 +# 53--Raid/53_Raid_policeraid_53_54.jpg +645 173 57 78 +355 166 33 45 +156 164 22 36 +121 165 34 39 +# 53--Raid/53_Raid_policeraid_53_619.jpg +224 318 56 88 +714 316 58 98 +758 106 56 82 +444 108 54 80 +# 53--Raid/53_Raid_policeraid_53_256.jpg +580 108 58 98 +650 72 62 92 +# 53--Raid/53_Raid_policeraid_53_599.jpg +130 160 70 99 +242 327 23 25 +356 304 26 35 +581 266 16 23 +526 275 13 16 +485 287 18 19 +507 299 19 17 +619 264 17 22 +668 255 12 17 +725 243 12 14 +877 281 12 12 +837 269 15 21 +849 298 21 25 +821 310 19 27 +639 327 19 27 +# 53--Raid/53_Raid_policeraid_53_92.jpg +496 219 38 55 +640 288 30 39 +716 263 25 34 +585 288 15 21 +# 53--Raid/53_Raid_policeraid_53_489.jpg +55 133 36 58 +373 172 6 11 +70 227 19 33 +451 271 11 25 +688 160 13 30 +# 53--Raid/53_Raid_policeraid_53_858.jpg +49 216 19 29 +109 217 57 73 +293 222 23 33 +409 241 25 32 +244 208 28 56 +831 126 45 58 +19 62 8 11 +# 53--Raid/53_Raid_policeraid_53_272.jpg +312 230 18 30 +583 235 22 28 +903 189 18 25 +# 53--Raid/53_Raid_policeraid_53_543.jpg +76 262 42 48 +153 301 24 23 +292 283 15 19 +166 161 19 25 +51 171 21 19 +0 181 36 88 +394 264 28 32 +461 290 13 28 +487 296 18 19 +531 288 16 23 +550 291 7 11 +584 183 94 125 +732 230 51 56 +796 223 48 80 +535 347 20 24 +563 310 7 7 +# 53--Raid/53_Raid_policeraid_53_396.jpg +300 2 492 542 +# 53--Raid/53_Raid_policeraid_53_14.jpg +358 224 69 75 +544 267 45 59 +767 0 56 35 +504 
0 41 38 +# 53--Raid/53_Raid_policeraid_53_385.jpg +712 46 88 128 +502 102 92 132 +206 172 102 128 +398 2 80 72 +# 53--Raid/53_Raid_policeraid_53_805.jpg +190 215 26 40 +409 182 37 46 +823 195 27 38 +481 177 36 33 +# 53--Raid/53_Raid_policeraid_53_928.jpg +243 196 107 72 +699 136 35 56 +767 135 50 57 +# 53--Raid/53_Raid_policeraid_53_555.jpg +278 110 52 74 +# 53--Raid/53_Raid_policeraid_53_696.jpg +267 205 26 34 +124 211 27 36 +366 236 27 35 +422 220 24 34 +492 249 24 28 +452 245 20 24 +390 239 20 21 +554 250 22 30 +726 249 36 34 +646 269 23 24 +866 280 24 24 +891 280 17 19 +935 261 26 43 +347 228 21 27 +193 199 35 37 +# 53--Raid/53_Raid_policeraid_53_107.jpg +229 92 34 51 +851 318 43 23 +# 53--Raid/53_Raid_policeraid_53_212.jpg +343 127 20 32 +621 94 62 76 +485 88 57 68 +285 299 53 78 +647 322 65 79 +879 88 54 82 +229 92 25 56 +# 53--Raid/53_Raid_policeraid_53_207.jpg +123 111 23 41 +107 49 7 12 +65 52 5 10 +51 58 6 7 +147 45 8 12 +167 41 8 15 +198 41 6 9 +223 39 7 13 +264 36 7 13 +245 40 7 10 +123 52 6 9 +38 61 5 7 +# 53--Raid/53_Raid_policeraid_53_597.jpg +465 200 204 260 +# 53--Raid/53_Raid_policeraid_53_829.jpg +668 65 47 57 +# 53--Raid/53_Raid_policeraid_53_471.jpg +376 90 228 318 +# 53--Raid/53_Raid_policeraid_53_43.jpg +88 88 40 46 +261 122 40 49 +163 185 44 34 +470 118 35 44 +618 129 40 55 +654 39 30 39 +771 88 45 55 +# 53--Raid/53_Raid_policeraid_53_860.jpg +250 335 427 624 +# 53--Raid/53_Raid_policeraid_53_340.jpg +459 45 40 47 +551 108 55 65 +# 53--Raid/53_Raid_policeraid_53_368.jpg +30 112 19 20 +56 104 12 16 +217 100 13 16 +364 123 8 11 +304 118 9 11 +528 134 9 7 +545 135 37 45 +679 139 8 11 +722 149 12 11 +755 155 35 31 +817 69 108 135 +711 143 10 14 +# 53--Raid/53_Raid_policeraid_53_951.jpg +416 160 238 380 +# 54--Rescue/54_Rescue_rescuepeople_54_254.jpg +319 88 173 259 +# 54--Rescue/54_Rescue_rescuepeople_54_135.jpg +499 0 55 62 +329 51 36 44 +869 210 26 46 +942 220 26 37 +873 485 66 72 +59 557 45 55 +127 371 27 54 +150 177 26 42 +733 412 30 41 +684 128 14 16 +253 427 29 34 +162 224 30 48 +995 235 20 26 +# 54--Rescue/54_Rescue_rescuepeople_54_711.jpg +408 375 14 18 +483 305 10 20 +421 318 11 16 +# 54--Rescue/54_Rescue_rescuepeople_54_581.jpg +623 188 12 15 +653 197 9 14 +671 200 9 8 +691 191 9 14 +748 199 9 11 +803 196 9 11 +814 183 11 12 +364 230 10 9 +849 183 10 10 +902 207 9 12 +883 197 8 11 +935 198 9 11 +915 197 9 12 +925 208 7 12 +957 193 11 13 +966 192 12 12 +975 200 11 12 +907 373 10 12 +997 368 9 12 +1011 371 12 14 +1015 307 9 14 +399 356 7 13 +481 316 11 15 +479 352 10 15 +535 314 14 17 +506 181 10 13 +842 370 9 12 +815 368 8 11 +792 364 10 12 +789 390 8 12 +919 315 9 12 +964 308 10 12 +887 313 9 12 +874 378 9 12 +973 423 10 12 +480 179 8 12 +471 197 9 12 +452 193 8 12 +434 183 10 12 +403 182 9 9 +393 177 9 11 +384 189 7 10 +234 195 9 11 +248 187 8 10 +287 191 10 11 +304 191 8 11 +323 198 9 10 +210 202 7 10 +193 184 8 11 +32 185 8 11 +7 190 8 10 +105 194 9 11 +110 220 8 9 +97 360 10 11 +92 337 10 12 +56 348 8 12 +0 336 8 12 +118 387 7 9 +167 353 11 13 +203 329 8 11 +212 331 10 13 +234 353 9 11 +262 331 10 11 +315 339 11 13 +360 342 10 12 +363 361 7 10 +373 354 10 14 +335 357 7 10 +420 331 11 14 +594 355 10 14 +536 369 10 12 +539 337 10 12 +531 429 10 12 +620 364 7 12 +642 367 10 11 +690 374 9 12 +698 357 8 12 +742 372 11 13 +# 54--Rescue/54_Rescue_rescuepeople_54_222.jpg +509 213 41 57 +677 43 47 66 +845 40 51 63 +325 80 52 63 +217 67 40 49 +173 46 39 53 +124 32 31 39 +51 32 35 46 +# 54--Rescue/54_Rescue_firemanrescue_54_724.jpg +330 158 80 120 +# 
54--Rescue/54_Rescue_rescuepeople_54_589.jpg +108 386 66 82 +292 358 58 76 +460 380 68 78 +602 374 64 74 +830 412 62 82 +954 496 54 56 +# 54--Rescue/54_Rescue_rescuepeople_54_845.jpg +258 210 74 106 +592 212 68 116 +# 54--Rescue/54_Rescue_rescuepeople_54_8.jpg +164 178 142 140 +442 190 134 146 +524 74 118 150 +# 54--Rescue/54_Rescue_firemanrescue_54_789.jpg +358 36 208 310 +# 54--Rescue/54_Rescue_rescuepeople_54_143.jpg +239 135 47 54 +421 205 27 46 +371 70 36 51 +# 54--Rescue/54_Rescue_firemanrescue_54_458.jpg +162 236 174 220 +# 54--Rescue/54_Rescue_firemanrescue_54_153.jpg +264 114 104 152 +# 54--Rescue/54_Rescue_rescuepeople_54_526.jpg +472 21 59 75 +# 54--Rescue/54_Rescue_rescuepeople_54_328.jpg +552 132 142 194 +# 54--Rescue/54_Rescue_rescuepeople_54_102.jpg +408 251 30 41 +547 234 12 18 +971 235 6 8 +989 229 7 10 +1007 231 6 9 +88 232 7 10 +190 233 6 9 +234 241 7 9 +268 242 11 14 +# 54--Rescue/54_Rescue_rescuepeople_54_191.jpg +448 285 50 58 +666 280 33 34 +810 254 58 66 +787 301 16 23 +932 273 40 49 +81 362 18 18 +979 302 15 13 +670 489 40 34 +# 54--Rescue/54_Rescue_rescuepeople_54_108.jpg +214 302 222 312 +449 253 268 328 +# 54--Rescue/54_Rescue_rescuepeople_54_1035.jpg +376 269 296 433 +# 54--Rescue/54_Rescue_firemanrescue_54_420.jpg +416 152 260 274 +# 54--Rescue/54_Rescue_firemanrescue_54_103.jpg +439 151 124 148 +# 54--Rescue/54_Rescue_rescuepeople_54_774.jpg +211 113 58 78 +345 122 53 64 +624 99 61 83 +694 123 46 58 +881 83 58 76 +# 54--Rescue/54_Rescue_firemanrescue_54_939.jpg +156 108 72 102 +# 54--Rescue/54_Rescue_rescuepeople_54_924.jpg +306 251 31 42 +473 246 30 32 +637 303 36 35 +842 258 21 28 +# 54--Rescue/54_Rescue_rescuepeople_54_493.jpg +317 102 33 42 +# 54--Rescue/54_Rescue_firemanrescue_54_814.jpg +381 171 120 99 +# 54--Rescue/54_Rescue_rescuepeople_54_158.jpg +307 315 467 661 +# 54--Rescue/54_Rescue_firemanrescue_54_327.jpg +328 134 344 458 +# 54--Rescue/54_Rescue_rescuepeople_54_855.jpg +444 140 62 70 +648 188 60 78 +556 340 64 62 +772 218 54 64 +# 54--Rescue/54_Rescue_rescuepeople_54_431.jpg +619 542 44 60 +519 514 42 46 +570 459 34 46 +778 517 41 46 +885 514 42 45 +869 418 41 47 +752 454 40 37 +662 449 29 37 +672 396 31 35 +597 358 20 26 +661 356 26 29 +818 375 30 35 +738 402 29 30 +688 315 24 24 +605 317 21 24 +534 384 20 24 +463 319 29 34 +775 327 25 29 +368 393 36 47 +304 350 27 30 +84 314 75 73 +299 281 19 21 +297 281 21 21 +415 231 24 29 +481 217 23 24 +550 187 21 25 +600 285 16 20 +572 317 17 22 +268 322 26 26 +395 501 27 45 +210 290 14 19 +97 199 20 21 +89 214 40 65 +295 197 11 14 +250 187 10 11 +238 195 9 13 +653 270 15 25 +727 174 10 15 +615 177 10 13 +707 184 9 11 +418 60 6 9 +748 297 23 20 +3 70 12 11 +567 428 24 27 +684 294 16 14 +619 297 11 19 +512 174 11 18 +379 257 15 19 +302 219 11 17 +# 54--Rescue/54_Rescue_rescuepeople_54_325.jpg +367 172 63 88 +558 292 63 84 +620 245 47 93 +25 256 35 60 +186 284 31 58 +846 234 46 93 +# 54--Rescue/54_Rescue_rescuepeople_54_557.jpg +699 136 34 48 +# 54--Rescue/54_Rescue_rescuepeople_54_406.jpg +428 349 40 45 +737 321 39 45 +# 54--Rescue/54_Rescue_rescuepeople_54_335.jpg +380 374 114 114 +526 238 102 120 +828 152 114 132 +# 54--Rescue/54_Rescue_rescuepeople_54_54.jpg +638 298 26 20 +730 393 35 34 +530 101 18 15 +520 77 12 13 +# 54--Rescue/54_Rescue_rescuepeople_54_860.jpg +320 437 160 243 +603 117 179 211 +# 54--Rescue/54_Rescue_firemanrescue_54_969.jpg +506 99 35 41 +# 54--Rescue/54_Rescue_rescuepeople_54_529.jpg +310 214 52 92 +462 188 62 90 +412 24 66 88 +830 168 60 80 +# 
54--Rescue/54_Rescue_rescuepeople_54_602.jpg +218 197 59 72 +351 229 53 75 +499 200 59 85 +647 229 60 74 +839 240 78 79 +801 1444 5 7 +811 1445 6 9 +843 1440 9 10 +863 1443 7 9 +947 1447 8 9 +# 54--Rescue/54_Rescue_rescuepeople_54_840.jpg +250 12 154 211 +792 108 178 160 +# 54--Rescue/54_Rescue_rescuepeople_54_159.jpg +465 483 51 68 +886 25 31 36 +628 178 32 36 +418 230 23 23 +293 206 26 28 +347 115 25 32 +514 182 24 26 +583 94 27 27 +632 113 24 23 +709 84 28 27 +752 132 28 29 +790 78 23 26 +773 101 23 28 +808 57 23 29 +807 31 19 28 +774 25 32 31 +448 276 16 15 +776 65 19 22 +797 101 25 28 +# 54--Rescue/54_Rescue_rescuepeople_54_208.jpg +254 28 112 228 +# 54--Rescue/54_Rescue_firemanrescue_54_617.jpg +581 265 228 268 +# 54--Rescue/54_Rescue_rescuepeople_54_777.jpg +288 46 60 94 +414 298 74 106 +692 118 56 84 +780 336 66 88 +# 54--Rescue/54_Rescue_rescuepeople_54_1049.jpg +446 218 106 152 +646 390 92 128 +# 54--Rescue/54_Rescue_rescuepeople_54_531.jpg +368 123 208 307 +# 54--Rescue/54_Rescue_rescuepeople_54_188.jpg +397 86 232 269 +287 87 108 122 +942 165 55 54 +696 9 54 69 +# 54--Rescue/54_Rescue_firemanrescue_54_660.jpg +836 125 75 96 +439 361 182 209 +# 54--Rescue/54_Rescue_rescuepeople_54_817.jpg +211 703 49 50 +468 445 85 98 +787 653 71 66 +# 54--Rescue/54_Rescue_rescuepeople_54_1006.jpg +241 244 45 64 +311 255 34 47 +428 276 33 48 +619 159 49 62 +# 54--Rescue/54_Rescue_firemanrescue_54_478.jpg +524 86 58 98 +# 54--Rescue/54_Rescue_rescuepeople_54_926.jpg +160 94 96 106 +370 186 76 106 +528 198 96 134 +# 54--Rescue/54_Rescue_firemanrescue_54_908.jpg +438 299 70 48 +619 185 23 30 +858 159 28 36 +852 296 36 46 +# 54--Rescue/54_Rescue_rescuepeople_54_738.jpg +493 496 248 365 +# 55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_325.jpg +328 22 336 478 +# 55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_196.jpg +283 173 34 42 +508 165 27 39 +792 48 39 55 +747 163 30 37 +535 196 36 35 +# 55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_620.jpg +474 146 43 52 +# 55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_686.jpg +451 95 27 26 +394 99 13 16 +311 95 11 17 +107 117 51 43 +751 67 29 40 +579 68 37 47 +854 78 51 66 +# 55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_786.jpg +362 262 96 140 +472 114 98 142 +554 244 100 152 +714 250 106 140 +# 55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_708.jpg +212 226 58 80 +384 270 54 70 +542 246 60 74 +718 262 72 82 +# 55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_867.jpg +808 254 17 26 +716 284 22 32 +561 287 24 31 +508 264 31 44 +390 259 32 35 +232 280 24 29 +60 298 28 34 +# 55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_188.jpg +222 44 100 140 +538 110 82 138 +# 55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_466.jpg +228 568 69 120 +553 526 60 93 +# 55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_1027.jpg +604 56 112 164 +# 55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_179.jpg +81 249 40 41 +274 233 38 24 +156 158 30 41 +182 85 41 42 +231 31 49 28 +321 0 44 23 +# 55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_859.jpg +413 293 38 53 +264 288 45 58 +152 293 46 63 +66 193 67 95 +806 267 66 83 +588 265 52 63 +# 55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_499.jpg +440 54 84 124 +536 96 90 114 +# 55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_414.jpg +368 314 74 100 +# 
55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_50.jpg +234 106 184 160 +# 55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_181.jpg +264 94 27 29 +405 160 23 23 +772 151 23 29 +# 55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_648.jpg +729 120 47 58 +416 100 51 67 +# 55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_711.jpg +601 141 324 333 +# 55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_298.jpg +456 134 60 82 +# 55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_643.jpg +785 211 18 27 +506 215 18 25 +470 232 18 27 +421 268 14 21 +297 259 18 25 +666 259 11 22 +517 262 13 19 +527 257 13 21 +6 294 27 33 +595 254 15 26 +553 271 11 19 +652 251 11 22 +691 258 12 20 +864 249 14 20 +431 247 14 15 +# 55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_569.jpg +62 245 19 28 +101 241 23 30 +166 269 6 9 +181 276 7 7 +176 241 5 7 +180 87 55 74 +338 46 47 63 +252 256 40 55 +374 246 20 31 +593 22 21 24 +696 18 11 25 +889 40 47 57 +935 3 56 46 +732 196 55 66 +914 203 37 50 +36 259 6 7 +39 253 9 11 +# 55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_640.jpg +378 460 100 68 +# 55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_365.jpg +469 223 63 80 +261 173 49 78 +659 120 67 84 +795 158 61 67 +796 227 52 94 +# 55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_773.jpg +990 94 34 73 +811 137 48 66 +805 92 51 59 +631 83 54 68 +712 73 34 60 +476 115 38 63 +385 92 49 60 +324 98 58 60 +331 194 36 81 +2 134 34 76 +637 157 39 66 +# 55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_801.jpg +137 106 75 115 +366 117 75 120 +572 106 71 109 +778 53 82 115 +499 501 95 126 +# 55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_245.jpg +680 120 124 136 +# 55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_122.jpg +37 170 28 39 +29 217 23 32 +17 252 31 39 +58 372 35 43 +102 280 28 39 +121 271 22 30 +170 264 22 34 +186 284 23 32 +248 257 22 25 +263 271 20 37 +291 256 18 35 +345 280 25 46 +361 246 17 29 +387 262 22 31 +431 272 27 34 +458 309 23 35 +461 234 22 30 +524 259 23 32 +531 289 20 36 +565 304 23 35 +606 257 17 30 +634 265 18 36 +669 293 23 40 +696 279 22 37 +743 235 23 37 +740 274 24 39 +727 369 30 52 +816 269 24 41 +840 208 22 39 +873 229 24 37 +868 302 24 41 +872 395 36 47 +965 217 35 42 +775 272 26 36 +593 291 24 27 +# 55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_838.jpg +606 6 366 500 +# 55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_1015.jpg +305 152 582 821 +# 55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_154.jpg +657 19 32 36 +# 55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_894.jpg +422 22 186 238 +# 55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_305.jpg +219 58 52 64 +435 129 55 69 +606 94 41 49 +# 55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_531.jpg +460 349 23 31 +530 373 22 29 +681 354 18 21 +# 55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_343.jpg +380 102 336 462 +# 55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_637.jpg +284 0 442 428 +# 55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_1013.jpg +368 114 126 164 +# 55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_932.jpg +414 176 82 126 +538 60 102 140 +# 55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_130.jpg +289 401 283 283 +214 729 298 
232 +545 536 379 313 +491 934 238 340 +# 55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_523.jpg +333 273 437 701 +# 55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_809.jpg +693 633 58 100 +374 555 72 87 +817 374 86 97 +# 55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_380.jpg +26 183 31 40 +461 188 54 41 +570 291 20 22 +522 465 6 8 +# 55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_951.jpg +398 38 158 244 +# 55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_263.jpg +318 34 78 96 +# 55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_789.jpg +367 278 38 68 +329 128 31 59 +270 226 45 67 +234 184 35 64 +199 147 35 56 +148 215 45 71 +64 242 44 62 +89 174 39 54 +54 130 36 55 +823 315 38 62 +765 171 37 61 +731 335 38 54 +630 277 41 61 +672 198 38 64 +617 116 34 64 +592 218 34 58 +544 257 38 56 +454 242 42 64 +392 221 36 61 +467 161 36 59 +424 128 35 47 +922 162 40 60 +931 328 39 60 +# 55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_588.jpg +593 248 20 28 +664 247 26 35 +# 55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_12.jpg +770 174 84 128 +# 55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_183.jpg +382 128 238 344 +# 55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_721.jpg +278 284 506 659 +# 55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_774.jpg +383 6 13 20 +408 15 13 18 +354 30 12 18 +345 5 11 12 +332 0 7 9 +424 41 12 16 +375 25 9 16 +357 18 11 15 +845 61 56 82 +753 119 58 92 +795 277 56 83 +519 293 41 48 +126 265 41 51 +660 102 23 33 +603 143 27 39 +473 197 29 36 +483 173 26 26 +444 171 23 33 +363 174 28 39 +418 104 20 24 +346 96 19 31 +326 80 23 24 +381 72 12 23 +395 74 18 24 +673 10 11 14 +# 55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_764.jpg +118 76 72 96 +340 86 70 100 +554 64 66 96 +770 4 78 98 +# 55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_155.jpg +284 36 70 54 +478 224 58 52 +584 252 72 74 +644 32 66 72 +# 55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_726.jpg +480 106 144 210 +# 55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_424.jpg +309 335 20 25 +418 267 45 37 +590 310 15 19 +606 362 10 16 +644 372 18 22 +675 351 10 20 +715 328 10 20 +832 384 10 20 +886 387 9 17 +931 383 11 18 +973 359 14 18 +1014 385 10 24 +360 147 15 17 +# 55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_770.jpg +621 21 50 58 +# 55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_208.jpg +414 28 248 350 +# 55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_45.jpg +106 202 68 74 +448 190 62 92 +748 238 58 82 +# 55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_516.jpg +322 216 22 27 +381 214 28 33 +445 195 29 35 +505 194 28 31 +372 325 45 51 +473 327 47 53 +575 139 32 41 +665 200 30 34 +588 342 43 53 +691 375 40 53 +741 209 24 36 +774 213 27 29 +828 196 31 35 +778 370 50 52 +116 303 44 53 +257 214 25 31 +253 323 47 52 +# 55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_218.jpg +728 70 56 56 +# 56--Voter/56_Voter_peoplevoting_56_796.jpg +408 88 205 328 +# 56--Voter/56_Voter_peoplevoting_56_819.jpg +647 203 38 43 +754 217 39 40 +860 229 40 45 +116 434 42 45 +293 352 37 49 +386 473 38 41 +482 442 38 38 +279 613 44 60 +603 398 42 47 +# 56--Voter/56_Voter_peoplevoting_56_777.jpg +849 77 34 59 +# 56--Voter/56_Voter_peoplevoting_56_346.jpg +465 189 150 186 +# 
56--Voter/56_Voter_peoplevoting_56_13.jpg +815 92 29 36 +966 152 22 28 +# 56--Voter/56_Voter_peoplevoting_56_459.jpg +742 89 35 47 +360 72 25 47 +246 75 31 65 +178 147 26 32 +498 80 19 39 +966 155 24 33 +860 90 29 42 +# 56--Voter/56_Voter_peoplevoting_56_714.jpg +970 191 45 53 +939 167 34 43 +582 154 34 44 +1 88 44 88 +47 114 39 69 +# 56--Voter/56_Voter_peoplevoting_56_531.jpg +610 122 78 118 +# 56--Voter/56_Voter_peoplevoting_56_1011.jpg +309 381 387 453 +# 56--Voter/56_Voter_peoplevoting_56_764.jpg +118 116 386 278 +# 56--Voter/56_Voter_peoplevoting_56_887.jpg +523 206 206 259 +# 56--Voter/56_Voter_peoplevoting_56_103.jpg +456 327 438 559 +# 56--Voter/56_Voter_peoplevoting_56_712.jpg +192 50 152 222 +# 56--Voter/56_Voter_peoplevoting_56_260.jpg +278 163 36 39 +470 134 39 46 +686 151 46 45 +848 417 25 37 +910 428 30 37 +1003 432 21 33 +841 323 22 35 +931 319 23 32 +948 222 22 30 +857 219 25 32 +931 128 24 30 +846 125 20 28 +850 26 25 36 +928 20 26 37 +0 415 17 31 +43 314 22 29 +28 214 25 35 +71 121 19 31 +61 45 26 29 +# 56--Voter/56_Voter_peoplevoting_56_21.jpg +644 284 130 156 +490 242 70 106 +860 294 68 94 +304 284 60 64 +# 56--Voter/56_Voter_peoplevoting_56_641.jpg +312 266 92 102 +442 268 138 148 +736 94 54 112 +# 56--Voter/56_Voter_peoplevoting_56_350.jpg +44 359 25 52 +7 326 34 43 +503 348 17 30 +830 270 13 11 +747 239 9 10 +728 201 7 8 +855 265 9 9 +908 250 13 13 +929 246 7 9 +915 302 6 13 +720 203 7 12 +677 208 6 10 +771 290 9 13 +538 192 8 10 +527 185 9 10 +515 161 7 12 +565 170 8 10 +470 152 8 13 +437 152 10 11 +497 159 8 11 +525 163 9 7 +405 125 6 7 +258 108 11 12 +226 104 13 9 +434 133 6 8 +139 69 10 9 +183 74 8 9 +206 95 9 10 +199 84 8 10 +114 59 8 10 +82 49 9 9 +52 45 11 10 +33 41 8 9 +12 41 11 11 +9 28 8 10 +75 44 6 9 +528 110 5 5 +845 298 12 16 +853 242 8 10 +427 124 5 7 +530 127 6 6 +515 150 6 6 +486 153 6 7 +732 190 7 9 +725 192 6 7 +831 239 7 12 +794 217 7 6 +521 315 13 23 +545 171 7 8 +799 361 10 17 +799 235 6 9 +768 206 6 7 +743 208 9 11 +734 221 7 10 +649 202 7 11 +634 196 7 10 +700 225 9 10 +667 188 7 11 +601 180 5 8 +581 169 6 9 +563 192 10 11 +284 116 8 10 +306 107 7 11 +335 118 6 10 +349 129 7 11 +369 133 6 11 +394 141 8 11 +283 84 6 6 +323 93 4 5 +356 105 6 7 +374 110 6 8 +386 120 6 7 +344 410 18 23 +# 56--Voter/56_Voter_peoplevoting_56_842.jpg +318 196 480 680 +# 56--Voter/56_Voter_peoplevoting_56_747.jpg +356 42 116 174 +# 56--Voter/56_Voter_peoplevoting_56_110.jpg +96 322 43 57 +208 309 29 52 +416 243 27 41 +811 259 14 19 +894 328 16 20 +# 56--Voter/56_Voter_peoplevoting_56_344.jpg +209 73 16 26 +461 86 18 22 +379 85 15 23 +312 53 17 27 +97 56 16 25 +60 79 15 21 +176 81 20 24 +554 76 15 23 +641 64 14 20 +702 63 15 26 +801 89 14 24 +893 82 13 21 +917 81 14 23 +981 71 14 20 +# 56--Voter/56_Voter_peoplevoting_56_873.jpg +483 180 105 132 +# 56--Voter/56_Voter_peoplevoting_56_378.jpg +232 124 334 452 +# 56--Voter/56_Voter_peoplevoting_56_717.jpg +414 6 108 106 +566 62 78 106 +832 6 100 110 +264 2 70 98 +# 56--Voter/56_Voter_peoplevoting_56_268.jpg +664 213 25 42 +809 215 21 34 +868 223 19 36 +942 208 16 23 +746 227 24 32 +572 213 20 37 +596 208 12 17 +632 193 16 18 +391 251 30 41 +248 282 30 32 +338 228 18 29 +356 220 14 25 +302 213 13 17 +223 197 16 24 +267 228 14 19 +280 189 10 22 +371 176 11 18 +396 185 10 15 +410 195 14 18 +360 201 14 20 +512 212 18 21 +474 217 23 29 +445 218 16 22 +240 183 12 21 +138 218 18 20 +109 185 11 14 +128 191 10 14 +77 186 10 13 +26 181 12 18 +198 181 11 16 +99 173 8 10 +303 175 9 10 +457 178 9 15 +679 191 15 21 +839 221 14 21 +445 
202 12 14 +91 168 5 7 +419 217 20 35 +617 244 17 26 +559 178 9 10 +# 56--Voter/56_Voter_peoplevoting_56_228.jpg +115 76 36 67 +13 207 31 51 +467 402 20 43 +556 272 34 46 +602 304 39 53 +640 245 31 44 +799 328 22 34 +922 323 22 34 +995 493 4 5 +1004 493 3 5 +# 56--Voter/56_Voter_peoplevoting_56_902.jpg +708 182 158 222 +568 232 116 162 +# 56--Voter/56_Voter_peoplevoting_56_644.jpg +431 27 271 292 +336 290 19 34 +903 102 59 58 +# 56--Voter/56_Voter_peoplevoting_56_579.jpg +192 165 51 70 +# 56--Voter/56_Voter_peoplevoting_56_305.jpg +73 228 25 35 +448 196 27 55 +# 56--Voter/56_Voter_peoplevoting_56_620.jpg +818 134 38 65 +288 197 34 56 +136 194 44 64 +365 199 23 33 +346 249 17 20 +530 219 13 19 +545 215 11 18 +446 231 16 24 +# 56--Voter/56_Voter_peoplevoting_56_723.jpg +216 86 365 430 +474 380 425 401 +143 641 294 320 +328 1094 81 130 +664 1058 96 133 +# 56--Voter/56_Voter_peoplevoting_56_370.jpg +208 553 129 176 +278 358 84 129 +451 342 45 57 +395 282 36 47 +993 181 12 13 +# 56--Voter/56_Voter_peoplevoting_56_874.jpg +971 247 45 75 +483 366 54 93 +596 332 31 36 +670 254 25 28 +784 301 40 59 +514 276 18 26 +455 289 19 21 +403 269 18 20 +357 282 15 16 +367 254 11 12 +336 255 15 17 +424 250 11 13 +453 239 12 15 +507 229 10 13 +157 413 31 54 +250 393 28 52 +137 353 26 29 +157 390 22 32 +90 339 24 32 +40 338 19 25 +13 299 19 22 +66 284 13 17 +114 300 10 14 +154 277 16 17 +191 258 13 16 +465 226 12 12 +275 247 7 11 +631 193 13 14 +538 196 7 13 +493 217 10 11 +330 230 12 13 +694 442 64 141 +534 311 21 24 +566 298 23 31 +960 220 39 50 +# 56--Voter/56_Voter_peoplevoting_56_663.jpg +670 148 42 41 +489 285 51 72 +342 207 51 49 +106 225 12 13 +2 218 8 12 +196 196 13 19 +71 218 11 11 +282 190 11 11 +233 176 10 15 +316 184 9 12 +839 64 62 74 +# 56--Voter/56_Voter_peoplevoting_56_140.jpg +12 34 224 282 +# 56--Voter/56_Voter_peoplevoting_56_460.jpg +540 224 34 59 +# 56--Voter/56_Voter_peoplevoting_56_410.jpg +122 454 15 17 +99 436 10 19 +20 425 11 23 +35 416 16 17 +59 449 15 17 +178 434 15 22 +215 458 14 23 +272 467 15 24 +289 440 11 21 +237 452 15 22 +366 437 17 19 +322 442 13 17 +472 434 16 19 +199 417 13 13 +423 429 15 21 +424 452 13 23 +159 419 9 13 +614 463 15 18 +695 479 14 20 +744 475 16 21 +549 445 16 19 +574 447 14 16 +528 429 14 19 +817 459 12 20 +827 426 14 25 +492 446 16 21 +934 457 15 24 +989 452 12 15 +937 427 15 20 +896 426 16 18 +# 56--Voter/56_Voter_peoplevoting_56_441.jpg +212 240 98 160 +# 56--Voter/56_Voter_peoplevoting_56_339.jpg +539 144 34 49 +659 227 24 44 +335 307 22 33 +# 56--Voter/56_Voter_peoplevoting_56_953.jpg +387 131 128 192 +# 56--Voter/56_Voter_peoplevoting_56_323.jpg +302 176 294 412 +# 56--Voter/56_Voter_peoplevoting_56_782.jpg +724 16 74 94 +# 56--Voter/56_Voter_peoplevoting_56_118.jpg +289 95 83 118 +100 5 67 69 +363 12 78 98 +568 119 58 115 +456 6 29 62 +820 0 42 56 +# 56--Voter/56_Voter_peoplevoting_56_1046.jpg +432 432 110 157 +# 56--Voter/56_Voter_peoplevoting_56_946.jpg +108 40 102 166 +462 126 100 162 +562 50 106 156 +# 56--Voter/56_Voter_peoplevoting_56_781.jpg +115 65 59 85 +290 104 41 59 +364 85 42 66 +678 110 20 34 +753 113 18 30 +777 123 23 31 +653 157 24 34 +520 107 30 46 +896 125 11 13 +836 129 10 19 +960 136 9 20 +916 180 12 13 +604 141 18 24 +365 197 65 75 +644 130 28 30 +433 123 32 50 +232 152 28 42 +# 56--Voter/56_Voter_peoplevoting_56_528.jpg +905 229 56 66 +532 237 52 40 +455 181 40 44 +510 144 39 47 +621 123 32 47 +785 125 21 24 +938 89 10 23 +916 73 15 18 +893 62 15 17 +783 38 11 20 +938 49 11 16 +482 24 17 28 +418 46 16 26 +320 59 18 27 +616 56 10 
29 +700 88 11 24 +179 61 18 32 +262 60 15 29 +# 56--Voter/56_Voter_peoplevoting_56_558.jpg +409 253 34 86 +621 369 6 10 +645 372 10 10 +635 365 8 11 +572 386 5 8 +542 389 8 8 +518 385 6 9 +522 398 6 8 +554 348 5 6 +558 359 5 8 +579 346 4 8 +519 355 6 7 +517 366 9 9 +531 349 6 8 +538 355 6 6 +513 341 5 7 +493 362 9 8 +485 360 5 7 +466 383 5 8 +489 397 6 9 +474 351 5 8 +569 327 6 6 +611 329 5 5 +620 337 4 4 +628 354 6 8 +583 361 6 7 +527 329 6 7 +79 409 9 10 +109 407 6 12 +117 398 8 13 +136 395 6 9 +148 408 7 12 +46 411 8 10 +16 411 8 11 +51 400 7 10 +23 379 6 8 +10 370 6 9 +6 387 6 8 +38 369 7 11 +42 360 8 9 +164 396 7 9 +152 398 9 11 +130 377 7 8 +148 373 5 7 +110 389 8 12 +106 376 6 9 +95 372 6 8 +86 385 8 9 +78 357 7 7 +99 362 6 8 +86 339 6 9 +57 349 4 6 +58 383 8 11 +44 379 8 9 +31 386 8 10 +185 385 7 11 +211 399 7 9 +173 368 6 11 +170 341 6 7 +217 357 7 7 +201 366 8 9 +193 360 6 8 +214 341 7 10 +146 350 7 7 +190 378 7 10 +203 378 8 8 +157 361 8 10 +23 360 6 7 +74 396 7 7 +645 280 51 77 +835 275 19 30 +894 426 10 12 +935 418 9 13 +926 413 11 13 +921 404 7 12 +927 386 7 13 +956 399 7 12 +964 408 7 12 +951 388 7 11 +962 381 8 12 +945 371 7 9 +916 372 8 9 +887 389 7 8 +904 332 6 9 +883 336 6 7 +596 422 10 14 +625 413 9 13 +651 406 8 13 +613 412 10 12 +604 403 9 9 +584 414 11 12 +489 421 9 11 +474 409 8 13 +482 404 7 12 +520 413 7 10 +601 385 7 11 +635 389 6 8 +659 381 5 10 +595 375 6 9 +# 56--Voter/56_Voter_peoplevoting_56_122.jpg +964 15 22 43 +691 48 33 59 +559 80 29 50 +619 55 26 45 +486 18 27 38 +531 14 33 40 +607 17 29 32 +800 7 31 35 +82 66 39 48 +160 27 28 29 +151 59 33 40 +211 30 29 34 +263 19 26 29 +350 27 24 32 +404 6 29 39 +440 33 25 39 +423 133 27 40 +299 12 19 33 +118 18 23 39 +90 17 34 48 +28 19 22 35 +912 194 39 47 +936 77 33 57 +839 57 31 50 +902 34 26 47 +326 98 30 68 +282 60 34 50 +663 0 33 41 +# 57--Angler/57_Angler_peoplefishing_57_926.jpg +404 140 122 150 +# 57--Angler/57_Angler_peoplefishing_57_153.jpg +206 246 148 178 +# 57--Angler/57_Angler_peoplefishing_57_251.jpg +415 300 45 53 +538 224 41 52 +728 239 12 15 +# 57--Angler/57_Angler_peoplefishing_57_250.jpg +754 106 106 120 +# 57--Angler/57_Angler_peoplefishing_57_51.jpg +819 127 56 80 +699 60 60 84 +478 128 49 47 +137 202 38 50 +74 124 47 68 +264 56 27 29 +# 57--Angler/57_Angler_peoplefishing_57_515.jpg +297 319 17 21 +# 57--Angler/57_Angler_peoplefishing_57_924.jpg +495 323 24 32 +188 319 28 34 +95 318 15 18 +51 285 11 16 +# 57--Angler/57_Angler_peoplefishing_57_110.jpg +647 388 15 18 +277 386 15 19 +# 57--Angler/57_Angler_peoplefishing_57_1009.jpg +82 108 70 74 +220 148 74 74 +344 152 78 102 +458 98 58 72 +544 164 66 84 +686 150 70 86 +852 138 74 84 +# 57--Angler/57_Angler_peoplefishing_57_206.jpg +284 806 34 37 +615 680 16 16 +172 720 17 17 +985 616 10 12 +924 710 18 20 +# 57--Angler/57_Angler_peoplefishing_57_589.jpg +571 337 158 194 +# 57--Angler/57_Angler_peoplefishing_57_661.jpg +208 416 10 16 +242 386 12 14 +102 429 16 19 +239 413 15 13 +284 418 10 12 +367 414 8 12 +407 409 7 13 +500 405 6 9 +450 414 5 9 +784 319 5 5 +754 317 5 6 +634 319 5 6 +968 331 7 7 +953 332 6 6 +# 57--Angler/57_Angler_peoplefishing_57_880.jpg +195 294 25 33 +131 277 29 36 +# 57--Angler/57_Angler_peoplefishing_57_566.jpg +327 421 12 15 +265 415 11 15 +250 410 12 13 +422 438 10 12 +501 426 10 14 +471 388 11 14 +501 220 11 15 +# 57--Angler/57_Angler_peoplefishing_57_17.jpg +663 168 27 46 +# 57--Angler/57_Angler_peoplefishing_57_394.jpg +221 353 49 58 +# 57--Angler/57_Angler_peoplefishing_57_868.jpg +632 668 161 234 +# 
57--Angler/57_Angler_peoplefishing_57_900.jpg +498 378 188 234 +# 57--Angler/57_Angler_peoplefishing_57_104.jpg +795 386 28 35 +379 474 22 33 +# 57--Angler/57_Angler_peoplefishing_57_402.jpg +350 126 52 62 +674 54 56 50 +# 57--Angler/57_Angler_peoplefishing_57_254.jpg +635 153 28 34 +550 351 15 24 +# 57--Angler/57_Angler_peoplefishing_57_401.jpg +618 207 26 31 +# 57--Angler/57_Angler_peoplefishing_57_796.jpg +399 294 183 249 +# 57--Angler/57_Angler_peoplefishing_57_430.jpg +629 193 51 76 +369 197 52 80 +# 57--Angler/57_Angler_peoplefishing_57_20.jpg +418 289 31 34 +348 246 26 29 +# 57--Angler/57_Angler_peoplefishing_57_15.jpg +680 173 26 25 +420 167 25 27 +576 309 19 17 +# 57--Angler/57_Angler_peoplefishing_57_803.jpg +421 403 107 152 +# 57--Angler/57_Angler_peoplefishing_57_120.jpg +476 347 61 52 +248 161 17 20 +# 57--Angler/57_Angler_peoplefishing_57_559.jpg +458 18 33 41 +# 57--Angler/57_Angler_peoplefishing_57_182.jpg +168 76 86 88 +# 57--Angler/57_Angler_peoplefishing_57_870.jpg +563 517 91 96 +# 57--Angler/57_Angler_peoplefishing_57_53.jpg +782 293 32 34 +556 328 16 32 +# 57--Angler/57_Angler_peoplefishing_57_139.jpg +241 340 15 17 +199 319 14 17 +180 339 13 18 +295 339 5 12 +474 342 13 14 +432 307 8 14 +514 299 11 16 +410 346 13 16 +331 374 8 15 +395 369 7 14 +426 378 7 14 +475 382 8 13 +570 392 10 14 +553 315 11 16 +626 299 13 14 +678 295 9 14 +716 282 11 12 +693 276 9 12 +662 261 10 14 +653 286 10 11 +558 304 13 13 +678 388 8 13 +703 303 8 10 +759 357 5 13 +816 297 11 15 +808 315 11 13 +782 255 10 12 +851 349 10 14 +822 334 8 15 +878 316 7 9 +965 326 10 15 +890 235 8 11 +676 322 6 14 +546 388 10 10 +861 276 10 16 +966 241 6 6 +770 291 12 15 +620 322 9 14 +# 57--Angler/57_Angler_peoplefishing_57_933.jpg +584 254 86 114 +418 398 58 76 +# 57--Angler/57_Angler_peoplefishing_57_411.jpg +307 264 35 57 +779 209 48 67 +# 57--Angler/57_Angler_peoplefishing_57_600.jpg +452 142 124 154 +# 57--Angler/57_Angler_peoplefishing_57_764.jpg +321 112 175 230 +# 57--Angler/57_Angler_peoplefishing_57_866.jpg +614 94 136 170 +250 150 126 166 +# 57--Angler/57_Angler_peoplefishing_57_442.jpg +452 104 58 94 +692 166 64 88 +# 57--Angler/57_Angler_peoplefishing_57_1012.jpg +222 320 70 68 +398 158 102 112 +632 214 112 128 +# 58--Hockey/58_Hockey_icehockey_puck_58_507.jpg +852 221 59 72 +724 120 61 70 +411 109 38 40 +462 64 36 40 +293 46 20 31 +343 54 24 32 +# 58--Hockey/58_Hockey_icehockey_puck_58_653.jpg +62 154 64 82 +252 182 58 74 +466 100 62 76 +686 150 56 74 +896 26 66 82 +# 58--Hockey/58_Hockey_icehockey_puck_58_895.jpg +634 264 390 456 +81 750 75 74 +373 823 34 30 +386 876 38 30 +192 797 52 67 +# 58--Hockey/58_Hockey_icehockey_puck_58_431.jpg +212 191 154 195 +233 305 34 41 +890 201 50 63 +# 58--Hockey/58_Hockey_icehockey_puck_58_17.jpg +68 93 63 70 +446 488 25 35 +165 499 45 46 +277 454 21 25 +542 449 20 25 +375 238 18 22 +309 360 25 17 +193 464 15 14 +856 63 53 70 +17 309 92 119 +# 58--Hockey/58_Hockey_icehockey_puck_58_493.jpg +447 394 93 60 +472 94 70 83 +317 179 74 86 +773 149 38 36 +905 129 38 42 +611 145 35 42 +655 57 32 43 +236 101 43 54 +250 43 34 45 +439 55 28 39 +752 64 25 35 +809 5 36 31 +719 4 30 27 +598 1 31 35 +112 134 38 40 +161 131 28 32 +345 0 36 25 +226 1 33 31 +110 16 28 40 +2 1 35 32 +1 110 15 54 +482 4 32 30 +905 97 28 25 +# 58--Hockey/58_Hockey_icehockey_puck_58_94.jpg +820 243 42 44 +689 171 41 45 +762 120 39 52 +600 100 34 57 +438 116 39 51 +569 162 44 38 +303 97 45 45 +1009 108 15 51 +977 112 31 46 +126 41 36 39 +47 12 34 51 +# 58--Hockey/58_Hockey_icehockey_puck_58_290.jpg 
+492 471 29 36 +331 347 23 31 +44 181 18 23 +100 116 21 26 +25 34 25 32 +4 119 20 36 +299 81 23 33 +60 26 26 27 +124 31 24 18 +356 70 25 34 +420 66 26 32 +491 64 22 32 +560 46 26 30 +599 41 22 27 +176 18 24 20 +661 38 22 29 +742 32 21 32 +363 3 23 24 +237 20 26 28 +808 22 21 24 +864 3 22 26 +915 2 21 21 +916 237 18 24 +208 87 26 36 +162 79 29 36 +153 23 19 21 +# 58--Hockey/58_Hockey_icehockey_puck_58_680.jpg +950 232 28 33 +837 208 36 42 +891 368 39 44 +774 348 37 38 +753 220 31 38 +694 245 32 30 +623 257 34 35 +671 357 35 38 +563 330 34 43 +563 250 33 35 +549 198 33 33 +688 194 13 13 +640 194 15 14 +734 193 13 20 +937 169 14 14 +471 328 36 37 +437 235 31 33 +371 323 34 42 +333 243 33 41 +352 230 26 33 +257 177 30 41 +224 201 39 42 +253 347 33 43 +287 330 30 45 +97 174 30 42 +927 80 12 15 +949 63 12 12 +1001 44 13 18 +981 25 13 17 +920 24 11 13 +924 8 11 14 +895 61 13 14 +888 135 14 17 +834 132 14 11 +804 150 13 19 +788 125 17 17 +790 52 11 13 +832 42 13 13 +836 28 13 13 +770 22 14 15 +729 65 13 14 +739 119 15 18 +704 120 14 17 +702 152 14 19 +735 137 15 19 +868 153 16 20 +973 143 14 14 +984 177 13 14 +954 128 12 15 +703 27 10 13 +672 60 15 17 +579 94 14 17 +625 59 14 14 +559 27 11 13 +503 26 11 11 +451 17 12 14 +418 38 12 14 +466 112 13 17 +512 115 15 15 +540 115 14 14 +543 148 14 16 +579 132 14 15 +625 138 13 15 +643 142 14 18 +469 64 14 16 +445 99 12 13 +423 113 15 16 +434 133 14 16 +483 156 16 14 +670 6 13 12 +376 133 16 17 +525 47 11 13 +517 2 12 11 +580 5 13 13 +599 2 11 8 +624 1 12 10 +362 27 11 11 +341 31 11 12 +306 21 12 12 +299 82 15 14 +370 106 13 14 +324 106 11 11 +335 142 13 18 +300 140 15 15 +271 135 11 12 +174 54 13 15 +229 80 13 17 +115 80 13 14 +21 104 13 14 +50 43 13 11 +246 133 15 18 +215 132 13 10 +106 43 13 13 +93 41 12 13 +# 58--Hockey/58_Hockey_icehockey_puck_58_475.jpg +570 236 32 52 +323 242 33 49 +# 58--Hockey/58_Hockey_icehockey_puck_58_244.jpg +801 483 32 36 +704 470 35 39 +671 440 28 27 +629 458 29 36 +587 466 27 33 +538 275 30 38 +459 252 27 39 +362 272 28 36 +406 420 30 34 +495 416 24 31 +533 407 25 30 +455 506 26 34 +517 508 29 31 +449 456 28 30 +402 470 29 34 +235 480 35 43 +# 58--Hockey/58_Hockey_icehockey_puck_58_785.jpg +794 315 30 33 +918 315 34 42 +731 344 30 33 +685 341 30 33 +657 304 30 39 +625 363 29 36 +607 332 30 28 +524 344 31 32 +483 344 29 35 +447 352 35 44 +424 296 31 41 +376 338 33 48 +330 337 31 36 +284 369 33 36 +281 309 33 38 +246 368 31 31 +130 329 37 44 +# 58--Hockey/58_Hockey_icehockey_puck_58_592.jpg +965 397 31 31 +715 331 13 16 +691 358 14 17 +543 354 28 27 +181 330 15 19 +# 58--Hockey/58_Hockey_icehockey_puck_58_880.jpg +921 279 23 30 +642 281 19 22 +394 309 23 28 +339 326 15 19 +# 58--Hockey/58_Hockey_icehockey_puck_58_50.jpg +448 84 60 80 +714 194 62 72 +# 58--Hockey/58_Hockey_icehockey_puck_58_365.jpg +485 212 61 68 +807 20 53 83 +109 83 61 111 +# 58--Hockey/58_Hockey_icehockey_puck_58_262.jpg +777 488 17 16 +567 215 8 12 +552 228 10 15 +538 249 12 16 +470 257 11 12 +469 188 10 14 +430 206 11 12 +385 240 11 16 +350 250 12 15 +323 237 11 15 +348 275 11 13 +206 238 10 15 +270 322 13 15 +228 308 11 12 +204 333 13 17 +323 332 13 15 +327 352 9 17 +323 410 11 17 +51 329 12 18 +776 87 9 11 +800 83 8 13 +747 74 9 11 +719 84 7 8 +721 46 10 11 +764 48 9 11 +836 75 7 14 +788 49 11 11 +695 54 9 10 +865 95 10 10 +858 32 9 8 +835 8 10 9 +755 25 9 10 +895 91 10 11 +911 37 9 10 +966 94 10 11 +186 53 6 10 +365 36 11 13 +26 48 7 16 +266 60 8 6 +240 45 8 11 +33 20 11 9 +444 76 12 11 +439 8 13 13 +495 51 9 10 +528 31 9 12 +556 47 12 12 +604 70 11 14 
+632 72 8 9 +624 38 11 10 +607 19 10 11 +304 18 11 11 +339 6 10 14 +947 1 8 9 +980 6 10 10 +1018 14 6 10 +881 74 9 11 +333 40 7 9 +662 68 10 11 +675 41 10 11 +649 42 5 7 +51 51 8 10 +217 35 7 12 +5 45 10 14 +# 58--Hockey/58_Hockey_icehockey_puck_58_285.jpg +960 275 45 40 +784 83 14 17 +568 169 34 49 +567 93 21 26 +431 135 28 33 +288 153 33 33 +318 94 8 9 +41 135 22 19 +306 141 25 34 +# 58--Hockey/58_Hockey_icehockey_puck_58_835.jpg +853 405 34 49 +700 410 37 38 +558 399 38 56 +439 411 38 51 +308 414 37 43 +163 400 38 47 +873 297 35 51 +769 256 31 43 +763 213 28 39 +677 249 30 42 +598 248 34 37 +513 238 32 42 +442 240 36 39 +355 235 34 49 +245 241 33 44 +693 200 27 35 +626 197 27 33 +551 185 31 33 +449 182 30 32 +354 180 33 36 +289 175 29 41 +214 181 31 37 +147 245 33 42 +50 304 39 50 +# 58--Hockey/58_Hockey_icehockey_puck_58_182.jpg +610 157 48 60 +379 168 49 64 +626 98 26 33 +819 120 34 42 +# 58--Hockey/58_Hockey_icehockey_puck_58_113.jpg +516 57 48 50 +309 126 51 58 +81 42 30 29 +362 44 23 26 +394 46 20 25 +433 47 14 23 +273 39 25 25 +224 44 20 17 +186 44 18 16 +6 27 16 20 +491 57 18 20 +309 48 21 21 +660 57 22 22 +842 5 24 22 +955 10 25 25 +994 98 29 40 +125 45 15 16 +445 47 14 20 +247 20 10 14 +# 58--Hockey/58_Hockey_icehockey_puck_58_184.jpg +560 394 54 94 +# 58--Hockey/58_Hockey_icehockey_puck_58_692.jpg +566 91 63 72 +443 67 62 78 +257 47 44 72 +135 34 48 59 +377 32 49 62 +# 58--Hockey/58_Hockey_icehockey_puck_58_940.jpg +824 267 31 27 +695 293 32 32 +422 318 33 39 +252 223 11 13 +# 58--Hockey/58_Hockey_icehockey_puck_58_455.jpg +433 108 125 187 +# 58--Hockey/58_Hockey_icehockey_puck_58_248.jpg +495 191 36 39 +564 67 21 29 +396 240 43 58 +85 48 16 19 +689 20 15 15 +824 18 15 16 +# 58--Hockey/58_Hockey_icehockey_puck_58_655.jpg +664 84 56 94 +858 80 72 92 +302 212 60 82 +104 170 80 84 +326 112 58 82 +# 58--Hockey/58_Hockey_icehockey_puck_58_469.jpg +551 220 32 35 +618 115 21 21 +553 143 15 23 +573 110 17 18 +478 93 21 19 +399 90 18 22 +365 242 40 40 +335 115 18 20 +332 76 16 21 +358 64 17 17 +413 23 19 26 +463 16 16 18 +304 57 16 21 +256 48 18 20 +216 61 21 21 +164 85 17 21 +100 88 18 21 +68 96 20 21 +23 87 21 24 +10 43 18 27 +88 3 17 21 +135 14 14 19 +160 31 12 16 +648 115 15 22 +739 138 14 17 +783 156 16 23 +723 25 15 20 +683 25 15 16 +501 23 12 18 +526 166 19 21 +887 69 14 17 +871 120 14 14 +903 169 14 16 +931 159 12 13 +838 162 13 18 +830 119 15 16 +951 79 11 14 +# 58--Hockey/58_Hockey_icehockey_puck_58_221.jpg +456 527 64 82 +480 458 44 60 +544 490 34 45 +652 464 51 60 +826 477 33 51 +896 476 27 56 +821 153 37 46 +731 161 39 54 +576 41 43 47 +78 589 53 57 +201 475 54 65 +110 455 50 66 +8 455 49 65 +97 369 52 57 +132 329 50 57 +254 339 44 55 +366 292 39 58 +488 297 41 48 +196 206 47 53 +50 204 38 42 +1 194 29 58 +123 91 30 50 +48 77 39 49 +7 81 25 43 +1 694 52 73 +287 583 40 48 +# 58--Hockey/58_Hockey_icehockey_puck_58_384.jpg +348 575 32 32 +420 495 32 34 +531 442 32 33 +508 336 24 26 +555 305 27 29 +11 514 27 24 +230 212 20 24 +545 259 23 22 +607 227 26 30 +603 194 21 30 +656 184 25 23 +738 127 27 28 +774 66 20 23 +831 58 17 24 +841 26 24 25 +853 4 22 18 +# 58--Hockey/58_Hockey_icehockey_puck_58_245.jpg +883 47 29 37 +811 54 29 37 +733 63 28 39 +676 55 31 40 +610 73 29 35 +840 200 29 37 +900 168 31 41 +613 196 29 38 +380 104 29 38 +408 81 33 42 +474 117 30 36 +528 100 29 39 +280 80 29 37 +198 122 31 34 +254 204 29 40 +351 207 31 38 +436 192 33 43 +200 228 31 40 +153 77 30 36 +102 96 33 38 +99 193 33 34 +677 290 31 40 +611 366 29 40 +# 
58--Hockey/58_Hockey_icehockey_puck_58_330.jpg +284 86 68 88 +598 48 62 88 +# 58--Hockey/58_Hockey_icehockey_puck_58_403.jpg +298 84 54 90 +564 212 78 80 +# 58--Hockey/58_Hockey_icehockey_puck_58_753.jpg +878 396 57 72 +171 109 11 14 +183 549 44 75 +894 192 15 24 +1003 159 18 26 +# 58--Hockey/58_Hockey_icehockey_puck_58_926.jpg +937 277 51 68 +882 54 47 65 +567 106 58 68 +461 209 39 56 +610 383 50 61 +402 321 46 60 +221 221 52 68 +351 15 52 68 +# 58--Hockey/58_Hockey_icehockey_puck_58_715.jpg +630 174 144 180 +266 88 158 200 +# 58--Hockey/58_Hockey_icehockey_puck_58_118.jpg +180 80 60 74 +594 130 68 74 +# 58--Hockey/58_Hockey_icehockey_puck_58_212.jpg +595 79 56 63 +512 83 26 24 +499 42 20 24 +257 118 21 35 +506 134 21 21 +371 50 17 21 +430 85 24 23 +431 126 26 21 +379 75 19 26 +287 34 25 29 +402 7 17 24 +357 140 23 24 +180 156 24 34 +141 161 24 32 +59 169 29 33 +220 73 23 28 +238 28 21 28 +138 69 26 35 +62 89 20 25 +37 37 23 24 +129 41 22 19 +432 7 18 24 +650 50 19 23 +201 34 21 27 +316 35 21 25 +455 153 23 18 +27 2 21 14 +942 157 19 26 +992 161 20 23 +853 161 19 24 +775 51 17 24 +928 91 16 17 +970 114 20 27 +957 90 19 25 +964 55 20 24 +829 91 19 20 +914 121 18 22 +875 18 21 24 +865 58 21 19 +942 15 10 23 +989 19 14 21 +794 52 18 19 +418 58 19 21 +789 97 16 17 +827 64 13 17 +806 172 21 23 +# 58--Hockey/58_Hockey_icehockey_puck_58_553.jpg +913 235 25 25 +857 184 11 18 +567 174 11 13 +480 218 17 20 +142 182 16 21 +# 58--Hockey/58_Hockey_icehockey_puck_58_671.jpg +618 320 44 68 +570 253 30 35 +395 259 17 29 +309 240 22 33 +269 264 29 44 +# 58--Hockey/58_Hockey_icehockey_puck_58_404.jpg +485 224 52 60 +506 63 58 69 +403 374 51 41 +# 58--Hockey/58_Hockey_icehockey_puck_58_531.jpg +292 252 513 692 +# 58--Hockey/58_Hockey_icehockey_puck_58_467.jpg +770 141 38 46 +685 109 20 25 +# 58--Hockey/58_Hockey_icehockey_puck_58_697.jpg +412 110 130 168 +# 58--Hockey/58_Hockey_icehockey_puck_58_825.jpg +840 393 45 51 +791 218 33 41 +820 118 29 38 +769 102 27 34 +715 121 28 39 +684 221 31 39 +638 105 26 32 +597 115 28 35 +546 100 28 34 +573 231 32 41 +477 215 29 42 +498 126 28 33 +453 89 27 28 +341 211 34 46 +390 118 26 29 +348 106 24 33 +295 117 29 37 +250 110 27 34 +213 116 28 38 +210 228 34 47 +107 110 29 37 +90 384 51 51 +# 59--people--driving--car/59_peopledrivingcar_peopledrivingcar_59_592.jpg +388 65 38 37 +# 59--people--driving--car/59_peopledrivingcar_peopledrivingcar_59_856.jpg +561 249 13 17 +187 256 17 19 +# 59--people--driving--car/59_peopledrivingcar_peopledrivingcar_59_978.jpg +294 54 456 642 +# 59--people--driving--car/59_peopledrivingcar_peopledrivingcar_59_201.jpg +648 184 118 202 +806 138 172 258 +# 59--people--driving--car/59_peopledrivingcar_peopledrivingcar_59_401.jpg +220 270 158 202 +412 262 92 118 +# 59--people--driving--car/59_peopledrivingcar_peopledrivingcar_59_906.jpg +272 386 272 365 +# 59--people--driving--car/59_peopledrivingcar_peopledrivingcar_59_789.jpg +278 242 234 310 +# 59--people--driving--car/59_peopledrivingcar_peopledrivingcar_59_244.jpg +374 119 243 352 +334 954 61 79 +647 969 64 79 +553 939 61 100 +# 59--people--driving--car/59_peopledrivingcar_peopledrivingcar_59_200.jpg +428 64 70 102 +146 256 82 104 +# 59--people--driving--car/59_peopledrivingcar_peopledrivingcar_59_27.jpg +594 176 192 250 +# 59--people--driving--car/59_peopledrivingcar_peopledrivingcar_59_1014.jpg +50 34 56 48 +# 59--people--driving--car/59_peopledrivingcar_peopledrivingcar_59_95.jpg +586 146 140 204 +# 59--people--driving--car/59_peopledrivingcar_peopledrivingcar_59_229.jpg +644 162 192 252 
+# 59--people--driving--car/59_peopledrivingcar_peopledrivingcar_59_763.jpg +443 218 50 56 +747 412 70 98 +# 59--people--driving--car/59_peopledrivingcar_peopledrivingcar_59_34.jpg +242 50 210 280 +# 59--people--driving--car/59_peopledrivingcar_peopledrivingcar_59_1020.jpg +368 6 118 138 +506 146 122 226 +# 59--people--driving--car/59_peopledrivingcar_peopledrivingcar_59_404.jpg +865 182 49 65 +794 116 16 18 +741 82 14 17 +696 85 16 17 +660 104 10 11 +200 395 89 84 +# 59--people--driving--car/59_peopledrivingcar_peopledrivingcar_59_690.jpg +297 248 40 70 +743 285 45 74 +# 59--people--driving--car/59_peopledrivingcar_peopledrivingcar_59_1019.jpg +92 550 21 26 +121 559 18 23 +961 515 25 34 +300 500 20 35 +# 59--people--driving--car/59_peopledrivingcar_peopledrivingcar_59_85.jpg +406 170 170 236 +# 59--people--driving--car/59_peopledrivingcar_peopledrivingcar_59_117.jpg +204 118 154 242 +# 59--people--driving--car/59_peopledrivingcar_peopledrivingcar_59_202.jpg +692 152 182 236 +430 178 116 172 +# 59--people--driving--car/59_peopledrivingcar_peopledrivingcar_59_1038.jpg +211 36 627 960 +# 59--people--driving--car/59_peopledrivingcar_peopledrivingcar_59_357.jpg +416 66 86 134 +# 59--people--driving--car/59_peopledrivingcar_peopledrivingcar_59_659.jpg +788 328 23 30 +510 245 19 27 +299 275 21 28 +257 243 23 33 +188 254 24 30 +595 256 15 34 +# 59--people--driving--car/59_peopledrivingcar_peopledrivingcar_59_725.jpg +694 56 180 336 +# 59--people--driving--car/59_peopledrivingcar_peopledrivingcar_59_704.jpg +442 175 59 90 +478 40 43 59 +331 90 43 51 +163 80 56 76 +98 203 66 75 +626 17 52 72 +767 94 49 64 +860 133 70 90 +# 59--people--driving--car/59_peopledrivingcar_peopledrivingcar_59_172.jpg +651 219 15 17 +# 59--people--driving--car/59_peopledrivingcar_peopledrivingcar_59_283.jpg +202 82 156 212 +482 160 70 140 +# 59--people--driving--car/59_peopledrivingcar_peopledrivingcar_59_64.jpg +530 138 164 206 +# 59--people--driving--car/59_peopledrivingcar_peopledrivingcar_59_532.jpg +67 94 23 31 +2 76 22 30 +98 54 21 26 +131 51 24 29 +173 92 21 30 +235 51 21 32 +297 77 12 21 +208 109 20 20 +381 96 15 20 +502 88 14 19 +512 111 17 18 +537 218 30 40 +732 71 29 29 +# 59--people--driving--car/59_peopledrivingcar_peopledrivingcar_59_928.jpg +457 171 105 151 +# 6--Funeral/6_Funeral_Funeral_6_109.jpg +121 214 31 40 +195 215 30 36 +247 221 28 34 +269 224 21 32 +301 200 22 26 +302 227 26 31 +353 226 27 31 +367 206 22 29 +411 205 21 24 +422 238 24 29 +458 223 22 31 +488 217 17 22 +491 233 13 19 +501 231 14 19 +488 252 18 16 +520 231 17 25 +547 220 20 23 +569 214 15 21 +389 310 26 38 +452 299 25 29 +587 224 22 30 +562 237 14 15 +616 220 19 24 +645 219 21 24 +593 299 24 31 +572 322 21 27 +697 248 18 21 +715 252 17 20 +739 253 19 20 +777 230 19 20 +800 254 20 21 +831 240 20 18 +715 295 25 27 +659 296 21 26 +880 233 19 23 +914 221 21 23 +# 6--Funeral/6_Funeral_Funeral_6_987.jpg +655 332 101 82 +344 173 113 146 +220 46 78 92 +345 52 47 62 +1 88 55 70 +298 56 42 58 +# 6--Funeral/6_Funeral_Funeral_6_364.jpg +171 138 38 39 +470 125 31 42 +556 84 38 54 +631 139 25 27 +0 88 15 54 +# 6--Funeral/6_Funeral_Funeral_6_909.jpg +413 247 87 102 +804 259 93 111 +# 6--Funeral/6_Funeral_Funeral_6_618.jpg +530 1181 94 123 +313 1300 63 70 +543 2046 234 190 +259 2478 80 83 +79 2466 78 95 +640 3345 128 146 +419 3543 127 92 +920 4315 104 126 +732 478 121 151 +384 537 28 44 +354 489 45 61 +275 500 51 48 +206 491 43 45 +164 522 43 56 +73 499 40 58 +# 6--Funeral/6_Funeral_Funeral_6_483.jpg +237 255 416 599 +# 
6--Funeral/6_Funeral_Funeral_6_745.jpg +416 161 122 177 +101 409 113 133 +540 480 99 133 +790 533 106 124 +544 207 99 156 +767 381 90 117 +131 271 87 131 +# 6--Funeral/6_Funeral_Funeral_6_160.jpg +0 130 46 142 +34 75 57 101 +19 223 62 106 +90 192 43 60 +150 217 65 91 +139 179 38 47 +199 179 62 88 +261 208 44 66 +285 185 37 71 +360 221 30 44 +358 304 46 60 +398 263 12 17 +613 375 38 53 +720 158 38 57 +721 189 32 65 +751 168 44 77 +783 205 47 80 +871 249 65 90 +864 208 53 71 +832 125 48 69 +896 96 61 79 +997 55 27 139 +542 294 13 20 +531 294 14 20 +# 6--Funeral/6_Funeral_Funeral_6_870.jpg +55 150 69 78 +234 145 104 159 +515 240 107 139 +833 87 78 107 +# 6--Funeral/6_Funeral_Funeral_6_280.jpg +380 48 348 174 +# 6--Funeral/6_Funeral_Funeral_6_432.jpg +327 150 115 150 +824 203 112 153 +712 147 94 124 +# 6--Funeral/6_Funeral_Funeral_6_211.jpg +18 218 35 53 +129 255 27 39 +216 187 50 68 +375 112 54 81 +493 257 27 58 +543 102 54 83 +739 45 70 96 +889 200 31 48 +# 6--Funeral/6_Funeral_Funeral_6_444.jpg +398 230 187 230 +# 6--Funeral/6_Funeral_Funeral_6_252.jpg +336 189 124 141 +164 243 55 68 +23 273 63 73 +51 200 53 68 +38 129 38 55 +0 125 36 65 +97 155 44 61 +170 144 47 60 +214 189 45 53 +239 244 47 56 +644 127 140 163 +538 177 46 53 +581 187 45 64 +730 292 61 81 +805 214 62 77 +875 199 58 71 +917 252 58 64 +979 282 45 72 +978 202 46 72 +739 219 53 72 +# 6--Funeral/6_Funeral_Funeral_6_572.jpg +795 293 32 37 +736 317 34 41 +653 340 34 38 +611 303 34 38 +552 298 32 33 +545 353 32 35 +499 342 32 33 +417 344 29 33 +339 324 28 34 +353 371 30 30 +285 346 29 33 +229 357 30 32 +162 367 30 31 +# 6--Funeral/6_Funeral_Funeral_6_292.jpg +142 160 60 89 +360 172 61 90 +177 65 64 84 +365 72 52 79 +534 40 55 73 +747 18 53 72 +550 172 63 91 +798 163 66 89 +# 6--Funeral/6_Funeral_Funeral_6_696.jpg +457 173 63 80 +560 216 53 77 +676 121 64 81 +818 149 46 67 +897 135 44 53 +970 170 52 64 +562 71 64 82 +409 92 61 51 +329 177 27 33 +363 179 21 23 +266 153 27 30 +291 163 18 24 +221 174 20 26 +221 157 16 19 +159 196 34 38 +116 184 30 36 +95 185 27 32 +59 197 28 29 +22 204 25 27 +4 176 29 32 +91 158 20 26 +23 164 19 25 +60 175 22 25 +549 152 22 32 +807 162 16 25 +961 160 23 30 +386 145 30 40 +347 161 16 18 +# 6--Funeral/6_Funeral_Funeral_6_140.jpg +937 77 20 29 +881 47 21 29 +826 43 21 29 +773 15 14 18 +501 219 57 62 +892 251 11 14 +382 160 8 8 +192 1 125 137 +83 97 57 77 +188 644 70 33 +303 24 16 15 +# 6--Funeral/6_Funeral_Funeral_6_937.jpg +295 142 315 424 +# 6--Funeral/6_Funeral_Funeral_6_77.jpg +856 362 10 16 +918 358 9 12 +943 378 7 12 +953 381 6 12 +980 394 5 9 +904 403 6 7 +894 406 6 6 +826 373 9 10 +815 374 9 10 +798 377 11 13 +765 357 12 13 +793 376 8 11 +837 376 6 8 +849 359 9 13 +985 433 6 11 +992 432 5 13 +909 439 9 14 +892 444 8 11 +902 432 8 12 +937 445 9 12 +914 464 8 19 +926 499 14 23 +902 516 21 14 +925 537 13 20 +944 506 15 30 +973 547 16 19 +1005 568 17 31 +993 580 21 22 +740 380 10 10 +669 366 10 13 +604 354 20 22 +584 363 21 23 +550 361 17 20 +444 355 14 19 +427 365 15 22 +390 364 14 14 +404 354 12 12 +362 351 17 22 +334 342 23 21 +493 355 17 23 +299 373 17 21 +280 376 18 21 +219 368 15 19 +248 379 21 26 +198 370 16 20 +179 369 18 21 +111 369 18 22 +85 355 18 22 +34 375 20 28 +9 375 18 19 +976 520 14 31 +# 6--Funeral/6_Funeral_Funeral_6_759.jpg +899 287 37 49 +743 345 34 46 +849 273 30 37 +959 214 35 42 +950 145 33 43 +867 122 31 41 +777 213 32 40 +653 185 34 47 +498 256 40 50 +543 163 43 48 +442 161 34 43 +317 280 40 41 +334 181 32 36 +382 118 30 37 +267 164 37 43 +201 163 29 34 +179 153 25 35 +105 
274 42 49 +70 170 33 42 +50 145 24 34 +16 94 31 40 +129 93 30 39 +94 73 26 36 +171 68 26 40 +203 89 29 36 +263 112 28 42 +326 93 31 35 +363 55 34 40 +461 95 30 36 +433 71 27 40 +383 15 28 33 +365 12 26 30 +313 2 32 30 +195 1 27 36 +123 12 32 39 +256 91 23 32 +655 135 35 40 +718 138 31 37 +727 67 31 36 +691 51 28 39 +602 81 33 37 +634 22 26 38 +682 12 33 39 +564 11 34 41 +495 31 32 38 +451 5 27 29 +798 100 27 35 +802 31 27 34 +916 52 24 32 +957 65 27 30 +966 100 25 35 +918 0 31 15 +893 0 21 24 +785 15 26 34 +234 66 32 42 +62 311 27 38 +973 332 33 41 +272 69 25 36 +904 46 23 42 +771 13 25 39 +# 6--Funeral/6_Funeral_Funeral_6_177.jpg +229 233 23 42 +266 178 26 60 +357 190 11 12 +410 176 12 14 +416 251 35 56 +400 195 12 19 +501 188 14 19 +514 191 14 17 +549 206 33 65 +492 255 32 42 +540 251 25 44 +446 257 27 43 +624 187 29 39 +689 169 27 46 +686 180 15 27 +727 168 13 22 +740 184 10 11 +781 154 37 46 +827 161 20 22 +846 185 15 21 +875 152 33 41 +897 187 34 41 +912 154 27 41 +961 160 25 31 +1013 161 11 36 +387 238 19 27 +378 267 13 17 +# 6--Funeral/6_Funeral_Funeral_6_760.jpg +208 133 103 119 +630 158 23 25 +758 165 15 25 +899 156 20 23 +924 163 14 17 +962 161 16 18 +666 361 28 31 +653 357 17 19 +602 356 10 11 +577 357 9 10 +673 427 19 22 +413 322 25 31 +# 6--Funeral/6_Funeral_Funeral_6_690.jpg +397 178 173 249 +# 6--Funeral/6_Funeral_Funeral_6_128.jpg +860 72 48 53 +794 78 34 42 +789 77 25 32 +750 125 24 31 +452 88 41 43 +485 84 25 46 +533 83 27 35 +635 100 32 41 +113 105 38 46 +172 70 37 50 +215 58 31 42 +287 107 34 38 +# 6--Funeral/6_Funeral_Funeral_6_676.jpg +616 258 51 76 +497 250 36 49 +558 144 53 69 +318 273 42 29 +732 37 28 40 +853 45 34 42 +906 237 47 59 +662 182 42 50 +645 57 32 51 +254 390 57 44 +# 6--Funeral/6_Funeral_Funeral_6_941.jpg +384 164 230 340 +# 6--Funeral/6_Funeral_Funeral_6_610.jpg +360 386 124 96 +368 292 94 72 +# 6--Funeral/6_Funeral_Funeral_6_1029.jpg +272 123 110 138 +418 206 37 59 +145 164 39 42 +80 146 29 44 +41 176 38 50 +0 201 30 35 +592 114 109 129 +837 103 55 67 +981 100 43 78 +363 169 34 62 +232 208 30 54 +# 6--Funeral/6_Funeral_Funeral_6_485.jpg +460 176 92 140 +910 136 70 104 +# 6--Funeral/6_Funeral_Funeral_6_1005.jpg +497 184 99 133 +# 6--Funeral/6_Funeral_Funeral_6_779.jpg +307 151 391 482 +# 6--Funeral/6_Funeral_Funeral_6_733.jpg +826 138 146 159 +657 148 83 102 +136 200 79 91 +333 302 17 18 +300 305 13 16 +260 311 10 13 +213 292 29 35 +602 279 12 18 +25 281 27 38 +391 308 6 11 +# 6--Funeral/6_Funeral_Funeral_6_315.jpg +367 164 121 182 +# 6--Funeral/6_Funeral_Funeral_6_790.jpg +296 73 384 533 +# 6--Funeral/6_Funeral_Funeral_6_461.jpg +477 173 197 259 +# 6--Funeral/6_Funeral_Funeral_6_627.jpg +140 176 150 218 +366 150 162 224 +762 98 148 222 +# 6--Funeral/6_Funeral_Funeral_6_241.jpg +465 204 104 135 +845 402 72 88 +# 6--Funeral/6_Funeral_Funeral_6_1006.jpg +644 38 62 94 +440 180 74 84 +32 4 62 66 +# 6--Funeral/6_Funeral_Funeral_6_861.jpg +382 273 261 379 +# 6--Funeral/6_Funeral_Funeral_6_531.jpg +44 339 21 23 +80 333 22 23 +134 324 18 23 +162 320 17 18 +228 328 13 14 +198 317 11 14 +247 307 12 17 +261 291 12 17 +272 285 9 14 +324 278 10 14 +339 280 8 13 +353 279 8 13 +177 321 11 17 +383 269 10 12 +412 266 9 13 +435 252 8 11 +447 249 8 10 +524 236 9 10 +479 242 8 11 +538 234 9 11 +404 263 8 9 +420 261 9 11 +898 212 11 13 +1002 181 14 17 +488 250 8 10 +# 6--Funeral/6_Funeral_Funeral_6_537.jpg +134 326 258 309 +597 484 243 404 +# 61--Street_Battle/61_Street_Battle_streetfight_61_276.jpg +197 63 26 34 +255 48 25 33 +313 25 27 35 +345 62 24 29 +390 36 28 30 +440 
52 25 30 +467 43 21 24 +488 49 18 22 +524 37 25 33 +583 35 23 29 +635 39 26 31 +667 40 25 31 +779 44 25 34 +810 22 23 31 +938 30 23 31 +963 34 19 26 +65 125 27 32 +8 50 25 31 +51 78 24 29 +125 46 23 26 +153 59 24 33 +174 45 23 29 +# 61--Street_Battle/61_Street_Battle_streetfight_61_179.jpg +304 80 132 274 +466 60 166 236 +# 61--Street_Battle/61_Street_Battle_streetfight_61_907.jpg +252 278 154 216 +533 172 145 195 +# 61--Street_Battle/61_Street_Battle_streetfight_61_606.jpg +200 70 68 110 +344 118 72 98 +496 74 74 108 +640 120 84 102 +772 82 78 108 +# 61--Street_Battle/61_Street_Battle_streetfight_61_123.jpg +766 114 126 208 +192 124 122 156 +# 61--Street_Battle/61_Street_Battle_streetfight_61_640.jpg +678 102 140 194 +# 61--Street_Battle/61_Street_Battle_streetfight_61_936.jpg +496 218 38 54 +671 325 30 40 +706 327 32 37 +124 265 41 50 +349 333 41 39 +832 380 34 42 +963 259 41 50 +# 61--Street_Battle/61_Street_Battle_streetfight_61_767.jpg +286 106 84 166 +582 114 94 154 +# 61--Street_Battle/61_Street_Battle_streetfight_61_282.jpg +57 127 130 212 +246 105 164 218 +433 88 153 221 +628 107 158 226 +798 88 161 229 +266 784 187 294 +600 738 206 289 +# 61--Street_Battle/61_Street_Battle_streetfight_61_50.jpg +451 152 42 54 +# 61--Street_Battle/61_Street_Battle_streetfight_61_521.jpg +142 244 82 78 +# 61--Street_Battle/61_Street_Battle_streetfight_61_22.jpg +174 44 126 176 +# 61--Street_Battle/61_Street_Battle_streetfight_61_162.jpg +39 440 58 40 +533 253 60 83 +631 269 40 46 +776 239 33 59 +819 127 93 129 +307 189 7 8 +243 410 9 10 +309 331 5 7 +314 330 4 6 +227 110 11 13 +# 61--Street_Battle/61_Street_Battle_streetfight_61_432.jpg +101 154 26 35 +177 148 27 36 +264 175 17 20 +271 212 26 34 +212 211 28 39 +147 230 38 53 +144 296 32 48 +320 150 16 23 +417 158 26 48 +498 294 28 38 +520 206 22 28 +546 236 25 28 +654 202 13 18 +634 215 16 20 +588 217 16 26 +623 270 22 30 +647 320 23 35 +734 230 8 16 +763 246 13 21 +740 322 24 36 +420 530 29 43 +499 502 31 41 +639 515 31 45 +797 262 12 16 +824 269 10 15 +846 273 16 22 +834 287 15 20 +794 286 17 27 +848 323 22 30 +886 322 23 30 +859 271 10 15 +881 287 9 11 +901 283 15 21 +805 415 28 41 +850 399 23 27 +918 396 25 31 +934 269 10 15 +966 374 27 34 +712 397 30 40 +818 288 13 18 +# 61--Street_Battle/61_Street_Battle_streetfight_61_211.jpg +128 546 39 50 +36 510 41 52 +174 532 36 43 +272 523 33 45 +327 543 28 37 +882 618 36 51 +907 564 35 41 +842 172 79 145 +988 579 35 45 +977 625 37 47 +224 531 29 36 +196 533 26 41 +949 580 23 33 +970 560 20 26 +960 532 16 24 +254 939 117 92 +# 61--Street_Battle/61_Street_Battle_streetfight_61_344.jpg +533 257 198 275 +# 61--Street_Battle/61_Street_Battle_streetfight_61_375.jpg +366 168 306 420 +# 61--Street_Battle/61_Street_Battle_streetfight_61_155.jpg +317 256 40 59 +709 284 49 48 +# 61--Street_Battle/61_Street_Battle_streetfight_61_395.jpg +496 191 67 89 +273 472 35 50 +500 801 67 85 +719 591 41 36 +# 61--Street_Battle/61_Street_Battle_streetfight_61_703.jpg +406 134 192 296 +# 61--Street_Battle/61_Street_Battle_streetfight_61_815.jpg +332 228 72 66 +400 318 120 100 +796 270 62 62 +# 61--Street_Battle/61_Street_Battle_streetfight_61_212.jpg +72 222 137 145 +678 21 98 135 +46 451 42 53 +142 407 31 48 +366 213 85 157 +# 61--Street_Battle/61_Street_Battle_streetfight_61_913.jpg +207 138 12 15 +175 129 11 16 +143 199 15 16 +107 249 14 13 +163 257 12 16 +127 307 40 52 +233 313 27 39 +143 460 11 16 +160 473 9 10 +177 480 8 9 +190 478 8 10 +227 478 10 9 +269 462 9 11 +245 472 10 10 +875 131 127 139 +966 592 21 23 +927 584 
14 15 +869 565 25 24 +200 198 10 13 +# 61--Street_Battle/61_Street_Battle_streetfight_61_12.jpg +7 145 81 95 +91 223 38 49 +156 245 31 35 +222 258 33 39 +408 201 71 92 +263 280 25 29 +605 157 66 90 +738 109 67 105 +497 217 32 37 +# 61--Street_Battle/61_Street_Battle_streetfight_61_407.jpg +358 130 158 244 +# 61--Street_Battle/61_Street_Battle_streetfight_61_430.jpg +16 449 15 17 +37 453 17 21 +53 452 13 21 +65 445 15 18 +93 463 14 18 +138 449 18 22 +163 458 17 20 +136 416 18 15 +139 378 12 15 +199 460 13 20 +234 461 18 27 +217 451 12 18 +272 460 11 14 +274 534 10 13 +290 451 16 25 +311 465 8 15 +372 460 13 32 +380 481 10 15 +386 460 10 17 +428 475 18 24 +453 449 18 24 +476 452 14 18 +519 459 21 21 +551 468 16 20 +596 443 11 17 +625 462 18 26 +570 467 23 40 +691 454 17 20 +709 454 16 18 +639 453 14 27 +739 462 17 17 +756 446 18 20 +800 458 10 16 +841 458 18 20 +862 452 13 17 +862 485 19 23 +897 487 15 22 +911 390 16 17 +906 428 17 24 +921 429 20 28 +952 434 21 27 +977 450 14 15 +1011 451 12 17 +917 580 23 29 +747 25 18 18 +# 61--Street_Battle/61_Street_Battle_streetfight_61_4.jpg +386 53 57 75 +814 0 41 37 +676 190 56 41 +854 553 44 57 +793 0 35 28 +# 61--Street_Battle/61_Street_Battle_streetfight_61_558.jpg +167 258 68 72 +143 367 210 262 +705 537 31 42 +614 809 25 24 +# 61--Street_Battle/61_Street_Battle_streetfight_61_158.jpg +614 184 91 95 +749 164 26 31 +873 187 31 63 +351 382 59 42 +395 431 81 65 +306 280 36 26 +# 61--Street_Battle/61_Street_Battle_streetfight_61_665.jpg +690 142 43 55 +631 170 38 50 +518 164 36 47 +200 155 38 52 +271 151 35 45 +358 162 34 43 +412 146 34 46 +# 61--Street_Battle/61_Street_Battle_streetfight_61_350.jpg +706 118 96 128 +704 378 104 60 +# 61--Street_Battle/61_Street_Battle_streetfight_61_546.jpg +99 110 14 28 +56 135 21 26 +427 151 24 35 +529 153 19 27 +# 7--Cheering/7_Cheering_Cheering_7_134.jpg +6 34 68 144 +162 72 78 118 +486 146 60 80 +248 222 90 148 +472 236 86 124 +724 256 84 112 +# 7--Cheering/7_Cheering_Cheering_7_655.jpg +393 760 225 372 +# 7--Cheering/7_Cheering_Cheering_7_313.jpg +5 90 21 31 +29 109 32 52 +161 121 50 66 +232 142 41 57 +343 154 39 59 +468 239 34 69 +652 148 35 54 +746 131 40 69 +951 181 39 67 +972 140 34 71 +842 0 21 16 +# 7--Cheering/7_Cheering_Cheering_7_542.jpg +250 280 216 330 +546 122 256 340 +# 7--Cheering/7_Cheering_Cheering_7_404.jpg +305 360 38 37 +474 210 30 39 +584 335 39 53 +720 462 23 32 +131 469 22 27 +523 612 30 33 +395 615 29 26 +470 612 21 33 +# 7--Cheering/7_Cheering_Cheering_7_413.jpg +13 344 18 23 +2 341 13 18 +36 332 22 30 +67 330 23 28 +43 379 36 41 +166 336 24 30 +153 382 34 53 +202 331 12 18 +249 333 11 21 +268 340 31 40 +322 348 19 27 +313 339 13 23 +350 284 13 20 +345 335 15 21 +382 339 17 19 +339 419 40 56 +428 360 31 43 +494 393 32 41 +480 433 67 84 +544 345 29 43 +616 369 40 51 +1004 374 20 44 +924 374 61 77 +840 373 41 49 +733 359 31 41 +746 382 48 57 +727 448 87 112 +# 7--Cheering/7_Cheering_Cheering_7_334.jpg +719 54 10 14 +749 47 13 13 +839 5 12 17 +1014 145 9 19 +886 163 9 13 +803 143 44 59 +679 170 46 57 +167 135 46 60 +298 196 46 63 +388 163 21 32 +463 121 42 60 +569 177 43 56 +613 257 43 60 +498 241 43 64 +378 238 46 65 +191 277 51 66 +293 302 42 61 +398 350 46 60 +509 339 47 61 +644 371 43 66 +708 284 41 65 +780 281 41 63 +674 74 10 13 +360 18 8 9 +437 15 12 11 +392 11 11 13 +# 7--Cheering/7_Cheering_Cheering_7_29.jpg +45 94 11 13 +57 83 12 11 +71 87 12 15 +38 169 12 16 +110 132 14 13 +79 153 10 17 +53 132 14 14 +163 144 14 17 +157 112 13 16 +137 112 16 15 +111 97 12 18 +130 83 16 19 +158 98 
11 13 +172 107 16 20 +193 120 13 17 +223 142 11 14 +218 120 15 15 +206 92 13 19 +173 78 13 17 +186 75 5 5 +243 45 14 14 +247 102 14 20 +239 97 12 14 +269 105 16 19 +266 96 14 15 +278 86 12 13 +284 98 11 20 +309 108 14 17 +308 86 13 16 +338 168 14 18 +343 131 10 13 +76 76 11 9 +328 104 13 16 +332 79 12 14 +358 103 12 17 +353 92 13 13 +365 83 13 11 +380 113 13 19 +380 94 10 14 +392 90 10 14 +436 125 14 20 +443 169 16 19 +410 201 14 18 +389 202 13 13 +460 98 13 17 +490 204 15 16 +492 186 13 16 +509 173 11 17 +532 138 14 14 +522 107 16 19 +500 75 13 15 +481 93 11 16 +499 108 13 16 +555 87 12 14 +560 101 11 18 +559 158 11 15 +559 182 14 15 +569 217 16 21 +590 94 15 19 +583 105 10 11 +577 108 8 11 +583 119 12 15 +591 123 13 20 +549 107 8 10 +606 81 13 16 +610 99 11 15 +625 115 14 17 +630 139 14 21 +617 175 15 22 +680 175 15 19 +683 141 16 20 +671 131 14 18 +677 117 14 14 +662 54 14 18 +720 118 16 19 +727 164 13 19 +762 126 13 14 +738 104 15 20 +777 103 17 22 +757 103 16 15 +784 89 14 17 +803 97 15 17 +817 96 15 18 +836 124 18 14 +790 212 17 19 +824 232 15 20 +607 208 14 20 +609 240 12 17 +841 96 15 15 +860 95 14 19 +886 112 14 20 +884 84 13 14 +906 93 14 15 +902 81 14 13 +920 95 15 14 +909 125 16 20 +936 115 15 18 +947 92 11 16 +957 92 10 15 +966 87 9 21 +974 89 12 20 +988 98 9 14 +1004 83 12 20 +984 122 9 14 +997 187 17 16 +976 194 14 16 +992 225 13 16 +890 215 15 19 +924 220 20 18 +895 204 16 16 +998 102 12 15 +0 199 4 21 +97 215 14 16 +33 261 16 19 +68 277 19 22 +45 288 10 18 +18 295 12 17 +0 320 5 17 +41 359 17 18 +45 352 16 15 +66 335 14 13 +96 312 13 15 +91 294 14 17 +123 268 10 15 +156 269 14 16 +160 291 12 16 +97 351 14 14 +98 372 16 16 +74 402 15 16 +116 401 17 22 +34 445 16 14 +1 384 16 17 +64 439 15 12 +95 423 17 20 +165 378 13 13 +193 399 15 19 +224 397 17 21 +202 440 17 22 +262 432 16 17 +301 435 19 16 +347 436 18 17 +351 401 20 16 +338 398 14 20 +360 365 16 16 +193 273 14 15 +212 239 14 18 +254 230 13 16 +247 286 18 19 +237 293 17 17 +279 251 13 18 +302 207 15 17 +311 263 16 17 +325 273 18 19 +358 245 14 19 +369 296 16 17 +405 294 13 17 +402 327 14 15 +370 331 15 16 +350 339 15 17 +431 442 17 18 +421 427 13 21 +421 311 11 12 +479 309 14 16 +538 241 13 20 +479 334 13 18 +465 360 15 22 +453 394 10 12 +452 385 10 13 +498 447 15 15 +546 354 14 20 +612 359 13 19 +632 367 13 18 +612 418 16 17 +631 421 13 17 +653 468 13 16 +677 426 16 16 +686 276 16 13 +714 295 17 19 +707 319 14 12 +758 258 11 12 +767 271 12 13 +805 277 16 24 +736 319 15 26 +724 359 15 15 +810 369 16 19 +777 457 16 16 +725 452 13 19 +954 253 14 18 +880 250 14 16 +842 315 13 18 +838 342 17 18 +897 350 18 23 +976 336 16 24 +1004 334 13 19 +962 300 13 19 +940 295 14 19 +862 443 12 19 +895 444 10 18 +936 470 16 16 +1016 427 8 16 +186 85 13 16 +513 282 14 15 +650 245 11 15 +412 99 12 13 +673 99 15 14 +766 493 25 17 +614 447 15 17 +454 489 12 15 +1 115 13 15 +9 100 14 18 +25 94 12 16 +36 91 12 18 +21 156 14 17 +279 145 14 13 +222 91 11 15 +50 383 15 14 +820 270 13 16 +845 239 12 16 +536 482 15 17 +# 7--Cheering/7_Cheering_Cheering_7_408.jpg +212 194 72 82 +346 202 66 84 +428 248 58 92 +592 250 60 82 +624 364 66 88 +780 276 60 88 +# 7--Cheering/7_Cheering_Cheering_7_530.jpg +11 107 20 21 +81 135 13 16 +140 143 13 16 +259 175 9 14 +303 164 13 15 +360 196 7 9 +364 183 8 9 +402 30 27 54 +523 189 9 13 +723 148 21 28 +549 194 9 10 +590 219 5 7 +958 210 13 17 +923 243 9 12 +913 233 5 8 +620 108 20 50 +# 7--Cheering/7_Cheering_Cheering_7_195.jpg +454 468 55 58 +328 522 49 66 +182 596 73 64 +601 477 56 75 +677 618 49 57 +591 853 52 62 
+234 895 72 58 +180 777 52 62 +428 975 73 71 +# 7--Cheering/7_Cheering_Cheering_7_426.jpg +0 0 0 0 +# 7--Cheering/7_Cheering_Cheering_7_500.jpg +442 140 312 456 +# 7--Cheering/7_Cheering_Cheering_7_558.jpg +448 675 157 192 +651 520 165 203 +# 7--Cheering/7_Cheering_Cheering_7_373.jpg +81 967 126 240 +399 871 135 198 +787 859 135 183 +# 7--Cheering/7_Cheering_Cheering_7_870.jpg +108 233 34 47 +231 254 37 45 +380 203 40 56 +649 222 46 63 +889 204 56 67 +# 7--Cheering/7_Cheering_Cheering_7_462.jpg +819 362 36 51 +197 369 30 36 +273 365 39 38 +59 421 29 36 +550 132 67 97 +0 545 18 40 +68 553 24 49 +361 589 31 71 +441 592 31 59 +721 336 25 43 +# 7--Cheering/7_Cheering_Cheering_7_427.jpg +126 68 60 94 +472 72 60 88 +832 60 68 96 +# 7--Cheering/7_Cheering_Cheering_7_293.jpg +549 511 69 60 +# 7--Cheering/7_Cheering_Cheering_7_138.jpg +56 175 43 57 +104 150 38 38 +196 118 29 37 +214 139 43 48 +295 132 41 51 +340 138 48 63 +391 132 38 58 +470 163 48 74 +491 104 50 57 +535 87 36 62 +601 199 49 61 +785 119 47 76 +835 119 55 82 +867 88 47 62 +1009 115 15 59 +# 7--Cheering/7_Cheering_Cheering_7_473.jpg +470 336 75 123 +815 118 66 98 +# 7--Cheering/7_Cheering_Cheering_7_209.jpg +198 204 78 104 +386 240 72 106 +586 268 64 96 +708 156 72 104 +# 7--Cheering/7_Cheering_Cheering_7_536.jpg +0 232 22 30 +519 339 39 58 +742 273 50 86 +851 282 30 41 +649 266 32 38 +241 232 31 35 +214 262 36 32 +314 223 31 30 +897 244 28 27 +1001 239 22 42 +1000 396 23 34 +328 308 28 34 +461 271 27 35 +205 226 26 30 +175 256 30 35 +44 228 32 34 +330 260 30 33 +96 241 27 30 +730 226 33 43 +127 320 20 28 +434 248 21 27 +521 265 33 36 +# 7--Cheering/7_Cheering_Cheering_7_391.jpg +60 412 32 67 +91 376 30 49 +217 350 23 27 +195 420 59 84 +357 347 24 39 +303 348 23 40 +309 325 20 26 +422 312 11 24 +478 334 18 28 +464 309 13 21 +708 175 18 22 +703 286 12 22 +540 326 14 32 +344 590 68 168 +574 381 12 50 +# 7--Cheering/7_Cheering_Cheering_7_884.jpg +164 366 30 34 +191 434 31 41 +323 185 70 99 +469 119 79 125 +827 196 57 82 +993 343 24 31 +# 7--Cheering/7_Cheering_Cheering_7_57.jpg +32 161 37 52 +0 299 49 65 +107 322 22 43 +133 300 44 51 +245 345 41 51 +330 310 41 54 +390 221 38 45 +432 336 44 60 +556 346 43 51 +663 313 42 45 +735 144 43 50 +794 284 53 55 +747 337 39 56 +1001 334 23 71 +955 337 29 50 +958 379 29 52 +# 7--Cheering/7_Cheering_Cheering_7_469.jpg +575 340 90 105 +# 7--Cheering/7_Cheering_Cheering_7_239.jpg +344 182 236 306 +# 7--Cheering/7_Cheering_Cheering_7_835.jpg +345 165 330 405 +# 7--Cheering/7_Cheering_Cheering_7_724.jpg +316 185 30 21 +380 136 53 61 +# 7--Cheering/7_Cheering_Cheering_7_692.jpg +264 576 67 93 +491 531 75 96 +# 7--Cheering/7_Cheering_Cheering_7_687.jpg +571 86 53 79 +679 379 15 29 +819 214 32 41 +895 442 12 14 +# 7--Cheering/7_Cheering_Cheering_7_631.jpg +386 223 142 187 +702 331 90 148 +# 7--Cheering/7_Cheering_Cheering_7_386.jpg +978 484 46 67 +764 291 28 43 +765 436 31 37 +666 442 31 33 +789 489 36 57 +802 522 39 58 +565 423 12 16 +559 466 32 51 +411 399 11 15 +394 376 10 13 +393 401 11 16 +356 429 13 17 +308 407 13 21 +286 400 13 19 +329 395 12 18 +293 433 19 28 +213 546 45 54 +56 507 53 74 +222 429 17 25 +258 429 11 18 +# 7--Cheering/7_Cheering_Cheering_7_345.jpg +600 192 25 35 +2 309 19 34 +31 372 13 37 +47 393 23 31 +159 411 25 31 +88 374 26 28 +202 388 22 25 +238 432 17 26 +247 421 18 29 +228 374 26 37 +251 368 28 34 +265 392 22 31 +340 377 13 31 +353 401 24 30 +466 389 21 35 +452 158 26 39 +414 199 33 26 +680 235 13 27 +610 326 20 28 +553 351 25 31 +578 389 29 31 +679 424 21 28 +756 374 25 28 
+794 389 24 37 +812 429 18 31 +860 403 23 30 +858 354 23 31 +872 371 26 33 +900 356 28 32 +940 310 21 29 +208 408 14 22 +215 408 14 18 +# 7--Cheering/7_Cheering_Cheering_7_125.jpg +216 90 90 134 +322 176 86 120 +454 134 70 112 +534 190 70 104 +626 148 68 120 +# 7--Cheering/7_Cheering_Cheering_7_118.jpg +30 282 96 126 +296 232 94 116 +416 234 88 120 +570 248 88 130 +780 224 92 132 +882 294 104 146 +# 7--Cheering/7_Cheering_Cheering_7_60.jpg +256 160 88 110 +438 160 70 106 +598 20 74 118 +702 152 88 122 +# 7--Cheering/7_Cheering_Cheering_7_802.jpg +346 316 214 298 +# 7--Cheering/7_Cheering_Cheering_7_171.jpg +106 91 14 20 +68 139 16 24 +32 161 13 20 +19 133 13 22 +99 162 13 18 +106 150 11 16 +132 144 12 19 +135 171 13 18 +157 151 14 15 +159 92 12 22 +162 30 13 20 +0 0 16 0 +118 5 13 17 +156 10 14 16 +175 8 13 20 +202 36 11 17 +396 22 7 13 +386 63 14 21 +362 53 8 14 +359 68 10 15 +327 79 12 22 +314 94 11 18 +345 95 14 17 +389 111 10 13 +403 107 11 18 +221 10 15 18 +223 59 12 18 +157 203 12 23 +127 223 11 19 +264 184 12 20 +249 27 13 17 +240 41 12 13 +241 83 12 13 +276 89 10 16 +291 39 14 20 +291 76 10 15 +330 54 12 16 +298 22 9 10 +332 26 10 16 +365 31 8 18 +421 97 13 19 +425 110 13 17 +420 148 14 18 +442 144 12 17 +463 130 11 18 +468 121 9 15 +488 126 16 16 +515 106 11 14 +548 102 13 15 +551 124 13 21 +570 133 11 18 +571 105 10 16 +580 113 13 18 +636 107 12 15 +645 90 10 18 +596 136 13 18 +619 154 14 19 +646 159 10 21 +633 176 7 18 +636 202 14 19 +587 160 12 18 +541 165 11 17 +386 122 15 19 +401 154 11 19 +646 51 8 13 +624 67 11 17 +618 87 11 16 +605 84 12 18 +589 68 10 17 +596 47 10 11 +573 47 10 17 +562 58 14 16 +529 45 11 10 +526 57 8 9 +525 69 12 11 +515 33 10 14 +496 35 9 13 +471 27 12 14 +451 26 10 15 +420 21 10 13 +421 40 10 13 +423 63 14 21 +445 56 11 18 +454 47 11 16 +445 78 12 16 +477 48 12 15 +470 63 10 18 +490 67 12 16 +479 83 11 15 +484 101 14 16 +461 100 12 17 +435 95 9 15 +496 216 14 16 +506 183 15 21 +499 159 15 19 +510 129 12 16 +499 238 13 18 +511 259 14 20 +628 229 13 15 +668 190 11 13 +656 159 11 22 +648 244 16 22 +140 1 12 11 +186 0 15 14 +252 0 12 9 +301 0 14 15 +328 1 13 19 +366 13 12 14 +606 3 11 13 +671 61 9 13 +687 67 10 14 +681 80 11 16 +689 91 13 18 +661 85 11 19 +647 72 11 17 +699 117 12 14 +737 85 9 16 +743 120 11 17 +762 107 10 14 +760 118 12 15 +667 144 11 14 +726 192 11 16 +760 207 0 1 +860 196 11 16 +878 217 12 17 +884 224 14 17 +911 235 12 16 +905 250 13 16 +929 261 10 14 +942 271 12 18 +957 274 17 20 +961 259 13 21 +976 266 13 17 +997 264 9 16 +977 299 10 15 +990 307 13 17 +881 294 11 18 +856 288 9 18 +723 213 13 17 +748 240 9 13 +754 258 12 20 +935 328 10 14 +894 366 15 20 +857 382 10 19 +985 374 14 16 +844 419 26 26 +854 322 11 20 +1018 408 6 13 +1009 269 11 11 +336 46 9 11 +387 32 8 13 +59 59 14 19 +241 60 15 15 +53 20 13 18 +590 119 17 13 +721 109 11 14 +720 124 13 11 +902 326 12 15 +955 328 12 16 +862 227 9 15 +855 212 10 14 +926 237 11 14 +877 171 10 14 +857 175 8 12 +873 195 11 13 +995 286 10 16 +881 250 12 11 +936 219 12 12 +953 243 11 14 +974 247 13 12 +998 245 11 14 +755 210 10 17 +696 136 13 15 +717 138 11 10 +741 138 12 14 +727 159 13 12 +754 176 9 13 +775 165 12 15 +793 169 11 15 +785 141 10 17 +800 147 10 16 +813 162 12 17 +811 182 10 13 +797 199 12 15 +783 192 12 13 +757 164 8 9 +809 135 10 12 +657 212 11 16 +699 196 12 12 +715 236 11 14 +701 235 10 13 +709 258 13 13 +691 250 12 18 +699 280 12 15 +729 287 12 16 +722 308 15 15 +750 280 13 15 +754 318 11 16 +782 316 12 16 +616 187 12 15 +595 183 9 15 +602 222 14 17 +611 239 13 15 +604 262 14 
17 +639 300 15 17 +619 296 14 18 +635 331 12 18 +586 299 13 15 +555 292 15 21 +580 252 12 16 +554 248 13 17 +548 263 16 19 +555 221 10 14 +535 189 13 16 +523 209 15 18 +533 245 12 17 +403 209 11 12 +414 189 12 16 +440 185 13 11 +472 181 14 19 +474 204 13 19 +450 216 14 12 +462 230 15 17 +437 284 14 20 +426 312 15 21 +390 252 12 18 +384 333 11 15 +394 322 8 13 +340 319 12 18 +351 271 11 18 +350 238 12 19 +341 216 14 20 +303 218 16 18 +321 240 13 22 +292 269 15 22 +272 307 14 21 +382 408 20 22 +22 81 13 16 +57 85 14 17 +88 90 13 20 +105 69 13 15 +105 119 11 15 +150 117 14 16 +72 418 18 25 +27 392 12 18 +5 14 12 11 +1 83 8 17 +124 418 18 24 +# 7--Cheering/7_Cheering_Cheering_7_739.jpg +284 280 462 546 +# 8--Election_Campain/8_Election_Campain_Election_Campaign_8_157.jpg +323 464 24 33 +270 443 18 21 +164 425 34 38 +219 457 21 24 +4 471 24 32 +333 435 12 12 +559 465 13 15 +607 460 16 19 +441 461 21 27 +506 472 13 12 +459 467 12 13 +724 416 18 23 +808 464 12 16 +818 454 14 13 +750 449 4 15 +649 324 17 21 +1004 342 18 24 +939 448 10 14 +907 449 8 12 +1004 447 12 14 +1019 425 4 5 +967 453 7 9 +955 294 3 4 +981 299 4 5 +965 291 3 4 +862 448 10 16 +956 452 7 10 +# 8--Election_Campain/8_Election_Campain_Election_Campaign_8_412.jpg +661 126 25 29 +124 39 10 12 +185 41 9 11 +859 456 42 52 +606 237 20 25 +980 324 35 38 +1008 289 16 39 +977 260 40 39 +863 350 45 38 +927 322 45 42 +863 265 41 36 +840 324 43 39 +799 361 42 38 +763 432 42 48 +727 501 49 48 +693 457 58 53 +681 481 43 46 +708 422 56 41 +780 318 42 46 +774 355 39 41 +765 298 39 42 +781 264 39 39 +727 273 40 42 +562 555 59 62 +522 583 54 50 +476 582 62 50 +445 627 22 31 +453 647 24 26 +487 478 53 56 +638 428 41 43 +619 393 44 40 +517 420 56 56 +710 334 38 50 +684 327 42 46 +646 300 40 49 +655 273 35 39 +620 255 35 33 +615 285 48 43 +554 376 39 49 +563 451 45 63 +529 259 27 24 +486 261 44 36 +471 313 43 49 +477 393 40 50 +424 421 49 49 +363 496 57 51 +301 570 58 57 +270 547 53 56 +270 485 40 49 +328 402 57 48 +405 363 41 46 +371 319 36 40 +456 254 36 38 +388 266 43 34 +238 548 47 50 +147 552 54 54 +102 557 37 48 +79 540 47 44 +221 427 39 54 +176 386 34 40 +281 310 45 39 +353 238 39 35 +271 259 32 38 +80 464 43 44 +8 510 51 51 +24 462 32 35 +101 356 53 37 +147 323 33 35 +137 310 35 32 +90 328 39 39 +162 246 41 39 +209 267 33 41 +37 337 26 29 +6 263 30 40 +103 264 29 35 +961 211 45 35 +1007 182 17 34 +942 187 28 31 +883 219 28 32 +911 182 27 27 +904 177 21 23 +947 113 18 26 +936 83 19 24 +959 95 20 22 +872 70 13 22 +849 75 18 24 +924 63 18 21 +893 88 16 19 +859 45 16 16 +807 142 30 31 +801 234 32 31 +755 215 32 34 +667 241 29 35 +661 272 32 33 +699 207 34 32 +657 211 26 22 +668 181 23 28 +712 171 19 27 +778 156 23 21 +779 128 18 23 +802 97 19 25 +876 101 21 28 +852 93 24 22 +972 0 26 13 +841 35 18 21 +747 25 16 19 +760 101 16 21 +738 112 18 30 +699 118 21 17 +686 143 20 20 +714 107 22 18 +687 82 22 24 +691 25 15 17 +721 21 18 16 +820 48 11 11 +852 13 13 20 +569 220 30 29 +591 199 32 32 +560 188 31 28 +599 164 26 29 +624 144 26 30 +550 154 29 28 +579 140 22 27 +542 139 26 30 +554 113 23 27 +635 116 21 23 +607 125 19 30 +609 99 21 24 +642 83 19 17 +580 84 16 19 +609 77 18 20 +623 62 15 18 +638 67 17 19 +584 59 16 21 +538 116 20 24 +486 137 32 31 +491 219 35 33 +515 179 34 34 +507 184 27 30 +498 189 25 28 +526 31 17 20 +519 82 19 18 +461 27 25 24 +467 86 16 17 +447 101 18 20 +442 239 30 32 +422 238 27 31 +428 208 22 31 +444 200 28 27 +344 179 31 35 +269 220 28 30 +228 231 33 34 +166 216 36 40 +249 172 32 34 +375 184 28 28 +430 133 23 31 +385 150 27 
27 +360 146 25 25 +333 169 28 23 +333 157 26 31 +320 133 28 28 +354 121 27 25 +394 113 20 26 +365 96 23 25 +383 96 20 25 +419 68 13 14 +368 66 24 24 +445 64 18 17 +370 13 15 16 +368 41 18 22 +403 35 20 17 +408 11 14 15 +400 0 13 12 +316 89 23 19 +280 99 21 21 +251 154 24 23 +241 125 25 27 +292 34 20 20 +283 37 17 20 +302 13 15 16 +277 16 16 17 +271 14 15 19 +243 32 19 16 +231 70 17 20 +219 112 17 20 +116 225 28 31 +103 211 27 31 +163 170 23 28 +156 200 22 28 +78 125 31 29 +146 95 23 29 +0 148 22 22 +0 108 16 27 +117 84 23 28 +113 0 15 15 +51 0 13 7 +37 94 9 9 +1017 40 7 45 +127 15 17 21 +137 66 19 20 +0 55 5 14 +406 216 22 31 +248 196 36 35 +617 520 54 54 +951 630 44 53 +914 601 53 51 +827 628 62 52 +774 600 55 55 +798 540 51 44 +970 540 37 39 +996 374 26 31 +965 494 43 52 +906 292 26 37 +# 8--Election_Campain/8_Election_Campain_Election_Campaign_8_433.jpg +839 315 49 69 +640 242 66 78 +519 296 59 59 +169 300 66 58 +# 8--Election_Campain/8_Election_Campain_Election_Campaign_8_173.jpg +242 38 192 290 +724 160 168 234 +# 8--Election_Campain/8_Election_Campain_Election_Campaign_8_269.jpg +390 189 237 318 +# 8--Election_Campain/8_Election_Campain_Election_Campaign_8_244.jpg +846 146 91 98 +976 111 48 131 +783 124 72 72 +742 142 53 69 +648 77 92 108 +943 153 53 67 +508 142 120 147 +388 111 106 132 +359 160 71 76 +253 166 80 88 +317 152 41 40 +101 74 139 151 +19 166 70 79 +0 179 51 90 +738 101 24 27 +# 8--Election_Campain/8_Election_Campain_Election_Campaign_8_266.jpg +283 105 136 220 +631 231 103 161 +# 8--Election_Campain/8_Election_Campain_Election_Campaign_8_553.jpg +438 118 112 142 +# 8--Election_Campain/8_Election_Campain_Election_Campaign_8_300.jpg +206 462 142 162 +576 74 132 208 +80 44 124 180 +700 442 150 192 +# 8--Election_Campain/8_Election_Campain_Election_Campaign_8_620.jpg +246 156 194 266 +648 156 194 260 +# 8--Election_Campain/8_Election_Campain_Election_Campaign_8_148.jpg +812 295 60 64 +741 266 52 58 +671 259 59 60 +568 327 78 76 +426 306 85 93 +498 297 65 75 +505 218 54 57 +620 359 85 94 +430 239 54 59 +754 222 25 29 +904 588 120 153 +886 436 75 72 +722 473 96 120 +959 301 65 68 +901 232 42 50 +240 373 103 124 +307 279 84 90 +358 250 72 72 +372 125 17 19 +321 103 20 24 +193 114 26 28 +130 87 25 30 +169 97 18 22 +219 100 17 21 +246 84 11 15 +267 81 12 13 +303 72 8 11 +310 66 8 8 +274 67 11 14 +213 57 10 11 +231 54 10 12 +585 194 43 45 +729 134 15 16 +636 179 24 29 +597 161 24 25 +614 136 18 19 +479 151 29 33 +566 151 19 20 +509 157 19 22 +553 123 14 16 +595 107 9 11 +551 103 8 10 +483 113 15 20 +492 92 8 11 +508 98 7 9 +807 99 12 13 +765 120 13 15 +781 103 12 13 +753 101 14 14 +705 129 12 11 +708 118 11 12 +683 89 8 9 +690 125 10 12 +994 146 19 21 +882 124 22 16 +796 128 17 13 +853 120 11 21 +661 111 10 11 +556 77 8 10 +605 89 8 11 +621 86 8 10 +686 96 8 11 +589 94 8 9 +627 102 11 9 +798 159 6 20 +840 132 16 17 +0 435 41 178 +0 380 63 132 +0 213 69 82 +103 214 54 47 +115 30 10 9 +34 15 9 12 +8 8 12 14 +846 503 99 101 +234 297 80 110 +# 8--Election_Campain/8_Election_Campain_Election_Campaign_8_84.jpg +921 598 36 54 +977 486 24 28 +879 511 20 25 +777 488 19 24 +534 516 21 26 +309 497 29 30 +287 502 23 28 +123 548 48 49 +209 512 22 33 +185 489 15 19 +126 487 20 22 +75 507 22 26 +94 506 21 28 +35 529 28 33 +154 447 9 11 +162 465 8 14 +257 433 17 21 +196 460 9 9 +219 481 17 23 +670 250 16 20 +632 204 16 21 +523 175 14 26 +490 192 19 22 +446 187 14 21 +603 217 12 17 +847 486 9 13 +831 469 8 10 +207 426 10 15 +288 420 12 19 +227 424 11 15 +9 524 14 23 +219 464 12 14 +226 447 12 13 
+226 437 10 12 +# 8--Election_Campain/8_Election_Campain_Election_Campaign_8_236.jpg +770 356 41 47 +556 144 76 81 +406 357 47 49 +335 341 63 87 +158 368 60 71 +# 8--Election_Campain/8_Election_Campain_Election_Campaign_8_174.jpg +266 457 31 33 +295 495 42 40 +130 418 33 36 +168 461 36 38 +209 441 30 35 +309 439 31 39 +259 420 31 39 +69 478 43 46 +7 485 46 50 +306 414 27 32 +343 393 16 19 +164 426 33 33 +212 403 23 27 +243 421 19 22 +288 427 20 23 +289 400 21 24 +89 397 24 25 +48 429 24 24 +34 387 22 27 +114 343 14 15 +225 385 16 16 +177 403 19 22 +148 392 18 20 +126 376 16 16 +172 374 14 17 +295 380 14 15 +161 349 10 11 +207 381 15 17 +183 367 13 14 +183 352 9 10 +335 378 14 15 +281 374 13 16 +51 408 24 26 +13 375 12 15 +156 336 8 12 +156 606 106 77 +167 531 76 80 +198 484 38 40 +360 378 18 21 +312 479 43 51 +385 538 80 82 +621 507 43 50 +453 453 44 50 +434 448 34 38 +420 427 28 30 +426 630 92 53 +668 582 76 85 +433 508 57 60 +739 485 62 63 +669 459 32 36 +584 458 43 45 +581 413 30 30 +572 452 39 41 +501 343 34 38 +611 420 22 23 +655 421 25 30 +652 399 29 26 +576 378 21 23 +434 386 19 21 +641 382 19 22 +546 375 14 17 +440 415 25 24 +419 390 17 17 +455 382 20 23 +473 370 13 17 +613 360 14 16 +598 383 15 18 +556 359 12 14 +571 360 13 14 +579 344 10 13 +666 339 9 10 +674 383 15 20 +492 333 13 13 +494 381 17 17 +412 380 13 13 +397 400 14 17 +383 380 13 16 +741 445 31 34 +878 463 39 45 +824 437 33 41 +806 413 21 25 +824 424 25 28 +930 419 33 39 +888 392 16 23 +850 388 20 23 +872 381 17 18 +870 396 13 15 +796 397 24 28 +760 416 29 32 +727 400 18 23 +770 382 21 22 +692 400 19 21 +929 400 27 29 +934 337 19 25 +754 373 20 20 +638 336 12 19 +720 378 21 21 +741 378 7 8 +770 363 8 8 +800 374 10 11 +898 514 60 64 +817 594 98 89 +951 470 44 47 +1010 444 14 46 +950 422 30 31 +980 408 24 26 +1011 365 13 17 +999 387 18 20 +989 379 16 17 +988 334 15 17 +969 339 12 13 +897 338 10 13 +889 341 10 13 +867 370 9 11 +879 374 11 11 +451 335 10 11 +441 329 10 11 +479 362 9 10 +409 326 9 10 +425 413 19 19 +1013 302 11 14 +997 299 11 12 +990 294 8 11 +981 291 10 12 +968 298 9 11 +930 299 9 10 +918 321 9 11 +903 313 10 12 +902 298 9 11 +855 306 10 13 +919 305 8 10 +941 298 9 10 +959 301 8 9 +728 341 9 10 +685 328 9 9 +540 259 6 8 +319 328 5 7 +309 376 13 15 +451 357 10 12 +489 359 10 12 +96 360 6 10 +252 384 14 16 +239 401 15 20 +259 374 12 12 +403 381 13 15 +468 308 9 13 +335 361 11 11 +302 358 11 11 +318 363 9 11 +255 356 10 10 +210 365 10 12 +196 379 15 16 +321 344 5 8 +348 333 7 8 +324 361 11 11 +288 357 9 10 +274 363 9 9 +230 361 11 12 +305 339 7 10 +268 347 7 8 +345 352 7 7 +245 361 10 11 +391 328 9 12 +371 324 9 11 +361 360 9 10 +540 372 13 15 +527 381 11 12 +463 366 10 12 +621 346 9 12 +607 301 7 9 +593 366 13 13 +627 380 10 16 +830 372 14 15 +1002 440 22 31 +155 372 11 14 +45 365 7 9 +36 330 9 9 +714 438 29 31 +611 381 14 17 +567 381 16 19 +119 390 10 12 +120 409 10 14 +491 586 55 57 +# 8--Election_Campain/8_Election_Campain_Election_Campaign_8_451.jpg +432 114 249 327 +# 8--Election_Campain/8_Election_Campain_Election_Campaign_8_69.jpg +116 124 150 218 +# 8--Election_Campain/8_Election_Campain_Election_Campaign_8_437.jpg +257 165 20 26 +215 164 14 18 +149 165 15 18 +87 162 16 17 +26 160 15 18 +129 267 21 20 +251 268 19 24 +364 67 23 27 +437 65 21 28 +520 37 20 19 +366 255 22 25 +474 254 24 28 +567 55 24 28 +648 57 25 27 +727 52 28 31 +585 151 19 19 +641 149 16 18 +695 149 21 18 +754 149 17 19 +579 259 24 26 +682 260 24 31 +788 394 12 13 +693 389 9 14 +712 391 10 11 +764 391 10 14 +752 391 10 10 +447 304 36 
56 +522 380 65 51 +21 72 14 17 +88 69 13 17 +148 66 13 18 +213 64 15 20 +279 63 16 17 +# 8--Election_Campain/8_Election_Campain_Election_Campaign_8_498.jpg +917 254 107 175 +882 207 57 60 +762 132 67 95 +688 50 119 158 +571 143 70 81 +660 150 25 24 +544 163 33 49 +533 139 30 38 +480 181 51 56 +398 148 43 65 +421 165 37 55 +349 152 55 81 +257 141 93 116 +202 184 58 64 +66 132 95 102 +31 189 63 64 +148 148 23 35 +229 142 46 54 +177 179 19 23 +3 172 30 38 +848 203 27 36 +# 8--Election_Campain/8_Election_Campain_Election_Campaign_8_118.jpg +404 214 77 77 +294 172 53 56 +208 196 49 44 +532 171 36 40 +363 181 45 50 +392 204 58 64 +289 198 39 46 +210 285 41 45 +171 213 41 50 +142 191 45 64 +38 193 34 49 +48 243 28 35 +923 490 101 193 +714 383 117 118 +890 334 60 65 +697 253 74 93 +532 290 67 98 +649 206 35 43 +957 274 44 48 +992 238 32 39 +883 257 25 30 +720 203 25 29 +886 234 23 25 +960 218 15 20 +902 199 8 10 +988 207 13 11 +636 195 20 24 +586 166 9 11 +713 188 11 13 +930 198 6 7 +640 163 6 7 +628 172 7 9 +810 231 40 59 +# 8--Election_Campain/8_Election_Campain_Election_Campaign_8_86.jpg +903 278 33 45 +735 248 52 64 +576 236 54 64 +473 284 41 42 +376 221 48 51 +459 251 33 35 +345 264 29 37 +207 238 44 42 +120 185 53 55 +12 162 52 64 +434 24 70 92 +184 0 91 116 +0 0 61 77 +# 8--Election_Campain/8_Election_Campain_Election_Campaign_8_252.jpg +988 291 21 22 +994 320 28 30 +948 316 19 24 +979 299 17 20 +937 295 17 20 +960 254 9 11 +922 313 29 32 +895 298 18 18 +913 306 11 13 +885 335 19 21 +876 314 17 18 +844 328 22 23 +853 302 16 18 +842 316 15 16 +835 245 13 17 +826 349 15 28 +806 314 15 15 +789 339 17 18 +754 335 15 17 +707 293 21 24 +744 302 17 17 +747 287 18 22 +671 350 17 21 +652 364 19 19 +647 348 16 20 +656 307 8 9 +776 392 33 46 +608 362 18 18 +565 353 26 26 +492 383 25 28 +413 391 28 35 +367 390 25 30 +142 460 83 83 +294 443 50 62 +108 423 16 16 +305 407 16 19 +291 391 9 15 +87 454 18 23 +42 424 8 11 +80 412 12 11 +117 403 6 10 +129 390 4 6 +105 403 10 10 +281 394 11 14 +206 402 9 10 +233 392 9 10 +459 133 20 23 +467 381 10 10 +441 381 12 17 +677 406 7 10 +730 349 8 12 +152 397 7 7 +146 395 7 10 +# 8--Election_Campain/8_Election_Campain_Election_Campaign_8_218.jpg +843 259 99 114 +789 262 58 63 +692 346 47 47 +579 322 8 10 +667 558 63 72 +602 342 45 52 +535 342 54 62 +465 335 36 47 +429 328 57 64 +396 341 35 40 +287 343 71 78 +172 389 84 98 +153 343 46 52 +122 291 48 57 +65 321 73 70 +24 361 11 10 +10 338 8 11 +10 325 8 9 +# 8--Election_Campain/8_Election_Campain_Election_Campaign_8_297.jpg +160 456 54 64 +114 205 25 28 +413 321 29 39 +429 239 19 29 +494 279 41 48 +520 237 20 25 +529 159 21 30 +599 274 31 45 +639 264 48 52 +776 246 56 56 +665 155 21 24 +763 182 15 27 +374 297 26 17 +926 303 38 43 +# 8--Election_Campain/8_Election_Campain_Election_Campaign_8_357.jpg +203 300 127 170 +# 8--Election_Campain/8_Election_Campain_Election_Campaign_8_529.jpg +276 138 180 248 +# 8--Election_Campain/8_Election_Campain_Election_Campaign_8_32.jpg +989 252 22 28 +951 276 27 24 +912 263 32 45 +957 258 21 25 +871 290 26 27 +789 294 27 28 +722 279 24 28 +663 278 26 28 +614 296 24 25 +560 307 24 27 +467 283 26 29 +362 289 25 28 +320 293 25 28 +423 332 20 20 +222 266 27 27 +108 243 29 29 +31 263 28 30 +# 8--Election_Campain/8_Election_Campain_Election_Campaign_8_120.jpg +196 139 625 603 +# 8--Election_Campain/8_Election_Campain_Election_Campaign_8_76.jpg +841 275 41 45 +779 292 36 39 +711 314 31 33 +709 233 29 32 +685 294 33 38 +641 235 33 35 +552 261 32 38 +578 246 28 33 +603 221 32 36 +513 251 37 40 
+513 227 30 32 +388 237 29 34 +444 231 34 37 +405 236 29 34 +416 282 32 40 +343 300 36 38 +297 224 30 34 +256 286 34 44 +266 275 32 38 +207 236 31 35 +327 226 29 35 +207 307 39 40 +260 241 26 29 +150 304 39 44 +69 313 39 45 +21 294 41 45 +614 275 32 42 +558 222 20 34 +356 233 31 34 +# 8--Election_Campain/8_Election_Campain_Election_Campaign_8_25.jpg +590 154 180 240 +# 8--Election_Campain/8_Election_Campain_Election_Campaign_8_113.jpg +238 178 52 82 +358 114 70 96 +696 38 52 70 +812 336 54 72 +# 8--Election_Campain/8_Election_Campain_Election_Campaign_8_347.jpg +211 128 33 50 +750 163 29 47 +21 571 16 21 +178 563 9 18 +476 583 9 17 +852 575 13 15 +921 590 14 15 +959 587 10 16 +1009 575 15 18 +# 8--Election_Campain/8_Election_Campain_Election_Campaign_8_452.jpg +0 414 31 57 +65 445 39 46 +77 449 46 53 +152 450 48 48 +240 418 60 59 +369 400 50 66 +421 389 67 74 +645 293 69 83 +788 333 91 91 +969 389 55 72 +# 8--Election_Campain/8_Election_Campain_Election_Campaign_8_133.jpg +944 186 28 28 +902 190 31 30 +854 192 29 33 +729 186 32 26 +624 170 33 36 +184 88 83 148 +435 178 25 22 +21 119 33 36 +# 9--Press_Conference/9_Press_Conference_Press_Conference_9_907.jpg +410 156 344 458 +# 9--Press_Conference/9_Press_Conference_Press_Conference_9_784.jpg +204 90 388 466 +# 9--Press_Conference/9_Press_Conference_Press_Conference_9_518.jpg +401 316 169 253 +765 163 160 259 +# 9--Press_Conference/9_Press_Conference_Press_Conference_9_352.jpg +181 395 560 708 +# 9--Press_Conference/9_Press_Conference_Press_Conference_9_214.jpg +56 200 122 146 +392 140 102 158 +838 164 116 140 +# 9--Press_Conference/9_Press_Conference_Press_Conference_9_492.jpg +262 6 454 478 +# 9--Press_Conference/9_Press_Conference_Press_Conference_9_141.jpg +248 295 546 716 +# 9--Press_Conference/9_Press_Conference_Press_Conference_9_35.jpg +378 245 12 17 +291 210 15 18 +312 215 12 16 +334 213 10 14 +401 187 12 15 +216 397 42 35 +200 287 16 21 +268 308 23 35 +303 284 17 26 +368 310 17 35 +498 305 15 25 +687 215 14 21 +623 216 14 19 +853 143 12 15 +602 196 13 18 +957 187 28 38 +957 223 27 31 +190 191 17 22 +374 214 12 15 +355 420 38 61 +# 9--Press_Conference/9_Press_Conference_Press_Conference_9_655.jpg +63 207 24 30 +137 223 20 32 +115 218 21 21 +161 202 28 34 +199 189 24 32 +231 218 25 26 +307 201 24 34 +392 198 23 23 +422 191 20 32 +346 211 21 30 +365 228 26 40 +465 190 19 25 +488 199 24 36 +497 225 36 41 +552 185 20 26 +543 183 17 22 +579 210 26 38 +651 204 22 27 +708 187 23 31 +726 192 26 26 +757 207 27 39 +661 216 40 61 +793 240 49 64 +848 189 25 30 +884 213 29 38 +942 184 28 40 +957 214 28 43 +1010 205 14 41 +138 202 20 28 +# 9--Press_Conference/9_Press_Conference_Press_Conference_9_357.jpg +338 195 375 561 +# 9--Press_Conference/9_Press_Conference_Press_Conference_9_571.jpg +369 342 252 348 +# 9--Press_Conference/9_Press_Conference_Press_Conference_9_165.jpg +90 54 72 88 +474 66 68 78 +844 58 74 94 +# 9--Press_Conference/9_Press_Conference_Press_Conference_9_344.jpg +608 294 268 368 +# 9--Press_Conference/9_Press_Conference_Press_Conference_9_849.jpg +268 361 449 629 +# 9--Press_Conference/9_Press_Conference_Press_Conference_9_31.jpg +586 211 16 20 +442 206 14 20 +48 286 14 19 +20 310 11 15 +927 361 13 21 +958 431 17 29 +195 375 14 24 +121 370 14 23 +82 433 12 27 +166 428 13 26 +82 481 19 33 +799 444 13 26 +812 374 14 28 +920 231 12 17 +957 221 13 19 +# 9--Press_Conference/9_Press_Conference_Press_Conference_9_520.jpg +282 495 195 264 +# 9--Press_Conference/9_Press_Conference_Press_Conference_9_297.jpg +410 176 262 362 +# 
9--Press_Conference/9_Press_Conference_Press_Conference_9_66.jpg +216 175 73 88 +453 285 53 76 +884 267 75 88 +# 9--Press_Conference/9_Press_Conference_Press_Conference_9_332.jpg +163 241 796 1101 +# 9--Press_Conference/9_Press_Conference_Press_Conference_9_325.jpg +332 192 60 76 +684 114 56 110 +# 9--Press_Conference/9_Press_Conference_Press_Conference_9_41.jpg +152 194 78 94 +700 188 84 110 +# 9--Press_Conference/9_Press_Conference_Press_Conference_9_432.jpg +437 166 244 364 +# 9--Press_Conference/9_Press_Conference_Press_Conference_9_40.jpg +12 170 19 21 +108 186 17 18 +49 226 21 24 +187 178 17 23 +247 168 15 19 +206 173 14 16 +299 176 14 20 +334 180 11 15 +353 169 14 17 +372 177 13 17 +406 162 16 22 +423 164 15 23 +463 178 19 23 +460 156 17 22 +369 236 19 22 +263 219 18 26 +220 228 39 57 +316 213 26 71 +713 162 32 36 +751 116 25 38 +665 174 25 36 +693 159 20 31 +591 127 26 36 +528 168 21 27 +513 168 14 23 +970 168 54 49 +898 171 35 43 +116 179 12 16 +331 221 11 16 +# 9--Press_Conference/9_Press_Conference_Press_Conference_9_257.jpg +270 192 153 213 +685 351 132 171 +# 9--Press_Conference/9_Press_Conference_Press_Conference_9_924.jpg +398 156 256 364 +# 9--Press_Conference/9_Press_Conference_Press_Conference_9_209.jpg +262 70 98 136 +# 9--Press_Conference/9_Press_Conference_Press_Conference_9_636.jpg +202 296 495 661 +# 9--Press_Conference/9_Press_Conference_Press_Conference_9_346.jpg +256 238 140 199 +422 223 147 224 +81 634 50 72 +264 659 52 68 +417 671 51 62 +614 708 36 57 +362 981 86 52 +# 9--Press_Conference/9_Press_Conference_Press_Conference_9_397.jpg +87 90 47 63 +208 134 46 52 +355 103 45 61 +483 118 41 55 +627 111 45 61 +712 153 40 52 +783 161 45 53 +869 142 53 61 +# 9--Press_Conference/9_Press_Conference_Press_Conference_9_252.jpg +296 124 400 542 +# 9--Press_Conference/9_Press_Conference_Press_Conference_9_615.jpg +350 158 298 358 +# 9--Press_Conference/9_Press_Conference_Press_Conference_9_933.jpg +304 265 425 593 +# 9--Press_Conference/9_Press_Conference_Press_Conference_9_945.jpg +227 227 372 607 +# 9--Press_Conference/9_Press_Conference_Press_Conference_9_648.jpg +461 187 293 373 +19 475 112 253 +# 9--Press_Conference/9_Press_Conference_Press_Conference_9_748.jpg +308 215 378 547 +# 9--Press_Conference/9_Press_Conference_Press_Conference_9_12.jpg +930 203 37 56 +390 312 19 27 +317 317 22 25 +261 306 19 22 +326 232 16 22 +107 221 32 37 +182 259 16 19 +117 382 26 44 +205 339 18 30 +254 331 14 29 +420 342 20 32 +802 427 27 58 +673 260 15 21 +615 287 15 23 +567 285 13 20 +512 291 13 17 +830 302 20 24 +469 342 20 35 +52 245 8 12 +18 246 6 12 +201 312 19 24 +# 9--Press_Conference/9_Press_Conference_Press_Conference_9_105.jpg +58 91 55 68 +113 121 48 62 +276 145 41 60 +333 192 28 34 +486 56 68 92 +601 79 48 68 +# 9--Press_Conference/9_Press_Conference_Press_Conference_9_607.jpg +333 153 249 348 +# 9--Press_Conference/9_Press_Conference_Press_Conference_9_34.jpg +257 286 408 465 +# 9--Press_Conference/9_Press_Conference_Press_Conference_9_258.jpg +283 338 479 654 +# 9--Press_Conference/9_Press_Conference_Press_Conference_9_431.jpg +306 258 342 453 +# 9--Press_Conference/9_Press_Conference_Press_Conference_9_872.jpg +411 321 207 288 +# 9--Press_Conference/9_Press_Conference_Press_Conference_9_883.jpg +274 196 286 375 +# 9--Press_Conference/9_Press_Conference_Press_Conference_9_161.jpg +214 62 196 236 +# 9--Press_Conference/9_Press_Conference_Press_Conference_9_343.jpg +496 120 123 179 +# 9--Press_Conference/9_Press_Conference_Press_Conference_9_710.jpg +269 266 411 607 +# 
9--Press_Conference/9_Press_Conference_Press_Conference_9_74.jpg +140 278 84 82 +458 264 82 112 +856 282 76 90 +# 9--Press_Conference/9_Press_Conference_Press_Conference_9_658.jpg +247 178 364 491 +# 9--Press_Conference/9_Press_Conference_Press_Conference_9_632.jpg +177 303 501 751 +# 9--Press_Conference/9_Press_Conference_Press_Conference_9_375.jpg +128 132 72 98 +298 122 72 96 +546 126 54 84 +660 104 56 92 +842 128 66 92 +# 9--Press_Conference/9_Press_Conference_Press_Conference_9_693.jpg +8 108 52 118 +264 186 72 122 +658 116 88 130 +880 214 82 128 +522 236 80 90 +# 9--Press_Conference/9_Press_Conference_Press_Conference_9_196.jpg +287 160 367 451 +# 9--Press_Conference/9_Press_Conference_Press_Conference_9_767.jpg +281 167 399 573 +# 9--Press_Conference/9_Press_Conference_Press_Conference_9_552.jpg +389 120 243 355 +# 9--Press_Conference/9_Press_Conference_Press_Conference_9_100.jpg +493 271 32 38 +639 268 28 35 +747 268 28 37 +# 9--Press_Conference/9_Press_Conference_Press_Conference_9_595.jpg +252 156 324 454 +# 9--Press_Conference/9_Press_Conference_Press_Conference_9_43.jpg +400 70 144 184 +# 9--Press_Conference/9_Press_Conference_Press_Conference_9_828.jpg +266 269 545 784 +# 9--Press_Conference/9_Press_Conference_Press_Conference_9_424.jpg +59 336 92 151 +489 67 157 238 +# 9--Press_Conference/9_Press_Conference_Press_Conference_9_563.jpg +402 114 188 252 +780 220 132 152 +# 9--Press_Conference/9_Press_Conference_Press_Conference_9_278.jpg +596 116 78 112 +# 9--Press_Conference/9_Press_Conference_Press_Conference_9_391.jpg +322 12 414 590 +# 9--Press_Conference/9_Press_Conference_Press_Conference_9_345.jpg +229 440 133 169 +392 259 232 322 +# 9--Press_Conference/9_Press_Conference_Press_Conference_9_757.jpg +143 223 370 397 +# 9--Press_Conference/9_Press_Conference_Press_Conference_9_930.jpg +268 96 343 521 +# 9--Press_Conference/9_Press_Conference_Press_Conference_9_328.jpg +236 114 120 204 +554 166 128 188 +# 9--Press_Conference/9_Press_Conference_Press_Conference_9_114.jpg +464 92 205 316 +# 9--Press_Conference/9_Press_Conference_Press_Conference_9_60.jpg +123 387 27 37 +263 394 31 42 +393 387 29 38 +555 314 29 39 +680 392 31 40 +768 390 27 35 +862 400 24 39 +# 9--Press_Conference/9_Press_Conference_Press_Conference_9_182.jpg +440 160 94 118 +# 9--Press_Conference/9_Press_Conference_Press_Conference_9_147.jpg +458 154 100 136 +# 9--Press_Conference/9_Press_Conference_Press_Conference_9_45.jpg +68 270 48 65 +478 261 50 74 +266 246 51 77 +838 258 61 78 +680 273 59 75 +# 9--Press_Conference/9_Press_Conference_Press_Conference_9_89.jpg +132 116 73 130 +403 154 80 136 +661 199 75 107 +784 222 53 70 +# 9--Press_Conference/9_Press_Conference_Press_Conference_9_594.jpg +334 182 300 400 +# 9--Press_Conference/9_Press_Conference_Press_Conference_9_613.jpg +316 224 270 347 +# 9--Press_Conference/9_Press_Conference_Press_Conference_9_521.jpg +332 172 294 372 +# 9--Press_Conference/9_Press_Conference_Press_Conference_9_129.jpg +336 242 152 202 +712 278 126 152 +# 9--Press_Conference/9_Press_Conference_Press_Conference_9_183.jpg +218 190 112 160 +302 224 74 140 +560 136 170 204 diff --git a/data/widerface/val/wider_val.txt b/data/widerface/val/wider_val.txt new file mode 100644 index 0000000..50fa6e8 --- /dev/null +++ b/data/widerface/val/wider_val.txt @@ -0,0 +1,3226 @@ +./21--Festival/21_Festival_Festival_21_640.jpg +./21--Festival/21_Festival_Festival_21_811.jpg +./21--Festival/21_Festival_Festival_21_741.jpg +./21--Festival/21_Festival_Festival_21_585.jpg 
+./21--Festival/21_Festival_Festival_21_140.jpg +./21--Festival/21_Festival_Festival_21_107.jpg +./21--Festival/21_Festival_Festival_21_218.jpg +./21--Festival/21_Festival_Festival_21_491.jpg +./21--Festival/21_Festival_Festival_21_42.jpg +./21--Festival/21_Festival_Festival_21_604.jpg +./21--Festival/21_Festival_Festival_21_331.jpg +./21--Festival/21_Festival_Festival_21_664.jpg +./21--Festival/21_Festival_Festival_21_943.jpg +./21--Festival/21_Festival_Festival_21_354.jpg +./21--Festival/21_Festival_Festival_21_97.jpg +./21--Festival/21_Festival_Festival_21_414.jpg +./21--Festival/21_Festival_Festival_21_513.jpg +./21--Festival/21_Festival_Festival_21_193.jpg +./21--Festival/21_Festival_Festival_21_605.jpg +./21--Festival/21_Festival_Festival_21_881.jpg +./21--Festival/21_Festival_Festival_21_395.jpg +./21--Festival/21_Festival_Festival_21_100.jpg +./21--Festival/21_Festival_Festival_21_526.jpg +./21--Festival/21_Festival_Festival_21_22.jpg +./21--Festival/21_Festival_Festival_21_660.jpg +./21--Festival/21_Festival_Festival_21_462.jpg +./21--Festival/21_Festival_Festival_21_219.jpg +./21--Festival/21_Festival_Festival_21_201.jpg +./21--Festival/21_Festival_Festival_21_225.jpg +./21--Festival/21_Festival_Festival_21_785.jpg +./21--Festival/21_Festival_Festival_21_340.jpg +./21--Festival/21_Festival_Festival_21_254.jpg +./21--Festival/21_Festival_Festival_21_562.jpg +./21--Festival/21_Festival_Festival_21_830.jpg +./21--Festival/21_Festival_Festival_21_275.jpg +./21--Festival/21_Festival_Festival_21_936.jpg +./21--Festival/21_Festival_Festival_21_601.jpg +./21--Festival/21_Festival_Festival_21_373.jpg +./21--Festival/21_Festival_Festival_21_777.jpg +./21--Festival/21_Festival_Festival_21_210.jpg +./21--Festival/21_Festival_Festival_21_727.jpg +./21--Festival/21_Festival_Festival_21_378.jpg +./21--Festival/21_Festival_Festival_21_797.jpg +./21--Festival/21_Festival_Festival_21_290.jpg +./21--Festival/21_Festival_Festival_21_976.jpg +./26--Soldier_Drilling/26_Soldier_Drilling_Soldiers_Drilling_26_184.jpg +./26--Soldier_Drilling/26_Soldier_Drilling_Soldiers_Drilling_26_832.jpg +./26--Soldier_Drilling/26_Soldier_Drilling_Soldiers_Drilling_26_710.jpg +./26--Soldier_Drilling/26_Soldier_Drilling_Soldiers_Drilling_26_390.jpg +./26--Soldier_Drilling/26_Soldier_Drilling_Soldiers_Drilling_26_520.jpg +./26--Soldier_Drilling/26_Soldier_Drilling_Soldiers_Drilling_26_9.jpg +./26--Soldier_Drilling/26_Soldier_Drilling_Soldiers_Drilling_26_1022.jpg +./26--Soldier_Drilling/26_Soldier_Drilling_Soldiers_Drilling_26_689.jpg +./26--Soldier_Drilling/26_Soldier_Drilling_Soldiers_Drilling_26_991.jpg +./26--Soldier_Drilling/26_Soldier_Drilling_Soldiers_Drilling_26_307.jpg +./26--Soldier_Drilling/26_Soldier_Drilling_Soldiers_Drilling_26_606.jpg +./26--Soldier_Drilling/26_Soldier_Drilling_Soldiers_Drilling_26_886.jpg +./26--Soldier_Drilling/26_Soldier_Drilling_Soldiers_Drilling_26_85.jpg +./26--Soldier_Drilling/26_Soldier_Drilling_Soldiers_Drilling_26_204.jpg +./26--Soldier_Drilling/26_Soldier_Drilling_Soldiers_Drilling_26_262.jpg +./26--Soldier_Drilling/26_Soldier_Drilling_Soldiers_Drilling_26_750.jpg +./26--Soldier_Drilling/26_Soldier_Drilling_Soldiers_Drilling_26_223.jpg +./26--Soldier_Drilling/26_Soldier_Drilling_Soldiers_Drilling_26_934.jpg +./26--Soldier_Drilling/26_Soldier_Drilling_Soldiers_Drilling_26_64.jpg +./26--Soldier_Drilling/26_Soldier_Drilling_Soldiers_Drilling_26_893.jpg +./26--Soldier_Drilling/26_Soldier_Drilling_Soldiers_Drilling_26_567.jpg 
+./26--Soldier_Drilling/26_Soldier_Drilling_Soldiers_Drilling_26_764.jpg +./26--Soldier_Drilling/26_Soldier_Drilling_Soldiers_Drilling_26_359.jpg +./26--Soldier_Drilling/26_Soldier_Drilling_Soldiers_Drilling_26_31.jpg +./26--Soldier_Drilling/26_Soldier_Drilling_Soldiers_Drilling_26_393.jpg +./26--Soldier_Drilling/26_Soldier_Drilling_Soldiers_Drilling_26_619.jpg +./26--Soldier_Drilling/26_Soldier_Drilling_Soldiers_Drilling_26_942.jpg +./26--Soldier_Drilling/26_Soldier_Drilling_Soldiers_Drilling_26_149.jpg +./26--Soldier_Drilling/26_Soldier_Drilling_Soldiers_Drilling_26_245.jpg +./26--Soldier_Drilling/26_Soldier_Drilling_Soldiers_Drilling_26_178.jpg +./26--Soldier_Drilling/26_Soldier_Drilling_Soldiers_Drilling_26_529.jpg +./26--Soldier_Drilling/26_Soldier_Drilling_Soldiers_Drilling_26_259.jpg +./26--Soldier_Drilling/26_Soldier_Drilling_Soldiers_Drilling_26_192.jpg +./26--Soldier_Drilling/26_Soldier_Drilling_Soldiers_Drilling_26_236.jpg +./26--Soldier_Drilling/26_Soldier_Drilling_Soldiers_Drilling_26_610.jpg +./26--Soldier_Drilling/26_Soldier_Drilling_Soldiers_Drilling_26_719.jpg +./26--Soldier_Drilling/26_Soldier_Drilling_Soldiers_Drilling_26_405.jpg +./26--Soldier_Drilling/26_Soldier_Drilling_Soldiers_Drilling_26_345.jpg +./26--Soldier_Drilling/26_Soldier_Drilling_Soldiers_Drilling_26_336.jpg +./16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_84.jpg +./16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_512.jpg +./16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_143.jpg +./16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_309.jpg +./16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_135.jpg +./16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_226.jpg +./16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_524.jpg +./16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_4.jpg +./16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_64.jpg +./16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_589.jpg +./16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_467.jpg +./16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_134.jpg +./16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_124.jpg +./16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_637.jpg +./16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_56.jpg +./16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_338.jpg +./16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_566.jpg +./16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_495.jpg +./16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_94.jpg +./16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_422.jpg +./16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_317.jpg +./16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_392.jpg +./16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_231.jpg +./16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_750.jpg +./16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_546.jpg +./16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_569.jpg +./16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_195.jpg +./16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_141.jpg +./16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_270.jpg +./16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_116.jpg +./16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_752.jpg +./16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_447.jpg +./16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_305.jpg 
+./16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_59.jpg +./16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_474.jpg +./16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_591.jpg +./16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_482.jpg +./16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_25.jpg +./16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_361.jpg +./16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_85.jpg +./16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_346.jpg +./16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_490.jpg +./16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_311.jpg +./16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_239.jpg +./16--Award_Ceremony/16_Award_Ceremony_Awards_Ceremony_16_73.jpg +./30--Surgeons/30_Surgeons_Surgeons_30_555.jpg +./30--Surgeons/30_Surgeons_Surgeons_30_705.jpg +./30--Surgeons/30_Surgeons_Surgeons_30_491.jpg +./30--Surgeons/30_Surgeons_Surgeons_30_708.jpg +./30--Surgeons/30_Surgeons_Surgeons_30_486.jpg +./30--Surgeons/30_Surgeons_Surgeons_30_63.jpg +./30--Surgeons/30_Surgeons_Surgeons_30_865.jpg +./30--Surgeons/30_Surgeons_Surgeons_30_722.jpg +./30--Surgeons/30_Surgeons_Surgeons_30_256.jpg +./30--Surgeons/30_Surgeons_Surgeons_30_749.jpg +./30--Surgeons/30_Surgeons_Surgeons_30_107.jpg +./30--Surgeons/30_Surgeons_Surgeons_30_490.jpg +./30--Surgeons/30_Surgeons_Surgeons_30_746.jpg +./30--Surgeons/30_Surgeons_Surgeons_30_554.jpg +./30--Surgeons/30_Surgeons_Surgeons_30_778.jpg +./30--Surgeons/30_Surgeons_Surgeons_30_911.jpg +./30--Surgeons/30_Surgeons_Surgeons_30_988.jpg +./30--Surgeons/30_Surgeons_Surgeons_30_343.jpg +./30--Surgeons/30_Surgeons_Surgeons_30_160.jpg +./30--Surgeons/30_Surgeons_Surgeons_30_77.jpg +./30--Surgeons/30_Surgeons_Surgeons_30_8.jpg +./30--Surgeons/30_Surgeons_Surgeons_30_552.jpg +./30--Surgeons/30_Surgeons_Surgeons_30_122.jpg +./30--Surgeons/30_Surgeons_Surgeons_30_43.jpg +./30--Surgeons/30_Surgeons_Surgeons_30_696.jpg +./30--Surgeons/30_Surgeons_Surgeons_30_823.jpg +./30--Surgeons/30_Surgeons_Surgeons_30_979.jpg +./30--Surgeons/30_Surgeons_Surgeons_30_819.jpg +./30--Surgeons/30_Surgeons_Surgeons_30_40.jpg +./30--Surgeons/30_Surgeons_Surgeons_30_95.jpg +./30--Surgeons/30_Surgeons_Surgeons_30_264.jpg +./30--Surgeons/30_Surgeons_Surgeons_30_525.jpg +./30--Surgeons/30_Surgeons_Surgeons_30_482.jpg +./30--Surgeons/30_Surgeons_Surgeons_30_397.jpg +./30--Surgeons/30_Surgeons_Surgeons_30_861.jpg +./30--Surgeons/30_Surgeons_Surgeons_30_914.jpg +./30--Surgeons/30_Surgeons_Surgeons_30_932.jpg +./30--Surgeons/30_Surgeons_Surgeons_30_862.jpg +./30--Surgeons/30_Surgeons_Surgeons_30_840.jpg +./30--Surgeons/30_Surgeons_Surgeons_30_533.jpg +./30--Surgeons/30_Surgeons_Surgeons_30_115.jpg +./19--Couple/19_Couple_Couple_19_631.jpg +./19--Couple/19_Couple_Couple_19_106.jpg +./19--Couple/19_Couple_Couple_19_88.jpg +./19--Couple/19_Couple_Couple_19_317.jpg +./19--Couple/19_Couple_Couple_19_873.jpg +./19--Couple/19_Couple_Couple_19_254.jpg +./19--Couple/19_Couple_Couple_19_125.jpg +./19--Couple/19_Couple_Couple_19_770.jpg +./19--Couple/19_Couple_Couple_19_548.jpg +./19--Couple/19_Couple_Couple_19_667.jpg +./19--Couple/19_Couple_Couple_19_688.jpg +./19--Couple/19_Couple_Couple_19_156.jpg +./19--Couple/19_Couple_Couple_19_301.jpg +./19--Couple/19_Couple_Couple_19_835.jpg +./19--Couple/19_Couple_Couple_19_847.jpg +./19--Couple/19_Couple_Couple_19_110.jpg +./19--Couple/19_Couple_Couple_19_514.jpg +./19--Couple/19_Couple_Couple_19_936.jpg 
+./19--Couple/19_Couple_Couple_19_24.jpg +./19--Couple/19_Couple_Couple_19_86.jpg +./19--Couple/19_Couple_Couple_19_325.jpg +./19--Couple/19_Couple_Couple_19_31.jpg +./19--Couple/19_Couple_Couple_19_509.jpg +./19--Couple/19_Couple_Couple_19_139.jpg +./19--Couple/19_Couple_Couple_19_881.jpg +./19--Couple/19_Couple_Couple_19_832.jpg +./19--Couple/19_Couple_Couple_19_1014.jpg +./19--Couple/19_Couple_Couple_19_349.jpg +./19--Couple/19_Couple_Couple_19_743.jpg +./19--Couple/19_Couple_Couple_19_90.jpg +./19--Couple/19_Couple_Couple_19_810.jpg +./19--Couple/19_Couple_Couple_19_836.jpg +./19--Couple/19_Couple_Couple_19_50.jpg +./19--Couple/19_Couple_Couple_19_319.jpg +./19--Couple/19_Couple_Couple_19_822.jpg +./19--Couple/19_Couple_Couple_19_910.jpg +./22--Picnic/22_Picnic_Picnic_22_541.jpg +./22--Picnic/22_Picnic_Picnic_22_10.jpg +./22--Picnic/22_Picnic_Picnic_22_152.jpg +./22--Picnic/22_Picnic_Picnic_22_594.jpg +./22--Picnic/22_Picnic_Picnic_22_688.jpg +./22--Picnic/22_Picnic_Picnic_22_654.jpg +./22--Picnic/22_Picnic_Picnic_22_732.jpg +./22--Picnic/22_Picnic_Picnic_22_933.jpg +./22--Picnic/22_Picnic_Picnic_22_36.jpg +./22--Picnic/22_Picnic_Picnic_22_241.jpg +./22--Picnic/22_Picnic_Picnic_22_444.jpg +./22--Picnic/22_Picnic_Picnic_22_483.jpg +./22--Picnic/22_Picnic_Picnic_22_308.jpg +./22--Picnic/22_Picnic_Picnic_22_290.jpg +./22--Picnic/22_Picnic_Picnic_22_561.jpg +./22--Picnic/22_Picnic_Picnic_22_928.jpg +./22--Picnic/22_Picnic_Picnic_22_357.jpg +./22--Picnic/22_Picnic_Picnic_22_140.jpg +./22--Picnic/22_Picnic_Picnic_22_313.jpg +./22--Picnic/22_Picnic_Picnic_22_310.jpg +./22--Picnic/22_Picnic_Picnic_22_564.jpg +./22--Picnic/22_Picnic_Picnic_22_354.jpg +./22--Picnic/22_Picnic_Picnic_22_208.jpg +./22--Picnic/22_Picnic_Picnic_22_537.jpg +./24--Soldier_Firing/24_Soldier_Firing_Soldier_Firing_24_1037.jpg +./24--Soldier_Firing/24_Soldier_Firing_Soldier_Firing_24_264.jpg +./24--Soldier_Firing/24_Soldier_Firing_Soldier_Firing_24_372.jpg +./24--Soldier_Firing/24_Soldier_Firing_Soldier_Firing_24_901.jpg +./24--Soldier_Firing/24_Soldier_Firing_Soldier_Firing_24_633.jpg +./24--Soldier_Firing/24_Soldier_Firing_Soldier_Firing_24_368.jpg +./24--Soldier_Firing/24_Soldier_Firing_Soldier_Firing_24_67.jpg +./24--Soldier_Firing/24_Soldier_Firing_Soldier_Firing_24_644.jpg +./24--Soldier_Firing/24_Soldier_Firing_Soldier_Firing_24_812.jpg +./24--Soldier_Firing/24_Soldier_Firing_Soldier_Firing_24_268.jpg +./24--Soldier_Firing/24_Soldier_Firing_Soldier_Firing_24_601.jpg +./24--Soldier_Firing/24_Soldier_Firing_Soldier_Firing_24_133.jpg +./24--Soldier_Firing/24_Soldier_Firing_Soldier_Firing_24_405.jpg +./24--Soldier_Firing/24_Soldier_Firing_Soldier_Firing_24_890.jpg +./24--Soldier_Firing/24_Soldier_Firing_Soldier_Firing_24_431.jpg +./24--Soldier_Firing/24_Soldier_Firing_Soldier_Firing_24_702.jpg +./24--Soldier_Firing/24_Soldier_Firing_Soldier_Firing_24_10.jpg +./24--Soldier_Firing/24_Soldier_Firing_Soldier_Firing_24_763.jpg +./24--Soldier_Firing/24_Soldier_Firing_Soldier_Firing_24_129.jpg +./24--Soldier_Firing/24_Soldier_Firing_Soldier_Firing_24_931.jpg +./24--Soldier_Firing/24_Soldier_Firing_Soldier_Firing_24_95.jpg +./24--Soldier_Firing/24_Soldier_Firing_Soldier_Firing_24_315.jpg +./24--Soldier_Firing/24_Soldier_Firing_Soldier_Firing_24_523.jpg +./24--Soldier_Firing/24_Soldier_Firing_Soldier_Firing_24_254.jpg +./24--Soldier_Firing/24_Soldier_Firing_Soldier_Firing_24_540.jpg +./24--Soldier_Firing/24_Soldier_Firing_Soldier_Firing_24_703.jpg +./24--Soldier_Firing/24_Soldier_Firing_Soldier_Firing_24_824.jpg 
+./24--Soldier_Firing/24_Soldier_Firing_Soldier_Firing_24_887.jpg +./24--Soldier_Firing/24_Soldier_Firing_Soldier_Firing_24_904.jpg +./24--Soldier_Firing/24_Soldier_Firing_Soldier_Firing_24_329.jpg +./24--Soldier_Firing/24_Soldier_Firing_Soldier_Firing_24_691.jpg +./24--Soldier_Firing/24_Soldier_Firing_Soldier_Firing_24_281.jpg +./24--Soldier_Firing/24_Soldier_Firing_Soldier_Firing_24_15.jpg +./24--Soldier_Firing/24_Soldier_Firing_Soldier_Firing_24_115.jpg +./53--Raid/53_Raid_policeraid_53_368.jpg +./53--Raid/53_Raid_policeraid_53_489.jpg +./53--Raid/53_Raid_policeraid_53_92.jpg +./53--Raid/53_Raid_policeraid_53_860.jpg +./53--Raid/53_Raid_policeraid_53_385.jpg +./53--Raid/53_Raid_policeraid_53_107.jpg +./53--Raid/53_Raid_policeraid_53_619.jpg +./53--Raid/53_Raid_policeraid_53_207.jpg +./53--Raid/53_Raid_policeraid_53_696.jpg +./53--Raid/53_Raid_policeraid_53_178.jpg +./53--Raid/53_Raid_policeraid_53_208.jpg +./53--Raid/53_Raid_policeraid_53_171.jpg +./53--Raid/53_Raid_policeraid_53_854.jpg +./53--Raid/53_Raid_policeraid_53_736.jpg +./53--Raid/53_Raid_policeraid_53_340.jpg +./53--Raid/53_Raid_policeraid_53_674.jpg +./53--Raid/53_Raid_policeraid_53_14.jpg +./53--Raid/53_Raid_policeraid_53_928.jpg +./53--Raid/53_Raid_policeraid_53_599.jpg +./53--Raid/53_Raid_policeraid_53_43.jpg +./53--Raid/53_Raid_policeraid_53_827.jpg +./53--Raid/53_Raid_policeraid_53_858.jpg +./53--Raid/53_Raid_policeraid_53_364.jpg +./53--Raid/53_Raid_policeraid_53_445.jpg +./53--Raid/53_Raid_policeraid_53_574.jpg +./53--Raid/53_Raid_policeraid_53_272.jpg +./53--Raid/53_Raid_policeraid_53_471.jpg +./53--Raid/53_Raid_policeraid_53_47.jpg +./53--Raid/53_Raid_policeraid_53_951.jpg +./53--Raid/53_Raid_policeraid_53_649.jpg +./53--Raid/53_Raid_policeraid_53_396.jpg +./53--Raid/53_Raid_policeraid_53_543.jpg +./53--Raid/53_Raid_policeraid_53_458.jpg +./53--Raid/53_Raid_policeraid_53_6.jpg +./53--Raid/53_Raid_policeraid_53_770.jpg +./53--Raid/53_Raid_policeraid_53_597.jpg +./53--Raid/53_Raid_policeraid_53_555.jpg +./53--Raid/53_Raid_policeraid_53_805.jpg +./53--Raid/53_Raid_policeraid_53_829.jpg +./53--Raid/53_Raid_policeraid_53_438.jpg +./53--Raid/53_Raid_policeraid_53_212.jpg +./53--Raid/53_Raid_policeraid_53_280.jpg +./53--Raid/53_Raid_policeraid_53_256.jpg +./53--Raid/53_Raid_policeraid_53_686.jpg +./53--Raid/53_Raid_policeraid_53_54.jpg +./38--Tennis/38_Tennis_Tennis_38_580.jpg +./38--Tennis/38_Tennis_Tennis_38_319.jpg +./38--Tennis/38_Tennis_Tennis_38_531.jpg +./38--Tennis/38_Tennis_Tennis_38_332.jpg +./38--Tennis/38_Tennis_Tennis_38_497.jpg +./38--Tennis/38_Tennis_Tennis_38_604.jpg +./38--Tennis/38_Tennis_Tennis_38_717.jpg +./38--Tennis/38_Tennis_Tennis_38_507.jpg +./38--Tennis/38_Tennis_Tennis_38_232.jpg +./38--Tennis/38_Tennis_Tennis_38_420.jpg +./38--Tennis/38_Tennis_Tennis_38_182.jpg +./38--Tennis/38_Tennis_Tennis_38_535.jpg +./38--Tennis/38_Tennis_Tennis_38_230.jpg +./38--Tennis/38_Tennis_Tennis_38_592.jpg +./38--Tennis/38_Tennis_Tennis_38_501.jpg +./38--Tennis/38_Tennis_Tennis_38_754.jpg +./38--Tennis/38_Tennis_Tennis_38_94.jpg +./38--Tennis/38_Tennis_Tennis_38_558.jpg +./38--Tennis/38_Tennis_Tennis_38_131.jpg +./38--Tennis/38_Tennis_Tennis_38_18.jpg +./38--Tennis/38_Tennis_Tennis_38_23.jpg +./38--Tennis/38_Tennis_Tennis_38_323.jpg +./38--Tennis/38_Tennis_Tennis_38_692.jpg +./38--Tennis/38_Tennis_Tennis_38_371.jpg +./38--Tennis/38_Tennis_Tennis_38_452.jpg +./38--Tennis/38_Tennis_Tennis_38_666.jpg +./38--Tennis/38_Tennis_Tennis_38_81.jpg +./38--Tennis/38_Tennis_Tennis_38_240.jpg 
+./38--Tennis/38_Tennis_Tennis_38_142.jpg +./38--Tennis/38_Tennis_Tennis_38_128.jpg +./38--Tennis/38_Tennis_Tennis_38_758.jpg +./38--Tennis/38_Tennis_Tennis_38_40.jpg +./38--Tennis/38_Tennis_Tennis_38_485.jpg +./38--Tennis/38_Tennis_Tennis_38_300.jpg +./38--Tennis/38_Tennis_Tennis_38_683.jpg +./11--Meeting/11_Meeting_Meeting_11_Meeting_Meeting_11_223.jpg +./11--Meeting/11_Meeting_Meeting_11_Meeting_Meeting_11_339.jpg +./11--Meeting/11_Meeting_Meeting_11_Meeting_Meeting_11_295.jpg +./11--Meeting/11_Meeting_Meeting_11_Meeting_Meeting_11_189.jpg +./11--Meeting/11_Meeting_Meeting_11_Meeting_Meeting_11_639.jpg +./11--Meeting/11_Meeting_Meeting_11_Meeting_Meeting_11_320.jpg +./11--Meeting/11_Meeting_Meeting_11_Meeting_Meeting_11_375.jpg +./11--Meeting/11_Meeting_Meeting_11_Meeting_Meeting_11_349.jpg +./11--Meeting/11_Meeting_Meeting_11_Meeting_Meeting_11_406.jpg +./11--Meeting/11_Meeting_Meeting_11_Meeting_Meeting_11_774.jpg +./11--Meeting/11_Meeting_Meeting_11_Meeting_Meeting_11_287.jpg +./11--Meeting/11_Meeting_Meeting_11_Meeting_Meeting_11_468.jpg +./11--Meeting/11_Meeting_Meeting_11_Meeting_Meeting_11_26.jpg +./11--Meeting/11_Meeting_Meeting_11_Meeting_Meeting_11_441.jpg +./11--Meeting/11_Meeting_Meeting_11_Meeting_Meeting_11_574.jpg +./11--Meeting/11_Meeting_Meeting_11_Meeting_Meeting_11_702.jpg +./11--Meeting/11_Meeting_Meeting_11_Meeting_Meeting_11_573.jpg +./11--Meeting/11_Meeting_Meeting_11_Meeting_Meeting_11_560.jpg +./11--Meeting/11_Meeting_Meeting_11_Meeting_Meeting_11_71.jpg +./11--Meeting/11_Meeting_Meeting_11_Meeting_Meeting_11_587.jpg +./11--Meeting/11_Meeting_Meeting_11_Meeting_Meeting_11_176.jpg +./11--Meeting/11_Meeting_Meeting_11_Meeting_Meeting_11_807.jpg +./11--Meeting/11_Meeting_Meeting_11_Meeting_Meeting_11_385.jpg +./11--Meeting/11_Meeting_Meeting_11_Meeting_Meeting_11_102.jpg +./11--Meeting/11_Meeting_Meeting_11_Meeting_Meeting_11_663.jpg +./11--Meeting/11_Meeting_Meeting_11_Meeting_Meeting_11_865.jpg +./11--Meeting/11_Meeting_Meeting_11_Meeting_Meeting_11_644.jpg +./11--Meeting/11_Meeting_Meeting_11_Meeting_Meeting_11_507.jpg +./11--Meeting/11_Meeting_Meeting_11_Meeting_Meeting_11_206.jpg +./11--Meeting/11_Meeting_Meeting_11_Meeting_Meeting_11_108.jpg +./11--Meeting/11_Meeting_Meeting_11_Meeting_Meeting_11_633.jpg +./11--Meeting/11_Meeting_Meeting_11_Meeting_Meeting_11_250.jpg +./11--Meeting/11_Meeting_Meeting_11_Meeting_Meeting_11_529.jpg +./36--Football/36_Football_americanfootball_ball_36_81.jpg +./36--Football/36_Football_americanfootball_ball_36_327.jpg +./36--Football/36_Football_Football_36_194.jpg +./36--Football/36_Football_americanfootball_ball_36_126.jpg +./36--Football/36_Football_Football_36_23.jpg +./36--Football/36_Football_americanfootball_ball_36_631.jpg +./36--Football/36_Football_americanfootball_ball_36_234.jpg +./36--Football/36_Football_americanfootball_ball_36_681.jpg +./36--Football/36_Football_americanfootball_ball_36_16.jpg +./36--Football/36_Football_americanfootball_ball_36_162.jpg +./36--Football/36_Football_americanfootball_ball_36_358.jpg +./36--Football/36_Football_americanfootball_ball_36_396.jpg +./36--Football/36_Football_americanfootball_ball_36_321.jpg +./36--Football/36_Football_americanfootball_ball_36_693.jpg +./36--Football/36_Football_americanfootball_ball_36_38.jpg +./36--Football/36_Football_americanfootball_ball_36_853.jpg +./36--Football/36_Football_Football_36_202.jpg +./36--Football/36_Football_americanfootball_ball_36_487.jpg +./36--Football/36_Football_americanfootball_ball_36_27.jpg 
+./36--Football/36_Football_americanfootball_ball_36_132.jpg +./36--Football/36_Football_Football_36_157.jpg +./36--Football/36_Football_Football_36_62.jpg +./36--Football/36_Football_americanfootball_ball_36_373.jpg +./36--Football/36_Football_Football_36_110.jpg +./36--Football/36_Football_americanfootball_ball_36_301.jpg +./36--Football/36_Football_americanfootball_ball_36_257.jpg +./36--Football/36_Football_americanfootball_ball_36_25.jpg +./36--Football/36_Football_americanfootball_ball_36_615.jpg +./36--Football/36_Football_americanfootball_ball_36_265.jpg +./36--Football/36_Football_Football_36_108.jpg +./36--Football/36_Football_americanfootball_ball_36_6.jpg +./36--Football/36_Football_americanfootball_ball_36_111.jpg +./36--Football/36_Football_americanfootball_ball_36_273.jpg +./36--Football/36_Football_Football_36_138.jpg +./36--Football/36_Football_americanfootball_ball_36_647.jpg +./36--Football/36_Football_americanfootball_ball_36_114.jpg +./36--Football/36_Football_americanfootball_ball_36_510.jpg +./36--Football/36_Football_americanfootball_ball_36_456.jpg +./36--Football/36_Football_americanfootball_ball_36_279.jpg +./36--Football/36_Football_americanfootball_ball_36_526.jpg +./36--Football/36_Football_Football_36_80.jpg +./36--Football/36_Football_americanfootball_ball_36_1021.jpg +./20--Family_Group/20_Family_Group_Family_Group_20_453.jpg +./20--Family_Group/20_Family_Group_Family_Group_20_663.jpg +./20--Family_Group/20_Family_Group_Family_Group_20_193.jpg +./20--Family_Group/20_Family_Group_Family_Group_20_294.jpg +./20--Family_Group/20_Family_Group_Family_Group_20_835.jpg +./20--Family_Group/20_Family_Group_Family_Group_20_556.jpg +./20--Family_Group/20_Family_Group_Family_Group_20_360.jpg +./20--Family_Group/20_Family_Group_Family_Group_20_339.jpg +./20--Family_Group/20_Family_Group_Family_Group_20_775.jpg +./20--Family_Group/20_Family_Group_Family_Group_20_672.jpg +./20--Family_Group/20_Family_Group_Family_Group_20_599.jpg +./20--Family_Group/20_Family_Group_Family_Group_20_483.jpg +./20--Family_Group/20_Family_Group_Family_Group_20_540.jpg +./20--Family_Group/20_Family_Group_Family_Group_20_108.jpg +./20--Family_Group/20_Family_Group_Family_Group_20_411.jpg +./20--Family_Group/20_Family_Group_Family_Group_20_100.jpg +./20--Family_Group/20_Family_Group_Family_Group_20_374.jpg +./20--Family_Group/20_Family_Group_Family_Group_20_750.jpg +./20--Family_Group/20_Family_Group_Family_Group_20_579.jpg +./20--Family_Group/20_Family_Group_Family_Group_20_255.jpg +./20--Family_Group/20_Family_Group_Family_Group_20_109.jpg +./20--Family_Group/20_Family_Group_Family_Group_20_493.jpg +./20--Family_Group/20_Family_Group_Family_Group_20_318.jpg +./20--Family_Group/20_Family_Group_Family_Group_20_914.jpg +./20--Family_Group/20_Family_Group_Family_Group_20_636.jpg +./20--Family_Group/20_Family_Group_Family_Group_20_90.jpg +./20--Family_Group/20_Family_Group_Family_Group_20_1026.jpg +./20--Family_Group/20_Family_Group_Family_Group_20_427.jpg +./20--Family_Group/20_Family_Group_Family_Group_20_702.jpg +./20--Family_Group/20_Family_Group_Family_Group_20_760.jpg +./20--Family_Group/20_Family_Group_Family_Group_20_843.jpg +./20--Family_Group/20_Family_Group_Family_Group_20_64.jpg +./20--Family_Group/20_Family_Group_Family_Group_20_277.jpg +./20--Family_Group/20_Family_Group_Family_Group_20_272.jpg +./20--Family_Group/20_Family_Group_Family_Group_20_27.jpg +./20--Family_Group/20_Family_Group_Family_Group_20_1037.jpg +./20--Family_Group/20_Family_Group_Family_Group_20_72.jpg 
+./20--Family_Group/20_Family_Group_Family_Group_20_1015.jpg +./20--Family_Group/20_Family_Group_Family_Group_20_22.jpg +./20--Family_Group/20_Family_Group_Family_Group_20_799.jpg +./20--Family_Group/20_Family_Group_Family_Group_20_326.jpg +./20--Family_Group/20_Family_Group_Family_Group_20_33.jpg +./20--Family_Group/20_Family_Group_Family_Group_20_101.jpg +./20--Family_Group/20_Family_Group_Family_Group_20_1003.jpg +./20--Family_Group/20_Family_Group_Family_Group_20_412.jpg +./20--Family_Group/20_Family_Group_Family_Group_20_849.jpg +./20--Family_Group/20_Family_Group_Family_Group_20_648.jpg +./20--Family_Group/20_Family_Group_Family_Group_20_87.jpg +./20--Family_Group/20_Family_Group_Family_Group_20_544.jpg +./20--Family_Group/20_Family_Group_Family_Group_20_227.jpg +./20--Family_Group/20_Family_Group_Family_Group_20_730.jpg +./20--Family_Group/20_Family_Group_Family_Group_20_739.jpg +./20--Family_Group/20_Family_Group_Family_Group_20_282.jpg +./20--Family_Group/20_Family_Group_Family_Group_20_387.jpg +./20--Family_Group/20_Family_Group_Family_Group_20_447.jpg +./20--Family_Group/20_Family_Group_Family_Group_20_62.jpg +./20--Family_Group/20_Family_Group_Family_Group_20_696.jpg +./20--Family_Group/20_Family_Group_Family_Group_20_759.jpg +./35--Basketball/35_Basketball_basketballgame_ball_35_479.jpg +./35--Basketball/35_Basketball_Basketball_35_457.jpg +./35--Basketball/35_Basketball_basketballgame_ball_35_736.jpg +./35--Basketball/35_Basketball_playingbasketball_35_19.jpg +./35--Basketball/35_Basketball_Basketball_35_684.jpg +./35--Basketball/35_Basketball_playingbasketball_35_566.jpg +./35--Basketball/35_Basketball_basketballgame_ball_35_391.jpg +./35--Basketball/35_Basketball_playingbasketball_35_612.jpg +./35--Basketball/35_Basketball_playingbasketball_35_682.jpg +./35--Basketball/35_Basketball_basketballgame_ball_35_254.jpg +./35--Basketball/35_Basketball_playingbasketball_35_405.jpg +./35--Basketball/35_Basketball_playingbasketball_35_113.jpg +./35--Basketball/35_Basketball_basketballgame_ball_35_192.jpg +./35--Basketball/35_Basketball_playingbasketball_35_730.jpg +./35--Basketball/35_Basketball_basketballgame_ball_35_393.jpg +./35--Basketball/35_Basketball_basketballgame_ball_35_124.jpg +./35--Basketball/35_Basketball_playingbasketball_35_794.jpg +./35--Basketball/35_Basketball_playingbasketball_35_449.jpg +./35--Basketball/35_Basketball_playingbasketball_35_199.jpg +./35--Basketball/35_Basketball_playingbasketball_35_636.jpg +./35--Basketball/35_Basketball_basketballgame_ball_35_256.jpg +./35--Basketball/35_Basketball_Basketball_35_361.jpg +./35--Basketball/35_Basketball_Basketball_35_791.jpg +./35--Basketball/35_Basketball_playingbasketball_35_251.jpg +./35--Basketball/35_Basketball_playingbasketball_35_495.jpg +./35--Basketball/35_Basketball_basketballgame_ball_35_153.jpg +./35--Basketball/35_Basketball_playingbasketball_35_36.jpg +./35--Basketball/35_Basketball_basketballgame_ball_35_341.jpg +./35--Basketball/35_Basketball_playingbasketball_35_651.jpg +./35--Basketball/35_Basketball_Basketball_35_180.jpg +./35--Basketball/35_Basketball_Basketball_35_549.jpg +./35--Basketball/35_Basketball_Basketball_35_449.jpg +./35--Basketball/35_Basketball_playingbasketball_35_556.jpg +./35--Basketball/35_Basketball_basketballgame_ball_35_50.jpg +./35--Basketball/35_Basketball_playingbasketball_35_433.jpg +./35--Basketball/35_Basketball_playingbasketball_35_127.jpg +./35--Basketball/35_Basketball_playingbasketball_35_764.jpg +./35--Basketball/35_Basketball_playingbasketball_35_276.jpg 
+./35--Basketball/35_Basketball_playingbasketball_35_248.jpg +./35--Basketball/35_Basketball_basketballgame_ball_35_80.jpg +./35--Basketball/35_Basketball_playingbasketball_35_588.jpg +./35--Basketball/35_Basketball_playingbasketball_35_491.jpg +./35--Basketball/35_Basketball_basketballgame_ball_35_178.jpg +./35--Basketball/35_Basketball_Basketball_35_107.jpg +./35--Basketball/35_Basketball_playingbasketball_35_209.jpg +./35--Basketball/35_Basketball_playingbasketball_35_252.jpg +./35--Basketball/35_Basketball_playingbasketball_35_366.jpg +./35--Basketball/35_Basketball_Basketball_35_185.jpg +./35--Basketball/35_Basketball_basketballgame_ball_35_133.jpg +./35--Basketball/35_Basketball_playingbasketball_35_65.jpg +./35--Basketball/35_Basketball_basketballgame_ball_35_937.jpg +./35--Basketball/35_Basketball_Basketball_35_664.jpg +./35--Basketball/35_Basketball_basketballgame_ball_35_478.jpg +./35--Basketball/35_Basketball_playingbasketball_35_431.jpg +./35--Basketball/35_Basketball_Basketball_35_737.jpg +./35--Basketball/35_Basketball_Basketball_35_209.jpg +./35--Basketball/35_Basketball_Basketball_35_158.jpg +./35--Basketball/35_Basketball_basketballgame_ball_35_201.jpg +./35--Basketball/35_Basketball_basketballgame_ball_35_389.jpg +./35--Basketball/35_Basketball_basketballgame_ball_35_375.jpg +./35--Basketball/35_Basketball_playingbasketball_35_362.jpg +./35--Basketball/35_Basketball_playingbasketball_35_795.jpg +./35--Basketball/35_Basketball_basketballgame_ball_35_858.jpg +./35--Basketball/35_Basketball_basketballgame_ball_35_513.jpg +./35--Basketball/35_Basketball_basketballgame_ball_35_309.jpg +./35--Basketball/35_Basketball_playingbasketball_35_350.jpg +./35--Basketball/35_Basketball_playingbasketball_35_523.jpg +./35--Basketball/35_Basketball_playingbasketball_35_417.jpg +./35--Basketball/35_Basketball_basketballgame_ball_35_542.jpg +./35--Basketball/35_Basketball_basketballgame_ball_35_429.jpg +./35--Basketball/35_Basketball_playingbasketball_35_732.jpg +./35--Basketball/35_Basketball_Basketball_35_754.jpg +./35--Basketball/35_Basketball_basketballgame_ball_35_998.jpg +./35--Basketball/35_Basketball_basketballgame_ball_35_82.jpg +./35--Basketball/35_Basketball_playingbasketball_35_3.jpg +./35--Basketball/35_Basketball_basketballgame_ball_35_64.jpg +./35--Basketball/35_Basketball_basketballgame_ball_35_290.jpg +./35--Basketball/35_Basketball_basketballgame_ball_35_904.jpg +./35--Basketball/35_Basketball_playingbasketball_35_632.jpg +./35--Basketball/35_Basketball_playingbasketball_35_476.jpg +./35--Basketball/35_Basketball_basketballgame_ball_35_197.jpg +./35--Basketball/35_Basketball_playingbasketball_35_585.jpg +./35--Basketball/35_Basketball_Basketball_35_458.jpg +./35--Basketball/35_Basketball_Basketball_35_712.jpg +./35--Basketball/35_Basketball_basketballgame_ball_35_287.jpg +./35--Basketball/35_Basketball_basketballgame_ball_35_460.jpg +./35--Basketball/35_Basketball_playingbasketball_35_823.jpg +./35--Basketball/35_Basketball_Basketball_35_393.jpg +./35--Basketball/35_Basketball_playingbasketball_35_876.jpg +./35--Basketball/35_Basketball_playingbasketball_35_2.jpg +./35--Basketball/35_Basketball_playingbasketball_35_91.jpg +./35--Basketball/35_Basketball_basketballgame_ball_35_216.jpg +./35--Basketball/35_Basketball_playingbasketball_35_511.jpg +./35--Basketball/35_Basketball_playingbasketball_35_195.jpg +./35--Basketball/35_Basketball_playingbasketball_35_73.jpg +./35--Basketball/35_Basketball_playingbasketball_35_78.jpg 
+./35--Basketball/35_Basketball_playingbasketball_35_644.jpg +./35--Basketball/35_Basketball_playingbasketball_35_219.jpg +./35--Basketball/35_Basketball_playingbasketball_35_818.jpg +./35--Basketball/35_Basketball_Basketball_35_653.jpg +./35--Basketball/35_Basketball_playingbasketball_35_782.jpg +./35--Basketball/35_Basketball_playingbasketball_35_134.jpg +./35--Basketball/35_Basketball_basketballgame_ball_35_565.jpg +./35--Basketball/35_Basketball_basketballgame_ball_35_276.jpg +./35--Basketball/35_Basketball_playingbasketball_35_582.jpg +./35--Basketball/35_Basketball_Basketball_35_529.jpg +./35--Basketball/35_Basketball_Basketball_35_801.jpg +./35--Basketball/35_Basketball_Basketball_35_304.jpg +./35--Basketball/35_Basketball_basketballgame_ball_35_709.jpg +./35--Basketball/35_Basketball_playingbasketball_35_377.jpg +./35--Basketball/35_Basketball_playingbasketball_35_619.jpg +./35--Basketball/35_Basketball_basketballgame_ball_35_208.jpg +./35--Basketball/35_Basketball_basketballgame_ball_35_681.jpg +./35--Basketball/35_Basketball_basketballgame_ball_35_423.jpg +./35--Basketball/35_Basketball_Basketball_35_635.jpg +./35--Basketball/35_Basketball_playingbasketball_35_156.jpg +./35--Basketball/35_Basketball_playingbasketball_35_674.jpg +./35--Basketball/35_Basketball_basketballgame_ball_35_662.jpg +./35--Basketball/35_Basketball_playingbasketball_35_606.jpg +./35--Basketball/35_Basketball_playingbasketball_35_283.jpg +./35--Basketball/35_Basketball_basketballgame_ball_35_446.jpg +./35--Basketball/35_Basketball_basketballgame_ball_35_689.jpg +./35--Basketball/35_Basketball_basketballgame_ball_35_827.jpg +./35--Basketball/35_Basketball_playingbasketball_35_135.jpg +./35--Basketball/35_Basketball_playingbasketball_35_279.jpg +./35--Basketball/35_Basketball_Basketball_35_579.jpg +./35--Basketball/35_Basketball_playingbasketball_35_555.jpg +./35--Basketball/35_Basketball_basketballgame_ball_35_412.jpg +./35--Basketball/35_Basketball_Basketball_35_327.jpg +./35--Basketball/35_Basketball_playingbasketball_35_13.jpg +./35--Basketball/35_Basketball_playingbasketball_35_11.jpg +./43--Row_Boat/43_Row_Boat_Rowboat_43_301.jpg +./43--Row_Boat/43_Row_Boat_Canoe_43_125.jpg +./43--Row_Boat/43_Row_Boat_Rowboat_43_1024.jpg +./43--Row_Boat/43_Row_Boat_Rowboat_43_341.jpg +./43--Row_Boat/43_Row_Boat_Rowboat_43_717.jpg +./43--Row_Boat/43_Row_Boat_Canoe_43_234.jpg +./43--Row_Boat/43_Row_Boat_Canoe_43_251.jpg +./43--Row_Boat/43_Row_Boat_Rowboat_43_287.jpg +./43--Row_Boat/43_Row_Boat_Rowboat_43_907.jpg +./43--Row_Boat/43_Row_Boat_Rowboat_43_13.jpg +./43--Row_Boat/43_Row_Boat_Rowboat_43_839.jpg +./43--Row_Boat/43_Row_Boat_Rowboat_43_688.jpg +./43--Row_Boat/43_Row_Boat_Canoe_43_940.jpg +./43--Row_Boat/43_Row_Boat_Canoe_43_133.jpg +./43--Row_Boat/43_Row_Boat_Rowboat_43_758.jpg +./43--Row_Boat/43_Row_Boat_Canoe_43_429.jpg +./43--Row_Boat/43_Row_Boat_Rowboat_43_563.jpg +./43--Row_Boat/43_Row_Boat_Canoe_43_881.jpg +./43--Row_Boat/43_Row_Boat_Rowboat_43_1.jpg +./43--Row_Boat/43_Row_Boat_Canoe_43_956.jpg +./43--Row_Boat/43_Row_Boat_Canoe_43_81.jpg +./43--Row_Boat/43_Row_Boat_Canoe_43_458.jpg +./43--Row_Boat/43_Row_Boat_Rowboat_43_797.jpg +./43--Row_Boat/43_Row_Boat_Canoe_43_93.jpg +./43--Row_Boat/43_Row_Boat_Canoe_43_1047.jpg +./43--Row_Boat/43_Row_Boat_Canoe_43_51.jpg +./43--Row_Boat/43_Row_Boat_Canoe_43_341.jpg +./43--Row_Boat/43_Row_Boat_Canoe_43_538.jpg +./43--Row_Boat/43_Row_Boat_Canoe_43_438.jpg +./43--Row_Boat/43_Row_Boat_Canoe_43_726.jpg +./43--Row_Boat/43_Row_Boat_Canoe_43_565.jpg 
+./43--Row_Boat/43_Row_Boat_Canoe_43_757.jpg +./43--Row_Boat/43_Row_Boat_Rowboat_43_500.jpg +./43--Row_Boat/43_Row_Boat_Canoe_43_372.jpg +./43--Row_Boat/43_Row_Boat_Canoe_43_942.jpg +./43--Row_Boat/43_Row_Boat_Canoe_43_325.jpg +./43--Row_Boat/43_Row_Boat_Canoe_43_547.jpg +./43--Row_Boat/43_Row_Boat_Rowboat_43_106.jpg +./43--Row_Boat/43_Row_Boat_Canoe_43_227.jpg +./43--Row_Boat/43_Row_Boat_Canoe_43_784.jpg +./43--Row_Boat/43_Row_Boat_Canoe_43_276.jpg +./43--Row_Boat/43_Row_Boat_Canoe_43_842.jpg +./43--Row_Boat/43_Row_Boat_Canoe_43_1048.jpg +./8--Election_Campain/8_Election_Campain_Election_Campaign_8_437.jpg +./8--Election_Campain/8_Election_Campain_Election_Campaign_8_300.jpg +./8--Election_Campain/8_Election_Campain_Election_Campaign_8_25.jpg +./8--Election_Campain/8_Election_Campain_Election_Campaign_8_252.jpg +./8--Election_Campain/8_Election_Campain_Election_Campaign_8_529.jpg +./8--Election_Campain/8_Election_Campain_Election_Campaign_8_118.jpg +./8--Election_Campain/8_Election_Campain_Election_Campaign_8_173.jpg +./8--Election_Campain/8_Election_Campain_Election_Campaign_8_69.jpg +./8--Election_Campain/8_Election_Campain_Election_Campaign_8_218.jpg +./8--Election_Campain/8_Election_Campain_Election_Campaign_8_76.jpg +./8--Election_Campain/8_Election_Campain_Election_Campaign_8_451.jpg +./8--Election_Campain/8_Election_Campain_Election_Campaign_8_266.jpg +./8--Election_Campain/8_Election_Campain_Election_Campaign_8_347.jpg +./8--Election_Campain/8_Election_Campain_Election_Campaign_8_297.jpg +./8--Election_Campain/8_Election_Campain_Election_Campaign_8_357.jpg +./8--Election_Campain/8_Election_Campain_Election_Campaign_8_433.jpg +./8--Election_Campain/8_Election_Campain_Election_Campaign_8_269.jpg +./8--Election_Campain/8_Election_Campain_Election_Campaign_8_498.jpg +./8--Election_Campain/8_Election_Campain_Election_Campaign_8_120.jpg +./8--Election_Campain/8_Election_Campain_Election_Campaign_8_157.jpg +./8--Election_Campain/8_Election_Campain_Election_Campaign_8_236.jpg +./8--Election_Campain/8_Election_Campain_Election_Campaign_8_620.jpg +./8--Election_Campain/8_Election_Campain_Election_Campaign_8_84.jpg +./8--Election_Campain/8_Election_Campain_Election_Campaign_8_452.jpg +./8--Election_Campain/8_Election_Campain_Election_Campaign_8_412.jpg +./8--Election_Campain/8_Election_Campain_Election_Campaign_8_174.jpg +./8--Election_Campain/8_Election_Campain_Election_Campaign_8_113.jpg +./8--Election_Campain/8_Election_Campain_Election_Campaign_8_133.jpg +./8--Election_Campain/8_Election_Campain_Election_Campaign_8_553.jpg +./8--Election_Campain/8_Election_Campain_Election_Campaign_8_244.jpg +./8--Election_Campain/8_Election_Campain_Election_Campaign_8_148.jpg +./8--Election_Campain/8_Election_Campain_Election_Campaign_8_86.jpg +./8--Election_Campain/8_Election_Campain_Election_Campaign_8_32.jpg +./10--People_Marching/10_People_Marching_People_Marching_2_173.jpg +./10--People_Marching/10_People_Marching_People_Marching_2_36.jpg +./10--People_Marching/10_People_Marching_People_Marching_10_People_Marching_People_Marching_10_People_Marching_People_Marching_10_1020.jpg +./10--People_Marching/10_People_Marching_People_Marching_2_27.jpg +./10--People_Marching/10_People_Marching_People_Marching_10_People_Marching_People_Marching_10_848.jpg +./10--People_Marching/10_People_Marching_People_Marching_2_404.jpg +./10--People_Marching/10_People_Marching_People_Marching_2_934.jpg +./10--People_Marching/10_People_Marching_People_Marching_2_60.jpg 
+./10--People_Marching/10_People_Marching_People_Marching_10_People_Marching_People_Marching_10_People_Marching_People_Marching_10_716.jpg +./10--People_Marching/10_People_Marching_People_Marching_10_People_Marching_People_Marching_10_People_Marching_People_Marching_10_1024.jpg +./10--People_Marching/10_People_Marching_People_Marching_2_496.jpg +./10--People_Marching/10_People_Marching_People_Marching_2_898.jpg +./10--People_Marching/10_People_Marching_People_Marching_2_395.jpg +./10--People_Marching/10_People_Marching_People_Marching_2_401.jpg +./10--People_Marching/10_People_Marching_People_Marching_2_277.jpg +./10--People_Marching/10_People_Marching_People_Marching_2_373.jpg +./10--People_Marching/10_People_Marching_People_Marching_2_316.jpg +./10--People_Marching/10_People_Marching_People_Marching_2_259.jpg +./10--People_Marching/10_People_Marching_People_Marching_2_256.jpg +./10--People_Marching/10_People_Marching_People_Marching_2_433.jpg +./10--People_Marching/10_People_Marching_People_Marching_2_591.jpg +./10--People_Marching/10_People_Marching_People_Marching_2_1046.jpg +./10--People_Marching/10_People_Marching_People_Marching_2_935.jpg +./10--People_Marching/10_People_Marching_People_Marching_10_People_Marching_People_Marching_10_499.jpg +./10--People_Marching/10_People_Marching_People_Marching_10_People_Marching_People_Marching_10_447.jpg +./10--People_Marching/10_People_Marching_People_Marching_10_People_Marching_People_Marching_10_People_Marching_People_Marching_10_938.jpg +./10--People_Marching/10_People_Marching_People_Marching_2_430.jpg +./10--People_Marching/10_People_Marching_People_Marching_2_40.jpg +./10--People_Marching/10_People_Marching_People_Marching_2_678.jpg +./10--People_Marching/10_People_Marching_People_Marching_2_307.jpg +./10--People_Marching/10_People_Marching_People_Marching_2_236.jpg +./10--People_Marching/10_People_Marching_People_Marching_2_2.jpg +./10--People_Marching/10_People_Marching_People_Marching_2_269.jpg +./10--People_Marching/10_People_Marching_People_Marching_2_191.jpg +./10--People_Marching/10_People_Marching_People_Marching_10_People_Marching_People_Marching_10_240.jpg +./10--People_Marching/10_People_Marching_People_Marching_10_People_Marching_People_Marching_10_People_Marching_People_Marching_10_933.jpg +./10--People_Marching/10_People_Marching_People_Marching_2_239.jpg +./10--People_Marching/10_People_Marching_People_Marching_10_People_Marching_People_Marching_10_368.jpg +./10--People_Marching/10_People_Marching_People_Marching_10_People_Marching_People_Marching_10_524.jpg +./10--People_Marching/10_People_Marching_People_Marching_10_People_Marching_People_Marching_10_People_Marching_People_Marching_10_162.jpg +./10--People_Marching/10_People_Marching_People_Marching_10_People_Marching_People_Marching_10_People_Marching_People_Marching_10_619.jpg +./10--People_Marching/10_People_Marching_People_Marching_2_944.jpg +./10--People_Marching/10_People_Marching_People_Marching_2_171.jpg +./10--People_Marching/10_People_Marching_People_Marching_2_668.jpg +./10--People_Marching/10_People_Marching_People_Marching_10_People_Marching_People_Marching_10_People_Marching_People_Marching_10_674.jpg +./10--People_Marching/10_People_Marching_People_Marching_2_823.jpg +./10--People_Marching/10_People_Marching_People_Marching_10_People_Marching_People_Marching_10_69.jpg +./10--People_Marching/10_People_Marching_People_Marching_2_577.jpg +./10--People_Marching/10_People_Marching_People_Marching_10_People_Marching_People_Marching_10_552.jpg 
+./10--People_Marching/10_People_Marching_People_Marching_2_793.jpg +./10--People_Marching/10_People_Marching_People_Marching_2_34.jpg +./10--People_Marching/10_People_Marching_People_Marching_2_822.jpg +./10--People_Marching/10_People_Marching_People_Marching_2_498.jpg +./10--People_Marching/10_People_Marching_People_Marching_2_131.jpg +./10--People_Marching/10_People_Marching_People_Marching_2_514.jpg +./10--People_Marching/10_People_Marching_People_Marching_2_638.jpg +./55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_380.jpg +./55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_424.jpg +./55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_343.jpg +./55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_764.jpg +./55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_365.jpg +./55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_523.jpg +./55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_183.jpg +./55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_637.jpg +./55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_643.jpg +./55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_708.jpg +./55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_773.jpg +./55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_932.jpg +./55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_640.jpg +./55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_12.jpg +./55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_154.jpg +./55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_45.jpg +./55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_721.jpg +./55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_726.jpg +./55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_786.jpg +./55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_50.jpg +./55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_531.jpg +./55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_414.jpg +./55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_686.jpg +./55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_263.jpg +./55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_569.jpg +./55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_196.jpg +./55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_951.jpg +./55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_298.jpg +./55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_155.jpg +./55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_789.jpg +./55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_325.jpg +./55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_648.jpg +./55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_130.jpg +./55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_867.jpg +./55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_218.jpg +./55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_588.jpg +./55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_809.jpg +./55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_770.jpg +./55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_1027.jpg +./55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_245.jpg 
+./55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_516.jpg +./55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_122.jpg +./55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_801.jpg +./55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_838.jpg +./55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_305.jpg +./55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_499.jpg +./55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_1015.jpg +./55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_774.jpg +./55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_1013.jpg +./55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_894.jpg +./55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_466.jpg +./55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_188.jpg +./55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_711.jpg +./55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_208.jpg +./55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_859.jpg +./55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_179.jpg +./55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_620.jpg +./55--Sports_Coach_Trainer/55_Sports_Coach_Trainer_sportcoaching_55_181.jpg +./32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_595.jpg +./32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_204.jpg +./32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_932.jpg +./32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_462.jpg +./32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_723.jpg +./32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_44.jpg +./32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_987.jpg +./32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_812.jpg +./32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_870.jpg +./32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_170.jpg +./32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_530.jpg +./32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_529.jpg +./32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_116.jpg +./32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_468.jpg +./32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_135.jpg +./32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_944.jpg +./32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_594.jpg +./32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_1039.jpg +./32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_860.jpg +./32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_169.jpg +./32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_434.jpg +./32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_26.jpg +./32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_90.jpg +./32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_512.jpg +./32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_209.jpg +./32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_738.jpg +./32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_68.jpg +./32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_788.jpg +./32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_786.jpg +./32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_566.jpg +./32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_658.jpg +./32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_101.jpg 
+./32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_408.jpg +./32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_357.jpg +./32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_692.jpg +./32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_134.jpg +./32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_494.jpg +./32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_262.jpg +./32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_516.jpg +./32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_110.jpg +./32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_1038.jpg +./32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_624.jpg +./32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_42.jpg +./32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_400.jpg +./32--Worker_Laborer/32_Worker_Laborer_Worker_Laborer_32_443.jpg +./33--Running/33_Running_Running_33_891.jpg +./33--Running/33_Running_Running_33_35.jpg +./33--Running/33_Running_Running_33_517.jpg +./33--Running/33_Running_Running_33_747.jpg +./33--Running/33_Running_Running_33_577.jpg +./33--Running/33_Running_Running_33_760.jpg +./33--Running/33_Running_Running_33_586.jpg +./33--Running/33_Running_Running_33_316.jpg +./33--Running/33_Running_Running_33_209.jpg +./33--Running/33_Running_Running_33_538.jpg +./33--Running/33_Running_Running_33_569.jpg +./33--Running/33_Running_Running_33_203.jpg +./33--Running/33_Running_Running_33_490.jpg +./33--Running/33_Running_Running_33_771.jpg +./33--Running/33_Running_Running_33_475.jpg +./33--Running/33_Running_Running_33_266.jpg +./33--Running/33_Running_Running_33_17.jpg +./33--Running/33_Running_Running_33_411.jpg +./33--Running/33_Running_Running_33_341.jpg +./33--Running/33_Running_Running_33_119.jpg +./33--Running/33_Running_Running_33_332.jpg +./33--Running/33_Running_Running_33_547.jpg +./33--Running/33_Running_Running_33_786.jpg +./33--Running/33_Running_Running_33_44.jpg +./33--Running/33_Running_Running_33_286.jpg +./33--Running/33_Running_Running_33_107.jpg +./12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_50.jpg +./12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_685.jpg +./12--Group/12_Group_Large_Group_12_Group_Large_Group_12_322.jpg +./12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_171.jpg +./12--Group/12_Group_Large_Group_12_Group_Large_Group_12_276.jpg +./12--Group/12_Group_Large_Group_12_Group_Large_Group_12_736.jpg +./12--Group/12_Group_Group_12_Group_Group_12_411.jpg +./12--Group/12_Group_Group_12_Group_Group_12_912.jpg +./12--Group/12_Group_Group_12_Group_Group_12_759.jpg +./12--Group/12_Group_Large_Group_12_Group_Large_Group_12_853.jpg +./12--Group/12_Group_Group_12_Group_Group_12_728.jpg +./12--Group/12_Group_Large_Group_12_Group_Large_Group_12_212.jpg +./12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_504.jpg +./12--Group/12_Group_Group_12_Group_Group_12_62.jpg +./12--Group/12_Group_Group_12_Group_Group_12_315.jpg +./12--Group/12_Group_Group_12_Group_Group_12_165.jpg +./12--Group/12_Group_Large_Group_12_Group_Large_Group_12_527.jpg +./12--Group/12_Group_Group_12_Group_Group_12_268.jpg +./12--Group/12_Group_Large_Group_12_Group_Large_Group_12_196.jpg +./12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_855.jpg +./12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_458.jpg +./12--Group/12_Group_Large_Group_12_Group_Large_Group_12_107.jpg 
+./12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_270.jpg +./12--Group/12_Group_Large_Group_12_Group_Large_Group_12_771.jpg +./12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_283.jpg +./12--Group/12_Group_Large_Group_12_Group_Large_Group_12_295.jpg +./12--Group/12_Group_Large_Group_12_Group_Large_Group_12_662.jpg +./12--Group/12_Group_Large_Group_12_Group_Large_Group_12_253.jpg +./12--Group/12_Group_Large_Group_12_Group_Large_Group_12_1007.jpg +./12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_789.jpg +./12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_313.jpg +./12--Group/12_Group_Large_Group_12_Group_Large_Group_12_228.jpg +./12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_707.jpg +./12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_67.jpg +./12--Group/12_Group_Large_Group_12_Group_Large_Group_12_213.jpg +./12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_115.jpg +./12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_850.jpg +./12--Group/12_Group_Large_Group_12_Group_Large_Group_12_315.jpg +./12--Group/12_Group_Group_12_Group_Group_12_407.jpg +./12--Group/12_Group_Group_12_Group_Group_12_179.jpg +./12--Group/12_Group_Group_12_Group_Group_12_247.jpg +./12--Group/12_Group_Large_Group_12_Group_Large_Group_12_13.jpg +./12--Group/12_Group_Group_12_Group_Group_12_80.jpg +./12--Group/12_Group_Group_12_Group_Group_12_144.jpg +./12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_996.jpg +./12--Group/12_Group_Large_Group_12_Group_Large_Group_12_94.jpg +./12--Group/12_Group_Group_12_Group_Group_12_218.jpg +./12--Group/12_Group_Group_12_Group_Group_12_772.jpg +./12--Group/12_Group_Large_Group_12_Group_Large_Group_12_72.jpg +./12--Group/12_Group_Large_Group_12_Group_Large_Group_12_103.jpg +./12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_28.jpg +./12--Group/12_Group_Group_12_Group_Group_12_101.jpg +./12--Group/12_Group_Large_Group_12_Group_Large_Group_12_550.jpg +./12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_500.jpg +./12--Group/12_Group_Large_Group_12_Group_Large_Group_12_613.jpg +./12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_126.jpg +./12--Group/12_Group_Group_12_Group_Group_12_610.jpg +./12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_55.jpg +./12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_244.jpg +./12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_653.jpg +./12--Group/12_Group_Group_12_Group_Group_12_578.jpg +./12--Group/12_Group_Group_12_Group_Group_12_301.jpg +./12--Group/12_Group_Large_Group_12_Group_Large_Group_12_942.jpg +./12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_274.jpg +./12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_64.jpg +./12--Group/12_Group_Group_12_Group_Group_12_331.jpg +./12--Group/12_Group_Large_Group_12_Group_Large_Group_12_650.jpg +./12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_186.jpg +./12--Group/12_Group_Large_Group_12_Group_Large_Group_12_143.jpg +./12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_83.jpg +./12--Group/12_Group_Group_12_Group_Group_12_735.jpg +./12--Group/12_Group_Group_12_Group_Group_12_478.jpg +./12--Group/12_Group_Group_12_Group_Group_12_293.jpg 
+./12--Group/12_Group_Large_Group_12_Group_Large_Group_12_557.jpg +./12--Group/12_Group_Group_12_Group_Group_12_112.jpg +./12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_340.jpg +./12--Group/12_Group_Large_Group_12_Group_Large_Group_12_551.jpg +./12--Group/12_Group_Group_12_Group_Group_12_249.jpg +./12--Group/12_Group_Group_12_Group_Group_12_182.jpg +./12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_852.jpg +./12--Group/12_Group_Large_Group_12_Group_Large_Group_12_43.jpg +./12--Group/12_Group_Group_12_Group_Group_12_10.jpg +./12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_644.jpg +./12--Group/12_Group_Group_12_Group_Group_12_367.jpg +./12--Group/12_Group_Group_12_Group_Group_12_28.jpg +./12--Group/12_Group_Large_Group_12_Group_Large_Group_12_286.jpg +./12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_868.jpg +./12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_330.jpg +./12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_927.jpg +./12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_364.jpg +./12--Group/12_Group_Group_12_Group_Group_12_823.jpg +./12--Group/12_Group_Group_12_Group_Group_12_198.jpg +./12--Group/12_Group_Large_Group_12_Group_Large_Group_12_354.jpg +./12--Group/12_Group_Group_12_Group_Group_12_519.jpg +./12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_778.jpg +./12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_418.jpg +./12--Group/12_Group_Group_12_Group_Group_12_417.jpg +./12--Group/12_Group_Large_Group_12_Group_Large_Group_12_461.jpg +./12--Group/12_Group_Group_12_Group_Group_12_253.jpg +./12--Group/12_Group_Group_12_Group_Group_12_354.jpg +./12--Group/12_Group_Group_12_Group_Group_12_123.jpg +./12--Group/12_Group_Group_12_Group_Group_12_227.jpg +./12--Group/12_Group_Large_Group_12_Group_Large_Group_12_120.jpg +./12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_60.jpg +./12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_503.jpg +./12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_759.jpg +./12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_602.jpg +./12--Group/12_Group_Group_12_Group_Group_12_59.jpg +./12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_551.jpg +./12--Group/12_Group_Group_12_Group_Group_12_732.jpg +./12--Group/12_Group_Large_Group_12_Group_Large_Group_12_849.jpg +./12--Group/12_Group_Group_12_Group_Group_12_434.jpg +./12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_353.jpg +./12--Group/12_Group_Group_12_Group_Group_12_153.jpg +./12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_162.jpg +./12--Group/12_Group_Group_12_Group_Group_12_84.jpg +./12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_403.jpg +./12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_21.jpg +./12--Group/12_Group_Large_Group_12_Group_Large_Group_12_379.jpg +./12--Group/12_Group_Large_Group_12_Group_Large_Group_12_15.jpg +./12--Group/12_Group_Group_12_Group_Group_12_794.jpg +./12--Group/12_Group_Large_Group_12_Group_Large_Group_12_162.jpg +./12--Group/12_Group_Large_Group_12_Group_Large_Group_12_86.jpg +./12--Group/12_Group_Group_12_Group_Group_12_522.jpg +./12--Group/12_Group_Group_12_Group_Group_12_843.jpg 
+./12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_536.jpg +./12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_235.jpg +./12--Group/12_Group_Large_Group_12_Group_Large_Group_12_607.jpg +./12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_442.jpg +./12--Group/12_Group_Group_12_Group_Group_12_935.jpg +./12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_461.jpg +./12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_412.jpg +./12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_617.jpg +./12--Group/12_Group_Large_Group_12_Group_Large_Group_12_112.jpg +./12--Group/12_Group_Group_12_Group_Group_12_379.jpg +./12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_138.jpg +./12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_583.jpg +./12--Group/12_Group_Group_12_Group_Group_12_38.jpg +./12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_889.jpg +./12--Group/12_Group_Large_Group_12_Group_Large_Group_12_448.jpg +./12--Group/12_Group_Group_12_Group_Group_12_29.jpg +./12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_319.jpg +./12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_211.jpg +./12--Group/12_Group_Large_Group_12_Group_Large_Group_12_495.jpg +./12--Group/12_Group_Team_Organized_Group_12_Group_Team_Organized_Group_12_101.jpg +./12--Group/12_Group_Large_Group_12_Group_Large_Group_12_575.jpg +./34--Baseball/34_Baseball_Baseball_34_16.jpg +./34--Baseball/34_Baseball_Baseball_34_73.jpg +./34--Baseball/34_Baseball_Baseball_34_436.jpg +./34--Baseball/34_Baseball_Baseball_34_356.jpg +./34--Baseball/34_Baseball_Baseball_34_895.jpg +./34--Baseball/34_Baseball_Baseball_34_667.jpg +./34--Baseball/34_Baseball_Baseball_34_580.jpg +./34--Baseball/34_Baseball_Baseball_34_622.jpg +./34--Baseball/34_Baseball_Baseball_34_350.jpg +./34--Baseball/34_Baseball_Baseball_34_600.jpg +./34--Baseball/34_Baseball_Baseball_34_391.jpg +./34--Baseball/34_Baseball_Baseball_34_585.jpg +./34--Baseball/34_Baseball_Baseball_34_756.jpg +./34--Baseball/34_Baseball_Baseball_34_608.jpg +./34--Baseball/34_Baseball_Baseball_34_886.jpg +./34--Baseball/34_Baseball_Baseball_34_66.jpg +./34--Baseball/34_Baseball_Baseball_34_829.jpg +./34--Baseball/34_Baseball_Baseball_34_828.jpg +./34--Baseball/34_Baseball_Baseball_34_560.jpg +./34--Baseball/34_Baseball_Baseball_34_171.jpg +./34--Baseball/34_Baseball_Baseball_34_164.jpg +./34--Baseball/34_Baseball_Baseball_34_127.jpg +./34--Baseball/34_Baseball_Baseball_34_143.jpg +./34--Baseball/34_Baseball_Baseball_34_867.jpg +./31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_111.jpg +./31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_420.jpg +./31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_93.jpg +./31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_465.jpg +./31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_162.jpg +./31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_484.jpg +./31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_613.jpg +./31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_720.jpg +./31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_276.jpg +./31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_327.jpg +./31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_685.jpg +./31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_188.jpg 
+./31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_740.jpg +./31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_176.jpg +./31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_34.jpg +./31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_769.jpg +./31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_220.jpg +./31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_888.jpg +./31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_225.jpg +./31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_43.jpg +./31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_726.jpg +./31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_818.jpg +./31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_339.jpg +./31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_572.jpg +./31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_212.jpg +./31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_683.jpg +./31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_932.jpg +./31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_351.jpg +./31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_304.jpg +./31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_214.jpg +./31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_858.jpg +./31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_667.jpg +./31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_267.jpg +./31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_722.jpg +./31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_373.jpg +./31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_517.jpg +./31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_847.jpg +./31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_358.jpg +./31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_227.jpg +./31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_230.jpg +./31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_21.jpg +./31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_927.jpg +./31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_118.jpg +./31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_742.jpg +./31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_195.jpg +./31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_788.jpg +./31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_215.jpg +./31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_915.jpg +./31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_200.jpg +./31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_410.jpg +./31--Waiter_Waitress/31_Waiter_Waitress_Waiter_Waitress_31_842.jpg +./42--Car_Racing/42_Car_Racing_Nascar_42_922.jpg +./42--Car_Racing/42_Car_Racing_Nascar_42_828.jpg +./42--Car_Racing/42_Car_Racing_Car_Racing_42_263.jpg +./42--Car_Racing/42_Car_Racing_Nascar_42_900.jpg +./42--Car_Racing/42_Car_Racing_Nascar_42_650.jpg +./42--Car_Racing/42_Car_Racing_Car_Racing_42_857.jpg +./42--Car_Racing/42_Car_Racing_Nascar_42_440.jpg +./42--Car_Racing/42_Car_Racing_Car_Racing_42_906.jpg +./42--Car_Racing/42_Car_Racing_Nascar_42_482.jpg +./42--Car_Racing/42_Car_Racing_Car_Racing_42_1045.jpg +./42--Car_Racing/42_Car_Racing_Car_Racing_42_939.jpg +./42--Car_Racing/42_Car_Racing_Car_Racing_42_602.jpg +./42--Car_Racing/42_Car_Racing_Nascar_42_661.jpg +./42--Car_Racing/42_Car_Racing_Nascar_42_911.jpg +./42--Car_Racing/42_Car_Racing_Nascar_42_823.jpg +./42--Car_Racing/42_Car_Racing_Car_Racing_42_743.jpg 
+./42--Car_Racing/42_Car_Racing_Car_Racing_42_600.jpg +./42--Car_Racing/42_Car_Racing_Nascar_42_462.jpg +./42--Car_Racing/42_Car_Racing_Nascar_42_468.jpg +./42--Car_Racing/42_Car_Racing_Nascar_42_442.jpg +./40--Gymnastics/40_Gymnastics_Gymnastics_40_401.jpg +./40--Gymnastics/40_Gymnastics_Gymnastics_40_47.jpg +./40--Gymnastics/40_Gymnastics_Gymnastics_40_364.jpg +./40--Gymnastics/40_Gymnastics_Gymnastics_40_389.jpg +./40--Gymnastics/40_Gymnastics_Gymnastics_40_659.jpg +./40--Gymnastics/40_Gymnastics_Gymnastics_40_642.jpg +./40--Gymnastics/40_Gymnastics_Gymnastics_40_255.jpg +./40--Gymnastics/40_Gymnastics_Gymnastics_40_420.jpg +./40--Gymnastics/40_Gymnastics_Gymnastics_40_274.jpg +./40--Gymnastics/40_Gymnastics_Gymnastics_40_869.jpg +./40--Gymnastics/40_Gymnastics_Gymnastics_40_1022.jpg +./40--Gymnastics/40_Gymnastics_Gymnastics_40_596.jpg +./40--Gymnastics/40_Gymnastics_Gymnastics_40_580.jpg +./40--Gymnastics/40_Gymnastics_Gymnastics_40_521.jpg +./40--Gymnastics/40_Gymnastics_Gymnastics_40_894.jpg +./40--Gymnastics/40_Gymnastics_Gymnastics_40_776.jpg +./40--Gymnastics/40_Gymnastics_Gymnastics_40_845.jpg +./40--Gymnastics/40_Gymnastics_Gymnastics_40_171.jpg +./40--Gymnastics/40_Gymnastics_Gymnastics_40_950.jpg +./40--Gymnastics/40_Gymnastics_Gymnastics_40_740.jpg +./40--Gymnastics/40_Gymnastics_Gymnastics_40_115.jpg +./40--Gymnastics/40_Gymnastics_Gymnastics_40_197.jpg +./40--Gymnastics/40_Gymnastics_Gymnastics_40_48.jpg +./40--Gymnastics/40_Gymnastics_Gymnastics_40_612.jpg +./40--Gymnastics/40_Gymnastics_Gymnastics_40_361.jpg +./40--Gymnastics/40_Gymnastics_Gymnastics_40_920.jpg +./40--Gymnastics/40_Gymnastics_Gymnastics_40_138.jpg +./40--Gymnastics/40_Gymnastics_Gymnastics_40_638.jpg +./40--Gymnastics/40_Gymnastics_Gymnastics_40_24.jpg +./40--Gymnastics/40_Gymnastics_Gymnastics_40_698.jpg +./40--Gymnastics/40_Gymnastics_Gymnastics_40_484.jpg +./40--Gymnastics/40_Gymnastics_Gymnastics_40_566.jpg +./40--Gymnastics/40_Gymnastics_Gymnastics_40_805.jpg +./40--Gymnastics/40_Gymnastics_Gymnastics_40_156.jpg +./40--Gymnastics/40_Gymnastics_Gymnastics_40_887.jpg +./40--Gymnastics/40_Gymnastics_Gymnastics_40_771.jpg +./40--Gymnastics/40_Gymnastics_Gymnastics_40_488.jpg +./40--Gymnastics/40_Gymnastics_Gymnastics_40_161.jpg +./40--Gymnastics/40_Gymnastics_Gymnastics_40_668.jpg +./40--Gymnastics/40_Gymnastics_Gymnastics_40_460.jpg +./40--Gymnastics/40_Gymnastics_Gymnastics_40_762.jpg +./40--Gymnastics/40_Gymnastics_Gymnastics_40_727.jpg +./40--Gymnastics/40_Gymnastics_Gymnastics_40_980.jpg +./40--Gymnastics/40_Gymnastics_Gymnastics_40_108.jpg +./40--Gymnastics/40_Gymnastics_Gymnastics_40_783.jpg +./40--Gymnastics/40_Gymnastics_Gymnastics_40_1044.jpg +./40--Gymnastics/40_Gymnastics_Gymnastics_40_273.jpg +./40--Gymnastics/40_Gymnastics_Gymnastics_40_911.jpg +./40--Gymnastics/40_Gymnastics_Gymnastics_40_1035.jpg +./40--Gymnastics/40_Gymnastics_Gymnastics_40_175.jpg +./40--Gymnastics/40_Gymnastics_Gymnastics_40_749.jpg +./40--Gymnastics/40_Gymnastics_Gymnastics_40_285.jpg +./40--Gymnastics/40_Gymnastics_Gymnastics_40_492.jpg +./40--Gymnastics/40_Gymnastics_Gymnastics_40_422.jpg +./40--Gymnastics/40_Gymnastics_Gymnastics_40_593.jpg +./40--Gymnastics/40_Gymnastics_Gymnastics_40_646.jpg +./40--Gymnastics/40_Gymnastics_Gymnastics_40_1043.jpg +./40--Gymnastics/40_Gymnastics_Gymnastics_40_57.jpg +./40--Gymnastics/40_Gymnastics_Gymnastics_40_891.jpg +./40--Gymnastics/40_Gymnastics_Gymnastics_40_242.jpg +./40--Gymnastics/40_Gymnastics_Gymnastics_40_609.jpg +./40--Gymnastics/40_Gymnastics_Gymnastics_40_331.jpg 
+./40--Gymnastics/40_Gymnastics_Gymnastics_40_260.jpg +./40--Gymnastics/40_Gymnastics_Gymnastics_40_945.jpg +./40--Gymnastics/40_Gymnastics_Gymnastics_40_627.jpg +./48--Parachutist_Paratrooper/48_Parachutist_Paratrooper_Parachutist_Paratrooper_48_678.jpg +./48--Parachutist_Paratrooper/48_Parachutist_Paratrooper_Parachutist_Paratrooper_48_1015.jpg +./48--Parachutist_Paratrooper/48_Parachutist_Paratrooper_Parachutist_Paratrooper_48_72.jpg +./48--Parachutist_Paratrooper/48_Parachutist_Paratrooper_Parachutist_Paratrooper_48_814.jpg +./48--Parachutist_Paratrooper/48_Parachutist_Paratrooper_Parachutist_Paratrooper_48_656.jpg +./48--Parachutist_Paratrooper/48_Parachutist_Paratrooper_Parachutist_Paratrooper_48_357.jpg +./48--Parachutist_Paratrooper/48_Parachutist_Paratrooper_Parachutist_Paratrooper_48_652.jpg +./48--Parachutist_Paratrooper/48_Parachutist_Paratrooper_Parachutist_Paratrooper_48_75.jpg +./48--Parachutist_Paratrooper/48_Parachutist_Paratrooper_Parachutist_Paratrooper_48_321.jpg +./48--Parachutist_Paratrooper/48_Parachutist_Paratrooper_Parachutist_Paratrooper_48_695.jpg +./48--Parachutist_Paratrooper/48_Parachutist_Paratrooper_Parachutist_Paratrooper_48_537.jpg +./48--Parachutist_Paratrooper/48_Parachutist_Paratrooper_Parachutist_Paratrooper_48_841.jpg +./48--Parachutist_Paratrooper/48_Parachutist_Paratrooper_Parachutist_Paratrooper_48_892.jpg +./48--Parachutist_Paratrooper/48_Parachutist_Paratrooper_Parachutist_Paratrooper_48_785.jpg +./48--Parachutist_Paratrooper/48_Parachutist_Paratrooper_Parachutist_Paratrooper_48_596.jpg +./48--Parachutist_Paratrooper/48_Parachutist_Paratrooper_Parachutist_Paratrooper_48_205.jpg +./48--Parachutist_Paratrooper/48_Parachutist_Paratrooper_Parachutist_Paratrooper_48_439.jpg +./48--Parachutist_Paratrooper/48_Parachutist_Paratrooper_Parachutist_Paratrooper_48_122.jpg +./48--Parachutist_Paratrooper/48_Parachutist_Paratrooper_Parachutist_Paratrooper_48_164.jpg +./50--Celebration_Or_Party/50_Celebration_Or_Party_houseparty_50_345.jpg +./50--Celebration_Or_Party/50_Celebration_Or_Party_birthdayparty_50_213.jpg +./50--Celebration_Or_Party/50_Celebration_Or_Party_houseparty_50_788.jpg +./50--Celebration_Or_Party/50_Celebration_Or_Party_birthdayparty_50_18.jpg +./50--Celebration_Or_Party/50_Celebration_Or_Party_birthdayparty_50_464.jpg +./50--Celebration_Or_Party/50_Celebration_Or_Party_birthdayparty_50_170.jpg +./50--Celebration_Or_Party/50_Celebration_Or_Party_houseparty_50_488.jpg +./50--Celebration_Or_Party/50_Celebration_Or_Party_houseparty_50_720.jpg +./50--Celebration_Or_Party/50_Celebration_Or_Party_houseparty_50_647.jpg +./50--Celebration_Or_Party/50_Celebration_Or_Party_birthdayparty_50_134.jpg +./50--Celebration_Or_Party/50_Celebration_Or_Party_houseparty_50_17.jpg +./50--Celebration_Or_Party/50_Celebration_Or_Party_birthdayparty_50_374.jpg +./50--Celebration_Or_Party/50_Celebration_Or_Party_houseparty_50_165.jpg +./50--Celebration_Or_Party/50_Celebration_Or_Party_houseparty_50_173.jpg +./50--Celebration_Or_Party/50_Celebration_Or_Party_houseparty_50_749.jpg +./50--Celebration_Or_Party/50_Celebration_Or_Party_houseparty_50_282.jpg +./50--Celebration_Or_Party/50_Celebration_Or_Party_houseparty_50_19.jpg +./50--Celebration_Or_Party/50_Celebration_Or_Party_houseparty_50_33.jpg +./50--Celebration_Or_Party/50_Celebration_Or_Party_houseparty_50_827.jpg +./50--Celebration_Or_Party/50_Celebration_Or_Party_houseparty_50_752.jpg +./50--Celebration_Or_Party/50_Celebration_Or_Party_houseparty_50_595.jpg 
+./50--Celebration_Or_Party/50_Celebration_Or_Party_birthdayparty_50_144.jpg +./50--Celebration_Or_Party/50_Celebration_Or_Party_houseparty_50_323.jpg +./50--Celebration_Or_Party/50_Celebration_Or_Party_birthdayparty_50_396.jpg +./50--Celebration_Or_Party/50_Celebration_Or_Party_birthdayparty_50_43.jpg +./50--Celebration_Or_Party/50_Celebration_Or_Party_houseparty_50_735.jpg +./50--Celebration_Or_Party/50_Celebration_Or_Party_houseparty_50_645.jpg +./50--Celebration_Or_Party/50_Celebration_Or_Party_houseparty_50_493.jpg +./50--Celebration_Or_Party/50_Celebration_Or_Party_houseparty_50_326.jpg +./50--Celebration_Or_Party/50_Celebration_Or_Party_houseparty_50_790.jpg +./50--Celebration_Or_Party/50_Celebration_Or_Party_birthdayparty_50_249.jpg +./50--Celebration_Or_Party/50_Celebration_Or_Party_houseparty_50_408.jpg +./50--Celebration_Or_Party/50_Celebration_Or_Party_houseparty_50_654.jpg +./50--Celebration_Or_Party/50_Celebration_Or_Party_birthdayparty_50_449.jpg +./50--Celebration_Or_Party/50_Celebration_Or_Party_houseparty_50_410.jpg +./50--Celebration_Or_Party/50_Celebration_Or_Party_houseparty_50_649.jpg +./50--Celebration_Or_Party/50_Celebration_Or_Party_houseparty_50_432.jpg +./50--Celebration_Or_Party/50_Celebration_Or_Party_houseparty_50_303.jpg +./50--Celebration_Or_Party/50_Celebration_Or_Party_houseparty_50_609.jpg +./50--Celebration_Or_Party/50_Celebration_Or_Party_birthdayparty_50_196.jpg +./50--Celebration_Or_Party/50_Celebration_Or_Party_birthdayparty_50_283.jpg +./50--Celebration_Or_Party/50_Celebration_Or_Party_houseparty_50_679.jpg +./50--Celebration_Or_Party/50_Celebration_Or_Party_houseparty_50_764.jpg +./50--Celebration_Or_Party/50_Celebration_Or_Party_houseparty_50_715.jpg +./50--Celebration_Or_Party/50_Celebration_Or_Party_houseparty_50_189.jpg +./50--Celebration_Or_Party/50_Celebration_Or_Party_houseparty_50_180.jpg +./50--Celebration_Or_Party/50_Celebration_Or_Party_houseparty_50_641.jpg +./50--Celebration_Or_Party/50_Celebration_Or_Party_birthdayparty_50_479.jpg +./50--Celebration_Or_Party/50_Celebration_Or_Party_birthdayparty_50_75.jpg +./39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_611.jpg +./39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_794.jpg +./39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_344.jpg +./39--Ice_Skating/39_Ice_Skating_iceskiing_39_583.jpg +./39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_616.jpg +./39--Ice_Skating/39_Ice_Skating_iceskiing_39_416.jpg +./39--Ice_Skating/39_Ice_Skating_iceskiing_39_819.jpg +./39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_156.jpg +./39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_200.jpg +./39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_875.jpg +./39--Ice_Skating/39_Ice_Skating_iceskiing_39_529.jpg +./39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_169.jpg +./39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_495.jpg +./39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_458.jpg +./39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_661.jpg +./39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_658.jpg +./39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_252.jpg +./39--Ice_Skating/39_Ice_Skating_iceskiing_39_276.jpg +./39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_163.jpg +./39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_546.jpg +./39--Ice_Skating/39_Ice_Skating_iceskiing_39_310.jpg +./39--Ice_Skating/39_Ice_Skating_iceskiing_39_595.jpg +./39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_825.jpg +./39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_417.jpg +./39--Ice_Skating/39_Ice_Skating_iceskiing_39_696.jpg 
+./39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_388.jpg +./39--Ice_Skating/39_Ice_Skating_iceskiing_39_817.jpg +./39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_103.jpg +./39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_176.jpg +./39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_504.jpg +./39--Ice_Skating/39_Ice_Skating_iceskiing_39_249.jpg +./39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_440.jpg +./39--Ice_Skating/39_Ice_Skating_iceskiing_39_138.jpg +./39--Ice_Skating/39_Ice_Skating_iceskiing_39_777.jpg +./39--Ice_Skating/39_Ice_Skating_iceskiing_39_348.jpg +./39--Ice_Skating/39_Ice_Skating_iceskiing_39_541.jpg +./39--Ice_Skating/39_Ice_Skating_iceskiing_39_351.jpg +./39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_486.jpg +./39--Ice_Skating/39_Ice_Skating_iceskiing_39_354.jpg +./39--Ice_Skating/39_Ice_Skating_iceskiing_39_668.jpg +./39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_1026.jpg +./39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_1029.jpg +./39--Ice_Skating/39_Ice_Skating_iceskiing_39_77.jpg +./39--Ice_Skating/39_Ice_Skating_iceskiing_39_943.jpg +./39--Ice_Skating/39_Ice_Skating_iceskiing_39_272.jpg +./39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_682.jpg +./39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_438.jpg +./39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_487.jpg +./39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_568.jpg +./39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_1047.jpg +./39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_87.jpg +./39--Ice_Skating/39_Ice_Skating_iceskiing_39_855.jpg +./39--Ice_Skating/39_Ice_Skating_iceskiing_39_751.jpg +./39--Ice_Skating/39_Ice_Skating_iceskiing_39_765.jpg +./39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_275.jpg +./39--Ice_Skating/39_Ice_Skating_iceskiing_39_121.jpg +./39--Ice_Skating/39_Ice_Skating_iceskiing_39_349.jpg +./39--Ice_Skating/39_Ice_Skating_iceskiing_39_359.jpg +./39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_270.jpg +./39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_389.jpg +./39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_992.jpg +./39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_44.jpg +./39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_283.jpg +./39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_382.jpg +./39--Ice_Skating/39_Ice_Skating_iceskiing_39_869.jpg +./39--Ice_Skating/39_Ice_Skating_iceskiing_39_81.jpg +./39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_362.jpg +./39--Ice_Skating/39_Ice_Skating_iceskiing_39_908.jpg +./39--Ice_Skating/39_Ice_Skating_iceskiing_39_1000.jpg +./39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_119.jpg +./39--Ice_Skating/39_Ice_Skating_iceskiing_39_495.jpg +./39--Ice_Skating/39_Ice_Skating_iceskiing_39_941.jpg +./39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_463.jpg +./39--Ice_Skating/39_Ice_Skating_iceskiing_39_901.jpg +./39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_203.jpg +./39--Ice_Skating/39_Ice_Skating_iceskiing_39_342.jpg +./39--Ice_Skating/39_Ice_Skating_iceskiing_39_778.jpg +./39--Ice_Skating/39_Ice_Skating_Ice_Skating_39_793.jpg +./39--Ice_Skating/39_Ice_Skating_iceskiing_39_591.jpg +./45--Balloonist/45_Balloonist_Balloonist_45_142.jpg +./45--Balloonist/45_Balloonist_Balloonist_45_939.jpg +./45--Balloonist/45_Balloonist_Balloonist_45_1028.jpg +./45--Balloonist/45_Balloonist_Balloonist_45_531.jpg +./45--Balloonist/45_Balloonist_Balloonist_45_160.jpg +./45--Balloonist/45_Balloonist_Balloonist_45_550.jpg +./45--Balloonist/45_Balloonist_Balloonist_45_974.jpg +./45--Balloonist/45_Balloonist_Balloonist_45_86.jpg +./45--Balloonist/45_Balloonist_Balloonist_45_149.jpg 
+./45--Balloonist/45_Balloonist_Balloonist_45_769.jpg +./45--Balloonist/45_Balloonist_Balloonist_45_211.jpg +./45--Balloonist/45_Balloonist_Balloonist_45_134.jpg +./45--Balloonist/45_Balloonist_Balloonist_45_518.jpg +./45--Balloonist/45_Balloonist_Balloonist_45_402.jpg +./45--Balloonist/45_Balloonist_Balloonist_45_615.jpg +./45--Balloonist/45_Balloonist_Balloonist_45_207.jpg +./45--Balloonist/45_Balloonist_Balloonist_45_692.jpg +./45--Balloonist/45_Balloonist_Balloonist_45_857.jpg +./45--Balloonist/45_Balloonist_Balloonist_45_685.jpg +./45--Balloonist/45_Balloonist_Balloonist_45_225.jpg +./45--Balloonist/45_Balloonist_Balloonist_45_369.jpg +./45--Balloonist/45_Balloonist_Balloonist_45_838.jpg +./45--Balloonist/45_Balloonist_Balloonist_45_118.jpg +./45--Balloonist/45_Balloonist_Balloonist_45_273.jpg +./45--Balloonist/45_Balloonist_Balloonist_45_508.jpg +./45--Balloonist/45_Balloonist_Balloonist_45_733.jpg +./45--Balloonist/45_Balloonist_Balloonist_45_107.jpg +./45--Balloonist/45_Balloonist_Balloonist_45_217.jpg +./45--Balloonist/45_Balloonist_Balloonist_45_416.jpg +./45--Balloonist/45_Balloonist_Balloonist_45_936.jpg +./45--Balloonist/45_Balloonist_Balloonist_45_277.jpg +./45--Balloonist/45_Balloonist_Balloonist_45_434.jpg +./45--Balloonist/45_Balloonist_Balloonist_45_186.jpg +./51--Dresses/51_Dresses_wearingdress_51_178.jpg +./51--Dresses/51_Dresses_wearingdress_51_113.jpg +./51--Dresses/51_Dresses_wearingdress_51_904.jpg +./51--Dresses/51_Dresses_wearingdress_51_377.jpg +./51--Dresses/51_Dresses_wearingdress_51_340.jpg +./51--Dresses/51_Dresses_wearingdress_51_268.jpg +./51--Dresses/51_Dresses_wearingdress_51_691.jpg +./51--Dresses/51_Dresses_wearingdress_51_748.jpg +./51--Dresses/51_Dresses_wearingdress_51_465.jpg +./51--Dresses/51_Dresses_wearingdress_51_606.jpg +./51--Dresses/51_Dresses_wearingdress_51_306.jpg +./51--Dresses/51_Dresses_wearingdress_51_610.jpg +./51--Dresses/51_Dresses_wearingdress_51_17.jpg +./51--Dresses/51_Dresses_wearingdress_51_280.jpg +./51--Dresses/51_Dresses_wearingdress_51_869.jpg +./51--Dresses/51_Dresses_wearingdress_51_77.jpg +./51--Dresses/51_Dresses_wearingdress_51_737.jpg +./51--Dresses/51_Dresses_wearingdress_51_672.jpg +./51--Dresses/51_Dresses_wearingdress_51_445.jpg +./51--Dresses/51_Dresses_wearingdress_51_7.jpg +./51--Dresses/51_Dresses_wearingdress_51_335.jpg +./51--Dresses/51_Dresses_wearingdress_51_741.jpg +./51--Dresses/51_Dresses_wearingdress_51_830.jpg +./51--Dresses/51_Dresses_wearingdress_51_183.jpg +./51--Dresses/51_Dresses_wearingdress_51_837.jpg +./51--Dresses/51_Dresses_wearingdress_51_1031.jpg +./51--Dresses/51_Dresses_wearingdress_51_588.jpg +./51--Dresses/51_Dresses_wearingdress_51_451.jpg +./51--Dresses/51_Dresses_wearingdress_51_140.jpg +./51--Dresses/51_Dresses_wearingdress_51_536.jpg +./51--Dresses/51_Dresses_wearingdress_51_105.jpg +./51--Dresses/51_Dresses_wearingdress_51_388.jpg +./51--Dresses/51_Dresses_wearingdress_51_883.jpg +./51--Dresses/51_Dresses_wearingdress_51_815.jpg +./51--Dresses/51_Dresses_wearingdress_51_13.jpg +./51--Dresses/51_Dresses_wearingdress_51_161.jpg +./51--Dresses/51_Dresses_wearingdress_51_96.jpg +./51--Dresses/51_Dresses_wearingdress_51_348.jpg +./51--Dresses/51_Dresses_wearingdress_51_94.jpg +./51--Dresses/51_Dresses_wearingdress_51_464.jpg +./51--Dresses/51_Dresses_wearingdress_51_1012.jpg +./51--Dresses/51_Dresses_wearingdress_51_633.jpg +./51--Dresses/51_Dresses_wearingdress_51_739.jpg +./51--Dresses/51_Dresses_wearingdress_51_492.jpg +./51--Dresses/51_Dresses_wearingdress_51_327.jpg 
+./51--Dresses/51_Dresses_wearingdress_51_727.jpg +./51--Dresses/51_Dresses_wearingdress_51_339.jpg +./51--Dresses/51_Dresses_wearingdress_51_763.jpg +./51--Dresses/51_Dresses_wearingdress_51_914.jpg +./51--Dresses/51_Dresses_wearingdress_51_549.jpg +./51--Dresses/51_Dresses_wearingdress_51_654.jpg +./51--Dresses/51_Dresses_wearingdress_51_221.jpg +./51--Dresses/51_Dresses_wearingdress_51_689.jpg +./51--Dresses/51_Dresses_wearingdress_51_789.jpg +./51--Dresses/51_Dresses_wearingdress_51_150.jpg +./51--Dresses/51_Dresses_wearingdress_51_139.jpg +./51--Dresses/51_Dresses_wearingdress_51_685.jpg +./51--Dresses/51_Dresses_wearingdress_51_512.jpg +./51--Dresses/51_Dresses_wearingdress_51_874.jpg +./51--Dresses/51_Dresses_wearingdress_51_414.jpg +./51--Dresses/51_Dresses_wearingdress_51_398.jpg +./51--Dresses/51_Dresses_wearingdress_51_386.jpg +./51--Dresses/51_Dresses_wearingdress_51_736.jpg +./51--Dresses/51_Dresses_wearingdress_51_599.jpg +./51--Dresses/51_Dresses_wearingdress_51_1041.jpg +./51--Dresses/51_Dresses_wearingdress_51_612.jpg +./51--Dresses/51_Dresses_wearingdress_51_226.jpg +./51--Dresses/51_Dresses_wearingdress_51_106.jpg +./51--Dresses/51_Dresses_wearingdress_51_1035.jpg +./51--Dresses/51_Dresses_wearingdress_51_670.jpg +./51--Dresses/51_Dresses_wearingdress_51_692.jpg +./51--Dresses/51_Dresses_wearingdress_51_580.jpg +./51--Dresses/51_Dresses_wearingdress_51_233.jpg +./5--Car_Accident/5_Car_Accident_Car_Crash_5_38.jpg +./5--Car_Accident/5_Car_Accident_Car_Crash_5_610.jpg +./5--Car_Accident/5_Car_Accident_Car_Crash_5_51.jpg +./5--Car_Accident/5_Car_Accident_Accident_5_607.jpg +./5--Car_Accident/5_Car_Accident_Accident_5_203.jpg +./5--Car_Accident/5_Car_Accident_Car_Crash_5_475.jpg +./5--Car_Accident/5_Car_Accident_Accident_5_474.jpg +./5--Car_Accident/5_Car_Accident_Accident_5_388.jpg +./5--Car_Accident/5_Car_Accident_Accident_5_340.jpg +./5--Car_Accident/5_Car_Accident_Accident_5_243.jpg +./5--Car_Accident/5_Car_Accident_Car_Crash_5_451.jpg +./5--Car_Accident/5_Car_Accident_Accident_5_777.jpg +./5--Car_Accident/5_Car_Accident_Accident_5_576.jpg +./5--Car_Accident/5_Car_Accident_Accident_5_668.jpg +./5--Car_Accident/5_Car_Accident_Accident_5_628.jpg +./5--Car_Accident/5_Car_Accident_Accident_5_925.jpg +./5--Car_Accident/5_Car_Accident_Accident_5_641.jpg +./5--Car_Accident/5_Car_Accident_Accident_5_34.jpg +./5--Car_Accident/5_Car_Accident_Accident_5_448.jpg +./5--Car_Accident/5_Car_Accident_Car_Crash_5_94.jpg +./5--Car_Accident/5_Car_Accident_Accident_5_869.jpg +./5--Car_Accident/5_Car_Accident_Accident_5_796.jpg +./5--Car_Accident/5_Car_Accident_Accident_5_77.jpg +./5--Car_Accident/5_Car_Accident_Accident_5_510.jpg +./5--Car_Accident/5_Car_Accident_Accident_5_642.jpg +./5--Car_Accident/5_Car_Accident_Accident_5_735.jpg +./5--Car_Accident/5_Car_Accident_Accident_5_177.jpg +./5--Car_Accident/5_Car_Accident_Car_Crash_5_644.jpg +./5--Car_Accident/5_Car_Accident_Accident_5_234.jpg +./5--Car_Accident/5_Car_Accident_Car_Crash_5_868.jpg +./5--Car_Accident/5_Car_Accident_Car_Crash_5_457.jpg +./5--Car_Accident/5_Car_Accident_Car_Crash_5_492.jpg +./5--Car_Accident/5_Car_Accident_Car_Crash_5_773.jpg +./5--Car_Accident/5_Car_Accident_Accident_5_287.jpg +./5--Car_Accident/5_Car_Accident_Accident_5_460.jpg +./5--Car_Accident/5_Car_Accident_Car_Crash_5_544.jpg +./5--Car_Accident/5_Car_Accident_Car_Crash_5_365.jpg +./5--Car_Accident/5_Car_Accident_Car_Crash_5_133.jpg +./5--Car_Accident/5_Car_Accident_Car_Crash_5_574.jpg +./5--Car_Accident/5_Car_Accident_Accident_5_633.jpg 
+./5--Car_Accident/5_Car_Accident_Accident_5_66.jpg +./5--Car_Accident/5_Car_Accident_Accident_5_244.jpg +./5--Car_Accident/5_Car_Accident_Car_Crash_5_279.jpg +./5--Car_Accident/5_Car_Accident_Car_Crash_5_866.jpg +./5--Car_Accident/5_Car_Accident_Accident_5_515.jpg +./5--Car_Accident/5_Car_Accident_Accident_5_937.jpg +./5--Car_Accident/5_Car_Accident_Accident_5_202.jpg +./5--Car_Accident/5_Car_Accident_Accident_5_982.jpg +./5--Car_Accident/5_Car_Accident_Accident_5_948.jpg +./0--Parade/0_Parade_marchingband_1_234.jpg +./0--Parade/0_Parade_Parade_0_443.jpg +./0--Parade/0_Parade_Parade_0_472.jpg +./0--Parade/0_Parade_Parade_0_429.jpg +./0--Parade/0_Parade_marchingband_1_20.jpg +./0--Parade/0_Parade_Parade_0_468.jpg +./0--Parade/0_Parade_marchingband_1_104.jpg +./0--Parade/0_Parade_Parade_0_814.jpg +./0--Parade/0_Parade_marchingband_1_356.jpg +./0--Parade/0_Parade_Parade_0_275.jpg +./0--Parade/0_Parade_Parade_0_164.jpg +./0--Parade/0_Parade_Parade_0_664.jpg +./0--Parade/0_Parade_Parade_0_376.jpg +./0--Parade/0_Parade_Parade_0_559.jpg +./0--Parade/0_Parade_Parade_0_639.jpg +./0--Parade/0_Parade_marchingband_1_382.jpg +./0--Parade/0_Parade_marchingband_1_932.jpg +./0--Parade/0_Parade_marchingband_1_404.jpg +./0--Parade/0_Parade_Parade_0_490.jpg +./0--Parade/0_Parade_Parade_0_461.jpg +./0--Parade/0_Parade_marchingband_1_309.jpg +./0--Parade/0_Parade_marchingband_1_822.jpg +./0--Parade/0_Parade_Parade_0_377.jpg +./0--Parade/0_Parade_marchingband_1_517.jpg +./0--Parade/0_Parade_marchingband_1_359.jpg +./0--Parade/0_Parade_Parade_0_616.jpg +./0--Parade/0_Parade_marchingband_1_869.jpg +./0--Parade/0_Parade_Parade_0_125.jpg +./0--Parade/0_Parade_Parade_0_459.jpg +./0--Parade/0_Parade_Parade_0_611.jpg +./0--Parade/0_Parade_marchingband_1_227.jpg +./0--Parade/0_Parade_marchingband_1_379.jpg +./0--Parade/0_Parade_marchingband_1_1004.jpg +./0--Parade/0_Parade_marchingband_1_156.jpg +./0--Parade/0_Parade_Parade_0_757.jpg +./0--Parade/0_Parade_marchingband_1_172.jpg +./0--Parade/0_Parade_Parade_0_194.jpg +./0--Parade/0_Parade_marchingband_1_360.jpg +./0--Parade/0_Parade_marchingband_1_188.jpg +./0--Parade/0_Parade_Parade_0_829.jpg +./0--Parade/0_Parade_Parade_0_502.jpg +./0--Parade/0_Parade_Parade_0_628.jpg +./0--Parade/0_Parade_Parade_0_43.jpg +./0--Parade/0_Parade_Parade_0_519.jpg +./0--Parade/0_Parade_Parade_0_913.jpg +./0--Parade/0_Parade_Parade_0_470.jpg +./0--Parade/0_Parade_marchingband_1_439.jpg +./0--Parade/0_Parade_Parade_0_53.jpg +./0--Parade/0_Parade_marchingband_1_353.jpg +./0--Parade/0_Parade_Parade_0_317.jpg +./0--Parade/0_Parade_Parade_0_239.jpg +./0--Parade/0_Parade_marchingband_1_765.jpg +./0--Parade/0_Parade_marchingband_1_525.jpg +./0--Parade/0_Parade_Parade_0_286.jpg +./0--Parade/0_Parade_Parade_0_266.jpg +./0--Parade/0_Parade_marchingband_1_818.jpg +./0--Parade/0_Parade_marchingband_1_695.jpg +./0--Parade/0_Parade_marchingband_1_410.jpg +./0--Parade/0_Parade_Parade_0_887.jpg +./0--Parade/0_Parade_Parade_0_917.jpg +./0--Parade/0_Parade_marchingband_1_267.jpg +./0--Parade/0_Parade_Parade_0_901.jpg +./0--Parade/0_Parade_marchingband_1_445.jpg +./0--Parade/0_Parade_Parade_0_382.jpg +./0--Parade/0_Parade_Parade_0_960.jpg +./0--Parade/0_Parade_Parade_0_688.jpg +./0--Parade/0_Parade_Parade_0_205.jpg +./0--Parade/0_Parade_marchingband_1_74.jpg +./0--Parade/0_Parade_marchingband_1_768.jpg +./0--Parade/0_Parade_Parade_0_465.jpg +./0--Parade/0_Parade_Parade_0_906.jpg +./0--Parade/0_Parade_Parade_0_364.jpg +./0--Parade/0_Parade_Parade_0_246.jpg +./0--Parade/0_Parade_marchingband_1_746.jpg 
+./0--Parade/0_Parade_marchingband_1_465.jpg +./0--Parade/0_Parade_Parade_0_850.jpg +./0--Parade/0_Parade_Parade_0_29.jpg +./0--Parade/0_Parade_marchingband_1_147.jpg +./0--Parade/0_Parade_marchingband_1_488.jpg +./0--Parade/0_Parade_marchingband_1_78.jpg +./0--Parade/0_Parade_marchingband_1_149.jpg +./0--Parade/0_Parade_marchingband_1_606.jpg +./0--Parade/0_Parade_marchingband_1_710.jpg +./0--Parade/0_Parade_Parade_0_72.jpg +./0--Parade/0_Parade_marchingband_1_311.jpg +./0--Parade/0_Parade_marchingband_1_490.jpg +./0--Parade/0_Parade_marchingband_1_556.jpg +./0--Parade/0_Parade_Parade_0_854.jpg +./0--Parade/0_Parade_marchingband_1_649.jpg +./0--Parade/0_Parade_marchingband_1_629.jpg +./0--Parade/0_Parade_Parade_0_120.jpg +./0--Parade/0_Parade_Parade_0_247.jpg +./0--Parade/0_Parade_marchingband_1_139.jpg +./0--Parade/0_Parade_marchingband_1_881.jpg +./0--Parade/0_Parade_Parade_0_102.jpg +./0--Parade/0_Parade_Parade_0_288.jpg +./0--Parade/0_Parade_marchingband_1_552.jpg +./0--Parade/0_Parade_marchingband_1_593.jpg +./0--Parade/0_Parade_marchingband_1_1045.jpg +./0--Parade/0_Parade_Parade_0_218.jpg +./0--Parade/0_Parade_Parade_0_12.jpg +./0--Parade/0_Parade_Parade_0_68.jpg +./0--Parade/0_Parade_Parade_0_137.jpg +./0--Parade/0_Parade_marchingband_1_759.jpg +./0--Parade/0_Parade_Parade_0_353.jpg +./0--Parade/0_Parade_marchingband_1_561.jpg +./0--Parade/0_Parade_marchingband_1_910.jpg +./0--Parade/0_Parade_marchingband_1_653.jpg +./0--Parade/0_Parade_Parade_0_478.jpg +./0--Parade/0_Parade_marchingband_1_620.jpg +./0--Parade/0_Parade_Parade_0_873.jpg +./0--Parade/0_Parade_marchingband_1_329.jpg +./0--Parade/0_Parade_Parade_0_545.jpg +./0--Parade/0_Parade_marchingband_1_476.jpg +./0--Parade/0_Parade_marchingband_1_355.jpg +./23--Shoppers/23_Shoppers_Shoppers_23_328.jpg +./23--Shoppers/23_Shoppers_Shoppers_23_511.jpg +./23--Shoppers/23_Shoppers_Shoppers_23_665.jpg +./23--Shoppers/23_Shoppers_Shoppers_23_708.jpg +./23--Shoppers/23_Shoppers_Shoppers_23_514.jpg +./23--Shoppers/23_Shoppers_Shoppers_23_122.jpg +./23--Shoppers/23_Shoppers_Shoppers_23_640.jpg +./23--Shoppers/23_Shoppers_Shoppers_23_500.jpg +./23--Shoppers/23_Shoppers_Shoppers_23_167.jpg +./23--Shoppers/23_Shoppers_Shoppers_23_259.jpg +./23--Shoppers/23_Shoppers_Shoppers_23_91.jpg +./23--Shoppers/23_Shoppers_Shoppers_23_271.jpg +./23--Shoppers/23_Shoppers_Shoppers_23_43.jpg +./23--Shoppers/23_Shoppers_Shoppers_23_65.jpg +./23--Shoppers/23_Shoppers_Shoppers_23_302.jpg +./23--Shoppers/23_Shoppers_Shoppers_23_599.jpg +./23--Shoppers/23_Shoppers_Shoppers_23_22.jpg +./23--Shoppers/23_Shoppers_Shoppers_23_364.jpg +./23--Shoppers/23_Shoppers_Shoppers_23_817.jpg +./23--Shoppers/23_Shoppers_Shoppers_23_607.jpg +./23--Shoppers/23_Shoppers_Shoppers_23_561.jpg +./23--Shoppers/23_Shoppers_Shoppers_23_543.jpg +./23--Shoppers/23_Shoppers_Shoppers_23_197.jpg +./23--Shoppers/23_Shoppers_Shoppers_23_777.jpg +./23--Shoppers/23_Shoppers_Shoppers_23_461.jpg +./23--Shoppers/23_Shoppers_Shoppers_23_802.jpg +./23--Shoppers/23_Shoppers_Shoppers_23_450.jpg +./23--Shoppers/23_Shoppers_Shoppers_23_25.jpg +./23--Shoppers/23_Shoppers_Shoppers_23_459.jpg +./23--Shoppers/23_Shoppers_Shoppers_23_571.jpg +./23--Shoppers/23_Shoppers_Shoppers_23_812.jpg +./23--Shoppers/23_Shoppers_Shoppers_23_294.jpg +./23--Shoppers/23_Shoppers_Shoppers_23_823.jpg +./23--Shoppers/23_Shoppers_Shoppers_23_232.jpg +./23--Shoppers/23_Shoppers_Shoppers_23_10.jpg +./23--Shoppers/23_Shoppers_Shoppers_23_801.jpg +./23--Shoppers/23_Shoppers_Shoppers_23_223.jpg 
+./23--Shoppers/23_Shoppers_Shoppers_23_243.jpg +./23--Shoppers/23_Shoppers_Shoppers_23_60.jpg +./23--Shoppers/23_Shoppers_Shoppers_23_854.jpg +./23--Shoppers/23_Shoppers_Shoppers_23_880.jpg +./23--Shoppers/23_Shoppers_Shoppers_23_485.jpg +./18--Concerts/18_Concerts_Concerts_18_602.jpg +./18--Concerts/18_Concerts_Concerts_18_257.jpg +./18--Concerts/18_Concerts_Concerts_18_151.jpg +./18--Concerts/18_Concerts_Concerts_18_828.jpg +./18--Concerts/18_Concerts_Concerts_18_350.jpg +./18--Concerts/18_Concerts_Concerts_18_251.jpg +./18--Concerts/18_Concerts_Concerts_18_612.jpg +./18--Concerts/18_Concerts_Concerts_18_403.jpg +./18--Concerts/18_Concerts_Concerts_18_706.jpg +./18--Concerts/18_Concerts_Concerts_18_910.jpg +./18--Concerts/18_Concerts_Concerts_18_656.jpg +./18--Concerts/18_Concerts_Concerts_18_504.jpg +./18--Concerts/18_Concerts_Concerts_18_469.jpg +./18--Concerts/18_Concerts_Concerts_18_1004.jpg +./18--Concerts/18_Concerts_Concerts_18_655.jpg +./18--Concerts/18_Concerts_Concerts_18_554.jpg +./18--Concerts/18_Concerts_Concerts_18_1013.jpg +./18--Concerts/18_Concerts_Concerts_18_38.jpg +./18--Concerts/18_Concerts_Concerts_18_433.jpg +./18--Concerts/18_Concerts_Concerts_18_66.jpg +./18--Concerts/18_Concerts_Concerts_18_784.jpg +./18--Concerts/18_Concerts_Concerts_18_815.jpg +./18--Concerts/18_Concerts_Concerts_18_872.jpg +./18--Concerts/18_Concerts_Concerts_18_258.jpg +./18--Concerts/18_Concerts_Concerts_18_920.jpg +./18--Concerts/18_Concerts_Concerts_18_1038.jpg +./18--Concerts/18_Concerts_Concerts_18_1016.jpg +./18--Concerts/18_Concerts_Concerts_18_536.jpg +./18--Concerts/18_Concerts_Concerts_18_486.jpg +./18--Concerts/18_Concerts_Concerts_18_853.jpg +./18--Concerts/18_Concerts_Concerts_18_665.jpg +./18--Concerts/18_Concerts_Concerts_18_381.jpg +./18--Concerts/18_Concerts_Concerts_18_104.jpg +./18--Concerts/18_Concerts_Concerts_18_522.jpg +./18--Concerts/18_Concerts_Concerts_18_366.jpg +./18--Concerts/18_Concerts_Concerts_18_528.jpg +./18--Concerts/18_Concerts_Concerts_18_252.jpg +./18--Concerts/18_Concerts_Concerts_18_313.jpg +./18--Concerts/18_Concerts_Concerts_18_27.jpg +./18--Concerts/18_Concerts_Concerts_18_1015.jpg +./18--Concerts/18_Concerts_Concerts_18_555.jpg +./18--Concerts/18_Concerts_Concerts_18_60.jpg +./18--Concerts/18_Concerts_Concerts_18_102.jpg +./18--Concerts/18_Concerts_Concerts_18_693.jpg +./18--Concerts/18_Concerts_Concerts_18_855.jpg +./18--Concerts/18_Concerts_Concerts_18_657.jpg +./18--Concerts/18_Concerts_Concerts_18_133.jpg +./18--Concerts/18_Concerts_Concerts_18_349.jpg +./18--Concerts/18_Concerts_Concerts_18_389.jpg +./18--Concerts/18_Concerts_Concerts_18_127.jpg +./18--Concerts/18_Concerts_Concerts_18_447.jpg +./18--Concerts/18_Concerts_Concerts_18_402.jpg +./18--Concerts/18_Concerts_Concerts_18_670.jpg +./54--Rescue/54_Rescue_rescuepeople_54_817.jpg +./54--Rescue/54_Rescue_rescuepeople_54_158.jpg +./54--Rescue/54_Rescue_rescuepeople_54_1035.jpg +./54--Rescue/54_Rescue_firemanrescue_54_327.jpg +./54--Rescue/54_Rescue_rescuepeople_54_711.jpg +./54--Rescue/54_Rescue_rescuepeople_54_143.jpg +./54--Rescue/54_Rescue_rescuepeople_54_328.jpg +./54--Rescue/54_Rescue_rescuepeople_54_926.jpg +./54--Rescue/54_Rescue_rescuepeople_54_8.jpg +./54--Rescue/54_Rescue_firemanrescue_54_617.jpg +./54--Rescue/54_Rescue_firemanrescue_54_939.jpg +./54--Rescue/54_Rescue_rescuepeople_54_855.jpg +./54--Rescue/54_Rescue_rescuepeople_54_860.jpg +./54--Rescue/54_Rescue_firemanrescue_54_908.jpg +./54--Rescue/54_Rescue_rescuepeople_54_254.jpg 
+./54--Rescue/54_Rescue_rescuepeople_54_1049.jpg +./54--Rescue/54_Rescue_rescuepeople_54_526.jpg +./54--Rescue/54_Rescue_rescuepeople_54_531.jpg +./54--Rescue/54_Rescue_rescuepeople_54_840.jpg +./54--Rescue/54_Rescue_rescuepeople_54_845.jpg +./54--Rescue/54_Rescue_rescuepeople_54_191.jpg +./54--Rescue/54_Rescue_rescuepeople_54_581.jpg +./54--Rescue/54_Rescue_rescuepeople_54_738.jpg +./54--Rescue/54_Rescue_firemanrescue_54_420.jpg +./54--Rescue/54_Rescue_rescuepeople_54_102.jpg +./54--Rescue/54_Rescue_firemanrescue_54_458.jpg +./54--Rescue/54_Rescue_rescuepeople_54_325.jpg +./54--Rescue/54_Rescue_rescuepeople_54_108.jpg +./54--Rescue/54_Rescue_rescuepeople_54_431.jpg +./54--Rescue/54_Rescue_rescuepeople_54_222.jpg +./54--Rescue/54_Rescue_rescuepeople_54_777.jpg +./54--Rescue/54_Rescue_firemanrescue_54_814.jpg +./54--Rescue/54_Rescue_rescuepeople_54_335.jpg +./54--Rescue/54_Rescue_rescuepeople_54_924.jpg +./54--Rescue/54_Rescue_firemanrescue_54_103.jpg +./54--Rescue/54_Rescue_rescuepeople_54_208.jpg +./54--Rescue/54_Rescue_rescuepeople_54_188.jpg +./54--Rescue/54_Rescue_rescuepeople_54_557.jpg +./54--Rescue/54_Rescue_rescuepeople_54_529.jpg +./54--Rescue/54_Rescue_firemanrescue_54_660.jpg +./54--Rescue/54_Rescue_rescuepeople_54_493.jpg +./54--Rescue/54_Rescue_rescuepeople_54_159.jpg +./54--Rescue/54_Rescue_rescuepeople_54_589.jpg +./54--Rescue/54_Rescue_rescuepeople_54_774.jpg +./54--Rescue/54_Rescue_rescuepeople_54_135.jpg +./54--Rescue/54_Rescue_rescuepeople_54_602.jpg +./54--Rescue/54_Rescue_firemanrescue_54_789.jpg +./54--Rescue/54_Rescue_firemanrescue_54_969.jpg +./54--Rescue/54_Rescue_rescuepeople_54_1006.jpg +./54--Rescue/54_Rescue_firemanrescue_54_478.jpg +./54--Rescue/54_Rescue_firemanrescue_54_153.jpg +./54--Rescue/54_Rescue_rescuepeople_54_406.jpg +./54--Rescue/54_Rescue_rescuepeople_54_54.jpg +./54--Rescue/54_Rescue_firemanrescue_54_724.jpg +./2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_364.jpg +./2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_158.jpg +./2--Demonstration/2_Demonstration_Demonstrators_2_12.jpg +./2--Demonstration/2_Demonstration_Protesters_2_56.jpg +./2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_496.jpg +./2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_114.jpg +./2--Demonstration/2_Demonstration_Protesters_2_748.jpg +./2--Demonstration/2_Demonstration_Demonstrators_2_425.jpg +./2--Demonstration/2_Demonstration_Political_Rally_2_35.jpg +./2--Demonstration/2_Demonstration_Political_Rally_2_83.jpg +./2--Demonstration/2_Demonstration_Protesters_2_811.jpg +./2--Demonstration/2_Demonstration_Protesters_2_817.jpg +./2--Demonstration/2_Demonstration_Demonstrators_2_689.jpg +./2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_687.jpg +./2--Demonstration/2_Demonstration_Protesters_2_1033.jpg +./2--Demonstration/2_Demonstration_Protesters_2_508.jpg +./2--Demonstration/2_Demonstration_Political_Rally_2_611.jpg +./2--Demonstration/2_Demonstration_Protesters_2_369.jpg +./2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_306.jpg +./2--Demonstration/2_Demonstration_Protesters_2_840.jpg +./2--Demonstration/2_Demonstration_Political_Rally_2_137.jpg +./2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_368.jpg +./2--Demonstration/2_Demonstration_Demonstrators_2_314.jpg +./2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_942.jpg +./2--Demonstration/2_Demonstration_Demonstrators_2_268.jpg +./2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_476.jpg 
+./2--Demonstration/2_Demonstration_Protesters_2_268.jpg +./2--Demonstration/2_Demonstration_Demonstrators_2_231.jpg +./2--Demonstration/2_Demonstration_Political_Rally_2_456.jpg +./2--Demonstration/2_Demonstration_Political_Rally_2_18.jpg +./2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_615.jpg +./2--Demonstration/2_Demonstration_Political_Rally_2_545.jpg +./2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_600.jpg +./2--Demonstration/2_Demonstration_Protesters_2_54.jpg +./2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_30.jpg +./2--Demonstration/2_Demonstration_Protesters_2_912.jpg +./2--Demonstration/2_Demonstration_Political_Rally_2_339.jpg +./2--Demonstration/2_Demonstration_Protesters_2_174.jpg +./2--Demonstration/2_Demonstration_Political_Rally_2_319.jpg +./2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_619.jpg +./2--Demonstration/2_Demonstration_Political_Rally_2_726.jpg +./2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_140.jpg +./2--Demonstration/2_Demonstration_Protesters_2_57.jpg +./2--Demonstration/2_Demonstration_Political_Rally_2_406.jpg +./2--Demonstration/2_Demonstration_Protesters_2_826.jpg +./2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_225.jpg +./2--Demonstration/2_Demonstration_Protesters_2_86.jpg +./2--Demonstration/2_Demonstration_Protesters_2_519.jpg +./2--Demonstration/2_Demonstration_Protesters_2_589.jpg +./2--Demonstration/2_Demonstration_Political_Rally_2_410.jpg +./2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_98.jpg +./2--Demonstration/2_Demonstration_Political_Rally_2_224.jpg +./2--Demonstration/2_Demonstration_Political_Rally_2_329.jpg +./2--Demonstration/2_Demonstration_Demonstrators_2_163.jpg +./2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_665.jpg +./2--Demonstration/2_Demonstration_Protesters_2_258.jpg +./2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_367.jpg +./2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_402.jpg +./2--Demonstration/2_Demonstration_Demonstrators_2_414.jpg +./2--Demonstration/2_Demonstration_Demonstrators_2_517.jpg +./2--Demonstration/2_Demonstration_Protesters_2_476.jpg +./2--Demonstration/2_Demonstration_Protesters_2_684.jpg +./2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_102.jpg +./2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_162.jpg +./2--Demonstration/2_Demonstration_Protesters_2_362.jpg +./2--Demonstration/2_Demonstration_Political_Rally_2_382.jpg +./2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_200.jpg +./2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_360.jpg +./2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_32.jpg +./2--Demonstration/2_Demonstration_Protesters_2_213.jpg +./2--Demonstration/2_Demonstration_Protesters_2_148.jpg +./2--Demonstration/2_Demonstration_Protesters_2_228.jpg +./2--Demonstration/2_Demonstration_Demonstrators_2_595.jpg +./2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_586.jpg +./2--Demonstration/2_Demonstration_Protesters_2_714.jpg +./2--Demonstration/2_Demonstration_Protesters_2_583.jpg +./2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_282.jpg +./2--Demonstration/2_Demonstration_Political_Rally_2_695.jpg +./2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_54.jpg +./2--Demonstration/2_Demonstration_Protesters_2_351.jpg +./2--Demonstration/2_Demonstration_Protesters_2_345.jpg +./2--Demonstration/2_Demonstration_Demonstrators_2_170.jpg 
+./2--Demonstration/2_Demonstration_Demonstrators_2_712.jpg +./2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_159.jpg +./2--Demonstration/2_Demonstration_Political_Rally_2_90.jpg +./2--Demonstration/2_Demonstration_Political_Rally_2_335.jpg +./2--Demonstration/2_Demonstration_Protesters_2_156.jpg +./2--Demonstration/2_Demonstration_Political_Rally_2_807.jpg +./2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_771.jpg +./2--Demonstration/2_Demonstration_Demonstrators_2_306.jpg +./2--Demonstration/2_Demonstration_Political_Rally_2_763.jpg +./2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_895.jpg +./2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_76.jpg +./2--Demonstration/2_Demonstration_Political_Rally_2_451.jpg +./2--Demonstration/2_Demonstration_Demonstrators_2_307.jpg +./2--Demonstration/2_Demonstration_Political_Rally_2_107.jpg +./2--Demonstration/2_Demonstration_Political_Rally_2_609.jpg +./2--Demonstration/2_Demonstration_Political_Rally_2_800.jpg +./2--Demonstration/2_Demonstration_Protesters_2_92.jpg +./2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_420.jpg +./2--Demonstration/2_Demonstration_Political_Rally_2_816.jpg +./2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_494.jpg +./2--Demonstration/2_Demonstration_Protesters_2_24.jpg +./2--Demonstration/2_Demonstration_Demonstrators_2_290.jpg +./2--Demonstration/2_Demonstration_Protesters_2_905.jpg +./2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_79.jpg +./2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_176.jpg +./2--Demonstration/2_Demonstration_Political_Rally_2_264.jpg +./2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_471.jpg +./2--Demonstration/2_Demonstration_Political_Rally_2_690.jpg +./2--Demonstration/2_Demonstration_Political_Rally_2_5.jpg +./2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_799.jpg +./2--Demonstration/2_Demonstration_Protesters_2_179.jpg +./2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_242.jpg +./2--Demonstration/2_Demonstration_Political_Rally_2_940.jpg +./2--Demonstration/2_Demonstration_Demonstrators_2_499.jpg +./2--Demonstration/2_Demonstration_Protesters_2_646.jpg +./2--Demonstration/2_Demonstration_Demonstrators_2_181.jpg +./2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_438.jpg +./2--Demonstration/2_Demonstration_Demonstrators_2_309.jpg +./2--Demonstration/2_Demonstration_Political_Rally_2_117.jpg +./2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_914.jpg +./2--Demonstration/2_Demonstration_Demonstrators_2_329.jpg +./2--Demonstration/2_Demonstration_Demonstrators_2_545.jpg +./2--Demonstration/2_Demonstration_Protesters_2_163.jpg +./2--Demonstration/2_Demonstration_Political_Rally_2_960.jpg +./2--Demonstration/2_Demonstration_Demonstrators_2_330.jpg +./2--Demonstration/2_Demonstration_Political_Rally_2_171.jpg +./2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_617.jpg +./2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_604.jpg +./2--Demonstration/2_Demonstration_Protesters_2_493.jpg +./2--Demonstration/2_Demonstration_Demonstrators_2_470.jpg +./2--Demonstration/2_Demonstration_Political_Rally_2_22.jpg +./2--Demonstration/2_Demonstration_Protesters_2_204.jpg +./2--Demonstration/2_Demonstration_Demonstrators_2_713.jpg +./2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_578.jpg +./2--Demonstration/2_Demonstration_Protesters_2_901.jpg +./2--Demonstration/2_Demonstration_Political_Rally_2_641.jpg 
+./2--Demonstration/2_Demonstration_Protesters_2_12.jpg +./2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_304.jpg +./2--Demonstration/2_Demonstration_Demonstrators_2_547.jpg +./2--Demonstration/2_Demonstration_Protesters_2_221.jpg +./2--Demonstration/2_Demonstration_Protesters_2_738.jpg +./2--Demonstration/2_Demonstration_Political_Rally_2_566.jpg +./2--Demonstration/2_Demonstration_Protesters_2_91.jpg +./2--Demonstration/2_Demonstration_Political_Rally_2_391.jpg +./2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_487.jpg +./2--Demonstration/2_Demonstration_Political_Rally_2_204.jpg +./2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_430.jpg +./2--Demonstration/2_Demonstration_Demonstrators_2_195.jpg +./2--Demonstration/2_Demonstration_Demonstrators_2_781.jpg +./2--Demonstration/2_Demonstration_Political_Rally_2_842.jpg +./2--Demonstration/2_Demonstration_Political_Rally_2_577.jpg +./2--Demonstration/2_Demonstration_Protesters_2_525.jpg +./2--Demonstration/2_Demonstration_Protesters_2_370.jpg +./2--Demonstration/2_Demonstration_Protesters_2_881.jpg +./2--Demonstration/2_Demonstration_Political_Rally_2_301.jpg +./2--Demonstration/2_Demonstration_Political_Rally_2_187.jpg +./2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_644.jpg +./2--Demonstration/2_Demonstration_Protesters_2_779.jpg +./2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_120.jpg +./2--Demonstration/2_Demonstration_Political_Rally_2_219.jpg +./2--Demonstration/2_Demonstration_Demonstrators_2_41.jpg +./2--Demonstration/2_Demonstration_Political_Rally_2_114.jpg +./2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_655.jpg +./2--Demonstration/2_Demonstration_Demonstrators_2_567.jpg +./2--Demonstration/2_Demonstration_Political_Rally_2_267.jpg +./2--Demonstration/2_Demonstration_Demonstrators_2_666.jpg +./2--Demonstration/2_Demonstration_Political_Rally_2_491.jpg +./2--Demonstration/2_Demonstration_Demonstrators_2_456.jpg +./2--Demonstration/2_Demonstration_Protesters_2_561.jpg +./2--Demonstration/2_Demonstration_Protesters_2_291.jpg +./2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_924.jpg +./2--Demonstration/2_Demonstration_Political_Rally_2_891.jpg +./2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_813.jpg +./2--Demonstration/2_Demonstration_Protesters_2_456.jpg +./2--Demonstration/2_Demonstration_Political_Rally_2_365.jpg +./2--Demonstration/2_Demonstration_Protesters_2_293.jpg +./2--Demonstration/2_Demonstration_Political_Rally_2_884.jpg +./2--Demonstration/2_Demonstration_Political_Rally_2_135.jpg +./2--Demonstration/2_Demonstration_Demonstrators_2_518.jpg +./2--Demonstration/2_Demonstration_Demonstrators_2_196.jpg +./2--Demonstration/2_Demonstration_Protesters_2_542.jpg +./2--Demonstration/2_Demonstration_Political_Rally_2_57.jpg +./2--Demonstration/2_Demonstration_Political_Rally_2_584.jpg +./2--Demonstration/2_Demonstration_Political_Rally_2_746.jpg +./2--Demonstration/2_Demonstration_Demonstrators_2_100.jpg +./2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_58.jpg +./2--Demonstration/2_Demonstration_Protesters_2_117.jpg +./2--Demonstration/2_Demonstration_Demonstrators_2_413.jpg +./2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_795.jpg +./2--Demonstration/2_Demonstration_Political_Rally_2_637.jpg +./2--Demonstration/2_Demonstration_Political_Rally_2_172.jpg +./2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_453.jpg +./2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_512.jpg 
+./2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_441.jpg +./2--Demonstration/2_Demonstration_Protesters_2_559.jpg +./2--Demonstration/2_Demonstration_Demonstrators_2_28.jpg +./2--Demonstration/2_Demonstration_Protesters_2_460.jpg +./2--Demonstration/2_Demonstration_Political_Rally_2_896.jpg +./2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_1.jpg +./2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_173.jpg +./2--Demonstration/2_Demonstration_Protesters_2_46.jpg +./2--Demonstration/2_Demonstration_Political_Rally_2_823.jpg +./2--Demonstration/2_Demonstration_Protesters_2_16.jpg +./2--Demonstration/2_Demonstration_Political_Rally_2_659.jpg +./2--Demonstration/2_Demonstration_Demonstrators_2_378.jpg +./2--Demonstration/2_Demonstration_Protesters_2_178.jpg +./2--Demonstration/2_Demonstration_Protesters_2_796.jpg +./2--Demonstration/2_Demonstration_Political_Rally_2_741.jpg +./2--Demonstration/2_Demonstration_Protesters_2_884.jpg +./2--Demonstration/2_Demonstration_Political_Rally_2_64.jpg +./2--Demonstration/2_Demonstration_Political_Rally_2_341.jpg +./2--Demonstration/2_Demonstration_Protesters_2_618.jpg +./2--Demonstration/2_Demonstration_Demonstrators_2_700.jpg +./2--Demonstration/2_Demonstration_Protesters_2_822.jpg +./2--Demonstration/2_Demonstration_Political_Rally_2_286.jpg +./2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_419.jpg +./2--Demonstration/2_Demonstration_Political_Rally_2_752.jpg +./2--Demonstration/2_Demonstration_Protesters_2_563.jpg +./2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_183.jpg +./2--Demonstration/2_Demonstration_Demonstrators_2_488.jpg +./2--Demonstration/2_Demonstration_Demonstrators_2_654.jpg +./2--Demonstration/2_Demonstration_Political_Rally_2_867.jpg +./2--Demonstration/2_Demonstration_Political_Rally_2_985.jpg +./2--Demonstration/2_Demonstration_Political_Rally_2_700.jpg +./2--Demonstration/2_Demonstration_Protesters_2_65.jpg +./2--Demonstration/2_Demonstration_Demonstrators_2_672.jpg +./2--Demonstration/2_Demonstration_Protesters_2_260.jpg +./2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_915.jpg +./2--Demonstration/2_Demonstration_Demonstrators_2_486.jpg +./2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_395.jpg +./2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_606.jpg +./2--Demonstration/2_Demonstration_Demonstrators_2_188.jpg +./2--Demonstration/2_Demonstration_Demonstrators_2_244.jpg +./2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_385.jpg +./2--Demonstration/2_Demonstration_Political_Rally_2_791.jpg +./2--Demonstration/2_Demonstration_Protesters_2_352.jpg +./2--Demonstration/2_Demonstration_Demonstrators_2_546.jpg +./2--Demonstration/2_Demonstration_Protesters_2_131.jpg +./2--Demonstration/2_Demonstration_Protesters_2_486.jpg +./2--Demonstration/2_Demonstration_Protesters_2_577.jpg +./2--Demonstration/2_Demonstration_Demonstrators_2_162.jpg +./2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_450.jpg +./2--Demonstration/2_Demonstration_Protesters_2_800.jpg +./2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_314.jpg +./2--Demonstration/2_Demonstration_Protesters_2_531.jpg +./2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_117.jpg +./2--Demonstration/2_Demonstration_Demonstration_Or_Protest_2_816.jpg +./27--Spa/27_Spa_Spa_27_486.jpg +./27--Spa/27_Spa_Spa_27_691.jpg +./27--Spa/27_Spa_Spa_27_420.jpg +./27--Spa/27_Spa_Spa_27_360.jpg +./27--Spa/27_Spa_Spa_27_109.jpg +./27--Spa/27_Spa_Spa_27_38.jpg 
+./27--Spa/27_Spa_Spa_27_329.jpg +./27--Spa/27_Spa_Spa_27_157.jpg +./27--Spa/27_Spa_Spa_27_168.jpg +./27--Spa/27_Spa_Spa_27_851.jpg +./27--Spa/27_Spa_Spa_27_716.jpg +./27--Spa/27_Spa_Spa_27_768.jpg +./27--Spa/27_Spa_Spa_27_656.jpg +./27--Spa/27_Spa_Spa_27_728.jpg +./27--Spa/27_Spa_Spa_27_225.jpg +./27--Spa/27_Spa_Spa_27_322.jpg +./27--Spa/27_Spa_Spa_27_512.jpg +./27--Spa/27_Spa_Spa_27_121.jpg +./27--Spa/27_Spa_Spa_27_393.jpg +./27--Spa/27_Spa_Spa_27_782.jpg +./27--Spa/27_Spa_Spa_27_212.jpg +./44--Aerobics/44_Aerobics_Aerobics_44_194.jpg +./44--Aerobics/44_Aerobics_Aerobics_44_127.jpg +./44--Aerobics/44_Aerobics_Aerobics_44_852.jpg +./44--Aerobics/44_Aerobics_Aerobics_44_919.jpg +./44--Aerobics/44_Aerobics_Aerobics_44_216.jpg +./44--Aerobics/44_Aerobics_Aerobics_44_17.jpg +./44--Aerobics/44_Aerobics_Aerobics_44_583.jpg +./44--Aerobics/44_Aerobics_Aerobics_44_610.jpg +./44--Aerobics/44_Aerobics_Aerobics_44_231.jpg +./44--Aerobics/44_Aerobics_Aerobics_44_343.jpg +./44--Aerobics/44_Aerobics_Aerobics_44_332.jpg +./44--Aerobics/44_Aerobics_Aerobics_44_629.jpg +./44--Aerobics/44_Aerobics_Aerobics_44_742.jpg +./44--Aerobics/44_Aerobics_Aerobics_44_916.jpg +./44--Aerobics/44_Aerobics_Aerobics_44_430.jpg +./44--Aerobics/44_Aerobics_Aerobics_44_120.jpg +./44--Aerobics/44_Aerobics_Aerobics_44_659.jpg +./44--Aerobics/44_Aerobics_Aerobics_44_937.jpg +./44--Aerobics/44_Aerobics_Aerobics_44_762.jpg +./44--Aerobics/44_Aerobics_Aerobics_44_652.jpg +./44--Aerobics/44_Aerobics_Aerobics_44_379.jpg +./44--Aerobics/44_Aerobics_Aerobics_44_66.jpg +./44--Aerobics/44_Aerobics_Aerobics_44_96.jpg +./44--Aerobics/44_Aerobics_Aerobics_44_184.jpg +./44--Aerobics/44_Aerobics_Aerobics_44_443.jpg +./44--Aerobics/44_Aerobics_Aerobics_44_240.jpg +./44--Aerobics/44_Aerobics_Aerobics_44_337.jpg +./44--Aerobics/44_Aerobics_Aerobics_44_339.jpg +./44--Aerobics/44_Aerobics_Aerobics_44_76.jpg +./44--Aerobics/44_Aerobics_Aerobics_44_407.jpg +./44--Aerobics/44_Aerobics_Aerobics_44_400.jpg +./44--Aerobics/44_Aerobics_Aerobics_44_237.jpg +./44--Aerobics/44_Aerobics_Aerobics_44_597.jpg +./44--Aerobics/44_Aerobics_Aerobics_44_640.jpg +./44--Aerobics/44_Aerobics_Aerobics_44_755.jpg +./44--Aerobics/44_Aerobics_Aerobics_44_167.jpg +./44--Aerobics/44_Aerobics_Aerobics_44_1032.jpg +./44--Aerobics/44_Aerobics_Aerobics_44_35.jpg +./44--Aerobics/44_Aerobics_Aerobics_44_71.jpg +./44--Aerobics/44_Aerobics_Aerobics_44_578.jpg +./44--Aerobics/44_Aerobics_Aerobics_44_370.jpg +./44--Aerobics/44_Aerobics_Aerobics_44_809.jpg +./44--Aerobics/44_Aerobics_Aerobics_44_246.jpg +./44--Aerobics/44_Aerobics_Aerobics_44_3.jpg +./44--Aerobics/44_Aerobics_Aerobics_44_173.jpg +./44--Aerobics/44_Aerobics_Aerobics_44_585.jpg +./44--Aerobics/44_Aerobics_Aerobics_44_936.jpg +./44--Aerobics/44_Aerobics_Aerobics_44_688.jpg +./44--Aerobics/44_Aerobics_Aerobics_44_549.jpg +./44--Aerobics/44_Aerobics_Aerobics_44_329.jpg +./44--Aerobics/44_Aerobics_Aerobics_44_650.jpg +./44--Aerobics/44_Aerobics_Aerobics_44_433.jpg +./44--Aerobics/44_Aerobics_Aerobics_44_707.jpg +./44--Aerobics/44_Aerobics_Aerobics_44_769.jpg +./44--Aerobics/44_Aerobics_Aerobics_44_794.jpg +./58--Hockey/58_Hockey_icehockey_puck_58_248.jpg +./58--Hockey/58_Hockey_icehockey_puck_58_365.jpg +./58--Hockey/58_Hockey_icehockey_puck_58_692.jpg +./58--Hockey/58_Hockey_icehockey_puck_58_469.jpg +./58--Hockey/58_Hockey_icehockey_puck_58_50.jpg +./58--Hockey/58_Hockey_icehockey_puck_58_244.jpg +./58--Hockey/58_Hockey_icehockey_puck_58_17.jpg +./58--Hockey/58_Hockey_icehockey_puck_58_715.jpg 
+./58--Hockey/58_Hockey_icehockey_puck_58_680.jpg +./58--Hockey/58_Hockey_icehockey_puck_58_553.jpg +./58--Hockey/58_Hockey_icehockey_puck_58_785.jpg +./58--Hockey/58_Hockey_icehockey_puck_58_403.jpg +./58--Hockey/58_Hockey_icehockey_puck_58_880.jpg +./58--Hockey/58_Hockey_icehockey_puck_58_940.jpg +./58--Hockey/58_Hockey_icehockey_puck_58_895.jpg +./58--Hockey/58_Hockey_icehockey_puck_58_184.jpg +./58--Hockey/58_Hockey_icehockey_puck_58_455.jpg +./58--Hockey/58_Hockey_icehockey_puck_58_835.jpg +./58--Hockey/58_Hockey_icehockey_puck_58_926.jpg +./58--Hockey/58_Hockey_icehockey_puck_58_697.jpg +./58--Hockey/58_Hockey_icehockey_puck_58_182.jpg +./58--Hockey/58_Hockey_icehockey_puck_58_655.jpg +./58--Hockey/58_Hockey_icehockey_puck_58_753.jpg +./58--Hockey/58_Hockey_icehockey_puck_58_507.jpg +./58--Hockey/58_Hockey_icehockey_puck_58_467.jpg +./58--Hockey/58_Hockey_icehockey_puck_58_475.jpg +./58--Hockey/58_Hockey_icehockey_puck_58_285.jpg +./58--Hockey/58_Hockey_icehockey_puck_58_653.jpg +./58--Hockey/58_Hockey_icehockey_puck_58_118.jpg +./58--Hockey/58_Hockey_icehockey_puck_58_94.jpg +./58--Hockey/58_Hockey_icehockey_puck_58_221.jpg +./58--Hockey/58_Hockey_icehockey_puck_58_493.jpg +./58--Hockey/58_Hockey_icehockey_puck_58_212.jpg +./58--Hockey/58_Hockey_icehockey_puck_58_825.jpg +./58--Hockey/58_Hockey_icehockey_puck_58_531.jpg +./58--Hockey/58_Hockey_icehockey_puck_58_431.jpg +./58--Hockey/58_Hockey_icehockey_puck_58_671.jpg +./58--Hockey/58_Hockey_icehockey_puck_58_113.jpg +./58--Hockey/58_Hockey_icehockey_puck_58_592.jpg +./58--Hockey/58_Hockey_icehockey_puck_58_290.jpg +./58--Hockey/58_Hockey_icehockey_puck_58_404.jpg +./58--Hockey/58_Hockey_icehockey_puck_58_262.jpg +./58--Hockey/58_Hockey_icehockey_puck_58_330.jpg +./58--Hockey/58_Hockey_icehockey_puck_58_384.jpg +./58--Hockey/58_Hockey_icehockey_puck_58_245.jpg +./14--Traffic/14_Traffic_Traffic_14_840.jpg +./14--Traffic/14_Traffic_Traffic_14_504.jpg +./14--Traffic/14_Traffic_Traffic_14_654.jpg +./14--Traffic/14_Traffic_Traffic_14_361.jpg +./14--Traffic/14_Traffic_Traffic_14_505.jpg +./14--Traffic/14_Traffic_Traffic_14_677.jpg +./14--Traffic/14_Traffic_Traffic_14_722.jpg +./14--Traffic/14_Traffic_Traffic_14_443.jpg +./14--Traffic/14_Traffic_Traffic_14_380.jpg +./14--Traffic/14_Traffic_Traffic_14_675.jpg +./14--Traffic/14_Traffic_Traffic_14_728.jpg +./14--Traffic/14_Traffic_Traffic_14_55.jpg +./14--Traffic/14_Traffic_Traffic_14_834.jpg +./14--Traffic/14_Traffic_Traffic_14_644.jpg +./14--Traffic/14_Traffic_Traffic_14_713.jpg +./14--Traffic/14_Traffic_Traffic_14_267.jpg +./14--Traffic/14_Traffic_Traffic_14_170.jpg +./14--Traffic/14_Traffic_Traffic_14_253.jpg +./14--Traffic/14_Traffic_Traffic_14_850.jpg +./7--Cheering/7_Cheering_Cheering_7_57.jpg +./7--Cheering/7_Cheering_Cheering_7_408.jpg +./7--Cheering/7_Cheering_Cheering_7_469.jpg +./7--Cheering/7_Cheering_Cheering_7_29.jpg +./7--Cheering/7_Cheering_Cheering_7_60.jpg +./7--Cheering/7_Cheering_Cheering_7_530.jpg +./7--Cheering/7_Cheering_Cheering_7_687.jpg +./7--Cheering/7_Cheering_Cheering_7_426.jpg +./7--Cheering/7_Cheering_Cheering_7_386.jpg +./7--Cheering/7_Cheering_Cheering_7_473.jpg +./7--Cheering/7_Cheering_Cheering_7_835.jpg +./7--Cheering/7_Cheering_Cheering_7_138.jpg +./7--Cheering/7_Cheering_Cheering_7_870.jpg +./7--Cheering/7_Cheering_Cheering_7_239.jpg +./7--Cheering/7_Cheering_Cheering_7_558.jpg +./7--Cheering/7_Cheering_Cheering_7_724.jpg +./7--Cheering/7_Cheering_Cheering_7_195.jpg +./7--Cheering/7_Cheering_Cheering_7_171.jpg +./7--Cheering/7_Cheering_Cheering_7_293.jpg 
+./7--Cheering/7_Cheering_Cheering_7_427.jpg +./7--Cheering/7_Cheering_Cheering_7_542.jpg +./7--Cheering/7_Cheering_Cheering_7_884.jpg +./7--Cheering/7_Cheering_Cheering_7_345.jpg +./7--Cheering/7_Cheering_Cheering_7_125.jpg +./7--Cheering/7_Cheering_Cheering_7_631.jpg +./7--Cheering/7_Cheering_Cheering_7_334.jpg +./7--Cheering/7_Cheering_Cheering_7_500.jpg +./7--Cheering/7_Cheering_Cheering_7_692.jpg +./7--Cheering/7_Cheering_Cheering_7_373.jpg +./7--Cheering/7_Cheering_Cheering_7_313.jpg +./7--Cheering/7_Cheering_Cheering_7_118.jpg +./7--Cheering/7_Cheering_Cheering_7_134.jpg +./7--Cheering/7_Cheering_Cheering_7_536.jpg +./7--Cheering/7_Cheering_Cheering_7_209.jpg +./7--Cheering/7_Cheering_Cheering_7_739.jpg +./7--Cheering/7_Cheering_Cheering_7_802.jpg +./7--Cheering/7_Cheering_Cheering_7_391.jpg +./7--Cheering/7_Cheering_Cheering_7_404.jpg +./7--Cheering/7_Cheering_Cheering_7_655.jpg +./7--Cheering/7_Cheering_Cheering_7_413.jpg +./7--Cheering/7_Cheering_Cheering_7_462.jpg +./47--Matador_Bullfighter/47_Matador_Bullfighter_Matador_Bullfighter_47_912.jpg +./47--Matador_Bullfighter/47_Matador_Bullfighter_matadorbullfighting_47_152.jpg +./47--Matador_Bullfighter/47_Matador_Bullfighter_matadorbullfighting_47_845.jpg +./47--Matador_Bullfighter/47_Matador_Bullfighter_Matador_Bullfighter_47_575.jpg +./47--Matador_Bullfighter/47_Matador_Bullfighter_matadorbullfighting_47_240.jpg +./47--Matador_Bullfighter/47_Matador_Bullfighter_matadorbullfighting_47_660.jpg +./47--Matador_Bullfighter/47_Matador_Bullfighter_matadorbullfighting_47_177.jpg +./47--Matador_Bullfighter/47_Matador_Bullfighter_matadorbullfighting_47_777.jpg +./47--Matador_Bullfighter/47_Matador_Bullfighter_Matador_Bullfighter_47_617.jpg +./47--Matador_Bullfighter/47_Matador_Bullfighter_matadorbullfighting_47_536.jpg +./47--Matador_Bullfighter/47_Matador_Bullfighter_matadorbullfighting_47_72.jpg +./47--Matador_Bullfighter/47_Matador_Bullfighter_matadorbullfighting_47_511.jpg +./47--Matador_Bullfighter/47_Matador_Bullfighter_Matador_Bullfighter_47_254.jpg +./47--Matador_Bullfighter/47_Matador_Bullfighter_Matador_Bullfighter_47_636.jpg +./47--Matador_Bullfighter/47_Matador_Bullfighter_Matador_Bullfighter_47_703.jpg +./47--Matador_Bullfighter/47_Matador_Bullfighter_Matador_Bullfighter_47_782.jpg +./47--Matador_Bullfighter/47_Matador_Bullfighter_matadorbullfighting_47_645.jpg +./47--Matador_Bullfighter/47_Matador_Bullfighter_matadorbullfighting_47_657.jpg +./47--Matador_Bullfighter/47_Matador_Bullfighter_Matador_Bullfighter_47_338.jpg +./47--Matador_Bullfighter/47_Matador_Bullfighter_matadorbullfighting_47_354.jpg +./47--Matador_Bullfighter/47_Matador_Bullfighter_Matador_Bullfighter_47_566.jpg +./47--Matador_Bullfighter/47_Matador_Bullfighter_Matador_Bullfighter_47_468.jpg +./47--Matador_Bullfighter/47_Matador_Bullfighter_Matador_Bullfighter_47_588.jpg +./47--Matador_Bullfighter/47_Matador_Bullfighter_Matador_Bullfighter_47_715.jpg +./47--Matador_Bullfighter/47_Matador_Bullfighter_Matador_Bullfighter_47_171.jpg +./47--Matador_Bullfighter/47_Matador_Bullfighter_matadorbullfighting_47_443.jpg +./47--Matador_Bullfighter/47_Matador_Bullfighter_matadorbullfighting_47_38.jpg +./47--Matador_Bullfighter/47_Matador_Bullfighter_matadorbullfighting_47_778.jpg +./47--Matador_Bullfighter/47_Matador_Bullfighter_Matador_Bullfighter_47_827.jpg +./47--Matador_Bullfighter/47_Matador_Bullfighter_matadorbullfighting_47_491.jpg +./47--Matador_Bullfighter/47_Matador_Bullfighter_matadorbullfighting_47_572.jpg 
+./47--Matador_Bullfighter/47_Matador_Bullfighter_Matador_Bullfighter_47_785.jpg +./47--Matador_Bullfighter/47_Matador_Bullfighter_Matador_Bullfighter_47_193.jpg +./47--Matador_Bullfighter/47_Matador_Bullfighter_matadorbullfighting_47_405.jpg +./47--Matador_Bullfighter/47_Matador_Bullfighter_Matador_Bullfighter_47_196.jpg +./47--Matador_Bullfighter/47_Matador_Bullfighter_matadorbullfighting_47_837.jpg +./47--Matador_Bullfighter/47_Matador_Bullfighter_Matador_Bullfighter_47_874.jpg +./47--Matador_Bullfighter/47_Matador_Bullfighter_Matador_Bullfighter_47_761.jpg +./47--Matador_Bullfighter/47_Matador_Bullfighter_matadorbullfighting_47_779.jpg +./47--Matador_Bullfighter/47_Matador_Bullfighter_matadorbullfighting_47_812.jpg +./47--Matador_Bullfighter/47_Matador_Bullfighter_Matador_Bullfighter_47_432.jpg +./47--Matador_Bullfighter/47_Matador_Bullfighter_matadorbullfighting_47_42.jpg +./47--Matador_Bullfighter/47_Matador_Bullfighter_Matador_Bullfighter_47_567.jpg +./47--Matador_Bullfighter/47_Matador_Bullfighter_matadorbullfighting_47_207.jpg +./47--Matador_Bullfighter/47_Matador_Bullfighter_Matador_Bullfighter_47_385.jpg +./47--Matador_Bullfighter/47_Matador_Bullfighter_Matador_Bullfighter_47_610.jpg +./47--Matador_Bullfighter/47_Matador_Bullfighter_matadorbullfighting_47_746.jpg +./47--Matador_Bullfighter/47_Matador_Bullfighter_matadorbullfighting_47_731.jpg +./47--Matador_Bullfighter/47_Matador_Bullfighter_matadorbullfighting_47_266.jpg +./47--Matador_Bullfighter/47_Matador_Bullfighter_matadorbullfighting_47_19.jpg +./47--Matador_Bullfighter/47_Matador_Bullfighter_Matador_Bullfighter_47_631.jpg +./47--Matador_Bullfighter/47_Matador_Bullfighter_matadorbullfighting_47_561.jpg +./47--Matador_Bullfighter/47_Matador_Bullfighter_matadorbullfighting_47_300.jpg +./47--Matador_Bullfighter/47_Matador_Bullfighter_Matador_Bullfighter_47_195.jpg +./47--Matador_Bullfighter/47_Matador_Bullfighter_matadorbullfighting_47_641.jpg +./47--Matador_Bullfighter/47_Matador_Bullfighter_Matador_Bullfighter_47_179.jpg +./47--Matador_Bullfighter/47_Matador_Bullfighter_Matador_Bullfighter_47_583.jpg +./47--Matador_Bullfighter/47_Matador_Bullfighter_matadorbullfighting_47_936.jpg +./47--Matador_Bullfighter/47_Matador_Bullfighter_matadorbullfighting_47_710.jpg +./47--Matador_Bullfighter/47_Matador_Bullfighter_matadorbullfighting_47_236.jpg +./47--Matador_Bullfighter/47_Matador_Bullfighter_Matador_Bullfighter_47_354.jpg +./28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_487.jpg +./28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_198.jpg +./28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_989.jpg +./28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_1018.jpg +./28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_656.jpg +./28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_130.jpg +./28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_663.jpg +./28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_267.jpg +./28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_782.jpg +./28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_535.jpg +./28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_357.jpg +./28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_448.jpg +./28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_265.jpg +./28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_2.jpg +./28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_118.jpg +./28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_90.jpg +./28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_826.jpg +./28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_590.jpg +./28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_835.jpg +./28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_124.jpg 
+./28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_480.jpg +./28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_697.jpg +./28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_751.jpg +./28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_711.jpg +./28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_792.jpg +./28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_244.jpg +./28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_862.jpg +./28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_959.jpg +./28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_22.jpg +./28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_868.jpg +./28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_866.jpg +./28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_643.jpg +./28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_880.jpg +./28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_86.jpg +./28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_7.jpg +./28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_877.jpg +./28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_723.jpg +./28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_327.jpg +./28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_770.jpg +./28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_126.jpg +./28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_144.jpg +./28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_683.jpg +./28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_507.jpg +./28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_282.jpg +./28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_165.jpg +./28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_557.jpg +./29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_148.jpg +./29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_374.jpg +./29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_590.jpg +./29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_250.jpg +./29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_146.jpg +./29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_477.jpg +./29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_626.jpg +./29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_380.jpg +./29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_84.jpg +./29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_463.jpg +./29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_208.jpg +./29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_491.jpg +./29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_310.jpg +./29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_251.jpg +./29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_158.jpg +./29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_761.jpg +./29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_316.jpg +./29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_489.jpg +./29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_10.jpg +./29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_311.jpg +./29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_525.jpg +./29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_451.jpg +./29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_161.jpg +./29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_902.jpg +./29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_432.jpg +./29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_211.jpg 
+./29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_941.jpg +./29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_221.jpg +./29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_66.jpg +./29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_126.jpg +./29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_524.jpg +./29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_900.jpg +./29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_21.jpg +./29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_363.jpg +./29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_312.jpg +./29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_81.jpg +./29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_684.jpg +./29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_624.jpg +./29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_358.jpg +./29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_494.jpg +./29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_632.jpg +./29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_222.jpg +./29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_436.jpg +./29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_585.jpg +./29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_506.jpg +./29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_822.jpg +./29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_74.jpg +./29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_42.jpg +./29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_132.jpg +./29--Students_Schoolkids/29_Students_Schoolkids_Students_Schoolkids_29_130.jpg +./37--Soccer/37_Soccer_soccer_ball_37_479.jpg +./37--Soccer/37_Soccer_soccer_ball_37_60.jpg +./37--Soccer/37_Soccer_soccer_ball_37_994.jpg +./37--Soccer/37_Soccer_soccer_ball_37_643.jpg +./37--Soccer/37_Soccer_soccer_ball_37_269.jpg +./37--Soccer/37_Soccer_soccer_ball_37_506.jpg +./37--Soccer/37_Soccer_soccer_ball_37_685.jpg +./37--Soccer/37_Soccer_Soccer_37_263.jpg +./37--Soccer/37_Soccer_Soccer_37_393.jpg +./37--Soccer/37_Soccer_soccer_ball_37_886.jpg +./37--Soccer/37_Soccer_soccer_ball_37_851.jpg +./37--Soccer/37_Soccer_Soccer_37_114.jpg +./37--Soccer/37_Soccer_Soccer_37_74.jpg +./37--Soccer/37_Soccer_soccer_ball_37_28.jpg +./37--Soccer/37_Soccer_soccer_ball_37_8.jpg +./37--Soccer/37_Soccer_soccer_ball_37_698.jpg +./37--Soccer/37_Soccer_soccer_ball_37_832.jpg +./37--Soccer/37_Soccer_soccer_ball_37_345.jpg +./37--Soccer/37_Soccer_Soccer_37_618.jpg +./37--Soccer/37_Soccer_soccer_ball_37_1001.jpg +./37--Soccer/37_Soccer_Soccer_37_651.jpg +./37--Soccer/37_Soccer_soccer_ball_37_583.jpg +./37--Soccer/37_Soccer_soccer_ball_37_150.jpg +./37--Soccer/37_Soccer_soccer_ball_37_926.jpg +./37--Soccer/37_Soccer_soccer_ball_37_88.jpg +./37--Soccer/37_Soccer_soccer_ball_37_74.jpg +./37--Soccer/37_Soccer_soccer_ball_37_254.jpg +./37--Soccer/37_Soccer_soccer_ball_37_1011.jpg +./37--Soccer/37_Soccer_soccer_ball_37_867.jpg +./37--Soccer/37_Soccer_Soccer_37_50.jpg +./37--Soccer/37_Soccer_soccer_ball_37_114.jpg +./37--Soccer/37_Soccer_soccer_ball_37_815.jpg +./37--Soccer/37_Soccer_soccer_ball_37_32.jpg +./37--Soccer/37_Soccer_soccer_ball_37_483.jpg +./37--Soccer/37_Soccer_soccer_ball_37_692.jpg 
+./37--Soccer/37_Soccer_Soccer_37_170.jpg +./37--Soccer/37_Soccer_soccer_ball_37_803.jpg +./37--Soccer/37_Soccer_Soccer_37_415.jpg +./37--Soccer/37_Soccer_Soccer_37_469.jpg +./37--Soccer/37_Soccer_soccer_ball_37_512.jpg +./37--Soccer/37_Soccer_Soccer_37_655.jpg +./37--Soccer/37_Soccer_soccer_ball_37_281.jpg +./37--Soccer/37_Soccer_soccer_ball_37_720.jpg +./37--Soccer/37_Soccer_Soccer_37_565.jpg +./37--Soccer/37_Soccer_soccer_ball_37_818.jpg +./37--Soccer/37_Soccer_soccer_ball_37_907.jpg +./37--Soccer/37_Soccer_Soccer_37_52.jpg +./37--Soccer/37_Soccer_soccer_ball_37_238.jpg +./37--Soccer/37_Soccer_soccer_ball_37_113.jpg +./37--Soccer/37_Soccer_Soccer_37_394.jpg +./37--Soccer/37_Soccer_soccer_ball_37_341.jpg +./37--Soccer/37_Soccer_soccer_ball_37_233.jpg +./37--Soccer/37_Soccer_soccer_ball_37_171.jpg +./37--Soccer/37_Soccer_soccer_ball_37_841.jpg +./37--Soccer/37_Soccer_Soccer_37_3.jpg +./56--Voter/56_Voter_peoplevoting_56_663.jpg +./56--Voter/56_Voter_peoplevoting_56_819.jpg +./56--Voter/56_Voter_peoplevoting_56_887.jpg +./56--Voter/56_Voter_peoplevoting_56_747.jpg +./56--Voter/56_Voter_peoplevoting_56_118.jpg +./56--Voter/56_Voter_peoplevoting_56_777.jpg +./56--Voter/56_Voter_peoplevoting_56_723.jpg +./56--Voter/56_Voter_peoplevoting_56_350.jpg +./56--Voter/56_Voter_peoplevoting_56_620.jpg +./56--Voter/56_Voter_peoplevoting_56_460.jpg +./56--Voter/56_Voter_peoplevoting_56_644.jpg +./56--Voter/56_Voter_peoplevoting_56_717.jpg +./56--Voter/56_Voter_peoplevoting_56_21.jpg +./56--Voter/56_Voter_peoplevoting_56_228.jpg +./56--Voter/56_Voter_peoplevoting_56_531.jpg +./56--Voter/56_Voter_peoplevoting_56_140.jpg +./56--Voter/56_Voter_peoplevoting_56_1011.jpg +./56--Voter/56_Voter_peoplevoting_56_339.jpg +./56--Voter/56_Voter_peoplevoting_56_902.jpg +./56--Voter/56_Voter_peoplevoting_56_796.jpg +./56--Voter/56_Voter_peoplevoting_56_712.jpg +./56--Voter/56_Voter_peoplevoting_56_459.jpg +./56--Voter/56_Voter_peoplevoting_56_378.jpg +./56--Voter/56_Voter_peoplevoting_56_13.jpg +./56--Voter/56_Voter_peoplevoting_56_110.jpg +./56--Voter/56_Voter_peoplevoting_56_579.jpg +./56--Voter/56_Voter_peoplevoting_56_260.jpg +./56--Voter/56_Voter_peoplevoting_56_305.jpg +./56--Voter/56_Voter_peoplevoting_56_946.jpg +./56--Voter/56_Voter_peoplevoting_56_344.jpg +./56--Voter/56_Voter_peoplevoting_56_874.jpg +./56--Voter/56_Voter_peoplevoting_56_370.jpg +./56--Voter/56_Voter_peoplevoting_56_1046.jpg +./56--Voter/56_Voter_peoplevoting_56_122.jpg +./56--Voter/56_Voter_peoplevoting_56_781.jpg +./56--Voter/56_Voter_peoplevoting_56_714.jpg +./56--Voter/56_Voter_peoplevoting_56_953.jpg +./56--Voter/56_Voter_peoplevoting_56_410.jpg +./56--Voter/56_Voter_peoplevoting_56_441.jpg +./56--Voter/56_Voter_peoplevoting_56_641.jpg +./56--Voter/56_Voter_peoplevoting_56_346.jpg +./56--Voter/56_Voter_peoplevoting_56_323.jpg +./56--Voter/56_Voter_peoplevoting_56_764.jpg +./56--Voter/56_Voter_peoplevoting_56_873.jpg +./56--Voter/56_Voter_peoplevoting_56_782.jpg +./56--Voter/56_Voter_peoplevoting_56_268.jpg +./56--Voter/56_Voter_peoplevoting_56_842.jpg +./56--Voter/56_Voter_peoplevoting_56_528.jpg +./56--Voter/56_Voter_peoplevoting_56_103.jpg +./56--Voter/56_Voter_peoplevoting_56_558.jpg +./61--Street_Battle/61_Street_Battle_streetfight_61_212.jpg +./61--Street_Battle/61_Street_Battle_streetfight_61_4.jpg +./61--Street_Battle/61_Street_Battle_streetfight_61_546.jpg +./61--Street_Battle/61_Street_Battle_streetfight_61_12.jpg +./61--Street_Battle/61_Street_Battle_streetfight_61_815.jpg 
+./61--Street_Battle/61_Street_Battle_streetfight_61_344.jpg +./61--Street_Battle/61_Street_Battle_streetfight_61_162.jpg +./61--Street_Battle/61_Street_Battle_streetfight_61_430.jpg +./61--Street_Battle/61_Street_Battle_streetfight_61_521.jpg +./61--Street_Battle/61_Street_Battle_streetfight_61_155.jpg +./61--Street_Battle/61_Street_Battle_streetfight_61_907.jpg +./61--Street_Battle/61_Street_Battle_streetfight_61_123.jpg +./61--Street_Battle/61_Street_Battle_streetfight_61_395.jpg +./61--Street_Battle/61_Street_Battle_streetfight_61_407.jpg +./61--Street_Battle/61_Street_Battle_streetfight_61_50.jpg +./61--Street_Battle/61_Street_Battle_streetfight_61_703.jpg +./61--Street_Battle/61_Street_Battle_streetfight_61_158.jpg +./61--Street_Battle/61_Street_Battle_streetfight_61_432.jpg +./61--Street_Battle/61_Street_Battle_streetfight_61_179.jpg +./61--Street_Battle/61_Street_Battle_streetfight_61_606.jpg +./61--Street_Battle/61_Street_Battle_streetfight_61_276.jpg +./61--Street_Battle/61_Street_Battle_streetfight_61_936.jpg +./61--Street_Battle/61_Street_Battle_streetfight_61_558.jpg +./61--Street_Battle/61_Street_Battle_streetfight_61_665.jpg +./61--Street_Battle/61_Street_Battle_streetfight_61_913.jpg +./61--Street_Battle/61_Street_Battle_streetfight_61_350.jpg +./61--Street_Battle/61_Street_Battle_streetfight_61_640.jpg +./61--Street_Battle/61_Street_Battle_streetfight_61_282.jpg +./61--Street_Battle/61_Street_Battle_streetfight_61_375.jpg +./61--Street_Battle/61_Street_Battle_streetfight_61_211.jpg +./61--Street_Battle/61_Street_Battle_streetfight_61_767.jpg +./61--Street_Battle/61_Street_Battle_streetfight_61_22.jpg +./3--Riot/3_Riot_Riot_3_106.jpg +./3--Riot/3_Riot_Riot_3_263.jpg +./3--Riot/3_Riot_Riot_3_415.jpg +./3--Riot/3_Riot_Riot_3_725.jpg +./3--Riot/3_Riot_Riot_3_716.jpg +./3--Riot/3_Riot_Riot_3_405.jpg +./3--Riot/3_Riot_Riot_3_710.jpg +./3--Riot/3_Riot_Riot_3_137.jpg +./3--Riot/3_Riot_Riot_3_689.jpg +./3--Riot/3_Riot_Riot_3_354.jpg +./3--Riot/3_Riot_Riot_3_184.jpg +./3--Riot/3_Riot_Riot_3_604.jpg +./3--Riot/3_Riot_Riot_3_765.jpg +./3--Riot/3_Riot_Riot_3_666.jpg +./3--Riot/3_Riot_Riot_3_488.jpg +./3--Riot/3_Riot_Riot_3_322.jpg +./3--Riot/3_Riot_Riot_3_186.jpg +./3--Riot/3_Riot_Riot_3_772.jpg +./3--Riot/3_Riot_Riot_3_522.jpg +./3--Riot/3_Riot_Riot_3_963.jpg +./3--Riot/3_Riot_Riot_3_306.jpg +./3--Riot/3_Riot_Riot_3_166.jpg +./3--Riot/3_Riot_Riot_3_123.jpg +./3--Riot/3_Riot_Riot_3_506.jpg +./3--Riot/3_Riot_Riot_3_790.jpg +./3--Riot/3_Riot_Riot_3_542.jpg +./3--Riot/3_Riot_Riot_3_199.jpg +./3--Riot/3_Riot_Riot_3_26.jpg +./3--Riot/3_Riot_Riot_3_480.jpg +./3--Riot/3_Riot_Riot_3_436.jpg +./3--Riot/3_Riot_Riot_3_318.jpg +./3--Riot/3_Riot_Riot_3_101.jpg +./3--Riot/3_Riot_Riot_3_750.jpg +./3--Riot/3_Riot_Riot_3_273.jpg +./3--Riot/3_Riot_Riot_3_958.jpg +./3--Riot/3_Riot_Riot_3_1037.jpg +./3--Riot/3_Riot_Riot_3_438.jpg +./3--Riot/3_Riot_Riot_3_993.jpg +./3--Riot/3_Riot_Riot_3_393.jpg +./3--Riot/3_Riot_Riot_3_521.jpg +./25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_912.jpg +./25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_515.jpg +./25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_1046.jpg +./25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_325.jpg +./25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_18.jpg +./25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_640.jpg +./25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_563.jpg +./25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_16.jpg 
+./25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_121.jpg +./25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_173.jpg +./25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_614.jpg +./25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_9.jpg +./25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_419.jpg +./25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_437.jpg +./25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_873.jpg +./25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_469.jpg +./25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_882.jpg +./25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_324.jpg +./25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_467.jpg +./25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_174.jpg +./25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_728.jpg +./25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_436.jpg +./25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_883.jpg +./25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_1026.jpg +./25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_513.jpg +./25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_169.jpg +./25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_440.jpg +./25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_761.jpg +./25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_734.jpg +./25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_869.jpg +./25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_936.jpg +./25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_343.jpg +./25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_683.jpg +./25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_59.jpg +./25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_374.jpg +./25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_1029.jpg +./25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_986.jpg +./25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_463.jpg +./25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_707.jpg +./25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_527.jpg +./25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_271.jpg +./25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_585.jpg +./25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_747.jpg +./25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_1045.jpg +./25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_700.jpg +./25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_993.jpg +./25--Soldier_Patrol/25_Soldier_Patrol_Soldier_Patrol_25_679.jpg +./41--Swimming/41_Swimming_Swimmer_41_688.jpg +./41--Swimming/41_Swimming_Swimmer_41_113.jpg +./41--Swimming/41_Swimming_Swimming_41_106.jpg +./41--Swimming/41_Swimming_Swimmer_41_943.jpg +./41--Swimming/41_Swimming_Swimmer_41_927.jpg +./41--Swimming/41_Swimming_Swimmer_41_701.jpg +./41--Swimming/41_Swimming_Swimmer_41_773.jpg +./41--Swimming/41_Swimming_Swimmer_41_935.jpg +./41--Swimming/41_Swimming_Swimming_41_379.jpg +./41--Swimming/41_Swimming_Swimmer_41_883.jpg +./41--Swimming/41_Swimming_Swimmer_41_293.jpg +./41--Swimming/41_Swimming_Swimmer_41_148.jpg +./41--Swimming/41_Swimming_Swimmer_41_931.jpg +./41--Swimming/41_Swimming_Swimmer_41_369.jpg +./41--Swimming/41_Swimming_Swimming_41_380.jpg +./41--Swimming/41_Swimming_Swimmer_41_43.jpg +./41--Swimming/41_Swimming_Swimming_41_161.jpg +./41--Swimming/41_Swimming_Swimming_41_472.jpg +./41--Swimming/41_Swimming_Swimmer_41_704.jpg +./41--Swimming/41_Swimming_Swimmer_41_711.jpg 
+./41--Swimming/41_Swimming_Swimming_41_73.jpg +./41--Swimming/41_Swimming_Swimming_41_26.jpg +./41--Swimming/41_Swimming_Swimmer_41_483.jpg +./41--Swimming/41_Swimming_Swimmer_41_68.jpg +./41--Swimming/41_Swimming_Swimmer_41_449.jpg +./41--Swimming/41_Swimming_Swimming_41_730.jpg +./41--Swimming/41_Swimming_Swimmer_41_232.jpg +./41--Swimming/41_Swimming_Swimming_41_74.jpg +./41--Swimming/41_Swimming_Swimmer_41_792.jpg +./41--Swimming/41_Swimming_Swimming_41_412.jpg +./41--Swimming/41_Swimming_Swimmer_41_275.jpg +./41--Swimming/41_Swimming_Swimming_41_275.jpg +./41--Swimming/41_Swimming_Swimmer_41_718.jpg +./41--Swimming/41_Swimming_Swimmer_41_976.jpg +./41--Swimming/41_Swimming_Swimmer_41_308.jpg +./41--Swimming/41_Swimming_Swimmer_41_1028.jpg +./41--Swimming/41_Swimming_Swimming_41_822.jpg +./41--Swimming/41_Swimming_Swimming_41_243.jpg +./41--Swimming/41_Swimming_Swimmer_41_288.jpg +./41--Swimming/41_Swimming_Swimmer_41_440.jpg +./41--Swimming/41_Swimming_Swimmer_41_471.jpg +./41--Swimming/41_Swimming_Swimming_41_172.jpg +./41--Swimming/41_Swimming_Swimmer_41_607.jpg +./41--Swimming/41_Swimming_Swimmer_41_170.jpg +./41--Swimming/41_Swimming_Swimmer_41_376.jpg +./41--Swimming/41_Swimming_Swimming_41_699.jpg +./41--Swimming/41_Swimming_Swimmer_41_399.jpg +./41--Swimming/41_Swimming_Swimmer_41_401.jpg +./41--Swimming/41_Swimming_Swimming_41_128.jpg +./41--Swimming/41_Swimming_Swimmer_41_1002.jpg +./41--Swimming/41_Swimming_Swimming_41_240.jpg +./41--Swimming/41_Swimming_Swimmer_41_488.jpg +./41--Swimming/41_Swimming_Swimming_41_535.jpg +./41--Swimming/41_Swimming_Swimmer_41_380.jpg +./41--Swimming/41_Swimming_Swimmer_41_507.jpg +./41--Swimming/41_Swimming_Swimmer_41_358.jpg +./41--Swimming/41_Swimming_Swimming_41_580.jpg +./41--Swimming/41_Swimming_Swimmer_41_55.jpg +./41--Swimming/41_Swimming_Swimmer_41_538.jpg +./41--Swimming/41_Swimming_Swimming_41_52.jpg +./41--Swimming/41_Swimming_Swimmer_41_56.jpg +./41--Swimming/41_Swimming_Swimmer_41_831.jpg +./41--Swimming/41_Swimming_Swimming_41_714.jpg +./41--Swimming/41_Swimming_Swimmer_41_843.jpg +./41--Swimming/41_Swimming_Swimming_41_238.jpg +./41--Swimming/41_Swimming_Swimming_41_466.jpg +./41--Swimming/41_Swimming_Swimmer_41_659.jpg +./41--Swimming/41_Swimming_Swimmer_41_19.jpg +./41--Swimming/41_Swimming_Swimmer_41_1001.jpg +./41--Swimming/41_Swimming_Swimming_41_283.jpg +./41--Swimming/41_Swimming_Swimming_41_271.jpg +./41--Swimming/41_Swimming_Swimmer_41_26.jpg +./41--Swimming/41_Swimming_Swimming_41_641.jpg +./41--Swimming/41_Swimming_Swimming_41_521.jpg +./41--Swimming/41_Swimming_Swimmer_41_885.jpg +./41--Swimming/41_Swimming_Swimmer_41_262.jpg +./41--Swimming/41_Swimming_Swimmer_41_610.jpg +./41--Swimming/41_Swimming_Swimmer_41_564.jpg +./41--Swimming/41_Swimming_Swimmer_41_772.jpg +./41--Swimming/41_Swimming_Swimmer_41_35.jpg +./41--Swimming/41_Swimming_Swimmer_41_755.jpg +./57--Angler/57_Angler_peoplefishing_57_206.jpg +./57--Angler/57_Angler_peoplefishing_57_764.jpg +./57--Angler/57_Angler_peoplefishing_57_933.jpg +./57--Angler/57_Angler_peoplefishing_57_430.jpg +./57--Angler/57_Angler_peoplefishing_57_411.jpg +./57--Angler/57_Angler_peoplefishing_57_515.jpg +./57--Angler/57_Angler_peoplefishing_57_401.jpg +./57--Angler/57_Angler_peoplefishing_57_15.jpg +./57--Angler/57_Angler_peoplefishing_57_442.jpg +./57--Angler/57_Angler_peoplefishing_57_250.jpg +./57--Angler/57_Angler_peoplefishing_57_926.jpg +./57--Angler/57_Angler_peoplefishing_57_17.jpg +./57--Angler/57_Angler_peoplefishing_57_51.jpg 
+./57--Angler/57_Angler_peoplefishing_57_182.jpg +./57--Angler/57_Angler_peoplefishing_57_153.jpg +./57--Angler/57_Angler_peoplefishing_57_1012.jpg +./57--Angler/57_Angler_peoplefishing_57_104.jpg +./57--Angler/57_Angler_peoplefishing_57_803.jpg +./57--Angler/57_Angler_peoplefishing_57_866.jpg +./57--Angler/57_Angler_peoplefishing_57_880.jpg +./57--Angler/57_Angler_peoplefishing_57_394.jpg +./57--Angler/57_Angler_peoplefishing_57_796.jpg +./57--Angler/57_Angler_peoplefishing_57_868.jpg +./57--Angler/57_Angler_peoplefishing_57_139.jpg +./57--Angler/57_Angler_peoplefishing_57_251.jpg +./57--Angler/57_Angler_peoplefishing_57_120.jpg +./57--Angler/57_Angler_peoplefishing_57_20.jpg +./57--Angler/57_Angler_peoplefishing_57_589.jpg +./57--Angler/57_Angler_peoplefishing_57_661.jpg +./57--Angler/57_Angler_peoplefishing_57_1009.jpg +./57--Angler/57_Angler_peoplefishing_57_53.jpg +./57--Angler/57_Angler_peoplefishing_57_924.jpg +./57--Angler/57_Angler_peoplefishing_57_566.jpg +./57--Angler/57_Angler_peoplefishing_57_600.jpg +./57--Angler/57_Angler_peoplefishing_57_110.jpg +./57--Angler/57_Angler_peoplefishing_57_870.jpg +./57--Angler/57_Angler_peoplefishing_57_559.jpg +./57--Angler/57_Angler_peoplefishing_57_900.jpg +./57--Angler/57_Angler_peoplefishing_57_402.jpg +./57--Angler/57_Angler_peoplefishing_57_254.jpg +./6--Funeral/6_Funeral_Funeral_6_432.jpg +./6--Funeral/6_Funeral_Funeral_6_485.jpg +./6--Funeral/6_Funeral_Funeral_6_1029.jpg +./6--Funeral/6_Funeral_Funeral_6_537.jpg +./6--Funeral/6_Funeral_Funeral_6_790.jpg +./6--Funeral/6_Funeral_Funeral_6_241.jpg +./6--Funeral/6_Funeral_Funeral_6_177.jpg +./6--Funeral/6_Funeral_Funeral_6_1006.jpg +./6--Funeral/6_Funeral_Funeral_6_941.jpg +./6--Funeral/6_Funeral_Funeral_6_861.jpg +./6--Funeral/6_Funeral_Funeral_6_987.jpg +./6--Funeral/6_Funeral_Funeral_6_676.jpg +./6--Funeral/6_Funeral_Funeral_6_444.jpg +./6--Funeral/6_Funeral_Funeral_6_292.jpg +./6--Funeral/6_Funeral_Funeral_6_627.jpg +./6--Funeral/6_Funeral_Funeral_6_745.jpg +./6--Funeral/6_Funeral_Funeral_6_937.jpg +./6--Funeral/6_Funeral_Funeral_6_531.jpg +./6--Funeral/6_Funeral_Funeral_6_733.jpg +./6--Funeral/6_Funeral_Funeral_6_364.jpg +./6--Funeral/6_Funeral_Funeral_6_1005.jpg +./6--Funeral/6_Funeral_Funeral_6_870.jpg +./6--Funeral/6_Funeral_Funeral_6_610.jpg +./6--Funeral/6_Funeral_Funeral_6_211.jpg +./6--Funeral/6_Funeral_Funeral_6_618.jpg +./6--Funeral/6_Funeral_Funeral_6_128.jpg +./6--Funeral/6_Funeral_Funeral_6_461.jpg +./6--Funeral/6_Funeral_Funeral_6_109.jpg +./6--Funeral/6_Funeral_Funeral_6_779.jpg +./6--Funeral/6_Funeral_Funeral_6_690.jpg +./6--Funeral/6_Funeral_Funeral_6_572.jpg +./6--Funeral/6_Funeral_Funeral_6_252.jpg +./6--Funeral/6_Funeral_Funeral_6_140.jpg +./6--Funeral/6_Funeral_Funeral_6_760.jpg +./6--Funeral/6_Funeral_Funeral_6_77.jpg +./6--Funeral/6_Funeral_Funeral_6_909.jpg +./6--Funeral/6_Funeral_Funeral_6_759.jpg +./6--Funeral/6_Funeral_Funeral_6_280.jpg +./6--Funeral/6_Funeral_Funeral_6_160.jpg +./6--Funeral/6_Funeral_Funeral_6_696.jpg +./6--Funeral/6_Funeral_Funeral_6_483.jpg +./6--Funeral/6_Funeral_Funeral_6_315.jpg +./4--Dancing/4_Dancing_Dancing_4_1000.jpg +./4--Dancing/4_Dancing_Dancing_4_983.jpg +./4--Dancing/4_Dancing_Dancing_4_253.jpg +./4--Dancing/4_Dancing_Dancing_4_813.jpg +./4--Dancing/4_Dancing_Dancing_4_514.jpg +./4--Dancing/4_Dancing_Dancing_4_224.jpg +./4--Dancing/4_Dancing_Dancing_4_1028.jpg +./4--Dancing/4_Dancing_Dancing_4_375.jpg +./4--Dancing/4_Dancing_Dancing_4_53.jpg +./4--Dancing/4_Dancing_Dancing_4_156.jpg +./4--Dancing/4_Dancing_Dancing_4_769.jpg 
+./4--Dancing/4_Dancing_Dancing_4_718.jpg +./4--Dancing/4_Dancing_Dancing_4_228.jpg +./4--Dancing/4_Dancing_Dancing_4_878.jpg +./4--Dancing/4_Dancing_Dancing_4_494.jpg +./4--Dancing/4_Dancing_Dancing_4_378.jpg +./4--Dancing/4_Dancing_Dancing_4_885.jpg +./4--Dancing/4_Dancing_Dancing_4_960.jpg +./4--Dancing/4_Dancing_Dancing_4_1043.jpg +./4--Dancing/4_Dancing_Dancing_4_21.jpg +./4--Dancing/4_Dancing_Dancing_4_1026.jpg +./4--Dancing/4_Dancing_Dancing_4_327.jpg +./4--Dancing/4_Dancing_Dancing_4_194.jpg +./4--Dancing/4_Dancing_Dancing_4_97.jpg +./4--Dancing/4_Dancing_Dancing_4_162.jpg +./4--Dancing/4_Dancing_Dancing_4_922.jpg +./4--Dancing/4_Dancing_Dancing_4_319.jpg +./4--Dancing/4_Dancing_Dancing_4_489.jpg +./4--Dancing/4_Dancing_Dancing_4_84.jpg +./4--Dancing/4_Dancing_Dancing_4_57.jpg +./4--Dancing/4_Dancing_Dancing_4_384.jpg +./4--Dancing/4_Dancing_Dancing_4_915.jpg +./4--Dancing/4_Dancing_Dancing_4_189.jpg +./4--Dancing/4_Dancing_Dancing_4_517.jpg +./4--Dancing/4_Dancing_Dancing_4_41.jpg +./4--Dancing/4_Dancing_Dancing_4_124.jpg +./4--Dancing/4_Dancing_Dancing_4_289.jpg +./4--Dancing/4_Dancing_Dancing_4_854.jpg +./4--Dancing/4_Dancing_Dancing_4_1029.jpg +./4--Dancing/4_Dancing_Dancing_4_1036.jpg +./4--Dancing/4_Dancing_Dancing_4_240.jpg +./4--Dancing/4_Dancing_Dancing_4_422.jpg +./4--Dancing/4_Dancing_Dancing_4_715.jpg +./15--Stock_Market/15_Stock_Market_Stock_Market_15_569.jpg +./15--Stock_Market/15_Stock_Market_Stock_Market_15_102.jpg +./15--Stock_Market/15_Stock_Market_Stock_Market_15_846.jpg +./15--Stock_Market/15_Stock_Market_Stock_Market_15_781.jpg +./15--Stock_Market/15_Stock_Market_Stock_Market_15_483.jpg +./15--Stock_Market/15_Stock_Market_Stock_Market_15_301.jpg +./15--Stock_Market/15_Stock_Market_Stock_Market_15_554.jpg +./15--Stock_Market/15_Stock_Market_Stock_Market_15_676.jpg +./15--Stock_Market/15_Stock_Market_Stock_Market_15_286.jpg +./15--Stock_Market/15_Stock_Market_Stock_Market_15_706.jpg +./15--Stock_Market/15_Stock_Market_Stock_Market_15_751.jpg +./15--Stock_Market/15_Stock_Market_Stock_Market_15_303.jpg +./15--Stock_Market/15_Stock_Market_Stock_Market_15_542.jpg +./15--Stock_Market/15_Stock_Market_Stock_Market_15_241.jpg +./15--Stock_Market/15_Stock_Market_Stock_Market_15_526.jpg +./15--Stock_Market/15_Stock_Market_Stock_Market_15_382.jpg +./15--Stock_Market/15_Stock_Market_Stock_Market_15_460.jpg +./15--Stock_Market/15_Stock_Market_Stock_Market_15_313.jpg +./15--Stock_Market/15_Stock_Market_Stock_Market_15_731.jpg +./1--Handshaking/1_Handshaking_Handshaking_1_801.jpg +./1--Handshaking/1_Handshaking_Handshaking_1_35.jpg +./1--Handshaking/1_Handshaking_Handshaking_1_733.jpg +./1--Handshaking/1_Handshaking_Handshaking_1_236.jpg +./1--Handshaking/1_Handshaking_Handshaking_1_453.jpg +./1--Handshaking/1_Handshaking_Handshaking_1_411.jpg +./1--Handshaking/1_Handshaking_Handshaking_1_457.jpg +./1--Handshaking/1_Handshaking_Handshaking_1_827.jpg +./1--Handshaking/1_Handshaking_Handshaking_1_567.jpg +./1--Handshaking/1_Handshaking_Handshaking_1_362.jpg +./1--Handshaking/1_Handshaking_Handshaking_1_158.jpg +./1--Handshaking/1_Handshaking_Handshaking_1_313.jpg +./1--Handshaking/1_Handshaking_Handshaking_1_465.jpg +./1--Handshaking/1_Handshaking_Handshaking_1_380.jpg +./1--Handshaking/1_Handshaking_Handshaking_1_602.jpg +./1--Handshaking/1_Handshaking_Handshaking_1_275.jpg +./1--Handshaking/1_Handshaking_Handshaking_1_356.jpg +./1--Handshaking/1_Handshaking_Handshaking_1_343.jpg +./1--Handshaking/1_Handshaking_Handshaking_1_762.jpg 
+./1--Handshaking/1_Handshaking_Handshaking_1_94.jpg +./1--Handshaking/1_Handshaking_Handshaking_1_134.jpg +./1--Handshaking/1_Handshaking_Handshaking_1_357.jpg +./1--Handshaking/1_Handshaking_Handshaking_1_314.jpg +./1--Handshaking/1_Handshaking_Handshaking_1_522.jpg +./1--Handshaking/1_Handshaking_Handshaking_1_107.jpg +./1--Handshaking/1_Handshaking_Handshaking_1_766.jpg +./1--Handshaking/1_Handshaking_Handshaking_1_579.jpg +./1--Handshaking/1_Handshaking_Handshaking_1_664.jpg +./1--Handshaking/1_Handshaking_Handshaking_1_781.jpg +./1--Handshaking/1_Handshaking_Handshaking_1_209.jpg +./9--Press_Conference/9_Press_Conference_Press_Conference_9_278.jpg +./9--Press_Conference/9_Press_Conference_Press_Conference_9_655.jpg +./9--Press_Conference/9_Press_Conference_Press_Conference_9_872.jpg +./9--Press_Conference/9_Press_Conference_Press_Conference_9_397.jpg +./9--Press_Conference/9_Press_Conference_Press_Conference_9_492.jpg +./9--Press_Conference/9_Press_Conference_Press_Conference_9_924.jpg +./9--Press_Conference/9_Press_Conference_Press_Conference_9_636.jpg +./9--Press_Conference/9_Press_Conference_Press_Conference_9_607.jpg +./9--Press_Conference/9_Press_Conference_Press_Conference_9_129.jpg +./9--Press_Conference/9_Press_Conference_Press_Conference_9_100.jpg +./9--Press_Conference/9_Press_Conference_Press_Conference_9_424.jpg +./9--Press_Conference/9_Press_Conference_Press_Conference_9_35.jpg +./9--Press_Conference/9_Press_Conference_Press_Conference_9_748.jpg +./9--Press_Conference/9_Press_Conference_Press_Conference_9_594.jpg +./9--Press_Conference/9_Press_Conference_Press_Conference_9_945.jpg +./9--Press_Conference/9_Press_Conference_Press_Conference_9_883.jpg +./9--Press_Conference/9_Press_Conference_Press_Conference_9_258.jpg +./9--Press_Conference/9_Press_Conference_Press_Conference_9_632.jpg +./9--Press_Conference/9_Press_Conference_Press_Conference_9_60.jpg +./9--Press_Conference/9_Press_Conference_Press_Conference_9_183.jpg +./9--Press_Conference/9_Press_Conference_Press_Conference_9_930.jpg +./9--Press_Conference/9_Press_Conference_Press_Conference_9_161.jpg +./9--Press_Conference/9_Press_Conference_Press_Conference_9_43.jpg +./9--Press_Conference/9_Press_Conference_Press_Conference_9_325.jpg +./9--Press_Conference/9_Press_Conference_Press_Conference_9_257.jpg +./9--Press_Conference/9_Press_Conference_Press_Conference_9_613.jpg +./9--Press_Conference/9_Press_Conference_Press_Conference_9_12.jpg +./9--Press_Conference/9_Press_Conference_Press_Conference_9_328.jpg +./9--Press_Conference/9_Press_Conference_Press_Conference_9_849.jpg +./9--Press_Conference/9_Press_Conference_Press_Conference_9_571.jpg +./9--Press_Conference/9_Press_Conference_Press_Conference_9_344.jpg +./9--Press_Conference/9_Press_Conference_Press_Conference_9_214.jpg +./9--Press_Conference/9_Press_Conference_Press_Conference_9_552.jpg +./9--Press_Conference/9_Press_Conference_Press_Conference_9_31.jpg +./9--Press_Conference/9_Press_Conference_Press_Conference_9_757.jpg +./9--Press_Conference/9_Press_Conference_Press_Conference_9_41.jpg +./9--Press_Conference/9_Press_Conference_Press_Conference_9_209.jpg +./9--Press_Conference/9_Press_Conference_Press_Conference_9_66.jpg +./9--Press_Conference/9_Press_Conference_Press_Conference_9_933.jpg +./9--Press_Conference/9_Press_Conference_Press_Conference_9_648.jpg +./9--Press_Conference/9_Press_Conference_Press_Conference_9_40.jpg +./9--Press_Conference/9_Press_Conference_Press_Conference_9_784.jpg +./9--Press_Conference/9_Press_Conference_Press_Conference_9_615.jpg 
+./9--Press_Conference/9_Press_Conference_Press_Conference_9_431.jpg +./9--Press_Conference/9_Press_Conference_Press_Conference_9_432.jpg +./9--Press_Conference/9_Press_Conference_Press_Conference_9_297.jpg +./9--Press_Conference/9_Press_Conference_Press_Conference_9_693.jpg +./9--Press_Conference/9_Press_Conference_Press_Conference_9_352.jpg +./9--Press_Conference/9_Press_Conference_Press_Conference_9_391.jpg +./9--Press_Conference/9_Press_Conference_Press_Conference_9_34.jpg +./9--Press_Conference/9_Press_Conference_Press_Conference_9_182.jpg +./9--Press_Conference/9_Press_Conference_Press_Conference_9_518.jpg +./9--Press_Conference/9_Press_Conference_Press_Conference_9_595.jpg +./9--Press_Conference/9_Press_Conference_Press_Conference_9_828.jpg +./9--Press_Conference/9_Press_Conference_Press_Conference_9_114.jpg +./9--Press_Conference/9_Press_Conference_Press_Conference_9_332.jpg +./9--Press_Conference/9_Press_Conference_Press_Conference_9_658.jpg +./9--Press_Conference/9_Press_Conference_Press_Conference_9_89.jpg +./9--Press_Conference/9_Press_Conference_Press_Conference_9_520.jpg +./9--Press_Conference/9_Press_Conference_Press_Conference_9_767.jpg +./9--Press_Conference/9_Press_Conference_Press_Conference_9_345.jpg +./9--Press_Conference/9_Press_Conference_Press_Conference_9_147.jpg +./9--Press_Conference/9_Press_Conference_Press_Conference_9_907.jpg +./9--Press_Conference/9_Press_Conference_Press_Conference_9_563.jpg +./9--Press_Conference/9_Press_Conference_Press_Conference_9_165.jpg +./9--Press_Conference/9_Press_Conference_Press_Conference_9_343.jpg +./9--Press_Conference/9_Press_Conference_Press_Conference_9_252.jpg +./9--Press_Conference/9_Press_Conference_Press_Conference_9_710.jpg +./9--Press_Conference/9_Press_Conference_Press_Conference_9_346.jpg +./9--Press_Conference/9_Press_Conference_Press_Conference_9_45.jpg +./9--Press_Conference/9_Press_Conference_Press_Conference_9_521.jpg +./9--Press_Conference/9_Press_Conference_Press_Conference_9_357.jpg +./9--Press_Conference/9_Press_Conference_Press_Conference_9_105.jpg +./9--Press_Conference/9_Press_Conference_Press_Conference_9_74.jpg +./9--Press_Conference/9_Press_Conference_Press_Conference_9_141.jpg +./9--Press_Conference/9_Press_Conference_Press_Conference_9_196.jpg +./9--Press_Conference/9_Press_Conference_Press_Conference_9_375.jpg +./52--Photographers/52_Photographers_taketouristphotos_52_536.jpg +./52--Photographers/52_Photographers_taketouristphotos_52_659.jpg +./52--Photographers/52_Photographers_taketouristphotos_52_266.jpg +./52--Photographers/52_Photographers_taketouristphotos_52_487.jpg +./52--Photographers/52_Photographers_photographertakingphoto_52_653.jpg +./52--Photographers/52_Photographers_photographertakingphoto_52_695.jpg +./52--Photographers/52_Photographers_photographertakingphoto_52_807.jpg +./52--Photographers/52_Photographers_taketouristphotos_52_661.jpg +./52--Photographers/52_Photographers_taketouristphotos_52_281.jpg +./52--Photographers/52_Photographers_taketouristphotos_52_123.jpg +./52--Photographers/52_Photographers_taketouristphotos_52_51.jpg +./52--Photographers/52_Photographers_photographertakingphoto_52_780.jpg +./52--Photographers/52_Photographers_photographertakingphoto_52_568.jpg +./52--Photographers/52_Photographers_photographertakingphoto_52_578.jpg +./52--Photographers/52_Photographers_taketouristphotos_52_86.jpg +./52--Photographers/52_Photographers_photographertakingphoto_52_61.jpg +./52--Photographers/52_Photographers_photographertakingphoto_52_428.jpg 
+./52--Photographers/52_Photographers_taketouristphotos_52_208.jpg +./52--Photographers/52_Photographers_taketouristphotos_52_97.jpg +./52--Photographers/52_Photographers_photographertakingphoto_52_815.jpg +./52--Photographers/52_Photographers_photographertakingphoto_52_90.jpg +./52--Photographers/52_Photographers_photographertakingphoto_52_456.jpg +./52--Photographers/52_Photographers_photographertakingphoto_52_130.jpg +./52--Photographers/52_Photographers_photographertakingphoto_52_358.jpg +./52--Photographers/52_Photographers_photographertakingphoto_52_416.jpg +./52--Photographers/52_Photographers_photographertakingphoto_52_315.jpg +./52--Photographers/52_Photographers_photographertakingphoto_52_113.jpg +./52--Photographers/52_Photographers_photographertakingphoto_52_310.jpg +./52--Photographers/52_Photographers_photographertakingphoto_52_96.jpg +./52--Photographers/52_Photographers_photographertakingphoto_52_219.jpg +./52--Photographers/52_Photographers_taketouristphotos_52_141.jpg +./52--Photographers/52_Photographers_photographertakingphoto_52_228.jpg +./52--Photographers/52_Photographers_photographertakingphoto_52_316.jpg +./52--Photographers/52_Photographers_photographertakingphoto_52_755.jpg +./52--Photographers/52_Photographers_photographertakingphoto_52_721.jpg +./52--Photographers/52_Photographers_photographertakingphoto_52_76.jpg +./52--Photographers/52_Photographers_taketouristphotos_52_159.jpg +./52--Photographers/52_Photographers_taketouristphotos_52_80.jpg +./52--Photographers/52_Photographers_photographertakingphoto_52_303.jpg +./52--Photographers/52_Photographers_photographertakingphoto_52_759.jpg +./52--Photographers/52_Photographers_photographertakingphoto_52_263.jpg +./52--Photographers/52_Photographers_taketouristphotos_52_328.jpg +./52--Photographers/52_Photographers_taketouristphotos_52_15.jpg +./52--Photographers/52_Photographers_photographertakingphoto_52_701.jpg +./52--Photographers/52_Photographers_photographertakingphoto_52_635.jpg +./52--Photographers/52_Photographers_photographertakingphoto_52_506.jpg +./52--Photographers/52_Photographers_photographertakingphoto_52_666.jpg +./52--Photographers/52_Photographers_photographertakingphoto_52_84.jpg +./52--Photographers/52_Photographers_taketouristphotos_52_3.jpg +./52--Photographers/52_Photographers_photographertakingphoto_52_359.jpg +./52--Photographers/52_Photographers_photographertakingphoto_52_125.jpg +./52--Photographers/52_Photographers_taketouristphotos_52_331.jpg +./52--Photographers/52_Photographers_photographertakingphoto_52_776.jpg +./52--Photographers/52_Photographers_photographertakingphoto_52_809.jpg +./52--Photographers/52_Photographers_photographertakingphoto_52_479.jpg +./52--Photographers/52_Photographers_taketouristphotos_52_288.jpg +./52--Photographers/52_Photographers_photographertakingphoto_52_30.jpg +./52--Photographers/52_Photographers_photographertakingphoto_52_743.jpg +./17--Ceremony/17_Ceremony_Ceremony_17_171.jpg +./17--Ceremony/17_Ceremony_Ceremony_17_490.jpg +./17--Ceremony/17_Ceremony_Ceremony_17_113.jpg +./17--Ceremony/17_Ceremony_Ceremony_17_452.jpg +./17--Ceremony/17_Ceremony_Ceremony_17_668.jpg +./17--Ceremony/17_Ceremony_Ceremony_17_211.jpg +./17--Ceremony/17_Ceremony_Ceremony_17_470.jpg +./17--Ceremony/17_Ceremony_Ceremony_17_368.jpg +./17--Ceremony/17_Ceremony_Ceremony_17_300.jpg +./17--Ceremony/17_Ceremony_Ceremony_17_57.jpg +./17--Ceremony/17_Ceremony_Ceremony_17_765.jpg +./17--Ceremony/17_Ceremony_Ceremony_17_588.jpg +./17--Ceremony/17_Ceremony_Ceremony_17_406.jpg 
+./17--Ceremony/17_Ceremony_Ceremony_17_944.jpg +./17--Ceremony/17_Ceremony_Ceremony_17_1037.jpg +./17--Ceremony/17_Ceremony_Ceremony_17_852.jpg +./17--Ceremony/17_Ceremony_Ceremony_17_415.jpg +./17--Ceremony/17_Ceremony_Ceremony_17_444.jpg +./17--Ceremony/17_Ceremony_Ceremony_17_782.jpg +./17--Ceremony/17_Ceremony_Ceremony_17_218.jpg +./17--Ceremony/17_Ceremony_Ceremony_17_592.jpg +./17--Ceremony/17_Ceremony_Ceremony_17_344.jpg +./17--Ceremony/17_Ceremony_Ceremony_17_1048.jpg +./17--Ceremony/17_Ceremony_Ceremony_17_469.jpg +./17--Ceremony/17_Ceremony_Ceremony_17_972.jpg +./17--Ceremony/17_Ceremony_Ceremony_17_803.jpg +./17--Ceremony/17_Ceremony_Ceremony_17_271.jpg +./17--Ceremony/17_Ceremony_Ceremony_17_1007.jpg +./17--Ceremony/17_Ceremony_Ceremony_17_1009.jpg +./17--Ceremony/17_Ceremony_Ceremony_17_46.jpg +./17--Ceremony/17_Ceremony_Ceremony_17_220.jpg +./17--Ceremony/17_Ceremony_Ceremony_17_1005.jpg +./17--Ceremony/17_Ceremony_Ceremony_17_735.jpg +./17--Ceremony/17_Ceremony_Ceremony_17_818.jpg +./17--Ceremony/17_Ceremony_Ceremony_17_418.jpg +./17--Ceremony/17_Ceremony_Ceremony_17_227.jpg +./17--Ceremony/17_Ceremony_Ceremony_17_253.jpg +./17--Ceremony/17_Ceremony_Ceremony_17_234.jpg +./13--Interview/13_Interview_Interview_On_Location_13_74.jpg +./13--Interview/13_Interview_Interview_On_Location_13_208.jpg +./13--Interview/13_Interview_Interview_On_Location_13_186.jpg +./13--Interview/13_Interview_Interview_On_Location_13_282.jpg +./13--Interview/13_Interview_Interview_2_People_Visible_13_409.jpg +./13--Interview/13_Interview_Interview_On_Location_13_56.jpg +./13--Interview/13_Interview_Interview_2_People_Visible_13_217.jpg +./13--Interview/13_Interview_Interview_2_People_Visible_13_223.jpg +./13--Interview/13_Interview_Interview_Sequences_13_103.jpg +./13--Interview/13_Interview_Interview_2_People_Visible_13_245.jpg +./13--Interview/13_Interview_Interview_On_Location_13_225.jpg +./13--Interview/13_Interview_Interview_Sequences_13_121.jpg +./13--Interview/13_Interview_Interview_On_Location_13_537.jpg +./13--Interview/13_Interview_Interview_Sequences_13_15.jpg +./13--Interview/13_Interview_Interview_On_Location_13_542.jpg +./13--Interview/13_Interview_Interview_Sequences_13_3.jpg +./13--Interview/13_Interview_Interview_On_Location_13_912.jpg +./13--Interview/13_Interview_Interview_On_Location_13_238.jpg +./13--Interview/13_Interview_Interview_2_People_Visible_13_327.jpg +./13--Interview/13_Interview_Interview_Sequences_13_347.jpg +./13--Interview/13_Interview_Interview_Sequences_13_7.jpg +./13--Interview/13_Interview_Interview_On_Location_13_554.jpg +./13--Interview/13_Interview_Interview_Sequences_13_636.jpg +./13--Interview/13_Interview_Interview_Sequences_13_793.jpg +./13--Interview/13_Interview_Interview_On_Location_13_3.jpg +./13--Interview/13_Interview_Interview_Sequences_13_1032.jpg +./13--Interview/13_Interview_Interview_On_Location_13_933.jpg +./13--Interview/13_Interview_Interview_On_Location_13_166.jpg +./13--Interview/13_Interview_Interview_On_Location_13_513.jpg +./13--Interview/13_Interview_Interview_On_Location_13_736.jpg +./13--Interview/13_Interview_Interview_Sequences_13_270.jpg +./13--Interview/13_Interview_Interview_Sequences_13_31.jpg +./13--Interview/13_Interview_Interview_On_Location_13_791.jpg +./13--Interview/13_Interview_Interview_2_People_Visible_13_260.jpg +./13--Interview/13_Interview_Interview_Sequences_13_495.jpg +./13--Interview/13_Interview_Interview_2_People_Visible_13_442.jpg +./13--Interview/13_Interview_Interview_2_People_Visible_13_285.jpg 
+./13--Interview/13_Interview_Interview_Sequences_13_134.jpg +./13--Interview/13_Interview_Interview_Sequences_13_557.jpg +./13--Interview/13_Interview_Interview_On_Location_13_505.jpg +./13--Interview/13_Interview_Interview_On_Location_13_512.jpg +./13--Interview/13_Interview_Interview_Sequences_13_691.jpg +./13--Interview/13_Interview_Interview_Sequences_13_937.jpg +./13--Interview/13_Interview_Interview_On_Location_13_636.jpg +./13--Interview/13_Interview_Interview_Sequences_13_867.jpg +./13--Interview/13_Interview_Interview_On_Location_13_394.jpg +./13--Interview/13_Interview_Interview_Sequences_13_33.jpg +./13--Interview/13_Interview_Interview_On_Location_13_610.jpg +./13--Interview/13_Interview_Interview_Sequences_13_764.jpg +./13--Interview/13_Interview_Interview_2_People_Visible_13_252.jpg +./13--Interview/13_Interview_Interview_On_Location_13_849.jpg +./13--Interview/13_Interview_Interview_On_Location_13_847.jpg +./13--Interview/13_Interview_Interview_Sequences_13_11.jpg +./13--Interview/13_Interview_Interview_On_Location_13_433.jpg +./13--Interview/13_Interview_Interview_Sequences_13_135.jpg +./13--Interview/13_Interview_Interview_Sequences_13_2.jpg +./13--Interview/13_Interview_Interview_Sequences_13_973.jpg +./13--Interview/13_Interview_Interview_2_People_Visible_13_325.jpg +./13--Interview/13_Interview_Interview_Sequences_13_541.jpg +./13--Interview/13_Interview_Interview_On_Location_13_179.jpg +./13--Interview/13_Interview_Interview_On_Location_13_426.jpg +./13--Interview/13_Interview_Interview_Sequences_13_718.jpg +./13--Interview/13_Interview_Interview_2_People_Visible_13_461.jpg +./13--Interview/13_Interview_Interview_Sequences_13_586.jpg +./13--Interview/13_Interview_Interview_Sequences_13_717.jpg +./13--Interview/13_Interview_Interview_On_Location_13_187.jpg +./13--Interview/13_Interview_Interview_Sequences_13_89.jpg +./13--Interview/13_Interview_Interview_Sequences_13_268.jpg +./13--Interview/13_Interview_Interview_Sequences_13_868.jpg +./13--Interview/13_Interview_Interview_Sequences_13_929.jpg +./13--Interview/13_Interview_Interview_On_Location_13_247.jpg +./13--Interview/13_Interview_Interview_Sequences_13_187.jpg +./13--Interview/13_Interview_Interview_On_Location_13_284.jpg +./13--Interview/13_Interview_Interview_2_People_Visible_13_406.jpg +./13--Interview/13_Interview_Interview_Sequences_13_609.jpg +./13--Interview/13_Interview_Interview_On_Location_13_33.jpg +./13--Interview/13_Interview_Interview_2_People_Visible_13_204.jpg +./13--Interview/13_Interview_Interview_Sequences_13_152.jpg +./13--Interview/13_Interview_Interview_2_People_Visible_13_241.jpg +./13--Interview/13_Interview_Interview_On_Location_13_313.jpg +./13--Interview/13_Interview_Interview_On_Location_13_246.jpg +./13--Interview/13_Interview_Interview_On_Location_13_401.jpg +./13--Interview/13_Interview_Interview_2_People_Visible_13_420.jpg +./13--Interview/13_Interview_Interview_2_People_Visible_13_254.jpg +./13--Interview/13_Interview_Interview_On_Location_13_287.jpg +./13--Interview/13_Interview_Interview_On_Location_13_138.jpg +./13--Interview/13_Interview_Interview_Sequences_13_807.jpg +./13--Interview/13_Interview_Interview_Sequences_13_456.jpg +./13--Interview/13_Interview_Interview_On_Location_13_728.jpg +./13--Interview/13_Interview_Interview_On_Location_13_521.jpg +./13--Interview/13_Interview_Interview_Sequences_13_108.jpg +./13--Interview/13_Interview_Interview_On_Location_13_334.jpg +./13--Interview/13_Interview_Interview_On_Location_13_478.jpg 
+./13--Interview/13_Interview_Interview_Sequences_13_759.jpg +./13--Interview/13_Interview_Interview_2_People_Visible_13_381.jpg +./13--Interview/13_Interview_Interview_On_Location_13_190.jpg +./13--Interview/13_Interview_Interview_On_Location_13_129.jpg +./13--Interview/13_Interview_Interview_On_Location_13_539.jpg +./13--Interview/13_Interview_Interview_Sequences_13_936.jpg +./13--Interview/13_Interview_Interview_Sequences_13_779.jpg +./13--Interview/13_Interview_Interview_On_Location_13_852.jpg +./13--Interview/13_Interview_Interview_Sequences_13_373.jpg +./13--Interview/13_Interview_Interview_2_People_Visible_13_237.jpg +./13--Interview/13_Interview_Interview_Sequences_13_513.jpg +./13--Interview/13_Interview_Interview_2_People_Visible_13_743.jpg +./13--Interview/13_Interview_Interview_2_People_Visible_13_374.jpg +./13--Interview/13_Interview_Interview_Sequences_13_37.jpg +./13--Interview/13_Interview_Interview_2_People_Visible_13_239.jpg +./13--Interview/13_Interview_Interview_On_Location_13_559.jpg +./13--Interview/13_Interview_Interview_2_People_Visible_13_189.jpg +./13--Interview/13_Interview_Interview_2_People_Visible_13_1001.jpg +./13--Interview/13_Interview_Interview_Sequences_13_884.jpg +./13--Interview/13_Interview_Interview_2_People_Visible_13_475.jpg +./13--Interview/13_Interview_Interview_Sequences_13_209.jpg +./13--Interview/13_Interview_Interview_On_Location_13_301.jpg +./13--Interview/13_Interview_Interview_Sequences_13_813.jpg +./13--Interview/13_Interview_Interview_Sequences_13_189.jpg +./13--Interview/13_Interview_Interview_On_Location_13_605.jpg +./13--Interview/13_Interview_Interview_On_Location_13_491.jpg +./13--Interview/13_Interview_Interview_Sequences_13_477.jpg +./13--Interview/13_Interview_Interview_Sequences_13_864.jpg +./13--Interview/13_Interview_Interview_On_Location_13_861.jpg +./13--Interview/13_Interview_Interview_Sequences_13_55.jpg +./13--Interview/13_Interview_Interview_On_Location_13_921.jpg +./13--Interview/13_Interview_Interview_Sequences_13_237.jpg +./13--Interview/13_Interview_Interview_2_People_Visible_13_36.jpg +./13--Interview/13_Interview_Interview_Sequences_13_40.jpg +./13--Interview/13_Interview_Interview_2_People_Visible_13_425.jpg +./13--Interview/13_Interview_Interview_2_People_Visible_13_107.jpg +./13--Interview/13_Interview_Interview_2_People_Visible_13_668.jpg +./13--Interview/13_Interview_Interview_On_Location_13_569.jpg +./13--Interview/13_Interview_Interview_On_Location_13_940.jpg +./13--Interview/13_Interview_Interview_Sequences_13_5.jpg +./13--Interview/13_Interview_Interview_On_Location_13_865.jpg +./13--Interview/13_Interview_Interview_Sequences_13_92.jpg +./13--Interview/13_Interview_Interview_Sequences_13_859.jpg +./13--Interview/13_Interview_Interview_Sequences_13_111.jpg +./13--Interview/13_Interview_Interview_2_People_Visible_13_155.jpg +./13--Interview/13_Interview_Interview_On_Location_13_773.jpg +./13--Interview/13_Interview_Interview_On_Location_13_510.jpg +./13--Interview/13_Interview_Interview_Sequences_13_35.jpg +./13--Interview/13_Interview_Interview_Sequences_13_778.jpg +./46--Jockey/46_Jockey_Jockey_46_779.jpg +./46--Jockey/46_Jockey_Jockey_46_44.jpg +./46--Jockey/46_Jockey_Jockey_46_444.jpg +./46--Jockey/46_Jockey_Jockey_46_909.jpg +./46--Jockey/46_Jockey_Jockey_46_537.jpg +./46--Jockey/46_Jockey_Jockey_46_51.jpg +./46--Jockey/46_Jockey_Jockey_46_352.jpg +./46--Jockey/46_Jockey_Jockey_46_393.jpg +./46--Jockey/46_Jockey_Jockey_46_409.jpg +./46--Jockey/46_Jockey_Jockey_46_54.jpg 
+./46--Jockey/46_Jockey_Jockey_46_497.jpg +./46--Jockey/46_Jockey_Jockey_46_202.jpg +./46--Jockey/46_Jockey_Jockey_46_823.jpg +./46--Jockey/46_Jockey_Jockey_46_188.jpg +./46--Jockey/46_Jockey_Jockey_46_923.jpg +./46--Jockey/46_Jockey_Jockey_46_130.jpg +./46--Jockey/46_Jockey_Jockey_46_508.jpg +./46--Jockey/46_Jockey_Jockey_46_106.jpg +./46--Jockey/46_Jockey_Jockey_46_652.jpg +./46--Jockey/46_Jockey_Jockey_46_728.jpg +./46--Jockey/46_Jockey_Jockey_46_758.jpg +./46--Jockey/46_Jockey_Jockey_46_933.jpg +./46--Jockey/46_Jockey_Jockey_46_718.jpg +./46--Jockey/46_Jockey_Jockey_46_569.jpg +./46--Jockey/46_Jockey_Jockey_46_166.jpg +./46--Jockey/46_Jockey_Jockey_46_259.jpg +./46--Jockey/46_Jockey_Jockey_46_76.jpg +./46--Jockey/46_Jockey_Jockey_46_254.jpg +./46--Jockey/46_Jockey_Jockey_46_308.jpg +./46--Jockey/46_Jockey_Jockey_46_172.jpg +./46--Jockey/46_Jockey_Jockey_46_79.jpg +./59--people--driving--car/59_peopledrivingcar_peopledrivingcar_59_704.jpg +./59--people--driving--car/59_peopledrivingcar_peopledrivingcar_59_244.jpg +./59--people--driving--car/59_peopledrivingcar_peopledrivingcar_59_906.jpg +./59--people--driving--car/59_peopledrivingcar_peopledrivingcar_59_1014.jpg +./59--people--driving--car/59_peopledrivingcar_peopledrivingcar_59_659.jpg +./59--people--driving--car/59_peopledrivingcar_peopledrivingcar_59_229.jpg +./59--people--driving--car/59_peopledrivingcar_peopledrivingcar_59_64.jpg +./59--people--driving--car/59_peopledrivingcar_peopledrivingcar_59_690.jpg +./59--people--driving--car/59_peopledrivingcar_peopledrivingcar_59_1038.jpg +./59--people--driving--car/59_peopledrivingcar_peopledrivingcar_59_404.jpg +./59--people--driving--car/59_peopledrivingcar_peopledrivingcar_59_27.jpg +./59--people--driving--car/59_peopledrivingcar_peopledrivingcar_59_725.jpg +./59--people--driving--car/59_peopledrivingcar_peopledrivingcar_59_357.jpg +./59--people--driving--car/59_peopledrivingcar_peopledrivingcar_59_200.jpg +./59--people--driving--car/59_peopledrivingcar_peopledrivingcar_59_592.jpg +./59--people--driving--car/59_peopledrivingcar_peopledrivingcar_59_95.jpg +./59--people--driving--car/59_peopledrivingcar_peopledrivingcar_59_201.jpg +./59--people--driving--car/59_peopledrivingcar_peopledrivingcar_59_928.jpg +./59--people--driving--car/59_peopledrivingcar_peopledrivingcar_59_789.jpg +./59--people--driving--car/59_peopledrivingcar_peopledrivingcar_59_1020.jpg +./59--people--driving--car/59_peopledrivingcar_peopledrivingcar_59_172.jpg +./59--people--driving--car/59_peopledrivingcar_peopledrivingcar_59_85.jpg +./59--people--driving--car/59_peopledrivingcar_peopledrivingcar_59_978.jpg +./59--people--driving--car/59_peopledrivingcar_peopledrivingcar_59_34.jpg +./59--people--driving--car/59_peopledrivingcar_peopledrivingcar_59_1019.jpg +./59--people--driving--car/59_peopledrivingcar_peopledrivingcar_59_117.jpg +./59--people--driving--car/59_peopledrivingcar_peopledrivingcar_59_532.jpg +./59--people--driving--car/59_peopledrivingcar_peopledrivingcar_59_401.jpg +./59--people--driving--car/59_peopledrivingcar_peopledrivingcar_59_283.jpg +./59--people--driving--car/59_peopledrivingcar_peopledrivingcar_59_856.jpg +./59--people--driving--car/59_peopledrivingcar_peopledrivingcar_59_763.jpg +./59--people--driving--car/59_peopledrivingcar_peopledrivingcar_59_202.jpg +./49--Greeting/49_Greeting_peoplegreeting_49_943.jpg +./49--Greeting/49_Greeting_peoplegreeting_49_948.jpg +./49--Greeting/49_Greeting_peoplegreeting_49_73.jpg +./49--Greeting/49_Greeting_peoplegreeting_49_387.jpg 
+./49--Greeting/49_Greeting_peoplegreeting_49_59.jpg +./49--Greeting/49_Greeting_peoplegreeting_49_486.jpg +./49--Greeting/49_Greeting_peoplegreeting_49_923.jpg +./49--Greeting/49_Greeting_peoplegreeting_49_787.jpg +./49--Greeting/49_Greeting_peoplegreeting_49_48.jpg +./49--Greeting/49_Greeting_peoplegreeting_49_344.jpg +./49--Greeting/49_Greeting_peoplegreeting_49_53.jpg +./49--Greeting/49_Greeting_peoplegreeting_49_266.jpg +./49--Greeting/49_Greeting_peoplegreeting_49_589.jpg +./49--Greeting/49_Greeting_peoplegreeting_49_124.jpg +./49--Greeting/49_Greeting_peoplegreeting_49_890.jpg +./49--Greeting/49_Greeting_peoplegreeting_49_56.jpg +./49--Greeting/49_Greeting_peoplegreeting_49_810.jpg +./49--Greeting/49_Greeting_peoplegreeting_49_759.jpg +./49--Greeting/49_Greeting_peoplegreeting_49_98.jpg +./49--Greeting/49_Greeting_peoplegreeting_49_207.jpg +./49--Greeting/49_Greeting_peoplegreeting_49_302.jpg +./49--Greeting/49_Greeting_peoplegreeting_49_140.jpg +./49--Greeting/49_Greeting_peoplegreeting_49_203.jpg +./49--Greeting/49_Greeting_peoplegreeting_49_903.jpg +./49--Greeting/49_Greeting_peoplegreeting_49_50.jpg +./49--Greeting/49_Greeting_peoplegreeting_49_456.jpg +./49--Greeting/49_Greeting_peoplegreeting_49_192.jpg +./49--Greeting/49_Greeting_peoplegreeting_49_307.jpg +./49--Greeting/49_Greeting_peoplegreeting_49_353.jpg +./49--Greeting/49_Greeting_peoplegreeting_49_153.jpg +./49--Greeting/49_Greeting_peoplegreeting_49_656.jpg +./49--Greeting/49_Greeting_peoplegreeting_49_10.jpg +./49--Greeting/49_Greeting_peoplegreeting_49_783.jpg +./49--Greeting/49_Greeting_peoplegreeting_49_896.jpg +./49--Greeting/49_Greeting_peoplegreeting_49_162.jpg +./49--Greeting/49_Greeting_peoplegreeting_49_564.jpg +./49--Greeting/49_Greeting_peoplegreeting_49_337.jpg +./49--Greeting/49_Greeting_peoplegreeting_49_991.jpg +./49--Greeting/49_Greeting_peoplegreeting_49_218.jpg +./49--Greeting/49_Greeting_peoplegreeting_49_894.jpg +./49--Greeting/49_Greeting_peoplegreeting_49_920.jpg diff --git a/detect_rec_plate.py b/detect_rec_plate.py new file mode 100644 index 0000000..ab61819 --- /dev/null +++ b/detect_rec_plate.py @@ -0,0 +1,246 @@ +import torch +import cv2 +import numpy as np +import argparse +import copy +import time +import os +from ultralytics.nn.tasks import attempt_load_weights +from plate_recognition.plate_rec import get_plate_result,init_model,cv_imread +from plate_recognition.double_plate_split_merge import get_split_merge +from fonts.cv_puttext import cv2ImgAddText + +def allFilePath(rootPath,allFIleList):# 读取文件夹内的文件,放到list + fileList = os.listdir(rootPath) + for temp in fileList: + if os.path.isfile(os.path.join(rootPath,temp)): + allFIleList.append(os.path.join(rootPath,temp)) + else: + allFilePath(os.path.join(rootPath,temp),allFIleList) + +def four_point_transform(image, pts): #透视变换得到车牌小图 + # rect = order_points(pts) + rect = pts.astype('float32') + (tl, tr, br, bl) = rect + widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2)) + widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2)) + maxWidth = max(int(widthA), int(widthB)) + heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2)) + heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2)) + maxHeight = max(int(heightA), int(heightB)) + dst = np.array([ + [0, 0], + [maxWidth - 1, 0], + [maxWidth - 1, maxHeight - 1], + [0, maxHeight - 1]], dtype = "float32") + M = cv2.getPerspectiveTransform(rect, dst) + warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight)) + return warped + + +def 
letter_box(img,size=(640,640)): #yolo 前处理 letter_box操作 + h,w,_=img.shape + r=min(size[0]/h,size[1]/w) + new_h,new_w=int(h*r),int(w*r) + new_img = cv2.resize(img,(new_w,new_h)) + left= int((size[1]-new_w)/2) + top=int((size[0]-new_h)/2) + right = size[1]-left-new_w + bottom=size[0]-top-new_h + img =cv2.copyMakeBorder(new_img,top,bottom,left,right,cv2.BORDER_CONSTANT,value=(114,114,114)) + return img,r,left,top + +def load_model(weights, device): #加载yolov8 模型 + model = attempt_load_weights(weights,device=device) # load FP32 model + return model + +def xywh2xyxy(det): #xywh转化为xyxy + y = det.clone() + y[:,0]=det[:,0]-det[0:,2]/2 + y[:,1]=det[:,1]-det[0:,3]/2 + y[:,2]=det[:,0]+det[0:,2]/2 + y[:,3]=det[:,1]+det[0:,3]/2 + return y + +def my_nums(dets,iou_thresh): #nms操作 + y = dets.clone() + y_box_score = y[:,:5] + index = torch.argsort(y_box_score[:,-1],descending=True) + keep = [] + while index.size()[0]>0: + i = index[0].item() + keep.append(i) + x1=torch.maximum(y_box_score[i,0],y_box_score[index[1:],0]) + y1=torch.maximum(y_box_score[i,1],y_box_score[index[1:],1]) + x2=torch.minimum(y_box_score[i,2],y_box_score[index[1:],2]) + y2=torch.minimum(y_box_score[i,3],y_box_score[index[1:],3]) + zero_=torch.tensor(0).to(device) + w=torch.maximum(zero_,x2-x1) + h=torch.maximum(zero_,y2-y1) + inter_area = w*h + nuion_area1 =(y_box_score[i,2]-y_box_score[i,0])*(y_box_score[i,3]-y_box_score[i,1]) #计算交集 + union_area2 =(y_box_score[index[1:],2]-y_box_score[index[1:],0])*(y_box_score[index[1:],3]-y_box_score[index[1:],1])#计算并集 + + iou = inter_area/(nuion_area1+union_area2-inter_area)#计算iou + + idx = torch.where(iou<=iou_thresh)[0] #保留iou小于iou_thresh的 + index=index[idx+1] + return keep + + +def restore_box(dets,r,left,top): #坐标还原到原图上 + + dets[:,[0,2,5,7,9,11]]=dets[:,[0,2,5,7,9,11]]-left + dets[:,[1,3,6,8,10,12]]= dets[:,[1,3,6,8,10,12]]-top + dets[:,:4]/=r + dets[:,5:13]/=r + + return dets + # pass + +def post_processing(prediction,conf,iou_thresh,r,left,top): #后处理 + + prediction = prediction.permute(0,2,1).squeeze(0) + xc = prediction[:, 4:6].amax(1) > conf #过滤掉小于conf的框 + x = prediction[xc] + if not len(x): + return [] + boxes = x[:,:4] #框 + boxes = xywh2xyxy(boxes) #中心点 宽高 变为 左上 右下两个点 + score,index = torch.max(x[:,4:6],dim=-1,keepdim=True) #找出得分和所属类别 + x = torch.cat((boxes,score,x[:,6:14],index),dim=1) #重新组合 + + score = x[:,4] + keep =my_nums(x,iou_thresh) + x=x[keep] + x=restore_box(x,r,left,top) + return x + +def pre_processing(img,opt,device): #前处理 + img, r,left,top= letter_box(img,(opt.img_size,opt.img_size)) + # print(img.shape) + img=img[:,:,::-1].transpose((2,0,1)).copy() #bgr2rgb hwc2chw + img = torch.from_numpy(img).to(device) + img = img.float() + img = img/255.0 + img =img.unsqueeze(0) + return img ,r,left,top + +def det_rec_plate(img,img_ori,detect_model,plate_rec_model): + result_list=[] + img,r,left,top = pre_processing(img,opt,device) #前处理 + predict = detect_model(img)[0] + outputs=post_processing(predict,0.3,0.5,r,left,top) #后处理 + for output in outputs: + result_dict={} + output = output.squeeze().cpu().numpy().tolist() + rect=output[:4] + rect = [int(x) for x in rect] + label = output[-1] + land_marks=np.array(output[5:13],dtype='int64').reshape(4,2) + roi_img = four_point_transform(img_ori,land_marks) #透视变换得到车牌小图 + if int(label): #判断是否是双层车牌,是双牌的话进行分割后然后拼接 + roi_img=get_split_merge(roi_img) + plate_number,rec_prob,plate_color,color_conf=get_plate_result(roi_img,device,plate_rec_model,is_color=True) + + result_dict['plate_no']=plate_number #车牌号 + result_dict['plate_color']=plate_color 
#车牌颜色 + result_dict['rect']=rect #车牌roi区域 + result_dict['detect_conf']=output[4] #检测区域得分 + result_dict['landmarks']=land_marks.tolist() #车牌角点坐标 + # result_dict['rec_conf']=rec_prob #每个字符的概率 + result_dict['roi_height']=roi_img.shape[0] #车牌高度 + # result_dict['plate_color']=plate_color + # if is_color: + result_dict['color_conf']=color_conf #颜色得分 + result_dict['plate_type']=int(label) #单双层 0单层 1双层 + result_list.append(result_dict) + return result_list + + +def draw_result(orgimg,dict_list,is_color=False): # 车牌结果画出来 + result_str ="" + for result in dict_list: + rect_area = result['rect'] + + x,y,w,h = rect_area[0],rect_area[1],rect_area[2]-rect_area[0],rect_area[3]-rect_area[1] + padding_w = 0.05*w + padding_h = 0.11*h + rect_area[0]=max(0,int(x-padding_w)) + rect_area[1]=max(0,int(y-padding_h)) + rect_area[2]=min(orgimg.shape[1],int(rect_area[2]+padding_w)) + rect_area[3]=min(orgimg.shape[0],int(rect_area[3]+padding_h)) + + height_area = result['roi_height'] + landmarks=result['landmarks'] + result_p = result['plate_no'] + if result['plate_type']==0:#单层 + result_p+=" "+result['plate_color'] + else: #双层 + result_p+=" "+result['plate_color']+"双层" + result_str+=result_p+" " + for i in range(4): #关键点 + cv2.circle(orgimg, (int(landmarks[i][0]), int(landmarks[i][1])), 5, clors[i], -1) + cv2.rectangle(orgimg,(rect_area[0],rect_area[1]),(rect_area[2],rect_area[3]),(0,0,255),2) #画框 + + labelSize = cv2.getTextSize(result_p,cv2.FONT_HERSHEY_SIMPLEX,0.5,1) #获得字体的大小 + if rect_area[0]+labelSize[0][0]>orgimg.shape[1]: #防止显示的文字越界 + rect_area[0]=int(orgimg.shape[1]-labelSize[0][0]) + orgimg=cv2.rectangle(orgimg,(rect_area[0],int(rect_area[1]-round(1.6*labelSize[0][1]))),(int(rect_area[0]+round(1.2*labelSize[0][0])),rect_area[1]+labelSize[1]),(255,255,255),cv2.FILLED)#画文字框,背景白色 + + if len(result)>=6: + orgimg=cv2ImgAddText(orgimg,result_p,rect_area[0],int(rect_area[1]-round(1.6*labelSize[0][1])),(0,0,0),21) + # orgimg=cv2ImgAddText(orgimg,result_p,rect_area[0]-height_area,rect_area[1]-height_area-10,(0,255,0),height_area) + + print(result_str) + return orgimg + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument('--detect_model', nargs='+', type=str, default=r'weights/yolov8-lite-t-plate.pt', help='model.pt path(s)') #yolov8检测模型 + parser.add_argument('--rec_model', type=str, default=r'weights/plate_rec_color.pth', help='model.pt path(s)')#车牌字符识别模型 + parser.add_argument('--image_path', type=str, default=r'imgs', help='source') #待识别图片路径 + parser.add_argument('--img_size', type=int, default=320, help='inference size (pixels)') #yolov8 网络模型输入大小 + parser.add_argument('--output', type=str, default='result', help='source') #结果保存的文件夹 + device =torch.device("cuda" if torch.cuda.is_available() else "cpu") + + clors = [(255,0,0),(0,255,0),(0,0,255),(255,255,0),(0,255,255)] + opt = parser.parse_args() + save_path = opt.output + + if not os.path.exists(save_path): + os.mkdir(save_path) + + detect_model = load_model(opt.detect_model, device) #初始化yolov8识别模型 + plate_rec_model=init_model(device,opt.rec_model,is_color=True) #初始化识别模型 + #算参数量 + total = sum(p.numel() for p in detect_model.parameters()) + total_1 = sum(p.numel() for p in plate_rec_model.parameters()) + print("yolov8 detect params: %.2fM,rec params: %.2fM" % (total/1e6,total_1/1e6)) + + detect_model.eval() + # print(detect_model) + file_list = [] + allFilePath(opt.image_path,file_list) + count=0 + time_all = 0 + time_begin=time.time() + for pic_ in file_list: + print(count,pic_,end=" ") + time_b = time.time() #开始时间 + img = 
cv2.imread(pic_) + img_ori = copy.deepcopy(img) + result_list=det_rec_plate(img,img_ori,detect_model,plate_rec_model) + time_e=time.time() + ori_img=draw_result(img,result_list) # draw the recognition results on the image + img_name = os.path.basename(pic_) + save_img_path = os.path.join(save_path,img_name) # path where the annotated image is saved + time_gap = time_e-time_b # time spent on this single image + if count: + time_all+=time_gap + count+=1 + cv2.imwrite(save_img_path,ori_img) #op + # print(result_list) + print(f"total time is {time.time()-time_begin} s, average per-image time is {time_all/(len(file_list)-1)}") + \ No newline at end of file diff --git a/docs/README.md b/docs/README.md new file mode 100644 index 0000000..3b3e306 --- /dev/null +++ b/docs/README.md @@ -0,0 +1,85 @@ +# Ultralytics Docs + +Ultralytics Docs are deployed to [https://docs.ultralytics.com](https://docs.ultralytics.com). + +### Install Ultralytics package + +To install the ultralytics package in developer mode, you will need to have Git and Python 3 installed on your system. +Then, follow these steps: + +1. Clone the ultralytics repository to your local machine using Git: + +```bash +git clone https://github.com/ultralytics/ultralytics.git +``` + +2. Navigate to the root directory of the repository: + +```bash +cd ultralytics +``` + +3. Install the package in developer mode using pip: + +```bash +pip install -e '.[dev]' +``` + +This will install the ultralytics package and its dependencies in developer mode, allowing you to make changes to the +package code and have them reflected immediately in your Python environment. + +Note that you may need to use the pip3 command instead of pip if you have multiple versions of Python installed on your +system. + +### Building and Serving Locally + +The `mkdocs serve` command is used to build and serve a local version of the MkDocs documentation site. It is typically +used during the development and testing phase of a documentation project. + +```bash +mkdocs serve +``` + +Here is a breakdown of what this command does: + +- `mkdocs`: This is the command-line interface (CLI) for the MkDocs static site generator. It is used to build and serve + MkDocs sites. +- `serve`: This is a subcommand of the `mkdocs` CLI that tells it to build and serve the documentation site locally. +- `-a`: This flag specifies the hostname and port number to bind the server to. The default value is `localhost:8000`. +- `-t`: This flag specifies the theme to use for the documentation site. The default value is `mkdocs`. +- `-s`: This flag enables strict mode, which causes MkDocs to abort the build if any warnings are encountered. + When you run the `mkdocs serve` command, it will build the documentation site using the files in the `docs/` directory + and serve it at the specified hostname and port number. You can then view the site by going to the URL in your web + browser. + +While the site is being served, you can make changes to the documentation files and see them reflected in the live site +immediately. This is useful for testing and debugging your documentation before deploying it to a live server. + +To stop the serve command and terminate the local server, you can use the `CTRL+C` keyboard shortcut. + +### Deploying Your Documentation Site + +To deploy your MkDocs documentation site, you will need to choose a hosting provider and a deployment method. Some +popular options include GitHub Pages, GitLab Pages, and Amazon S3.
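+For a plain static host, deployment is simply a build followed by an upload. The sketch below is illustrative only: it assumes MkDocs' default `site/` output directory, an installed AWS CLI, and a placeholder S3 bucket name.
+
+```bash
+# Build the static site into ./site (MkDocs' default output directory)
+mkdocs build
+
+# Upload the generated files to any static file host; the bucket name is a placeholder
+aws s3 sync site/ s3://your-docs-bucket --delete
+```
+
+GitHub Pages users can skip the manual upload and use the `mkdocs gh-deploy` command shown below.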
+ +Before you can deploy your site, you will need to configure your `mkdocs.yml` file to specify the remote host and any +other necessary deployment settings. + +Once you have configured your `mkdocs.yml` file, you can use the `mkdocs deploy` command to build and deploy your site. +This command will build the documentation site using the files in the `docs/` directory and the specified configuration +file and theme, and then deploy the site to the specified remote host. + +For example, to deploy your site to GitHub Pages using the gh-deploy plugin, you can use the following command: + +```bash +mkdocs gh-deploy +``` + +If you are using GitHub Pages, you can set a custom domain for your documentation site by going to the "Settings" page +for your repository and updating the "Custom domain" field in the "GitHub Pages" section. + +![196814117-fc16e711-d2be-4722-9536-b7c6d78fd167](https://user-images.githubusercontent.com/26833433/210150206-9e86dcd7-10af-43e4-9eb2-9518b3799eac.png) + +For more information on deploying your MkDocs documentation site, see +the [MkDocs documentation](https://www.mkdocs.org/user-guide/deploying-your-docs/). diff --git a/docs/SECURITY.md b/docs/SECURITY.md new file mode 100644 index 0000000..c00e145 --- /dev/null +++ b/docs/SECURITY.md @@ -0,0 +1,26 @@ +At [Ultralytics](https://ultralytics.com), the security of our users' data and systems is of utmost importance. To +ensure the safety and security of our [open-source projects](https://github.com/ultralytics), we have implemented +several measures to detect and prevent security vulnerabilities. + +[![ultralytics](https://snyk.io/advisor/python/ultralytics/badge.svg)](https://snyk.io/advisor/python/ultralytics) + +## Snyk Scanning + +We use [Snyk](https://snyk.io/advisor/python/ultralytics) to regularly scan the YOLOv8 repository for vulnerabilities +and security issues. Our goal is to identify and remediate any potential threats as soon as possible, to minimize any +risks to our users. + +## GitHub CodeQL Scanning + +In addition to our Snyk scans, we also use +GitHub's [CodeQL](https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/about-code-scanning-with-codeql) +scans to proactively identify and address security vulnerabilities. + +## Reporting Security Issues + +If you suspect or discover a security vulnerability in the YOLOv8 repository, please let us know immediately. You can +reach out to us directly via our [contact form](https://ultralytics.com/contact) or +via [security@ultralytics.com](mailto:security@ultralytics.com). Our security team will investigate and respond as soon +as possible. + +We appreciate your help in keeping the YOLOv8 repository secure and safe for everyone. diff --git a/docs/app.md b/docs/app.md new file mode 100644 index 0000000..8aaf686 --- /dev/null +++ b/docs/app.md @@ -0,0 +1,48 @@ +# Ultralytics HUB App for YOLOv8 + + + +
+ (Apple App Store / Google Play download badges and QR codes)
+ +Welcome to the Ultralytics HUB app, which is designed to demonstrate the power and capabilities of the YOLOv5 and YOLOv8 +models. This app is available for download on +the [Apple App Store](https://apps.apple.com/xk/app/ultralytics/id1583935240) and +the [Google Play Store](https://play.google.com/store/apps/details?id=com.ultralytics.ultralytics_app). + +**To install the app, simply scan the QR code provided above**. At the moment, the app features YOLOv5 models, with +YOLOv8 models set to be available soon. + +With the YOLOv5 model, you can easily detect and classify objects in images and videos with high accuracy and speed. The +model has been trained on a vast dataset and can recognize a wide range of objects, including pedestrians, traffic +signs, and cars. + +Using this app, you can try out YOLOv5 on your images and videos, and observe how the model works in real-time. +Additionally, you can learn more about YOLOv5's functionality and how it can be integrated into real-world applications. + +We are confident that you will enjoy using YOLOv5 and be amazed at its capabilities. Thank you for choosing Ultralytics +for your AI solutions. \ No newline at end of file diff --git a/docs/hub.md b/docs/hub.md new file mode 100644 index 0000000..199fa63 --- /dev/null +++ b/docs/hub.md @@ -0,0 +1,112 @@ +# Ultralytics HUB + + + +
+ (Ultralytics HUB banner; badges: CI CPU, Open In Colab)
+ + +[Ultralytics HUB](https://hub.ultralytics.com) is a new no-code online tool developed +by [Ultralytics](https://ultralytics.com), the creators of the popular [YOLOv5](https://github.com/ultralytics/yolov5) +object detection and image segmentation models. With Ultralytics HUB, users can easily train and deploy YOLO models +without any coding or technical expertise. + +Ultralytics HUB is designed to be user-friendly and intuitive, with a drag-and-drop interface that allows users to +easily upload their data and select their model configurations. It also offers a range of pre-trained models and +templates to choose from, making it easy for users to get started with training their own models. Once a model is +trained, it can be easily deployed and used for real-time object detection and image segmentation tasks. Overall, +Ultralytics HUB is an essential tool for anyone looking to use YOLO for their object detection and image segmentation +projects. + +**[Get started now](https://hub.ultralytics.com)** and experience the power and simplicity of Ultralytics HUB for +yourself. Sign up for a free account and start building, training, and deploying YOLOv5 and YOLOv8 models today. + +## 1. Upload a Dataset + +Ultralytics HUB datasets are just like YOLOv5 🚀 datasets, they use the same structure and the same label formats to keep +everything simple. + +When you upload a dataset to Ultralytics HUB, make sure to **place your dataset YAML inside the dataset root directory** +as in the example shown below, and then zip for upload to https://hub.ultralytics.com/. Your **dataset YAML, directory +and zip** should all share the same name. For example, if your dataset is called 'coco6' as in our +example [ultralytics/hub/coco6.zip](https://github.com/ultralytics/hub/blob/master/coco6.zip), then you should have a +coco6.yaml inside your coco6/ directory, which should zip to create coco6.zip for upload: + +```bash +zip -r coco6.zip coco6 +``` + +The example [coco6.zip](https://github.com/ultralytics/hub/blob/master/coco6.zip) dataset in this repository can be +downloaded and unzipped to see exactly how to structure your custom dataset. + +
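As an illustration only, the same archive can be produced from Python. This is a sketch: the directory tree in the comments mirrors the description above, and the image and label folder names are hypothetical.

```python
from pathlib import Path
import shutil

# Expected layout before zipping (only coco6.yaml is required to carry this exact name):
# coco6/
#   coco6.yaml        # dataset YAML, same name as the root directory and the zip
#   images/train/     # training images
#   images/val/       # validation images
#   labels/train/     # YOLO-format label files
#   labels/val/
root = Path('coco6')
assert (root / 'coco6.yaml').exists(), 'the dataset YAML must sit inside the dataset root'
shutil.make_archive('coco6', 'zip', root_dir='.', base_dir='coco6')  # equivalent to: zip -r coco6.zip coco6
```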
+ +The dataset YAML is the same standard YOLOv5 YAML format. See +the [YOLOv5 Train Custom Data tutorial](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data) for full details. + +```yaml +# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] +path: # dataset root dir (leave empty for HUB) +train: images/train # train images (relative to 'path') 8 images +val: images/val # val images (relative to 'path') 8 images +test: # test images (optional) + +# Classes +names: + 0: person + 1: bicycle + 2: car + 3: motorcycle + ... +``` + +After zipping your dataset, sign in to [Ultralytics HUB](https://bit.ly/ultralytics_hub) and click the Datasets tab. +Click 'Upload Dataset' to upload, scan and visualize your new dataset before training new YOLOv5 models on it! + +HUB Dataset Upload + +## 2. Train a Model + +Connect to the Ultralytics HUB notebook and use your model API key to begin training! + + +Open In Colab + +## 3. Deploy to Real World + +Export your model to 13 different formats, including TensorFlow, ONNX, OpenVINO, CoreML, Paddle and many others. Run +models directly on your [iOS](https://apps.apple.com/xk/app/ultralytics/id1583935240) or +[Android](https://play.google.com/store/apps/details?id=com.ultralytics.ultralytics_app) mobile device by downloading +the [Ultralytics App](https://ultralytics.com/app_install)! + +## ❓ Issues + +If you are a new [Ultralytics HUB](https://bit.ly/ultralytics_hub) user and have questions or comments, you are in the +right place! Please raise a [New Issue](https://github.com/ultralytics/hub/issues/new/choose) and let us know what we +can do to make your life better 😃! diff --git a/docs/index.md b/docs/index.md new file mode 100644 index 0000000..ec61e49 --- /dev/null +++ b/docs/index.md @@ -0,0 +1,45 @@ +
+ (YOLOv8 banner; badges: Ultralytics CI, YOLOv8 Citation, Docker Pulls, Run on Gradient, Open In Colab, Open In Kaggle)
+ +Introducing [Ultralytics](https://ultralytics.com) [YOLOv8](https://github.com/ultralytics/ultralytics), the latest version of the acclaimed real-time object detection and image segmentation model. YOLOv8 is built on cutting-edge advancements in deep learning and computer vision, offering unparalleled performance in terms of speed and accuracy. Its streamlined design makes it suitable for various applications and easily adaptable to different hardware platforms, from edge devices to cloud APIs. + +Explore the YOLOv8 Docs, a comprehensive resource designed to help you understand and utilize its features and capabilities. Whether you are a seasoned machine learning practitioner or new to the field, this hub aims to maximize YOLOv8's potential in your projects + +## Where to Start + +- **Install** `ultralytics` with pip and get up and running in minutes   [:material-clock-fast: Get Started](quickstart.md){ .md-button } +- **Predict** new images and videos with YOLOv8   [:octicons-image-16: Predict on Images](modes/predict.md){ .md-button } +- **Train** a new YOLOv8 model on your own custom dataset   [:fontawesome-solid-brain: Train a Model](modes/train.md){ .md-button } +- **Explore** YOLOv8 tasks like segment, classify, pose and track   [:material-magnify-expand: Explore Tasks](tasks/index.md){ .md-button } + +## YOLO: A Brief History + +[YOLO](https://arxiv.org/abs/1506.02640) (You Only Look Once), a popular object detection and image segmentation model, was developed by Joseph Redmon and Ali Farhadi at the University of Washington. Launched in 2015, YOLO quickly gained popularity for its high speed and accuracy. + +- [YOLOv2](https://arxiv.org/abs/1612.08242), released in 2016, improved the original model by incorporating batch normalization, anchor boxes, and dimension clusters. +- [YOLOv3](https://pjreddie.com/media/files/papers/YOLOv3.pdf), launched in 2018, further enhanced the model's performance using a more efficient backbone network, multiple anchors and spatial pyramid pooling. +- [YOLOv4](https://arxiv.org/abs/2004.10934) was released in 2020, introducing innovations like Mosaic data augmentation, a new anchor-free detection head, and a new loss function. +- [YOLOv5](https://github.com/ultralytics/yolov5) further improved the model's performance and added new features such as hyperparameter optimization, integrated experiment tracking and automatic export to popular export formats. +- [YOLOv6](https://github.com/meituan/YOLOv6) was open-sourced by Meituan in 2022 and is in use in many of the company's autonomous delivery robots. +- [YOLOv7](https://github.com/WongKinYiu/yolov7) added additional tasks such as pose estimation on the COCO keypoints dataset. + +Since its launch YOLO has been employed in various applications, including autonomous vehicles, security and surveillance, and medical imaging, and has won several competitions like the COCO Object Detection Challenge and the DOTA Object Detection Challenge. + +## Ultralytics YOLOv8 + +[Ultralytics YOLOv8](https://github.com/ultralytics/ultralytics) is the latest version of the YOLO object detection and image segmentation model. As a cutting-edge, state-of-the-art (SOTA) model, YOLOv8 builds on the success of previous versions, introducing new features and improvements for enhanced performance, flexibility, and efficiency. + +YOLOv8 is designed with a strong focus on speed, size, and accuracy, making it a compelling choice for various vision AI tasks. 
It outperforms previous versions by incorporating innovations like a new backbone network, a new anchor-free split head, and new loss functions. These improvements enable YOLOv8 to deliver superior results, while maintaining a compact size and exceptional speed. + +Additionally, YOLOv8 supports a full range of vision AI tasks, including [detection](tasks/detect.md), [segmentation](tasks/segment.md), [pose estimation](tasks/pose.md), [tracking](modes/track.md), and [classification](tasks/classify.md). This versatility allows users to leverage YOLOv8's capabilities across diverse applications and domains. diff --git a/docs/modes/benchmark.md b/docs/modes/benchmark.md new file mode 100644 index 0000000..662a013 --- /dev/null +++ b/docs/modes/benchmark.md @@ -0,0 +1,65 @@ + + +**Benchmark mode** is used to profile the speed and accuracy of various export formats for YOLOv8. The benchmarks +provide information on the size of the exported format, its `mAP50-95` metrics (for object detection, segmentation and pose) +or `accuracy_top5` metrics (for classification), and the inference time in milliseconds per image across various export +formats like ONNX, OpenVINO, TensorRT and others. This information can help users choose the optimal export format for +their specific use case based on their requirements for speed and accuracy. + +!!! tip "Tip" + + * Export to ONNX or OpenVINO for up to 3x CPU speedup. + * Export to TensorRT for up to 5x GPU speedup. + +## Usage Examples + +Run YOLOv8n benchmarks on all supported export formats including ONNX, TensorRT etc. See Arguments section below for a +full list of export arguments. + +!!! example "" + + === "Python" + + ```python + from ultralytics.yolo.utils.benchmarks import benchmark + + # Benchmark + benchmark(model='yolov8n.pt', imgsz=640, half=False, device=0) + ``` + === "CLI" + + ```bash + yolo benchmark model=yolov8n.pt imgsz=640 half=False device=0 + ``` + +## Arguments + +Arguments such as `model`, `imgsz`, `half`, `device`, and `hard_fail` provide users with the flexibility to fine-tune +the benchmarks to their specific needs and compare the performance of different export formats with ease. + +| Key | Value | Description | +|-------------|---------|----------------------------------------------------------------------| +| `model` | `None` | path to model file, i.e. yolov8n.pt, yolov8n.yaml | +| `imgsz` | `640` | image size as scalar or (h, w) list, i.e. (640, 480) | +| `half` | `False` | FP16 quantization | +| `device` | `None` | device to run on, i.e. cuda device=0 or device=0,1,2,3 or device=cpu | +| `hard_fail` | `False` | do not continue on error (bool), or val floor threshold (float) | + +## Export Formats + +Benchmarks will attempt to run automatically on all possible export formats below. 
+ +| Format | `format` Argument | Model | Metadata | +|--------------------------------------------------------------------|-------------------|---------------------------|----------| +| [PyTorch](https://pytorch.org/) | - | `yolov8n.pt` | ✅ | +| [TorchScript](https://pytorch.org/docs/stable/jit.html) | `torchscript` | `yolov8n.torchscript` | ✅ | +| [ONNX](https://onnx.ai/) | `onnx` | `yolov8n.onnx` | ✅ | +| [OpenVINO](https://docs.openvino.ai/latest/index.html) | `openvino` | `yolov8n_openvino_model/` | ✅ | +| [TensorRT](https://developer.nvidia.com/tensorrt) | `engine` | `yolov8n.engine` | ✅ | +| [CoreML](https://github.com/apple/coremltools) | `coreml` | `yolov8n.mlmodel` | ✅ | +| [TF SavedModel](https://www.tensorflow.org/guide/saved_model) | `saved_model` | `yolov8n_saved_model/` | ✅ | +| [TF GraphDef](https://www.tensorflow.org/api_docs/python/tf/Graph) | `pb` | `yolov8n.pb` | ❌ | +| [TF Lite](https://www.tensorflow.org/lite) | `tflite` | `yolov8n.tflite` | ✅ | +| [TF Edge TPU](https://coral.ai/docs/edgetpu/models-intro/) | `edgetpu` | `yolov8n_edgetpu.tflite` | ✅ | +| [TF.js](https://www.tensorflow.org/js) | `tfjs` | `yolov8n_web_model/` | ✅ | +| [PaddlePaddle](https://github.com/PaddlePaddle) | `paddle` | `yolov8n_paddle_model/` | ✅ | diff --git a/docs/modes/export.md b/docs/modes/export.md new file mode 100644 index 0000000..f454466 --- /dev/null +++ b/docs/modes/export.md @@ -0,0 +1,81 @@ + + +**Export mode** is used for exporting a YOLOv8 model to a format that can be used for deployment. In this mode, the +model is converted to a format that can be used by other software applications or hardware devices. This mode is useful +when deploying the model to production environments. + +!!! tip "Tip" + + * Export to ONNX or OpenVINO for up to 3x CPU speedup. + * Export to TensorRT for up to 5x GPU speedup. + +## Usage Examples + +Export a YOLOv8n model to a different format like ONNX or TensorRT. See Arguments section below for a full list of +export arguments. + +!!! example "" + + === "Python" + + ```python + from ultralytics import YOLO + + # Load a model + model = YOLO('yolov8n.pt') # load an official model + model = YOLO('path/to/best.pt') # load a custom trained + + # Export the model + model.export(format='onnx') + ``` + === "CLI" + + ```bash + yolo export model=yolov8n.pt format=onnx # export official model + yolo export model=path/to/best.pt format=onnx # export custom trained model + ``` + +## Arguments + +Export settings for YOLO models refer to the various configurations and options used to save or +export the model for use in other environments or platforms. These settings can affect the model's performance, size, +and compatibility with different systems. Some common YOLO export settings include the format of the exported model +file (e.g. ONNX, TensorFlow SavedModel), the device on which the model will be run (e.g. CPU, GPU), and the presence of +additional features such as masks or multiple labels per box. Other factors that may affect the export process include +the specific task the model is being used for and the requirements or constraints of the target environment or platform. +It is important to carefully consider and configure these settings to ensure that the exported model is optimized for +the intended use case and can be used effectively in the target environment. 
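For instance, several of the settings from the table below can be combined in a single export call. This is a sketch only; the argument values are illustrative rather than recommendations.

```python
from ultralytics import YOLO

model = YOLO('yolov8n.pt')
# ONNX export with dynamic axes, a simplified graph and a pinned opset version
model.export(format='onnx', imgsz=640, dynamic=True, simplify=True, opset=12)
```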
+ +| Key | Value | Description | +|-------------|-----------------|------------------------------------------------------| +| `format` | `'torchscript'` | format to export to | +| `imgsz` | `640` | image size as scalar or (h, w) list, i.e. (640, 480) | +| `keras` | `False` | use Keras for TF SavedModel export | +| `optimize` | `False` | TorchScript: optimize for mobile | +| `half` | `False` | FP16 quantization | +| `int8` | `False` | INT8 quantization | +| `dynamic` | `False` | ONNX/TF/TensorRT: dynamic axes | +| `simplify` | `False` | ONNX: simplify model | +| `opset` | `None` | ONNX: opset version (optional, defaults to latest) | +| `workspace` | `4` | TensorRT: workspace size (GB) | +| `nms` | `False` | CoreML: add NMS | + +## Export Formats + +Available YOLOv8 export formats are in the table below. You can export to any format using the `format` argument, +i.e. `format='onnx'` or `format='engine'`. + +| Format | `format` Argument | Model | Metadata | +|--------------------------------------------------------------------|-------------------|---------------------------|----------| +| [PyTorch](https://pytorch.org/) | - | `yolov8n.pt` | ✅ | +| [TorchScript](https://pytorch.org/docs/stable/jit.html) | `torchscript` | `yolov8n.torchscript` | ✅ | +| [ONNX](https://onnx.ai/) | `onnx` | `yolov8n.onnx` | ✅ | +| [OpenVINO](https://docs.openvino.ai/latest/index.html) | `openvino` | `yolov8n_openvino_model/` | ✅ | +| [TensorRT](https://developer.nvidia.com/tensorrt) | `engine` | `yolov8n.engine` | ✅ | +| [CoreML](https://github.com/apple/coremltools) | `coreml` | `yolov8n.mlmodel` | ✅ | +| [TF SavedModel](https://www.tensorflow.org/guide/saved_model) | `saved_model` | `yolov8n_saved_model/` | ✅ | +| [TF GraphDef](https://www.tensorflow.org/api_docs/python/tf/Graph) | `pb` | `yolov8n.pb` | ❌ | +| [TF Lite](https://www.tensorflow.org/lite) | `tflite` | `yolov8n.tflite` | ✅ | +| [TF Edge TPU](https://coral.ai/docs/edgetpu/models-intro/) | `edgetpu` | `yolov8n_edgetpu.tflite` | ✅ | +| [TF.js](https://www.tensorflow.org/js) | `tfjs` | `yolov8n_web_model/` | ✅ | +| [PaddlePaddle](https://github.com/PaddlePaddle) | `paddle` | `yolov8n_paddle_model/` | ✅ | diff --git a/docs/modes/index.md b/docs/modes/index.md new file mode 100644 index 0000000..1ca2383 --- /dev/null +++ b/docs/modes/index.md @@ -0,0 +1,62 @@ +# Ultralytics YOLOv8 Modes + + + +Ultralytics YOLOv8 supports several **modes** that can be used to perform different tasks. These modes are: + +**Train**: For training a YOLOv8 model on a custom dataset. +**Val**: For validating a YOLOv8 model after it has been trained. +**Predict**: For making predictions using a trained YOLOv8 model on new images or videos. +**Export**: For exporting a YOLOv8 model to a format that can be used for deployment. +**Track**: For tracking objects in real-time using a YOLOv8 model. +**Benchmark**: For benchmarking YOLOv8 exports (ONNX, TensorRT, etc.) speed and accuracy. + +## [Train](train.md) + +Train mode is used for training a YOLOv8 model on a custom dataset. In this mode, the model is trained using the +specified dataset and hyperparameters. The training process involves optimizing the model's parameters so that it can +accurately predict the classes and locations of objects in an image. + +[Train Examples](train.md){ .md-button .md-button--primary} + +## [Val](val.md) + +Val mode is used for validating a YOLOv8 model after it has been trained. In this mode, the model is evaluated on a +validation set to measure its accuracy and generalization performance. 
This mode can be used to tune the hyperparameters +of the model to improve its performance. + +[Val Examples](val.md){ .md-button .md-button--primary} + +## [Predict](predict.md) + +Predict mode is used for making predictions using a trained YOLOv8 model on new images or videos. In this mode, the +model is loaded from a checkpoint file, and the user can provide images or videos to perform inference. The model +predicts the classes and locations of objects in the input images or videos. + +[Predict Examples](predict.md){ .md-button .md-button--primary} + +## [Export](export.md) + +Export mode is used for exporting a YOLOv8 model to a format that can be used for deployment. In this mode, the model is +converted to a format that can be used by other software applications or hardware devices. This mode is useful when +deploying the model to production environments. + +[Export Examples](export.md){ .md-button .md-button--primary} + +## [Track](track.md) + +Track mode is used for tracking objects in real-time using a YOLOv8 model. In this mode, the model is loaded from a +checkpoint file, and the user can provide a live video stream to perform real-time object tracking. This mode is useful +for applications such as surveillance systems or self-driving cars. + +[Track Examples](track.md){ .md-button .md-button--primary} + +## [Benchmark](benchmark.md) + +Benchmark mode is used to profile the speed and accuracy of various export formats for YOLOv8. The benchmarks provide +information on the size of the exported format, its `mAP50-95` metrics (for object detection, segmentation and pose) +or `accuracy_top5` metrics (for classification), and the inference time in milliseconds per image across various export +formats like ONNX, OpenVINO, TensorRT and others. This information can help users choose the optimal export format for +their specific use case based on their requirements for speed and accuracy. + +[Benchmark Examples](benchmark.md){ .md-button .md-button--primary} diff --git a/docs/modes/predict.md b/docs/modes/predict.md new file mode 100644 index 0000000..30f8743 --- /dev/null +++ b/docs/modes/predict.md @@ -0,0 +1,276 @@ + + +YOLOv8 **predict mode** can generate predictions for various tasks, returning either a list of `Results` objects or a +memory-efficient generator of `Results` objects when using the streaming mode. Enable streaming mode by +passing `stream=True` in the predictor's call method. + +!!! example "Predict" + + === "Return a list with `Stream=False`" + ```python + inputs = [img, img] # list of numpy arrays + results = model(inputs) # list of Results objects + + for result in results: + boxes = result.boxes # Boxes object for bbox outputs + masks = result.masks # Masks object for segmentation masks outputs + probs = result.probs # Class probabilities for classification outputs + ``` + + === "Return a generator with `Stream=True`" + ```python + inputs = [img, img] # list of numpy arrays + results = model(inputs, stream=True) # generator of Results objects + + for result in results: + boxes = result.boxes # Boxes object for bbox outputs + masks = result.masks # Masks object for segmentation masks outputs + probs = result.probs # Class probabilities for classification outputs + ``` + +!!! tip "Tip" + + Streaming mode with `stream=True` should be used for long videos or large predict sources, otherwise results will accumuate in memory and will eventually cause out-of-memory errors. + +## Sources + +YOLOv8 can accept various input sources, as shown in the table below. 
This includes images, URLs, PIL images, OpenCV, +numpy arrays, torch tensors, CSV files, videos, directories, globs, YouTube videos, and streams. The table indicates +whether each source can be used in streaming mode with `stream=True` ✅ and an example argument for each source. + +| source | model(arg) | type | notes | +|-------------|--------------------------------------------|----------------|------------------| +| image | `'im.jpg'` | `str`, `Path` | | +| URL | `'https://ultralytics.com/images/bus.jpg'` | `str` | | +| screenshot | `'screen'` | `str` | | +| PIL | `Image.open('im.jpg')` | `PIL.Image` | HWC, RGB | +| OpenCV | `cv2.imread('im.jpg')[:,:,::-1]` | `np.ndarray` | HWC, BGR to RGB | +| numpy | `np.zeros((640,1280,3))` | `np.ndarray` | HWC | +| torch | `torch.zeros(16,3,320,640)` | `torch.Tensor` | BCHW, RGB | +| CSV | `'sources.csv'` | `str`, `Path` | RTSP, RTMP, HTTP | +| video ✅ | `'vid.mp4'` | `str`, `Path` | | +| directory ✅ | `'path/'` | `str`, `Path` | | +| glob ✅ | `'path/*.jpg'` | `str` | Use `*` operator | +| YouTube ✅ | `'https://youtu.be/Zgi9g1ksQHc'` | `str` | | +| stream ✅ | `'rtsp://example.com/media.mp4'` | `str` | RTSP, RTMP, HTTP | + + +## Arguments +`model.predict` accepts multiple arguments that control the predction operation. These arguments can be passed directly to `model.predict`: +!!! example + ``` + model.predict(source, save=True, imgsz=320, conf=0.5) + ``` + +All supported arguments: + +| Key | Value | Description | +|------------------|------------------------|----------------------------------------------------------| +| `source` | `'ultralytics/assets'` | source directory for images or videos | +| `conf` | `0.25` | object confidence threshold for detection | +| `iou` | `0.7` | intersection over union (IoU) threshold for NMS | +| `half` | `False` | use half precision (FP16) | +| `device` | `None` | device to run on, i.e. cuda device=0/1/2/3 or device=cpu | +| `show` | `False` | show results if possible | +| `save` | `False` | save images with results | +| `save_txt` | `False` | save results as .txt file | +| `save_conf` | `False` | save results with confidence scores | +| `save_crop` | `False` | save cropped images with results | +| `hide_labels` | `False` | hide labels | +| `hide_conf` | `False` | hide confidence scores | +| `max_det` | `300` | maximum number of detections per image | +| `vid_stride` | `False` | video frame-rate stride | +| `line_thickness` | `3` | bounding box thickness (pixels) | +| `visualize` | `False` | visualize model features | +| `augment` | `False` | apply image augmentation to prediction sources | +| `agnostic_nms` | `False` | class-agnostic NMS | +| `retina_masks` | `False` | use high-resolution segmentation masks | +| `classes` | `None` | filter results by class, i.e. class=0, or class=[0,2,3] | +| `boxes` | `True` | Show boxes in segmentation predictions | + +## Image and Video Formats + +YOLOv8 supports various image and video formats, as specified +in [yolo/data/utils.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/yolo/data/utils.py). See the +tables below for the valid suffixes and example predict commands. 
+ +### Image Suffixes + +| Image Suffixes | Example Predict Command | Reference | +|----------------|----------------------------------|-------------------------------------------------------------------------------| +| .bmp | `yolo predict source=image.bmp` | [Microsoft BMP File Format](https://en.wikipedia.org/wiki/BMP_file_format) | +| .dng | `yolo predict source=image.dng` | [Adobe DNG](https://www.adobe.com/products/photoshop/extend.displayTab2.html) | +| .jpeg | `yolo predict source=image.jpeg` | [JPEG](https://en.wikipedia.org/wiki/JPEG) | +| .jpg | `yolo predict source=image.jpg` | [JPEG](https://en.wikipedia.org/wiki/JPEG) | +| .mpo | `yolo predict source=image.mpo` | [Multi Picture Object](https://fileinfo.com/extension/mpo) | +| .png | `yolo predict source=image.png` | [Portable Network Graphics](https://en.wikipedia.org/wiki/PNG) | +| .tif | `yolo predict source=image.tif` | [Tag Image File Format](https://en.wikipedia.org/wiki/TIFF) | +| .tiff | `yolo predict source=image.tiff` | [Tag Image File Format](https://en.wikipedia.org/wiki/TIFF) | +| .webp | `yolo predict source=image.webp` | [WebP](https://en.wikipedia.org/wiki/WebP) | +| .pfm | `yolo predict source=image.pfm` | [Portable FloatMap](https://en.wikipedia.org/wiki/Netpbm#File_formats) | + +### Video Suffixes + +| Video Suffixes | Example Predict Command | Reference | +|----------------|----------------------------------|----------------------------------------------------------------------------------| +| .asf | `yolo predict source=video.asf` | [Advanced Systems Format](https://en.wikipedia.org/wiki/Advanced_Systems_Format) | +| .avi | `yolo predict source=video.avi` | [Audio Video Interleave](https://en.wikipedia.org/wiki/Audio_Video_Interleave) | +| .gif | `yolo predict source=video.gif` | [Graphics Interchange Format](https://en.wikipedia.org/wiki/GIF) | +| .m4v | `yolo predict source=video.m4v` | [MPEG-4 Part 14](https://en.wikipedia.org/wiki/M4V) | +| .mkv | `yolo predict source=video.mkv` | [Matroska](https://en.wikipedia.org/wiki/Matroska) | +| .mov | `yolo predict source=video.mov` | [QuickTime File Format](https://en.wikipedia.org/wiki/QuickTime_File_Format) | +| .mp4 | `yolo predict source=video.mp4` | [MPEG-4 Part 14 - Wikipedia](https://en.wikipedia.org/wiki/MPEG-4_Part_14) | +| .mpeg | `yolo predict source=video.mpeg` | [MPEG-1 Part 2](https://en.wikipedia.org/wiki/MPEG-1) | +| .mpg | `yolo predict source=video.mpg` | [MPEG-1 Part 2](https://en.wikipedia.org/wiki/MPEG-1) | +| .ts | `yolo predict source=video.ts` | [MPEG Transport Stream](https://en.wikipedia.org/wiki/MPEG_transport_stream) | +| .wmv | `yolo predict source=video.wmv` | [Windows Media Video](https://en.wikipedia.org/wiki/Windows_Media_Video) | +| .webm | `yolo predict source=video.webm` | [WebM Project](https://en.wikipedia.org/wiki/WebM) | + +## Working with Results + +The `Results` object contains the following components: + +- `Results.boxes`: `Boxes` object with properties and methods for manipulating bounding boxes +- `Results.masks`: `Masks` object for indexing masks or getting segment coordinates +- `Results.probs`: `torch.Tensor` containing class probabilities or logits +- `Results.orig_img`: Original image loaded in memory +- `Results.path`: `Path` containing the path to the input image + +Each result is composed of a `torch.Tensor` by default, which allows for easy manipulation: + +!!! 
example "Results" + + ```python + results = results.cuda() + results = results.cpu() + results = results.to('cpu') + results = results.numpy() + ``` + +### Boxes + +`Boxes` object can be used to index, manipulate, and convert bounding boxes to different formats. Box format conversion +operations are cached, meaning they're only calculated once per object, and those values are reused for future calls. + +- Indexing a `Boxes` object returns a `Boxes` object: + +!!! example "Boxes" + + ```python + results = model(img) + boxes = results[0].boxes + box = boxes[0] # returns one box + box.xyxy + ``` + +- Properties and conversions + +!!! example "Boxes Properties" + + ```python + boxes.xyxy # box with xyxy format, (N, 4) + boxes.xywh # box with xywh format, (N, 4) + boxes.xyxyn # box with xyxy format but normalized, (N, 4) + boxes.xywhn # box with xywh format but normalized, (N, 4) + boxes.conf # confidence score, (N, 1) + boxes.cls # cls, (N, 1) + boxes.data # raw bboxes tensor, (N, 6) or boxes.boxes + ``` + +### Masks + +`Masks` object can be used index, manipulate and convert masks to segments. The segment conversion operation is cached. + +!!! example "Masks" + + ```python + results = model(inputs) + masks = results[0].masks # Masks object + masks.xy # x, y segments (pixels), List[segment] * N + masks.xyn # x, y segments (normalized), List[segment] * N + masks.data # raw masks tensor, (N, H, W) or masks.masks + ``` + +### probs + +`probs` attribute of `Results` class is a `Tensor` containing class probabilities of a classification operation. + +!!! example "Probs" + + ```python + results = model(inputs) + results[0].probs # cls prob, (num_class, ) + ``` + +Class reference documentation for `Results` module and its components can be found [here](../reference/results.md) + +## Plotting results + +You can use `plot()` function of `Result` object to plot results on in image object. It plots all components(boxes, +masks, classification logits, etc.) found in the results object + +!!! example "Plotting" + + ```python + res = model(img) + res_plotted = res[0].plot() + cv2.imshow("result", res_plotted) + ``` +| Argument | Description | +| ----------- | ------------- | +| `conf (bool)` | Whether to plot the detection confidence score. | +| `line_width (float, optional)` | The line width of the bounding boxes. If None, it is scaled to the image size. | +| `font_size (float, optional)` | The font size of the text. If None, it is scaled to the image size. | +| `font (str)` | The font to use for the text. | +| `pil (bool)` | Whether to return the image as a PIL Image. | +| `example (str)` | An example string to display. Useful for indicating the expected format of the output. | +| `img (numpy.ndarray)` | Plot to another image. if not, plot to original image. | +| `labels (bool)` | Whether to plot the label of bounding boxes. | +| `boxes (bool)` | Whether to plot the bounding boxes. | +| `masks (bool)` | Whether to plot the masks. | +| `probs (bool)` | Whether to plot classification probability. | + + +## Streaming Source `for`-loop + +Here's a Python script using OpenCV (cv2) and YOLOv8 to run inference on video frames. This script assumes you have already installed the necessary packages (opencv-python and ultralytics). + +!!! 
example "Streaming for-loop" + + ```python + import cv2 + from ultralytics import YOLO + + # Load the YOLOv8 model + model = YOLO('yolov8n.pt') + + # Open the video file + video_path = "path/to/your/video/file.mp4" + cap = cv2.VideoCapture(video_path) + + # Loop through the video frames + while cap.isOpened(): + # Read a frame from the video + success, frame = cap.read() + + if success: + # Run YOLOv8 inference on the frame + results = model(frame) + + # Visualize the results on the frame + annotated_frame = results[0].plot() + + # Display the annotated frame + cv2.imshow("YOLOv8 Inference", annotated_frame) + + # Break the loop if 'q' is pressed + if cv2.waitKey(1) & 0xFF == ord("q"): + break + else: + # Break the loop if the end of the video is reached + break + + # Release the video capture object and close the display window + cap.release() + cv2.destroyAllWindows() + ``` \ No newline at end of file diff --git a/docs/modes/track.md b/docs/modes/track.md new file mode 100644 index 0000000..8058f38 --- /dev/null +++ b/docs/modes/track.md @@ -0,0 +1,96 @@ + + +Object tracking is a task that involves identifying the location and class of objects, then assigning a unique ID to +that detection in video streams. + +The output of tracker is the same as detection with an added object ID. + +## Available Trackers + +The following tracking algorithms have been implemented and can be enabled by passing `tracker=tracker_type.yaml` + +* [BoT-SORT](https://github.com/NirAharon/BoT-SORT) - `botsort.yaml` +* [ByteTrack](https://github.com/ifzhang/ByteTrack) - `bytetrack.yaml` + +The default tracker is BoT-SORT. + +## Tracking + +Use a trained YOLOv8n/YOLOv8n-seg model to run tracker on video streams. + +!!! example "" + + === "Python" + + ```python + from ultralytics import YOLO + + # Load a model + model = YOLO('yolov8n.pt') # load an official detection model + model = YOLO('yolov8n-seg.pt') # load an official segmentation model + model = YOLO('path/to/best.pt') # load a custom model + + # Track with the model + results = model.track(source="https://youtu.be/Zgi9g1ksQHc", show=True) + results = model.track(source="https://youtu.be/Zgi9g1ksQHc", show=True, tracker="bytetrack.yaml") + ``` + === "CLI" + + ```bash + yolo track model=yolov8n.pt source="https://youtu.be/Zgi9g1ksQHc" # official detection model + yolo track model=yolov8n-seg.pt source=... # official segmentation model + yolo track model=path/to/best.pt source=... # custom model + yolo track model=path/to/best.pt tracker="bytetrack.yaml" # bytetrack tracker + + ``` + +As in the above usage, we support both the detection and segmentation models for tracking and the only thing you need to +do is loading the corresponding (detection or segmentation) model. + +## Configuration + +### Tracking + +Tracking shares the configuration with predict, i.e `conf`, `iou`, `show`. More configurations please refer +to [predict page](https://docs.ultralytics.com/modes/predict/). +!!! 
example "" + + === "Python" + + ```python + from ultralytics import YOLO + + model = YOLO('yolov8n.pt') + results = model.track(source="https://youtu.be/Zgi9g1ksQHc", conf=0.3, iou=0.5, show=True) + ``` + === "CLI" + + ```bash + yolo track model=yolov8n.pt source="https://youtu.be/Zgi9g1ksQHc" conf=0.3, iou=0.5 show + + ``` + +### Tracker + +We also support using a modified tracker config file, just copy a config file i.e `custom_tracker.yaml` +from [ultralytics/tracker/cfg](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/tracker/cfg) and modify +any configurations(expect the `tracker_type`) you need to. +!!! example "" + + === "Python" + + ```python + from ultralytics import YOLO + + model = YOLO('yolov8n.pt') + results = model.track(source="https://youtu.be/Zgi9g1ksQHc", tracker='custom_tracker.yaml') + ``` + === "CLI" + + ```bash + yolo track model=yolov8n.pt source="https://youtu.be/Zgi9g1ksQHc" tracker='custom_tracker.yaml' + ``` + +Please refer to [ultralytics/tracker/cfg](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/tracker/cfg) +page + diff --git a/docs/modes/train.md b/docs/modes/train.md new file mode 100644 index 0000000..a9275a0 --- /dev/null +++ b/docs/modes/train.md @@ -0,0 +1,99 @@ + + +**Train mode** is used for training a YOLOv8 model on a custom dataset. In this mode, the model is trained using the +specified dataset and hyperparameters. The training process involves optimizing the model's parameters so that it can +accurately predict the classes and locations of objects in an image. + +!!! tip "Tip" + + * YOLOv8 datasets like COCO, VOC, ImageNet and many others automatically download on first use, i.e. `yolo train data=coco.yaml` + +## Usage Examples + +Train YOLOv8n on the COCO128 dataset for 100 epochs at image size 640. See Arguments section below for a full list of +training arguments. + +!!! example "" + + === "Python" + + ```python + from ultralytics import YOLO + + # Load a model + model = YOLO('yolov8n.yaml') # build a new model from YAML + model = YOLO('yolov8n.pt') # load a pretrained model (recommended for training) + model = YOLO('yolov8n.yaml').load('yolov8n.pt') # build from YAML and transfer weights + + # Train the model + model.train(data='coco128.yaml', epochs=100, imgsz=640) + ``` + === "CLI" + + ```bash + # Build a new model from YAML and start training from scratch + yolo detect train data=coco128.yaml model=yolov8n.yaml epochs=100 imgsz=640 + + # Start training from a pretrained *.pt model + yolo detect train data=coco128.yaml model=yolov8n.pt epochs=100 imgsz=640 + + # Build a new model from YAML, transfer pretrained weights to it and start training + yolo detect train data=coco128.yaml model=yolov8n.yaml pretrained=yolov8n.pt epochs=100 imgsz=640 + ``` + +## Arguments + +Training settings for YOLO models refer to the various hyperparameters and configurations used to train the model on a +dataset. These settings can affect the model's performance, speed, and accuracy. Some common YOLO training settings +include the batch size, learning rate, momentum, and weight decay. Other factors that may affect the training process +include the choice of optimizer, the choice of loss function, and the size and composition of the training dataset. It +is important to carefully tune and experiment with these settings to achieve the best possible performance for a given +task. 
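For example, a handful of the settings from the table below can be passed straight to `model.train()`. This is a sketch only; the values are illustrative, not tuned recommendations.

```python
from ultralytics import YOLO

model = YOLO('yolov8n.pt')
model.train(data='coco128.yaml', epochs=50, imgsz=640, batch=16,
            optimizer='AdamW', lr0=0.001, cos_lr=True, patience=25)
```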
+ +| Key | Value | Description | +|-------------------|----------|-----------------------------------------------------------------------------| +| `model` | `None` | path to model file, i.e. yolov8n.pt, yolov8n.yaml | +| `data` | `None` | path to data file, i.e. coco128.yaml | +| `epochs` | `100` | number of epochs to train for | +| `patience` | `50` | epochs to wait for no observable improvement for early stopping of training | +| `batch` | `16` | number of images per batch (-1 for AutoBatch) | +| `imgsz` | `640` | size of input images as integer or w,h | +| `save` | `True` | save train checkpoints and predict results | +| `save_period` | `-1` | Save checkpoint every x epochs (disabled if < 1) | +| `cache` | `False` | True/ram, disk or False. Use cache for data loading | +| `device` | `None` | device to run on, i.e. cuda device=0 or device=0,1,2,3 or device=cpu | +| `workers` | `8` | number of worker threads for data loading (per RANK if DDP) | +| `project` | `None` | project name | +| `name` | `None` | experiment name | +| `exist_ok` | `False` | whether to overwrite existing experiment | +| `pretrained` | `False` | whether to use a pretrained model | +| `optimizer` | `'SGD'` | optimizer to use, choices=['SGD', 'Adam', 'AdamW', 'RMSProp'] | +| `verbose` | `False` | whether to print verbose output | +| `seed` | `0` | random seed for reproducibility | +| `deterministic` | `True` | whether to enable deterministic mode | +| `single_cls` | `False` | train multi-class data as single-class | +| `image_weights` | `False` | use weighted image selection for training | +| `rect` | `False` | rectangular training with each batch collated for minimum padding | +| `cos_lr` | `False` | use cosine learning rate scheduler | +| `close_mosaic` | `10` | disable mosaic augmentation for final 10 epochs | +| `resume` | `False` | resume training from last checkpoint | +| `amp` | `True` | Automatic Mixed Precision (AMP) training, choices=[True, False] | +| `lr0` | `0.01` | initial learning rate (i.e. SGD=1E-2, Adam=1E-3) | +| `lrf` | `0.01` | final learning rate (lr0 * lrf) | +| `momentum` | `0.937` | SGD momentum/Adam beta1 | +| `weight_decay` | `0.0005` | optimizer weight decay 5e-4 | +| `warmup_epochs` | `3.0` | warmup epochs (fractions ok) | +| `warmup_momentum` | `0.8` | warmup initial momentum | +| `warmup_bias_lr` | `0.1` | warmup initial bias lr | +| `box` | `7.5` | box loss gain | +| `cls` | `0.5` | cls loss gain (scale with pixels) | +| `dfl` | `1.5` | dfl loss gain | +| `pose` | `12.0` | pose loss gain (pose-only) | +| `kobj` | `2.0` | keypoint obj loss gain (pose-only) | +| `fl_gamma` | `0.0` | focal loss gamma (efficientDet default gamma=1.5) | +| `label_smoothing` | `0.0` | label smoothing (fraction) | +| `nbs` | `64` | nominal batch size | +| `overlap_mask` | `True` | masks should overlap during training (segment train only) | +| `mask_ratio` | `4` | mask downsample ratio (segment train only) | +| `dropout` | `0.0` | use dropout regularization (classify train only) | +| `val` | `True` | validate/test during training | diff --git a/docs/modes/val.md b/docs/modes/val.md new file mode 100644 index 0000000..b0a866d --- /dev/null +++ b/docs/modes/val.md @@ -0,0 +1,86 @@ + + +**Val mode** is used for validating a YOLOv8 model after it has been trained. In this mode, the model is evaluated on a +validation set to measure its accuracy and generalization performance. This mode can be used to tune the hyperparameters +of the model to improve its performance. + +!!! 
tip "Tip" + + * YOLOv8 models automatically remember their training settings, so you can validate a model at the same image size and on the original dataset easily with just `yolo val model=yolov8n.pt` or `model('yolov8n.pt').val()` + +## Usage Examples + +Validate trained YOLOv8n model accuracy on the COCO128 dataset. No argument need to passed as the `model` retains it's +training `data` and arguments as model attributes. See Arguments section below for a full list of export arguments. + +!!! example "" + + === "Python" + + ```python + from ultralytics import YOLO + + # Load a model + model = YOLO('yolov8n.pt') # load an official model + model = YOLO('path/to/best.pt') # load a custom model + + # Validate the model + metrics = model.val() # no arguments needed, dataset and settings remembered + metrics.box.map # map50-95 + metrics.box.map50 # map50 + metrics.box.map75 # map75 + metrics.box.maps # a list contains map50-95 of each category + ``` + === "CLI" + + ```bash + yolo detect val model=yolov8n.pt # val official model + yolo detect val model=path/to/best.pt # val custom model + ``` + +## Arguments + +Validation settings for YOLO models refer to the various hyperparameters and configurations used to +evaluate the model's performance on a validation dataset. These settings can affect the model's performance, speed, and +accuracy. Some common YOLO validation settings include the batch size, the frequency with which validation is performed +during training, and the metrics used to evaluate the model's performance. Other factors that may affect the validation +process include the size and composition of the validation dataset and the specific task the model is being used for. It +is important to carefully tune and experiment with these settings to ensure that the model is performing well on the +validation dataset and to detect and prevent overfitting. + +| Key | Value | Description | +|---------------|---------|--------------------------------------------------------------------| +| `data` | `None` | path to data file, i.e. coco128.yaml | +| `imgsz` | `640` | image size as scalar or (h, w) list, i.e. (640, 480) | +| `batch` | `16` | number of images per batch (-1 for AutoBatch) | +| `save_json` | `False` | save results to JSON file | +| `save_hybrid` | `False` | save hybrid version of labels (labels + additional predictions) | +| `conf` | `0.001` | object confidence threshold for detection | +| `iou` | `0.6` | intersection over union (IoU) threshold for NMS | +| `max_det` | `300` | maximum number of detections per image | +| `half` | `True` | use half precision (FP16) | +| `device` | `None` | device to run on, i.e. cuda device=0/1/2/3 or device=cpu | +| `dnn` | `False` | use OpenCV DNN for ONNX inference | +| `plots` | `False` | show plots during training | +| `rect` | `False` | rectangular val with each batch collated for minimum padding | +| `split` | `val` | dataset split to use for validation, i.e. 'val', 'test' or 'train' | + +## Export Formats + +Available YOLOv8 export formats are in the table below. You can export to any format using the `format` argument, +i.e. `format='onnx'` or `format='engine'`. 
+ +| Format | `format` Argument | Model | Metadata | +|--------------------------------------------------------------------|-------------------|---------------------------|----------| +| [PyTorch](https://pytorch.org/) | - | `yolov8n.pt` | ✅ | +| [TorchScript](https://pytorch.org/docs/stable/jit.html) | `torchscript` | `yolov8n.torchscript` | ✅ | +| [ONNX](https://onnx.ai/) | `onnx` | `yolov8n.onnx` | ✅ | +| [OpenVINO](https://docs.openvino.ai/latest/index.html) | `openvino` | `yolov8n_openvino_model/` | ✅ | +| [TensorRT](https://developer.nvidia.com/tensorrt) | `engine` | `yolov8n.engine` | ✅ | +| [CoreML](https://github.com/apple/coremltools) | `coreml` | `yolov8n.mlmodel` | ✅ | +| [TF SavedModel](https://www.tensorflow.org/guide/saved_model) | `saved_model` | `yolov8n_saved_model/` | ✅ | +| [TF GraphDef](https://www.tensorflow.org/api_docs/python/tf/Graph) | `pb` | `yolov8n.pb` | ❌ | +| [TF Lite](https://www.tensorflow.org/lite) | `tflite` | `yolov8n.tflite` | ✅ | +| [TF Edge TPU](https://coral.ai/docs/edgetpu/models-intro/) | `edgetpu` | `yolov8n_edgetpu.tflite` | ✅ | +| [TF.js](https://www.tensorflow.org/js) | `tfjs` | `yolov8n_web_model/` | ✅ | +| [PaddlePaddle](https://github.com/PaddlePaddle) | `paddle` | `yolov8n_paddle_model/` | ✅ | diff --git a/docs/quickstart.md b/docs/quickstart.md new file mode 100644 index 0000000..8725b77 --- /dev/null +++ b/docs/quickstart.md @@ -0,0 +1,133 @@ +## Install + +Install YOLOv8 via the `ultralytics` pip package for the latest stable release or by cloning +the [https://github.com/ultralytics/ultralytics](https://github.com/ultralytics/ultralytics) repository for the most +up-to-date version. + +!!! example "Install" + + === "pip install (recommended)" + ```bash + pip install ultralytics + ``` + + === "git clone (for development)" + ```bash + git clone https://github.com/ultralytics/ultralytics + cd ultralytics + pip install -e . + ``` + +See the `ultralytics` [requirements.txt](https://github.com/ultralytics/ultralytics/blob/main/requirements.txt) file for a list of dependencies. Note that `pip` automatically installs all required dependencies. + +!!! tip "Tip" + + PyTorch requirements vary by operating system and CUDA requirements, so it's recommended to install PyTorch first following instructions at [https://pytorch.org/get-started/locally](https://pytorch.org/get-started/locally). + + + PyTorch Installation Instructions + + + +## Use with CLI + +The YOLO command line interface (CLI) allows for simple single-line commands without the need for a Python environment. +CLI requires no customization or Python code. You can simply run all tasks from the terminal with the `yolo` command. Check out the [CLI Guide](usage/cli.md) to learn more about using YOLOv8 from the command line. + + +!!! example + + === "Syntax" + + Ultralytics `yolo` commands use the following syntax: + ```bash + yolo TASK MODE ARGS + + Where TASK (optional) is one of [detect, segment, classify] + MODE (required) is one of [train, val, predict, export, track] + ARGS (optional) are any number of custom 'arg=value' pairs like 'imgsz=320' that override defaults. 
+ ``` + See all ARGS in the full [Configuration Guide](usage/cfg.md) or with `yolo cfg` + + === "Train" + + Train a detection model for 10 epochs with an initial learning_rate of 0.01 + ```bash + yolo train data=coco128.yaml model=yolov8n.pt epochs=10 lr0=0.01 + ``` + + === "Predict" + + Predict a YouTube video using a pretrained segmentation model at image size 320: + ```bash + yolo predict model=yolov8n-seg.pt source='https://youtu.be/Zgi9g1ksQHc' imgsz=320 + ``` + + === "Val" + + Val a pretrained detection model at batch-size 1 and image size 640: + ```bash + yolo val model=yolov8n.pt data=coco128.yaml batch=1 imgsz=640 + ``` + + === "Export" + + Export a YOLOv8n classification model to ONNX format at image size 224 by 128 (no TASK required) + ```bash + yolo export model=yolov8n-cls.pt format=onnx imgsz=224,128 + ``` + + === "Special" + + Run special commands to see version, view settings, run checks and more: + ```bash + yolo help + yolo checks + yolo version + yolo settings + yolo copy-cfg + yolo cfg + ``` + + +!!! warning "Warning" + + Arguments must be passed as `arg=val` pairs, split by an equals `=` sign and delimited by spaces ` ` between pairs. Do not use `--` argument prefixes or commas `,` beteen arguments. + + - `yolo predict model=yolov8n.pt imgsz=640 conf=0.25`   ✅ + - `yolo predict model yolov8n.pt imgsz 640 conf 0.25`   ❌ + - `yolo predict --model yolov8n.pt --imgsz 640 --conf 0.25`   ❌ + +[CLI Guide](usage/cli.md){ .md-button .md-button--primary} + +## Use with Python + +YOLOv8's Python interface allows for seamless integration into your Python projects, making it easy to load, run, and process the model's output. Designed with simplicity and ease of use in mind, the Python interface enables users to quickly implement object detection, segmentation, and classification in their projects. This makes YOLOv8's Python interface an invaluable tool for anyone looking to incorporate these functionalities into their Python projects. + +For example, users can load a model, train it, evaluate its performance on a validation set, and even export it to ONNX format with just a few lines of code. Check out the [Python Guide](usage/python.md) to learn more about using YOLOv8 within your Python projects. + +!!! example + + ```python + from ultralytics import YOLO + + # Create a new YOLO model from scratch + model = YOLO('yolov8n.yaml') + + # Load a pretrained YOLO model (recommended for training) + model = YOLO('yolov8n.pt') + + # Train the model using the 'coco128.yaml' dataset for 3 epochs + results = model.train(data='coco128.yaml', epochs=3) + + # Evaluate the model's performance on the validation set + results = model.val() + + # Perform object detection on an image using the model + results = model('https://ultralytics.com/images/bus.jpg') + + # Export the model to ONNX format + success = model.export(format='onnx') + ``` + +[Python Guide](usage/python.md){.md-button .md-button--primary} diff --git a/docs/reference/base_pred.md b/docs/reference/base_pred.md new file mode 100644 index 0000000..5a61c50 --- /dev/null +++ b/docs/reference/base_pred.md @@ -0,0 +1,8 @@ +All task Predictors are inherited from `BasePredictors` class that contains the model validation routine boilerplate. +You can override any function of these Trainers to suit your needs. 
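For example, here is a hypothetical sketch of overriding a single method on the detection predictor. It assumes that `DetectionPredictor` is importable from `ultralytics.yolo.v8.detect`, that its `postprocess(preds, img, orig_imgs)` method returns a list of `Results`, and that a predictor instance can be called with `source` and `model` arguments; check the reference below before relying on these signatures.

```python
from ultralytics.yolo.v8.detect import DetectionPredictor


class QuietPredictor(DetectionPredictor):
    """Example override: report how many boxes survive post-processing."""

    def postprocess(self, preds, img, orig_imgs):
        results = super().postprocess(preds, img, orig_imgs)
        print(f'{sum(len(r.boxes) for r in results)} boxes kept')
        return results


predictor = QuietPredictor(overrides=dict(conf=0.25))
predictor(source='https://ultralytics.com/images/bus.jpg', model='yolov8n.pt')
```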
+ +--- + +### BasePredictor API Reference + +:::ultralytics.yolo.engine.predictor.BasePredictor \ No newline at end of file diff --git a/docs/reference/base_trainer.md b/docs/reference/base_trainer.md new file mode 100644 index 0000000..a93af69 --- /dev/null +++ b/docs/reference/base_trainer.md @@ -0,0 +1,8 @@ +All task Trainers are inherited from `BaseTrainer` class that contains the model training and optimization routine +boilerplate. You can override any function of these Trainers to suit your needs. + +--- + +### BaseTrainer API Reference + +:::ultralytics.yolo.engine.trainer.BaseTrainer \ No newline at end of file diff --git a/docs/reference/base_val.md b/docs/reference/base_val.md new file mode 100644 index 0000000..37b7d9c --- /dev/null +++ b/docs/reference/base_val.md @@ -0,0 +1,8 @@ +All task Validators are inherited from `BaseValidator` class that contains the model validation routine boilerplate. You +can override any function of these Trainers to suit your needs. + +--- + +### BaseValidator API Reference + +:::ultralytics.yolo.engine.validator.BaseValidator \ No newline at end of file diff --git a/docs/reference/exporter.md b/docs/reference/exporter.md new file mode 100644 index 0000000..4ce31e1 --- /dev/null +++ b/docs/reference/exporter.md @@ -0,0 +1,3 @@ +### Exporter API Reference + +:::ultralytics.yolo.engine.exporter.Exporter \ No newline at end of file diff --git a/docs/reference/model.md b/docs/reference/model.md new file mode 100644 index 0000000..6edc97b --- /dev/null +++ b/docs/reference/model.md @@ -0,0 +1 @@ +::: ultralytics.yolo.engine.model diff --git a/docs/reference/nn.md b/docs/reference/nn.md new file mode 100644 index 0000000..0c7b1a8 --- /dev/null +++ b/docs/reference/nn.md @@ -0,0 +1,19 @@ +# nn Module + +Ultralytics nn module contains 3 main components: + +1. **AutoBackend**: A module that can run inference on all popular model formats +2. **BaseModel**: `BaseModel` class defines the operations supported by tasks like Detection and Segmentation +3. **modules**: Optimized and reusable neural network blocks built on PyTorch. 
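As a rough orientation for the components listed above, the sketch below loads `AutoBackend` directly; it assumes the constructor accepts a weights path and a `torch.device`, and that the forward pass takes a BCHW float tensor. In normal use the `YOLO` class wraps all of this.

```python
import torch
from ultralytics.nn.autobackend import AutoBackend

backend = AutoBackend('yolov8n.pt', device=torch.device('cpu'))  # exported formats such as .onnx also load
dummy = torch.zeros(1, 3, 640, 640)                              # BCHW, float32, values in [0, 1]
preds = backend(dummy)                                           # raw predictions; NMS happens downstream
```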
+ +## AutoBackend + +:::ultralytics.nn.autobackend.AutoBackend + +## BaseModel + +:::ultralytics.nn.tasks.BaseModel + +## Modules + +TODO \ No newline at end of file diff --git a/docs/reference/ops.md b/docs/reference/ops.md new file mode 100644 index 0000000..3f8246d --- /dev/null +++ b/docs/reference/ops.md @@ -0,0 +1,208 @@ +This module contains optimized deep learning related operations used in the Ultralytics YOLO framework + +## Non-max suppression + +:::ultralytics.yolo.utils.ops.non_max_suppression +handler: python +options: +show_source: false +show_root_toc_entry: false +--- + +## Scale boxes + +:::ultralytics.yolo.utils.ops.scale_boxes +handler: python +options: +show_source: false +show_root_toc_entry: false +--- + +## Scale image + +:::ultralytics.yolo.utils.ops.scale_image +handler: python +options: +show_source: false +show_root_toc_entry: false +--- + +## clip boxes + +:::ultralytics.yolo.utils.ops.clip_boxes +handler: python +options: +show_source: false +show_root_toc_entry: false +--- + +# Box Format Conversion + +## xyxy2xywh + +:::ultralytics.yolo.utils.ops.xyxy2xywh +handler: python +options: +show_source: false +show_root_toc_entry: false +--- + +## xywh2xyxy + +:::ultralytics.yolo.utils.ops.xywh2xyxy +handler: python +options: +show_source: false +show_root_toc_entry: false +--- + +## xywhn2xyxy + +:::ultralytics.yolo.utils.ops.xywhn2xyxy +handler: python +options: +show_source: false +show_root_toc_entry: false +--- + +## xyxy2xywhn + +:::ultralytics.yolo.utils.ops.xyxy2xywhn +handler: python +options: +show_source: false +show_root_toc_entry: false +--- + +## xyn2xy + +:::ultralytics.yolo.utils.ops.xyn2xy +handler: python +options: +show_source: false +show_root_toc_entry: false +--- + +## xywh2ltwh + +:::ultralytics.yolo.utils.ops.xywh2ltwh +handler: python +options: +show_source: false +show_root_toc_entry: false +--- + +## xyxy2ltwh + +:::ultralytics.yolo.utils.ops.xyxy2ltwh +handler: python +options: +show_source: false +show_root_toc_entry: false +--- + +## ltwh2xywh + +:::ultralytics.yolo.utils.ops.ltwh2xywh +handler: python +options: +show_source: false +show_root_toc_entry: false +--- + +## ltwh2xyxy + +:::ultralytics.yolo.utils.ops.ltwh2xyxy +handler: python +options: +show_source: false +show_root_toc_entry: false +--- + +## segment2box + +:::ultralytics.yolo.utils.ops.segment2box +handler: python +options: +show_source: false +show_root_toc_entry: false +--- + +# Mask Operations + +## resample_segments + +:::ultralytics.yolo.utils.ops.resample_segments +handler: python +options: +show_source: false +show_root_toc_entry: false +--- + +## crop_mask + +:::ultralytics.yolo.utils.ops.crop_mask +handler: python +options: +show_source: false +show_root_toc_entry: false +--- + +## process_mask_upsample + +:::ultralytics.yolo.utils.ops.process_mask_upsample +handler: python +options: +show_source: false +show_root_toc_entry: false +--- + +## process_mask + +:::ultralytics.yolo.utils.ops.process_mask +handler: python +options: +show_source: false +show_root_toc_entry: false +--- + +## process_mask_native + +:::ultralytics.yolo.utils.ops.process_mask_native +handler: python +options: +show_source: false +show_root_toc_entry: false +--- + +## scale_coords + +:::ultralytics.yolo.utils.ops.scale_coords +handler: python +options: +show_source: false +show_root_toc_entry: false +--- + +## masks2segments + +:::ultralytics.yolo.utils.ops.masks2segments +handler: python +options: +show_source: false +show_root_toc_entry: false +--- + +## clip_coords + 
+:::ultralytics.yolo.utils.ops.clip_coords +handler: python +options: +show_source: false +show_root_toc_entry: false +--- + + + + + diff --git a/docs/reference/results.md b/docs/reference/results.md new file mode 100644 index 0000000..e222ec4 --- /dev/null +++ b/docs/reference/results.md @@ -0,0 +1,11 @@ +### Results API Reference + +:::ultralytics.yolo.engine.results.Results + +### Boxes API Reference + +:::ultralytics.yolo.engine.results.Boxes + +### Masks API Reference + +:::ultralytics.yolo.engine.results.Masks diff --git a/docs/tasks/classify.md b/docs/tasks/classify.md new file mode 100644 index 0000000..d985d46 --- /dev/null +++ b/docs/tasks/classify.md @@ -0,0 +1,170 @@ +Image classification is the simplest of the three tasks and involves classifying an entire image into one of a set of +predefined classes. + + + +The output of an image classifier is a single class label and a confidence score. Image +classification is useful when you need to know only what class an image belongs to and don't need to know where objects +of that class are located or what their exact shape is. + +!!! tip "Tip" + + YOLOv8 Classify models use the `-cls` suffix, i.e. `yolov8n-cls.pt` and are pretrained on [ImageNet](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/datasets/ImageNet.yaml). + +## [Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/models/v8) + +YOLOv8 pretrained Classify models are shown here. Detect, Segment and Pose models are pretrained on +the [COCO](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/datasets/coco.yaml) dataset, while Classify +models are pretrained on +the [ImageNet](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/datasets/ImageNet.yaml) dataset. + +[Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/models) download automatically from the latest +Ultralytics [release](https://github.com/ultralytics/assets/releases) on first use. + +| Model | size
(pixels) | acc
top1 | acc
top5 | Speed
CPU ONNX
(ms) | Speed
A100 TensorRT
(ms) | params
(M) | FLOPs
(B) at 640 | +|----------------------------------------------------------------------------------------------|-----------------------|------------------|------------------|--------------------------------|-------------------------------------|--------------------|--------------------------| +| [YOLOv8n-cls](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8n-cls.pt) | 224 | 66.6 | 87.0 | 12.9 | 0.31 | 2.7 | 4.3 | +| [YOLOv8s-cls](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8s-cls.pt) | 224 | 72.3 | 91.1 | 23.4 | 0.35 | 6.4 | 13.5 | +| [YOLOv8m-cls](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8m-cls.pt) | 224 | 76.4 | 93.2 | 85.4 | 0.62 | 17.0 | 42.7 | +| [YOLOv8l-cls](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8l-cls.pt) | 224 | 78.0 | 94.1 | 163.0 | 0.87 | 37.5 | 99.7 | +| [YOLOv8x-cls](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8x-cls.pt) | 224 | 78.4 | 94.3 | 232.0 | 1.01 | 57.4 | 154.8 | + +- **acc** values are model accuracies on the [ImageNet](https://www.image-net.org/) dataset validation set. +
Reproduce by `yolo val classify data=path/to/ImageNet device=0` +- **Speed** averaged over ImageNet val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) + instance. +
Reproduce by `yolo val classify data=path/to/ImageNet batch=1 device=0|cpu` + +## Train + +Train YOLOv8n-cls on the MNIST160 dataset for 100 epochs at image size 64. For a full list of available arguments +see the [Configuration](../usage/cfg.md) page. + +!!! example "" + + === "Python" + + ```python + from ultralytics import YOLO + + # Load a model + model = YOLO('yolov8n-cls.yaml') # build a new model from YAML + model = YOLO('yolov8n-cls.pt') # load a pretrained model (recommended for training) + model = YOLO('yolov8n-cls.yaml').load('yolov8n-cls.pt') # build from YAML and transfer weights + + # Train the model + model.train(data='mnist160', epochs=100, imgsz=64) + ``` + + === "CLI" + + ```bash + # Build a new model from YAML and start training from scratch + yolo classify train data=mnist160 model=yolov8n-cls.yaml epochs=100 imgsz=64 + + # Start training from a pretrained *.pt model + yolo classify train data=mnist160 model=yolov8n-cls.pt epochs=100 imgsz=64 + + # Build a new model from YAML, transfer pretrained weights to it and start training + yolo classify train data=mnist160 model=yolov8n-cls.yaml pretrained=yolov8n-cls.pt epochs=100 imgsz=64 + ``` + +## Val + +Validate trained YOLOv8n-cls model accuracy on the MNIST160 dataset. No argument need to passed as the `model` retains +it's training `data` and arguments as model attributes. + +!!! example "" + + === "Python" + + ```python + from ultralytics import YOLO + + # Load a model + model = YOLO('yolov8n-cls.pt') # load an official model + model = YOLO('path/to/best.pt') # load a custom model + + # Validate the model + metrics = model.val() # no arguments needed, dataset and settings remembered + metrics.top1 # top1 accuracy + metrics.top5 # top5 accuracy + ``` + === "CLI" + + ```bash + yolo classify val model=yolov8n-cls.pt # val official model + yolo classify val model=path/to/best.pt # val custom model + ``` + +## Predict + +Use a trained YOLOv8n-cls model to run predictions on images. + +!!! example "" + + === "Python" + + ```python + from ultralytics import YOLO + + # Load a model + model = YOLO('yolov8n-cls.pt') # load an official model + model = YOLO('path/to/best.pt') # load a custom model + + # Predict with the model + results = model('https://ultralytics.com/images/bus.jpg') # predict on an image + ``` + === "CLI" + + ```bash + yolo classify predict model=yolov8n-cls.pt source='https://ultralytics.com/images/bus.jpg' # predict with official model + yolo classify predict model=path/to/best.pt source='https://ultralytics.com/images/bus.jpg' # predict with custom model + ``` + +See full `predict` mode details in the [Predict](https://docs.ultralytics.com/modes/predict/) page. + +## Export + +Export a YOLOv8n-cls model to a different format like ONNX, CoreML, etc. + +!!! example "" + + === "Python" + + ```python + from ultralytics import YOLO + + # Load a model + model = YOLO('yolov8n-cls.pt') # load an official model + model = YOLO('path/to/best.pt') # load a custom trained + + # Export the model + model.export(format='onnx') + ``` + === "CLI" + + ```bash + yolo export model=yolov8n-cls.pt format=onnx # export official model + yolo export model=path/to/best.pt format=onnx # export custom trained model + ``` + +Available YOLOv8-cls export formats are in the table below. You can predict or validate directly on exported models, +i.e. `yolo predict model=yolov8n-cls.onnx`. Usage examples are shown for your model after export completes. 
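For example, a Python counterpart to the CLI call above — a minimal sketch that assumes the `yolov8n-cls.onnx` file produced by the export step is in the working directory:

```python
from ultralytics import YOLO

# Load the exported ONNX model just like a .pt checkpoint and run inference with it
model = YOLO('yolov8n-cls.onnx')
results = model('https://ultralytics.com/images/bus.jpg')  # predict on an image
```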
+ +| Format | `format` Argument | Model | Metadata | +|--------------------------------------------------------------------|-------------------|-------------------------------|----------| +| [PyTorch](https://pytorch.org/) | - | `yolov8n-cls.pt` | ✅ | +| [TorchScript](https://pytorch.org/docs/stable/jit.html) | `torchscript` | `yolov8n-cls.torchscript` | ✅ | +| [ONNX](https://onnx.ai/) | `onnx` | `yolov8n-cls.onnx` | ✅ | +| [OpenVINO](https://docs.openvino.ai/latest/index.html) | `openvino` | `yolov8n-cls_openvino_model/` | ✅ | +| [TensorRT](https://developer.nvidia.com/tensorrt) | `engine` | `yolov8n-cls.engine` | ✅ | +| [CoreML](https://github.com/apple/coremltools) | `coreml` | `yolov8n-cls.mlmodel` | ✅ | +| [TF SavedModel](https://www.tensorflow.org/guide/saved_model) | `saved_model` | `yolov8n-cls_saved_model/` | ✅ | +| [TF GraphDef](https://www.tensorflow.org/api_docs/python/tf/Graph) | `pb` | `yolov8n-cls.pb` | ❌ | +| [TF Lite](https://www.tensorflow.org/lite) | `tflite` | `yolov8n-cls.tflite` | ✅ | +| [TF Edge TPU](https://coral.ai/docs/edgetpu/models-intro/) | `edgetpu` | `yolov8n-cls_edgetpu.tflite` | ✅ | +| [TF.js](https://www.tensorflow.org/js) | `tfjs` | `yolov8n-cls_web_model/` | ✅ | +| [PaddlePaddle](https://github.com/PaddlePaddle) | `paddle` | `yolov8n-cls_paddle_model/` | ✅ | + +See full `export` details in the [Export](https://docs.ultralytics.com/modes/export/) page. diff --git a/docs/tasks/detect.md b/docs/tasks/detect.md new file mode 100644 index 0000000..89cd1d2 --- /dev/null +++ b/docs/tasks/detect.md @@ -0,0 +1,171 @@ +Object detection is a task that involves identifying the location and class of objects in an image or video stream. + + + +The output of an object detector is a set of bounding boxes that enclose the objects in the image, along with class +labels +and confidence scores for each box. Object detection is a good choice when you need to identify objects of interest in a +scene, but don't need to know exactly where the object is or its exact shape. + +!!! tip "Tip" + + YOLOv8 Detect models are the default YOLOv8 models, i.e. `yolov8n.pt` and are pretrained on [COCO](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/datasets/coco.yaml). + +## [Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/models/v8) + +YOLOv8 pretrained Detect models are shown here. Detect, Segment and Pose models are pretrained on +the [COCO](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/datasets/coco.yaml) dataset, while Classify +models are pretrained on +the [ImageNet](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/datasets/ImageNet.yaml) dataset. + +[Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/models) download automatically from the latest +Ultralytics [release](https://github.com/ultralytics/assets/releases) on first use. + +| Model | size
(pixels) | mAPval
50-95 | Speed
CPU ONNX
(ms) | Speed
A100 TensorRT
(ms) | params
(M) | FLOPs
(B) | +|--------------------------------------------------------------------------------------|-----------------------|----------------------|--------------------------------|-------------------------------------|--------------------|-------------------| +| [YOLOv8n](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8n.pt) | 640 | 37.3 | 80.4 | 0.99 | 3.2 | 8.7 | +| [YOLOv8s](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8s.pt) | 640 | 44.9 | 128.4 | 1.20 | 11.2 | 28.6 | +| [YOLOv8m](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8m.pt) | 640 | 50.2 | 234.7 | 1.83 | 25.9 | 78.9 | +| [YOLOv8l](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8l.pt) | 640 | 52.9 | 375.2 | 2.39 | 43.7 | 165.2 | +| [YOLOv8x](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8x.pt) | 640 | 53.9 | 479.1 | 3.53 | 68.2 | 257.8 | + +- **mAPval** values are for single-model single-scale on [COCO val2017](http://cocodataset.org) dataset. +
Reproduce by `yolo val detect data=coco.yaml device=0` +- **Speed** averaged over COCO val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) + instance. +
Reproduce by `yolo val detect data=coco128.yaml batch=1 device=0|cpu` + +## Train + +Train YOLOv8n on the COCO128 dataset for 100 epochs at image size 640. For a full list of available arguments see +the [Configuration](../usage/cfg.md) page. + +!!! example "" + + === "Python" + + ```python + from ultralytics import YOLO + + # Load a model + model = YOLO('yolov8n.yaml') # build a new model from YAML + model = YOLO('yolov8n.pt') # load a pretrained model (recommended for training) + model = YOLO('yolov8n.yaml').load('yolov8n.pt') # build from YAML and transfer weights + + # Train the model + model.train(data='coco128.yaml', epochs=100, imgsz=640) + ``` + === "CLI" + + ```bash + # Build a new model from YAML and start training from scratch + yolo detect train data=coco128.yaml model=yolov8n.yaml epochs=100 imgsz=640 + + # Start training from a pretrained *.pt model + yolo detect train data=coco128.yaml model=yolov8n.pt epochs=100 imgsz=640 + + # Build a new model from YAML, transfer pretrained weights to it and start training + yolo detect train data=coco128.yaml model=yolov8n.yaml pretrained=yolov8n.pt epochs=100 imgsz=640 + ``` + +## Val + +Validate trained YOLOv8n model accuracy on the COCO128 dataset. No argument need to passed as the `model` retains it's +training `data` and arguments as model attributes. + +!!! example "" + + === "Python" + + ```python + from ultralytics import YOLO + + # Load a model + model = YOLO('yolov8n.pt') # load an official model + model = YOLO('path/to/best.pt') # load a custom model + + # Validate the model + metrics = model.val() # no arguments needed, dataset and settings remembered + metrics.box.map # map50-95 + metrics.box.map50 # map50 + metrics.box.map75 # map75 + metrics.box.maps # a list contains map50-95 of each category + ``` + === "CLI" + + ```bash + yolo detect val model=yolov8n.pt # val official model + yolo detect val model=path/to/best.pt # val custom model + ``` + +## Predict + +Use a trained YOLOv8n model to run predictions on images. + +!!! example "" + + === "Python" + + ```python + from ultralytics import YOLO + + # Load a model + model = YOLO('yolov8n.pt') # load an official model + model = YOLO('path/to/best.pt') # load a custom model + + # Predict with the model + results = model('https://ultralytics.com/images/bus.jpg') # predict on an image + ``` + === "CLI" + + ```bash + yolo detect predict model=yolov8n.pt source='https://ultralytics.com/images/bus.jpg' # predict with official model + yolo detect predict model=path/to/best.pt source='https://ultralytics.com/images/bus.jpg' # predict with custom model + ``` + +See full `predict` mode details in the [Predict](https://docs.ultralytics.com/modes/predict/) page. + +## Export + +Export a YOLOv8n model to a different format like ONNX, CoreML, etc. + +!!! example "" + + === "Python" + + ```python + from ultralytics import YOLO + + # Load a model + model = YOLO('yolov8n.pt') # load an official model + model = YOLO('path/to/best.pt') # load a custom trained + + # Export the model + model.export(format='onnx') + ``` + === "CLI" + + ```bash + yolo export model=yolov8n.pt format=onnx # export official model + yolo export model=path/to/best.pt format=onnx # export custom trained model + ``` + +Available YOLOv8 export formats are in the table below. You can predict or validate directly on exported models, +i.e. `yolo predict model=yolov8n.onnx`. Usage examples are shown for your model after export completes. 
+ +| Format | `format` Argument | Model | Metadata | +|--------------------------------------------------------------------|-------------------|---------------------------|----------| +| [PyTorch](https://pytorch.org/) | - | `yolov8n.pt` | ✅ | +| [TorchScript](https://pytorch.org/docs/stable/jit.html) | `torchscript` | `yolov8n.torchscript` | ✅ | +| [ONNX](https://onnx.ai/) | `onnx` | `yolov8n.onnx` | ✅ | +| [OpenVINO](https://docs.openvino.ai/latest/index.html) | `openvino` | `yolov8n_openvino_model/` | ✅ | +| [TensorRT](https://developer.nvidia.com/tensorrt) | `engine` | `yolov8n.engine` | ✅ | +| [CoreML](https://github.com/apple/coremltools) | `coreml` | `yolov8n.mlmodel` | ✅ | +| [TF SavedModel](https://www.tensorflow.org/guide/saved_model) | `saved_model` | `yolov8n_saved_model/` | ✅ | +| [TF GraphDef](https://www.tensorflow.org/api_docs/python/tf/Graph) | `pb` | `yolov8n.pb` | ❌ | +| [TF Lite](https://www.tensorflow.org/lite) | `tflite` | `yolov8n.tflite` | ✅ | +| [TF Edge TPU](https://coral.ai/docs/edgetpu/models-intro/) | `edgetpu` | `yolov8n_edgetpu.tflite` | ✅ | +| [TF.js](https://www.tensorflow.org/js) | `tfjs` | `yolov8n_web_model/` | ✅ | +| [PaddlePaddle](https://github.com/PaddlePaddle) | `paddle` | `yolov8n_paddle_model/` | ✅ | + +See full `export` details in the [Export](https://docs.ultralytics.com/modes/export/) page. diff --git a/docs/tasks/index.md b/docs/tasks/index.md new file mode 100644 index 0000000..47bbd39 --- /dev/null +++ b/docs/tasks/index.md @@ -0,0 +1,44 @@ +# Ultralytics YOLOv8 Tasks + +YOLOv8 is an AI framework that supports multiple computer vision **tasks**. The framework can be used to +perform [detection](detect.md), [segmentation](segment.md), [classification](classify.md), +and [pose](pose.md) estimation. Each of these tasks has a different objective and use case. + + + +## [Detection](detect.md) + +Detection is the primary task supported by YOLOv8. It involves detecting objects in an image or video frame and drawing +bounding boxes around them. The detected objects are classified into different categories based on their features. +YOLOv8 can detect multiple objects in a single image or video frame with high accuracy and speed. + +[Detection Examples](detect.md){ .md-button .md-button--primary} + +## [Segmentation](segment.md) + +Segmentation is a task that involves segmenting an image into different regions based on the content of the image. Each +region is assigned a label based on its content. This task is useful in applications such as image segmentation and +medical imaging. YOLOv8 uses a variant of the U-Net architecture to perform segmentation. + +[Segmentation Examples](segment.md){ .md-button .md-button--primary} + +## [Classification](classify.md) + +Classification is a task that involves classifying an image into different categories. YOLOv8 can be used to classify +images based on their content. It uses a variant of the EfficientNet architecture to perform classification. + +[Classification Examples](classify.md){ .md-button .md-button--primary} + +## [Pose](pose.md) + +Pose/keypoint detection is a task that involves detecting specific points in an image or video frame. These points are +referred to as keypoints and are used to track movement or pose estimation. YOLOv8 can detect keypoints in an image or +video frame with high accuracy and speed. + +[Pose Examples](pose.md){ .md-button .md-button--primary} + +## Conclusion + +YOLOv8 supports multiple tasks, including detection, segmentation, classification, and keypoints detection. 
Each of +these tasks has different objectives and use cases. By understanding the differences between these tasks, you can choose +the appropriate task for your computer vision application. \ No newline at end of file diff --git a/docs/tasks/pose.md b/docs/tasks/pose.md new file mode 100644 index 0000000..e32bc7d --- /dev/null +++ b/docs/tasks/pose.md @@ -0,0 +1,175 @@ +Pose estimation is a task that involves identifying the location of specific points in an image, usually referred +to as keypoints. The keypoints can represent various parts of the object such as joints, landmarks, or other distinctive +features. The locations of the keypoints are usually represented as a set of 2D `[x, y]` or 3D `[x, y, visible]` +coordinates. + + + +The output of a pose estimation model is a set of points that represent the keypoints on an object in the image, usually +along with the confidence scores for each point. Pose estimation is a good choice when you need to identify specific +parts of an object in a scene, and their location in relation to each other. + +!!! tip "Tip" + + YOLOv8 _pose_ models use the `-pose` suffix, i.e. `yolov8n-pose.pt`. These models are trained on the [COCO keypoints](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/datasets/coco-pose.yaml) dataset and are suitable for a variety of pose estimation tasks. + +## [Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/models/v8) + +YOLOv8 pretrained Pose models are shown here. Detect, Segment and Pose models are pretrained on +the [COCO](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/datasets/coco.yaml) dataset, while Classify +models are pretrained on +the [ImageNet](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/datasets/ImageNet.yaml) dataset. + +[Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/models) download automatically from the latest +Ultralytics [release](https://github.com/ultralytics/assets/releases) on first use. + +| Model | size
(pixels) | mAPbox
50-95 | mAPpose
50-95 | Speed
CPU ONNX
(ms) | Speed
A100 TensorRT
(ms) | params
(M) | FLOPs
(B) | +|------------------------------------------------------------------------------------------------------|-----------------------|----------------------|-----------------------|--------------------------------|-------------------------------------|--------------------|-------------------| +| [YOLOv8n-pose](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8n-pose.pt) | 640 | - | 49.7 | - | - | 3.3 | 9.2 | +| [YOLOv8s-pose](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8s-pose.pt) | 640 | - | 59.2 | - | - | 11.6 | 30.2 | +| [YOLOv8m-pose](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8m-pose.pt) | 640 | - | 63.6 | - | - | 26.4 | 81.0 | +| [YOLOv8l-pose](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8l-pose.pt) | 640 | - | 67.0 | - | - | 44.4 | 168.6 | +| [YOLOv8x-pose](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8x-pose.pt) | 640 | - | 68.9 | - | - | 69.4 | 263.2 | +| [YOLOv8x-pose-p6](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8x-pose-p6.pt) | 1280 | - | 71.5 | - | - | 99.1 | 1066.4 | + +- **mAPval** values are for single-model single-scale on [COCO Keypoints val2017](http://cocodataset.org) + dataset. +
Reproduce by `yolo val pose data=coco-pose.yaml device=0` +- **Speed** averaged over COCO val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) + instance. +
Reproduce by `yolo val pose data=coco8-pose.yaml batch=1 device=0|cpu` + +## Train + +Train a YOLOv8-pose model on the COCO128-pose dataset. + +!!! example "" + + === "Python" + + ```python + from ultralytics import YOLO + + # Load a model + model = YOLO('yolov8n-pose.yaml') # build a new model from YAML + model = YOLO('yolov8n-pose.pt') # load a pretrained model (recommended for training) + model = YOLO('yolov8n-pose.yaml').load('yolov8n-pose.pt') # build from YAML and transfer weights + + # Train the model + model.train(data='coco128-pose.yaml', epochs=100, imgsz=640) + ``` + === "CLI" + + ```bash + # Build a new model from YAML and start training from scratch + yolo detect train data=coco128-pose.yaml model=yolov8n-pose.yaml epochs=100 imgsz=640 + + # Start training from a pretrained *.pt model + yolo detect train data=coco128-pose.yaml model=yolov8n-pose.pt epochs=100 imgsz=640 + + # Build a new model from YAML, transfer pretrained weights to it and start training + yolo detect train data=coco128-pose.yaml model=yolov8n-pose.yaml pretrained=yolov8n-pose.pt epochs=100 imgsz=640 + ``` + +## Val + +Validate trained YOLOv8n-pose model accuracy on the COCO128-pose dataset. No argument need to passed as the `model` +retains it's +training `data` and arguments as model attributes. + +!!! example "" + + === "Python" + + ```python + from ultralytics import YOLO + + # Load a model + model = YOLO('yolov8n-pose.pt') # load an official model + model = YOLO('path/to/best.pt') # load a custom model + + # Validate the model + metrics = model.val() # no arguments needed, dataset and settings remembered + metrics.box.map # map50-95 + metrics.box.map50 # map50 + metrics.box.map75 # map75 + metrics.box.maps # a list contains map50-95 of each category + ``` + === "CLI" + + ```bash + yolo pose val model=yolov8n-pose.pt # val official model + yolo pose val model=path/to/best.pt # val custom model + ``` + +## Predict + +Use a trained YOLOv8n-pose model to run predictions on images. + +!!! example "" + + === "Python" + + ```python + from ultralytics import YOLO + + # Load a model + model = YOLO('yolov8n-pose.pt') # load an official model + model = YOLO('path/to/best.pt') # load a custom model + + # Predict with the model + results = model('https://ultralytics.com/images/bus.jpg') # predict on an image + ``` + === "CLI" + + ```bash + yolo pose predict model=yolov8n.pt source='https://ultralytics.com/images/bus.jpg' # predict with official model + yolo pose predict model=path/to/best.pt source='https://ultralytics.com/images/bus.jpg' # predict with custom model + ``` + +See full `predict` mode details in the [Predict](https://docs.ultralytics.com/modes/predict/) page. + +## Export + +Export a YOLOv8n model to a different format like ONNX, CoreML, etc. + +!!! example "" + + === "Python" + + ```python + from ultralytics import YOLO + + # Load a model + model = YOLO('yolov8n.pt') # load an official model + model = YOLO('path/to/best.pt') # load a custom trained + + # Export the model + model.export(format='onnx') + ``` + === "CLI" + + ```bash + yolo export model=yolov8n.pt format=onnx # export official model + yolo export model=path/to/best.pt format=onnx # export custom trained model + ``` + +Available YOLOv8-pose export formats are in the table below. You can predict or validate directly on exported models, +i.e. `yolo predict model=yolov8n-pose.onnx`. Usage examples are shown for your model after export completes. 
+ +| Format | `format` Argument | Model | Metadata | +|--------------------------------------------------------------------|-------------------|--------------------------------|----------| +| [PyTorch](https://pytorch.org/) | - | `yolov8n-pose.pt` | ✅ | +| [TorchScript](https://pytorch.org/docs/stable/jit.html) | `torchscript` | `yolov8n-pose.torchscript` | ✅ | +| [ONNX](https://onnx.ai/) | `onnx` | `yolov8n-pose.onnx` | ✅ | +| [OpenVINO](https://docs.openvino.ai/latest/index.html) | `openvino` | `yolov8n-pose_openvino_model/` | ✅ | +| [TensorRT](https://developer.nvidia.com/tensorrt) | `engine` | `yolov8n-pose.engine` | ✅ | +| [CoreML](https://github.com/apple/coremltools) | `coreml` | `yolov8n-pose.mlmodel` | ✅ | +| [TF SavedModel](https://www.tensorflow.org/guide/saved_model) | `saved_model` | `yolov8n-pose_saved_model/` | ✅ | +| [TF GraphDef](https://www.tensorflow.org/api_docs/python/tf/Graph) | `pb` | `yolov8n-pose.pb` | ❌ | +| [TF Lite](https://www.tensorflow.org/lite) | `tflite` | `yolov8n-pose.tflite` | ✅ | +| [TF Edge TPU](https://coral.ai/docs/edgetpu/models-intro/) | `edgetpu` | `yolov8n-pose_edgetpu.tflite` | ✅ | +| [TF.js](https://www.tensorflow.org/js) | `tfjs` | `yolov8n-pose_web_model/` | ✅ | +| [PaddlePaddle](https://github.com/PaddlePaddle) | `paddle` | `yolov8n-pose_paddle_model/` | ✅ | + +See full `export` details in the [Export](https://docs.ultralytics.com/modes/export/) page. diff --git a/docs/tasks/segment.md b/docs/tasks/segment.md new file mode 100644 index 0000000..67e4c6b --- /dev/null +++ b/docs/tasks/segment.md @@ -0,0 +1,175 @@ +Instance segmentation goes a step further than object detection and involves identifying individual objects in an image +and segmenting them from the rest of the image. + + + +The output of an instance segmentation model is a set of masks or +contours that outline each object in the image, along with class labels and confidence scores for each object. Instance +segmentation is useful when you need to know not only where objects are in an image, but also what their exact shape is. + +!!! tip "Tip" + + YOLOv8 Segment models use the `-seg` suffix, i.e. `yolov8n-seg.pt` and are pretrained on [COCO](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/datasets/coco.yaml). + +## [Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/models/v8) + +YOLOv8 pretrained Segment models are shown here. Detect, Segment and Pose models are pretrained on +the [COCO](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/datasets/coco.yaml) dataset, while Classify +models are pretrained on +the [ImageNet](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/datasets/ImageNet.yaml) dataset. + +[Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/models) download automatically from the latest +Ultralytics [release](https://github.com/ultralytics/assets/releases) on first use. + +| Model | size
(pixels) | mAPbox
50-95 | mAPmask
50-95 | Speed
CPU ONNX
(ms) | Speed
A100 TensorRT
(ms) | params
(M) | FLOPs
(B) | +|----------------------------------------------------------------------------------------------|-----------------------|----------------------|-----------------------|--------------------------------|-------------------------------------|--------------------|-------------------| +| [YOLOv8n-seg](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8n-seg.pt) | 640 | 36.7 | 30.5 | 96.1 | 1.21 | 3.4 | 12.6 | +| [YOLOv8s-seg](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8s-seg.pt) | 640 | 44.6 | 36.8 | 155.7 | 1.47 | 11.8 | 42.6 | +| [YOLOv8m-seg](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8m-seg.pt) | 640 | 49.9 | 40.8 | 317.0 | 2.18 | 27.3 | 110.2 | +| [YOLOv8l-seg](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8l-seg.pt) | 640 | 52.3 | 42.6 | 572.4 | 2.79 | 46.0 | 220.5 | +| [YOLOv8x-seg](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8x-seg.pt) | 640 | 53.4 | 43.4 | 712.1 | 4.02 | 71.8 | 344.1 | + +- **mAPval** values are for single-model single-scale on [COCO val2017](http://cocodataset.org) dataset. +
Reproduce by `yolo val segment data=coco.yaml device=0` +- **Speed** averaged over COCO val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) + instance. +
Reproduce by `yolo val segment data=coco128-seg.yaml batch=1 device=0|cpu` + +## Train + +Train YOLOv8n-seg on the COCO128-seg dataset for 100 epochs at image size 640. For a full list of available +arguments see the [Configuration](../usage/cfg.md) page. + +!!! example "" + + === "Python" + + ```python + from ultralytics import YOLO + + # Load a model + model = YOLO('yolov8n-seg.yaml') # build a new model from YAML + model = YOLO('yolov8n-seg.pt') # load a pretrained model (recommended for training) + model = YOLO('yolov8n-seg.yaml').load('yolov8n.pt') # build from YAML and transfer weights + + # Train the model + model.train(data='coco128-seg.yaml', epochs=100, imgsz=640) + ``` + === "CLI" + + ```bash + # Build a new model from YAML and start training from scratch + yolo segment train data=coco128-seg.yaml model=yolov8n-seg.yaml epochs=100 imgsz=640 + + # Start training from a pretrained *.pt model + yolo segment train data=coco128-seg.yaml model=yolov8n-seg.pt epochs=100 imgsz=640 + + # Build a new model from YAML, transfer pretrained weights to it and start training + yolo segment train data=coco128-seg.yaml model=yolov8n-seg.yaml pretrained=yolov8n-seg.pt epochs=100 imgsz=640 + ``` + +## Val + +Validate trained YOLOv8n-seg model accuracy on the COCO128-seg dataset. No argument need to passed as the `model` +retains it's training `data` and arguments as model attributes. + +!!! example "" + + === "Python" + + ```python + from ultralytics import YOLO + + # Load a model + model = YOLO('yolov8n-seg.pt') # load an official model + model = YOLO('path/to/best.pt') # load a custom model + + # Validate the model + metrics = model.val() # no arguments needed, dataset and settings remembered + metrics.box.map # map50-95(B) + metrics.box.map50 # map50(B) + metrics.box.map75 # map75(B) + metrics.box.maps # a list contains map50-95(B) of each category + metrics.seg.map # map50-95(M) + metrics.seg.map50 # map50(M) + metrics.seg.map75 # map75(M) + metrics.seg.maps # a list contains map50-95(M) of each category + ``` + === "CLI" + + ```bash + yolo segment val model=yolov8n-seg.pt # val official model + yolo segment val model=path/to/best.pt # val custom model + ``` + +## Predict + +Use a trained YOLOv8n-seg model to run predictions on images. + +!!! example "" + + === "Python" + + ```python + from ultralytics import YOLO + + # Load a model + model = YOLO('yolov8n-seg.pt') # load an official model + model = YOLO('path/to/best.pt') # load a custom model + + # Predict with the model + results = model('https://ultralytics.com/images/bus.jpg') # predict on an image + ``` + === "CLI" + + ```bash + yolo segment predict model=yolov8n-seg.pt source='https://ultralytics.com/images/bus.jpg' # predict with official model + yolo segment predict model=path/to/best.pt source='https://ultralytics.com/images/bus.jpg' # predict with custom model + ``` + +See full `predict` mode details in the [Predict](https://docs.ultralytics.com/modes/predict/) page. + +## Export + +Export a YOLOv8n-seg model to a different format like ONNX, CoreML, etc. + +!!! 
example "" + + === "Python" + + ```python + from ultralytics import YOLO + + # Load a model + model = YOLO('yolov8n-seg.pt') # load an official model + model = YOLO('path/to/best.pt') # load a custom trained + + # Export the model + model.export(format='onnx') + ``` + === "CLI" + + ```bash + yolo export model=yolov8n-seg.pt format=onnx # export official model + yolo export model=path/to/best.pt format=onnx # export custom trained model + ``` + +Available YOLOv8-seg export formats are in the table below. You can predict or validate directly on exported models, +i.e. `yolo predict model=yolov8n-seg.onnx`. Usage examples are shown for your model after export completes. + +| Format | `format` Argument | Model | Metadata | +|--------------------------------------------------------------------|-------------------|-------------------------------|----------| +| [PyTorch](https://pytorch.org/) | - | `yolov8n-seg.pt` | ✅ | +| [TorchScript](https://pytorch.org/docs/stable/jit.html) | `torchscript` | `yolov8n-seg.torchscript` | ✅ | +| [ONNX](https://onnx.ai/) | `onnx` | `yolov8n-seg.onnx` | ✅ | +| [OpenVINO](https://docs.openvino.ai/latest/index.html) | `openvino` | `yolov8n-seg_openvino_model/` | ✅ | +| [TensorRT](https://developer.nvidia.com/tensorrt) | `engine` | `yolov8n-seg.engine` | ✅ | +| [CoreML](https://github.com/apple/coremltools) | `coreml` | `yolov8n-seg.mlmodel` | ✅ | +| [TF SavedModel](https://www.tensorflow.org/guide/saved_model) | `saved_model` | `yolov8n-seg_saved_model/` | ✅ | +| [TF GraphDef](https://www.tensorflow.org/api_docs/python/tf/Graph) | `pb` | `yolov8n-seg.pb` | ❌ | +| [TF Lite](https://www.tensorflow.org/lite) | `tflite` | `yolov8n-seg.tflite` | ✅ | +| [TF Edge TPU](https://coral.ai/docs/edgetpu/models-intro/) | `edgetpu` | `yolov8n-seg_edgetpu.tflite` | ✅ | +| [TF.js](https://www.tensorflow.org/js) | `tfjs` | `yolov8n-seg_web_model/` | ✅ | +| [PaddlePaddle](https://github.com/PaddlePaddle) | `paddle` | `yolov8n-seg_paddle_model/` | ✅ | + +See full `export` details in the [Export](https://docs.ultralytics.com/modes/export/) page. diff --git a/docs/usage/callbacks.md b/docs/usage/callbacks.md new file mode 100644 index 0000000..bea6ece --- /dev/null +++ b/docs/usage/callbacks.md @@ -0,0 +1,85 @@ +## Callbacks + +Ultralytics framework supports callbacks as entry points in strategic stages of train, val, export, and predict modes. +Each callback accepts a `Trainer`, `Validator`, or `Predictor` object depending on the operation type. All properties of +these objects can be found in Reference section of the docs. + +## Examples + +### Returning additional information with Prediction + +In this example, we want to return the original frame with each result object. Here's how we can do that + +```python +def on_predict_batch_end(predictor): + # Retrieve the batch data + _, _, im0s, _, _ = predictor.batch + + # Ensure that im0s is a list + im0s = im0s if isinstance(im0s, list) else [im0s] + + # Combine the prediction results with the corresponding frames + predictor.results = zip(predictor.results, im0s) + +# Create a YOLO model instance +model = YOLO(f'yolov8n.pt') + +# Add the custom callback to the model +model.add_callback("on_predict_batch_end", on_predict_batch_end) + +# Iterate through the results and frames +for (result, frame) in model.track/predict(): + pass +``` + +## All callbacks + +Here are all supported callbacks. See callbacks [source code](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/yolo/utils/callbacks/base.py) for additional details. 
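As a further sketch, the same `add_callback` pattern shown above can attach any of the trainer callbacks listed below, for example to log progress at the end of every epoch. The attribute names used inside the callback (`trainer.epoch`, `trainer.metrics`) are assumptions based on the Trainer reference and may differ between versions.

```python
from ultralytics import YOLO


def log_epoch(trainer):
    # Receives the Trainer object at the end of every training epoch
    print(f"epoch {trainer.epoch} finished, metrics: {trainer.metrics}")


model = YOLO('yolov8n.pt')
model.add_callback("on_train_epoch_end", log_epoch)
model.train(data='coco128.yaml', epochs=3)
```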
+ + +### Trainer Callbacks + +| Callback | Description | +|-----------------------------|---------------------------------------------------------| +| `on_pretrain_routine_start` | Triggered at the beginning of pre-training routine | +| `on_pretrain_routine_end` | Triggered at the end of pre-training routine | +| `on_train_start` | Triggered when the training starts | +| `on_train_epoch_start` | Triggered at the start of each training epoch | +| `on_train_batch_start` | Triggered at the start of each training batch | +| `optimizer_step` | Triggered during the optimizer step | +| `on_before_zero_grad` | Triggered before gradients are zeroed | +| `on_train_batch_end` | Triggered at the end of each training batch | +| `on_train_epoch_end` | Triggered at the end of each training epoch | +| `on_fit_epoch_end` | Triggered at the end of each fit epoch | +| `on_model_save` | Triggered when the model is saved | +| `on_train_end` | Triggered when the training process ends | +| `on_params_update` | Triggered when model parameters are updated | +| `teardown` | Triggered when the training process is being cleaned up | + + +### Validator Callbacks + +| Callback | Description | +|----------------------|-------------------------------------------------| +| `on_val_start` | Triggered when the validation starts | +| `on_val_batch_start` | Triggered at the start of each validation batch | +| `on_val_batch_end` | Triggered at the end of each validation batch | +| `on_val_end` | Triggered when the validation ends | + + +### Predictor Callbacks + +| Callback | Description | +|------------------------------|---------------------------------------------------| +| `on_predict_start` | Triggered when the prediction process starts | +| `on_predict_batch_start` | Triggered at the start of each prediction batch | +| `on_predict_postprocess_end` | Triggered at the end of prediction postprocessing | +| `on_predict_batch_end` | Triggered at the end of each prediction batch | +| `on_predict_end` | Triggered when the prediction process ends | + +### Exporter Callbacks + +| Callback | Description | +|-------------------|------------------------------------------| +| `on_export_start` | Triggered when the export process starts | +| `on_export_end` | Triggered when the export process ends | diff --git a/docs/usage/cfg.md b/docs/usage/cfg.md new file mode 100644 index 0000000..c540f72 --- /dev/null +++ b/docs/usage/cfg.md @@ -0,0 +1,248 @@ +YOLO settings and hyperparameters play a critical role in the model's performance, speed, and accuracy. These settings +and hyperparameters can affect the model's behavior at various stages of the model development process, including +training, validation, and prediction. + +YOLOv8 'yolo' CLI commands use the following syntax: + +!!! example "" + + === "CLI" + + ```bash + yolo TASK MODE ARGS + ``` + + === "Python" + + ```python + from ultralytics import YOLO + + # Load a YOLOv8 model from a pre-trained weights file + model = YOLO('yolov8n.pt') + + # Run MODE mode using the custom arguments ARGS (guess TASK) + model.MODE(ARGS) + ``` + +Where: + +- `TASK` (optional) is one of `[detect, segment, classify, pose]`. If it is not passed explicitly YOLOv8 will try to + guess + the `TASK` from the model type. +- `MODE` (required) is one of `[train, val, predict, export, track, benchmark]` +- `ARGS` (optional) are any number of custom `arg=value` pairs like `imgsz=320` that override defaults. 
+ For a full list of available `ARGS` see the [Configuration](cfg.md) page and `defaults.yaml` + GitHub [source](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/yolo/cfg/default.yaml). + +#### Tasks + +YOLO models can be used for a variety of tasks, including detection, segmentation, classification and pose. These tasks +differ in the type of output they produce and the specific problem they are designed to solve. + +**Detect**: For identifying and localizing objects or regions of interest in an image or video. +**Segment**: For dividing an image or video into regions or pixels that correspond to different objects or classes. +**Classify**: For predicting the class label of an input image. +**Pose**: For identifying objects and estimating their keypoints in an image or video. + +| Key | Value | Description | +|--------|------------|-------------------------------------------------| +| `task` | `'detect'` | YOLO task, i.e. detect, segment, classify, pose | + +[Tasks Guide](../tasks/index.md){ .md-button .md-button--primary} + +#### Modes + +YOLO models can be used in different modes depending on the specific problem you are trying to solve. These modes +include: + +**Train**: For training a YOLOv8 model on a custom dataset. +**Val**: For validating a YOLOv8 model after it has been trained. +**Predict**: For making predictions using a trained YOLOv8 model on new images or videos. +**Export**: For exporting a YOLOv8 model to a format that can be used for deployment. +**Track**: For tracking objects in real-time using a YOLOv8 model. +**Benchmark**: For benchmarking YOLOv8 exports (ONNX, TensorRT, etc.) speed and accuracy. + +| Key | Value | Description | +|--------|-----------|---------------------------------------------------------------| +| `mode` | `'train'` | YOLO mode, i.e. train, val, predict, export, track, benchmark | + +[Modes Guide](../modes/index.md){ .md-button .md-button--primary} + +## Train + +The training settings for YOLO models encompass various hyperparameters and configurations used during the training process. These settings influence the model's performance, speed, and accuracy. Key training settings include batch size, learning rate, momentum, and weight decay. Additionally, the choice of optimizer, loss function, and training dataset composition can impact the training process. Careful tuning and experimentation with these settings are crucial for optimizing performance. + +| Key | Value | Description | +|-------------------|----------|-----------------------------------------------------------------------------| +| `model` | `None` | path to model file, i.e. yolov8n.pt, yolov8n.yaml | +| `data` | `None` | path to data file, i.e. coco128.yaml | +| `epochs` | `100` | number of epochs to train for | +| `patience` | `50` | epochs to wait for no observable improvement for early stopping of training | +| `batch` | `16` | number of images per batch (-1 for AutoBatch) | +| `imgsz` | `640` | size of input images as integer or w,h | +| `save` | `True` | save train checkpoints and predict results | +| `save_period` | `-1` | Save checkpoint every x epochs (disabled if < 1) | +| `cache` | `False` | True/ram, disk or False. Use cache for data loading | +| `device` | `None` | device to run on, i.e. 
cuda device=0 or device=0,1,2,3 or device=cpu | +| `workers` | `8` | number of worker threads for data loading (per RANK if DDP) | +| `project` | `None` | project name | +| `name` | `None` | experiment name | +| `exist_ok` | `False` | whether to overwrite existing experiment | +| `pretrained` | `False` | whether to use a pretrained model | +| `optimizer` | `'SGD'` | optimizer to use, choices=['SGD', 'Adam', 'AdamW', 'RMSProp'] | +| `verbose` | `False` | whether to print verbose output | +| `seed` | `0` | random seed for reproducibility | +| `deterministic` | `True` | whether to enable deterministic mode | +| `single_cls` | `False` | train multi-class data as single-class | +| `image_weights` | `False` | use weighted image selection for training | +| `rect` | `False` | rectangular training with each batch collated for minimum padding | +| `cos_lr` | `False` | use cosine learning rate scheduler | +| `close_mosaic` | `10` | disable mosaic augmentation for final 10 epochs | +| `resume` | `False` | resume training from last checkpoint | +| `amp` | `True` | Automatic Mixed Precision (AMP) training, choices=[True, False] | +| `lr0` | `0.01` | initial learning rate (i.e. SGD=1E-2, Adam=1E-3) | +| `lrf` | `0.01` | final learning rate (lr0 * lrf) | +| `momentum` | `0.937` | SGD momentum/Adam beta1 | +| `weight_decay` | `0.0005` | optimizer weight decay 5e-4 | +| `warmup_epochs` | `3.0` | warmup epochs (fractions ok) | +| `warmup_momentum` | `0.8` | warmup initial momentum | +| `warmup_bias_lr` | `0.1` | warmup initial bias lr | +| `box` | `7.5` | box loss gain | +| `cls` | `0.5` | cls loss gain (scale with pixels) | +| `dfl` | `1.5` | dfl loss gain | +| `pose` | `12.0` | pose loss gain (pose-only) | +| `kobj` | `2.0` | keypoint obj loss gain (pose-only) | +| `fl_gamma` | `0.0` | focal loss gamma (efficientDet default gamma=1.5) | +| `label_smoothing` | `0.0` | label smoothing (fraction) | +| `nbs` | `64` | nominal batch size | +| `overlap_mask` | `True` | masks should overlap during training (segment train only) | +| `mask_ratio` | `4` | mask downsample ratio (segment train only) | +| `dropout` | `0.0` | use dropout regularization (classify train only) | +| `val` | `True` | validate/test during training | + +[Train Guide](../modes/train.md){ .md-button .md-button--primary} + +## Predict + +The prediction settings for YOLO models encompass a range of hyperparameters and configurations that influence the model's performance, speed, and accuracy during inference on new data. Careful tuning and experimentation with these settings are essential to achieve optimal performance for a specific task. Key settings include the confidence threshold, Non-Maximum Suppression (NMS) threshold, and the number of classes considered. Additional factors affecting the prediction process are input data size and format, the presence of supplementary features such as masks or multiple labels per box, and the particular task the model is employed for. + +| Key | Value | Description | +|------------------|------------------------|----------------------------------------------------------| +| `source` | `'ultralytics/assets'` | source directory for images or videos | +| `conf` | `0.25` | object confidence threshold for detection | +| `iou` | `0.7` | intersection over union (IoU) threshold for NMS | +| `half` | `False` | use half precision (FP16) | +| `device` | `None` | device to run on, i.e. 
cuda device=0/1/2/3 or device=cpu | +| `show` | `False` | show results if possible | +| `save` | `False` | save images with results | +| `save_txt` | `False` | save results as .txt file | +| `save_conf` | `False` | save results with confidence scores | +| `save_crop` | `False` | save cropped images with results | +| `show_labels` | `True` | show object labels in plots | +| `show_conf` | `True` | show object confidence scores in plots | +| `max_det` | `300` | maximum number of detections per image | +| `vid_stride` | `False` | video frame-rate stride | +| `line_thickness` | `3` | bounding box thickness (pixels) | +| `visualize` | `False` | visualize model features | +| `augment` | `False` | apply image augmentation to prediction sources | +| `agnostic_nms` | `False` | class-agnostic NMS | +| `retina_masks` | `False` | use high-resolution segmentation masks | +| `classes` | `None` | filter results by class, i.e. class=0, or class=[0,2,3] | +| `boxes` | `True` | Show boxes in segmentation predictions | + +[Predict Guide](../modes/predict.md){ .md-button .md-button--primary} + +## Val + +The val (validation) settings for YOLO models involve various hyperparameters and configurations used to evaluate the model's performance on a validation dataset. These settings influence the model's performance, speed, and accuracy. Common YOLO validation settings include batch size, validation frequency during training, and performance evaluation metrics. Other factors affecting the validation process include the validation dataset's size and composition, as well as the specific task the model is employed for. Careful tuning and experimentation with these settings are crucial to ensure optimal performance on the validation dataset and detect and prevent overfitting. + +| Key | Value | Description | +|---------------|---------|--------------------------------------------------------------------| +| `save_json` | `False` | save results to JSON file | +| `save_hybrid` | `False` | save hybrid version of labels (labels + additional predictions) | +| `conf` | `0.001` | object confidence threshold for detection | +| `iou` | `0.6` | intersection over union (IoU) threshold for NMS | +| `max_det` | `300` | maximum number of detections per image | +| `half` | `True` | use half precision (FP16) | +| `device` | `None` | device to run on, i.e. cuda device=0/1/2/3 or device=cpu | +| `dnn` | `False` | use OpenCV DNN for ONNX inference | +| `plots` | `False` | show plots during training | +| `rect` | `False` | rectangular val with each batch collated for minimum padding | +| `split` | `val` | dataset split to use for validation, i.e. 'val', 'test' or 'train' | + +[Val Guide](../modes/val.md){ .md-button .md-button--primary} + +## Export + +Export settings for YOLO models encompass configurations and options related to saving or exporting the model for use in different environments or platforms. These settings can impact the model's performance, size, and compatibility with various systems. Key export settings include the exported model file format (e.g., ONNX, TensorFlow SavedModel), the target device (e.g., CPU, GPU), and additional features such as masks or multiple labels per box. The export process may also be affected by the model's specific task and the requirements or constraints of the destination environment or platform. It is crucial to thoughtfully configure these settings to ensure the exported model is optimized for the intended use case and functions effectively in the target environment. 
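As a short illustration, several of the keys in the table below can be passed directly to `model.export()`. This is a sketch only; the table and `default.yaml` define the full set of accepted arguments.

```python
from ultralytics import YOLO

model = YOLO('yolov8n.pt')

# Export to ONNX using a few of the settings described below
model.export(format='onnx', imgsz=640, half=False, dynamic=True, simplify=True)
```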
+ +| Key | Value | Description | +|-------------|-----------------|------------------------------------------------------| +| `format` | `'torchscript'` | format to export to | +| `imgsz` | `640` | image size as scalar or (h, w) list, i.e. (640, 480) | +| `keras` | `False` | use Keras for TF SavedModel export | +| `optimize` | `False` | TorchScript: optimize for mobile | +| `half` | `False` | FP16 quantization | +| `int8` | `False` | INT8 quantization | +| `dynamic` | `False` | ONNX/TF/TensorRT: dynamic axes | +| `simplify` | `False` | ONNX: simplify model | +| `opset` | `None` | ONNX: opset version (optional, defaults to latest) | +| `workspace` | `4` | TensorRT: workspace size (GB) | +| `nms` | `False` | CoreML: add NMS | + +[Export Guide](../modes/export.md){ .md-button .md-button--primary} + +## Augmentation + +Augmentation settings for YOLO models refer to the various transformations and modifications +applied to the training data to increase the diversity and size of the dataset. These settings can affect the model's +performance, speed, and accuracy. Some common YOLO augmentation settings include the type and intensity of the +transformations applied (e.g. random flips, rotations, cropping, color changes), the probability with which each +transformation is applied, and the presence of additional features such as masks or multiple labels per box. Other +factors that may affect the augmentation process include the size and composition of the original dataset and the +specific task the model is being used for. It is important to carefully tune and experiment with these settings to +ensure that the augmented dataset is diverse and representative enough to train a high-performing model. + +| Key | Value | Description | +|---------------|-------|-------------------------------------------------| +| `hsv_h` | 0.015 | image HSV-Hue augmentation (fraction) | +| `hsv_s` | 0.7 | image HSV-Saturation augmentation (fraction) | +| `hsv_v` | 0.4 | image HSV-Value augmentation (fraction) | +| `degrees` | 0.0 | image rotation (+/- deg) | +| `translate` | 0.1 | image translation (+/- fraction) | +| `scale` | 0.5 | image scale (+/- gain) | +| `shear` | 0.0 | image shear (+/- deg) | +| `perspective` | 0.0 | image perspective (+/- fraction), range 0-0.001 | +| `flipud` | 0.0 | image flip up-down (probability) | +| `fliplr` | 0.5 | image flip left-right (probability) | +| `mosaic` | 1.0 | image mosaic (probability) | +| `mixup` | 0.0 | image mixup (probability) | +| `copy_paste` | 0.0 | segment copy-paste (probability) | + +## Logging, checkpoints, plotting and file management + +Logging, checkpoints, plotting, and file management are important considerations when training a YOLO model. + +- Logging: It is often helpful to log various metrics and statistics during training to track the model's progress and + diagnose any issues that may arise. This can be done using a logging library such as TensorBoard or by writing log + messages to a file. +- Checkpoints: It is a good practice to save checkpoints of the model at regular intervals during training. This allows + you to resume training from a previous point if the training process is interrupted or if you want to experiment with + different training configurations. +- Plotting: Visualizing the model's performance and training progress can be helpful for understanding how the model is + behaving and identifying potential issues. 
This can be done using a plotting library such as matplotlib or by + generating plots using a logging library such as TensorBoard. +- File management: Managing the various files generated during the training process, such as model checkpoints, log + files, and plots, can be challenging. It is important to have a clear and organized file structure to keep track of + these files and make it easy to access and analyze them as needed. + +Effective logging, checkpointing, plotting, and file management can help you keep track of the model's progress and make +it easier to debug and optimize the training process. + +| Key | Value | Description | +|------------|----------|------------------------------------------------------------------------------------------------| +| `project` | `'runs'` | project name | +| `name` | `'exp'` | experiment name. `exp` gets automatically incremented if not specified, i.e, `exp`, `exp2` ... | +| `exist_ok` | `False` | whether to overwrite existing experiment | +| `plots` | `False` | save plots during train/val | +| `save` | `False` | save train checkpoints and predict results | diff --git a/docs/usage/cli.md b/docs/usage/cli.md new file mode 100644 index 0000000..79183d4 --- /dev/null +++ b/docs/usage/cli.md @@ -0,0 +1,221 @@ +# Command Line Interface Usage + +The YOLO command line interface (CLI) allows for simple single-line commands without the need for a Python environment. +CLI requires no customization or Python code. You can simply run all tasks from the terminal with the `yolo` command. + +!!! example + + === "Syntax" + + Ultralytics `yolo` commands use the following syntax: + ```bash + yolo TASK MODE ARGS + + Where TASK (optional) is one of [detect, segment, classify] + MODE (required) is one of [train, val, predict, export, track] + ARGS (optional) are any number of custom 'arg=value' pairs like 'imgsz=320' that override defaults. + ``` + See all ARGS in the full [Configuration Guide](./cfg.md) or with `yolo cfg` + + === "Train" + + Train a detection model for 10 epochs with an initial learning_rate of 0.01 + ```bash + yolo train data=coco128.yaml model=yolov8n.pt epochs=10 lr0=0.01 + ``` + + === "Predict" + + Predict a YouTube video using a pretrained segmentation model at image size 320: + ```bash + yolo predict model=yolov8n-seg.pt source='https://youtu.be/Zgi9g1ksQHc' imgsz=320 + ``` + + === "Val" + + Val a pretrained detection model at batch-size 1 and image size 640: + ```bash + yolo val model=yolov8n.pt data=coco128.yaml batch=1 imgsz=640 + ``` + + === "Export" + + Export a YOLOv8n classification model to ONNX format at image size 224 by 128 (no TASK required) + ```bash + yolo export model=yolov8n-cls.pt format=onnx imgsz=224,128 + ``` + + === "Special" + + Run special commands to see version, view settings, run checks and more: + ```bash + yolo help + yolo checks + yolo version + yolo settings + yolo copy-cfg + yolo cfg + ``` + +Where: + +- `TASK` (optional) is one of `[detect, segment, classify]`. If it is not passed explicitly YOLOv8 will try to guess + the `TASK` from the model type. +- `MODE` (required) is one of `[train, val, predict, export, track]` +- `ARGS` (optional) are any number of custom `arg=value` pairs like `imgsz=320` that override defaults. + For a full list of available `ARGS` see the [Configuration](cfg.md) page and `defaults.yaml` + GitHub [source](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/yolo/cfg/default.yaml). + +!!! 
warning "Warning" + + Arguments must be passed as `arg=val` pairs, split by an equals `=` sign and delimited by spaces ` ` between pairs. Do not use `--` argument prefixes or commas `,` beteen arguments. + + - `yolo predict model=yolov8n.pt imgsz=640 conf=0.25`   ✅ + - `yolo predict model yolov8n.pt imgsz 640 conf 0.25`   ❌ + - `yolo predict --model yolov8n.pt --imgsz 640 --conf 0.25`   ❌ + +## Train + +Train YOLOv8n on the COCO128 dataset for 100 epochs at image size 640. For a full list of available arguments see +the [Configuration](cfg.md) page. + +!!! example "Example" + + === "Train" + + Start training YOLOv8n on COCO128 for 100 epochs at image-size 640. + ```bash + yolo detect train data=coco128.yaml model=yolov8n.pt epochs=100 imgsz=640 + ``` + + === "Resume" + + Resume an interrupted training. + ```bash + yolo detect train resume model=last.pt + ``` + +## Val + +Validate trained YOLOv8n model accuracy on the COCO128 dataset. No argument need to passed as the `model` retains it's +training `data` and arguments as model attributes. + +!!! example "Example" + + === "Official" + + Validate an official YOLOv8n model. + ```bash + yolo detect val model=yolov8n.pt + ``` + + === "Custom" + + Validate a custom-trained model. + ```bash + yolo detect val model=path/to/best.pt + ``` + +## Predict + +Use a trained YOLOv8n model to run predictions on images. + +!!! example "Example" + + === "Official" + + Predict with an official YOLOv8n model. + ```bash + yolo detect predict model=yolov8n.pt source='https://ultralytics.com/images/bus.jpg' + ``` + + === "Custom" + + Predict with a custom model. + ```bash + yolo detect predict model=path/to/best.pt source='https://ultralytics.com/images/bus.jpg' + ``` + +## Export + +Export a YOLOv8n model to a different format like ONNX, CoreML, etc. + +!!! example "Example" + + === "Official" + + Export an official YOLOv8n model to ONNX format. + ```bash + yolo export model=yolov8n.pt format=onnx + ``` + + === "Custom" + + Export a custom-trained model to ONNX format. + ```bash + yolo export model=path/to/best.pt format=onnx + ``` + +Available YOLOv8 export formats are in the table below. You can export to any format using the `format` argument, +i.e. `format='onnx'` or `format='engine'`. 
+ +| Format | `format` Argument | Model | Metadata | +|--------------------------------------------------------------------|-------------------|---------------------------|----------| +| [PyTorch](https://pytorch.org/) | - | `yolov8n.pt` | ✅ | +| [TorchScript](https://pytorch.org/docs/stable/jit.html) | `torchscript` | `yolov8n.torchscript` | ✅ | +| [ONNX](https://onnx.ai/) | `onnx` | `yolov8n.onnx` | ✅ | +| [OpenVINO](https://docs.openvino.ai/latest/index.html) | `openvino` | `yolov8n_openvino_model/` | ✅ | +| [TensorRT](https://developer.nvidia.com/tensorrt) | `engine` | `yolov8n.engine` | ✅ | +| [CoreML](https://github.com/apple/coremltools) | `coreml` | `yolov8n.mlmodel` | ✅ | +| [TF SavedModel](https://www.tensorflow.org/guide/saved_model) | `saved_model` | `yolov8n_saved_model/` | ✅ | +| [TF GraphDef](https://www.tensorflow.org/api_docs/python/tf/Graph) | `pb` | `yolov8n.pb` | ❌ | +| [TF Lite](https://www.tensorflow.org/lite) | `tflite` | `yolov8n.tflite` | ✅ | +| [TF Edge TPU](https://coral.ai/docs/edgetpu/models-intro/) | `edgetpu` | `yolov8n_edgetpu.tflite` | ✅ | +| [TF.js](https://www.tensorflow.org/js) | `tfjs` | `yolov8n_web_model/` | ✅ | +| [PaddlePaddle](https://github.com/PaddlePaddle) | `paddle` | `yolov8n_paddle_model/` | ✅ | + +--- + +## Overriding default arguments + +Default arguments can be overridden by simply passing them as arguments in the CLI in `arg=value` pairs. + +!!! tip "" + + === "Train" + Train a detection model for `10 epochs` with `learning_rate` of `0.01` + ```bash + yolo detect train data=coco128.yaml model=yolov8n.pt epochs=10 lr0=0.01 + ``` + + === "Predict" + Predict a YouTube video using a pretrained segmentation model at image size 320: + ```bash + yolo segment predict model=yolov8n-seg.pt source='https://youtu.be/Zgi9g1ksQHc' imgsz=320 + ``` + + === "Val" + Validate a pretrained detection model at batch-size 1 and image size 640: + ```bash + yolo detect val model=yolov8n.pt data=coco128.yaml batch=1 imgsz=640 + ``` + +--- + +## Overriding default config file + +You can override the `default.yaml` config file entirely by passing a new file with the `cfg` arguments, +i.e. `cfg=custom.yaml`. + +To do this first create a copy of `default.yaml` in your current working dir with the `yolo copy-cfg` command. + +This will create `default_copy.yaml`, which you can then pass as `cfg=default_copy.yaml` along with any additional args, +like `imgsz=320` in this example: + +!!! example "" + + === "CLI" + ```bash + yolo copy-cfg + yolo cfg=default_copy.yaml imgsz=320 + ``` diff --git a/docs/usage/engine.md b/docs/usage/engine.md new file mode 100644 index 0000000..9cc85a1 --- /dev/null +++ b/docs/usage/engine.md @@ -0,0 +1,83 @@ +Both the Ultralytics YOLO command-line and python interfaces are simply a high-level abstraction on the base engine +executors. Let's take a look at the Trainer engine. + +## BaseTrainer + +BaseTrainer contains the generic boilerplate training routine. It can be customized for any task based over overriding +the required functions or operations as long the as correct formats are followed. For example, you can support your own +custom model and dataloader by just overriding these functions: + +* `get_model(cfg, weights)` - The function that builds the model to be trained +* `get_dataloder()` - The function that builds the dataloader + More details and source code can be found in [`BaseTrainer` Reference](../reference/base_trainer.md) + +## DetectionTrainer + +Here's how you can use the YOLOv8 `DetectionTrainer` and customize it. 
+ +```python +from ultralytics.yolo.v8.detect import DetectionTrainer + +trainer = DetectionTrainer(overrides={...}) +trainer.train() +trained_model = trainer.best # get best model +``` + +### Customizing the DetectionTrainer + +Let's customize the trainer **to train a custom detection model** that is not supported directly. You can do this by +simply overloading the existing the `get_model` functionality: + +```python +from ultralytics.yolo.v8.detect import DetectionTrainer + + +class CustomTrainer(DetectionTrainer): + def get_model(self, cfg, weights): + ... + + +trainer = CustomTrainer(overrides={...}) +trainer.train() +``` + +You now realize that you need to customize the trainer further to: + +* Customize the `loss function`. +* Add `callback` that uploads model to your Google Drive after every 10 `epochs` + Here's how you can do it: + +```python +from ultralytics.yolo.v8.detect import DetectionTrainer + + +class CustomTrainer(DetectionTrainer): + def get_model(self, cfg, weights): + ... + + def criterion(self, preds, batch): + # get ground truth + imgs = batch["imgs"] + bboxes = batch["bboxes"] + ... + return loss, loss_items # see Reference-> Trainer for details on the expected format + + +# callback to upload model weights +def log_model(trainer): + last_weight_path = trainer.last + ... + + +trainer = CustomTrainer(overrides={...}) +trainer.add_callback("on_train_epoch_end", log_model) # Adds to existing callback +trainer.train() +``` + +To know more about Callback triggering events and entry point, checkout our [Callbacks Guide](callbacks.md) + +## Other engine components + +There are other components that can be customized similarly like `Validators` and `Predictors` +See Reference section for more information on these. + diff --git a/docs/usage/python.md b/docs/usage/python.md new file mode 100644 index 0000000..a4f6944 --- /dev/null +++ b/docs/usage/python.md @@ -0,0 +1,277 @@ +# Python Usage + +Welcome to the YOLOv8 Python Usage documentation! This guide is designed to help you seamlessly integrate YOLOv8 into +your Python projects for object detection, segmentation, and classification. Here, you'll learn how to load and use +pretrained models, train new models, and perform predictions on images. The easy-to-use Python interface is a valuable +resource for anyone looking to incorporate YOLOv8 into their Python projects, allowing you to quickly implement advanced +object detection capabilities. Let's get started! + +For example, users can load a model, train it, evaluate its performance on a validation set, and even export it to ONNX +format with just a few lines of code. + +!!! example "Python" + + ```python + from ultralytics import YOLO + + # Create a new YOLO model from scratch + model = YOLO('yolov8n.yaml') + + # Load a pretrained YOLO model (recommended for training) + model = YOLO('yolov8n.pt') + + # Train the model using the 'coco128.yaml' dataset for 3 epochs + results = model.train(data='coco128.yaml', epochs=3) + + # Evaluate the model's performance on the validation set + results = model.val() + + # Perform object detection on an image using the model + results = model('https://ultralytics.com/images/bus.jpg') + + # Export the model to ONNX format + success = model.export(format='onnx') + ``` + +## [Train](../modes/train.md) + +Train mode is used for training a YOLOv8 model on a custom dataset. In this mode, the model is trained using the +specified dataset and hyperparameters. 
The training process involves optimizing the model's parameters so that it can +accurately predict the classes and locations of objects in an image. + +!!! example "Train" + + === "From pretrained(recommended)" + ```python + from ultralytics import YOLO + + model = YOLO('yolov8n.pt') # pass any model type + model.train(epochs=5) + ``` + + === "From scratch" + ```python + from ultralytics import YOLO + + model = YOLO('yolov8n.yaml') + model.train(data='coco128.yaml', epochs=5) + ``` + + === "Resume" + ```python + model = YOLO("last.pt") + model.train(resume=True) + ``` + +[Train Examples](../modes/train.md){ .md-button .md-button--primary} + +## [Val](../modes/val.md) + +Val mode is used for validating a YOLOv8 model after it has been trained. In this mode, the model is evaluated on a +validation set to measure its accuracy and generalization performance. This mode can be used to tune the hyperparameters +of the model to improve its performance. + +!!! example "Val" + + === "Val after training" + ```python + from ultralytics import YOLO + + model = YOLO('yolov8n.yaml') + model.train(data='coco128.yaml', epochs=5) + model.val() # It'll automatically evaluate the data you trained. + ``` + + === "Val independently" + ```python + from ultralytics import YOLO + + model = YOLO("model.pt") + # It'll use the data yaml file in model.pt if you don't set data. + model.val() + # or you can set the data you want to val + model.val(data='coco128.yaml') + ``` + +[Val Examples](../modes/val.md){ .md-button .md-button--primary} + +## [Predict](../modes/predict.md) + +Predict mode is used for making predictions using a trained YOLOv8 model on new images or videos. In this mode, the +model is loaded from a checkpoint file, and the user can provide images or videos to perform inference. The model +predicts the classes and locations of objects in the input images or videos. + +!!! example "Predict" + + === "From source" + ```python + from ultralytics import YOLO + from PIL import Image + import cv2 + + model = YOLO("model.pt") + # accepts all formats - image/dir/Path/URL/video/PIL/ndarray. 0 for webcam + results = model.predict(source="0") + results = model.predict(source="folder", show=True) # Display preds. Accepts all YOLO predict arguments + + # from PIL + im1 = Image.open("bus.jpg") + results = model.predict(source=im1, save=True) # save plotted images + + # from ndarray + im2 = cv2.imread("bus.jpg") + results = model.predict(source=im2, save=True, save_txt=True) # save predictions as labels + + # from list of PIL/ndarray + results = model.predict(source=[im1, im2]) + ``` + + === "Results usage" + ```python + # results would be a list of Results object including all the predictions by default + # but be careful as it could occupy a lot memory when there're many images, + # especially the task is segmentation. + # 1. return as a list + results = model.predict(source="folder") + + # results would be a generator which is more friendly to memory by setting stream=True + # 2. 
return as a generator
+    results = model.predict(source=0, stream=True)
+
+    for result in results:
+        # detection
+        result.boxes.xyxy   # box with xyxy format, (N, 4)
+        result.boxes.xywh   # box with xywh format, (N, 4)
+        result.boxes.xyxyn  # box with xyxy format but normalized, (N, 4)
+        result.boxes.xywhn  # box with xywh format but normalized, (N, 4)
+        result.boxes.conf   # confidence score, (N, 1)
+        result.boxes.cls    # cls, (N, 1)
+
+        # segmentation
+        result.masks.masks  # masks, (N, H, W)
+        result.masks.xy     # x,y segments (pixels), List[segment] * N
+        result.masks.xyn    # x,y segments (normalized), List[segment] * N
+
+        # classification
+        result.probs  # cls prob, (num_class, )
+
+    # Each result is composed of torch.Tensor by default,
+    # in which you can easily use the following functionality:
+    result = result.cuda()
+    result = result.cpu()
+    result = result.to("cpu")
+    result = result.numpy()
+    ```
+
+[Predict Examples](../modes/predict.md){ .md-button .md-button--primary}
+
+## [Export](../modes/export.md)
+
+Export mode is used for exporting a YOLOv8 model to a format that can be used for deployment. In this mode, the model is
+converted to a format that can be used by other software applications or hardware devices. This mode is useful when
+deploying the model to production environments.
+
+!!! example "Export"
+
+    === "Export to ONNX"
+
+        Export an official YOLOv8n model to ONNX with dynamic batch-size and image-size.
+        ```python
+        from ultralytics import YOLO
+
+        model = YOLO('yolov8n.pt')
+        model.export(format='onnx', dynamic=True)
+        ```
+
+    === "Export to TensorRT"
+
+        Export an official YOLOv8n model to TensorRT on `device=0` for acceleration on CUDA devices.
+        ```python
+        from ultralytics import YOLO
+
+        model = YOLO('yolov8n.pt')
+        model.export(format='engine', device=0)
+        ```
+
+[Export Examples](../modes/export.md){ .md-button .md-button--primary}
+
+## [Track](../modes/track.md)
+
+Track mode is used for tracking objects in real-time using a YOLOv8 model. In this mode, the model is loaded from a
+checkpoint file, and the user can provide a live video stream to perform real-time object tracking. This mode is useful
+for applications such as surveillance systems or self-driving cars.
+
+!!! example "Track"
+
+    === "Python"
+
+        ```python
+        from ultralytics import YOLO
+
+        # Load a model
+        model = YOLO('yolov8n.pt')  # load an official detection model
+        model = YOLO('yolov8n-seg.pt')  # load an official segmentation model
+        model = YOLO('path/to/best.pt')  # load a custom model
+
+        # Track with the model
+        results = model.track(source="https://youtu.be/Zgi9g1ksQHc", show=True)
+        results = model.track(source="https://youtu.be/Zgi9g1ksQHc", show=True, tracker="bytetrack.yaml")
+        ```
+
+[Track Examples](../modes/track.md){ .md-button .md-button--primary}
+
+## [Benchmark](../modes/benchmark.md)
+
+Benchmark mode is used to profile the speed and accuracy of various export formats for YOLOv8. The benchmarks provide
+information on the size of the exported format, its `mAP50-95` metrics (for object detection and segmentation)
+or `accuracy_top5` metrics (for classification), and the inference time in milliseconds per image across various export
+formats like ONNX, OpenVINO, TensorRT and others. This information can help users choose the optimal export format for
+their specific use case based on their requirements for speed and accuracy.
+
+!!! example "Benchmark"
+
+    === "Python"
+
+        Benchmark an official YOLOv8n model across all export formats.
+
+        ```python
+        from ultralytics.yolo.utils.benchmarks import benchmark
+
+        # Benchmark
+        benchmark(model='yolov8n.pt', imgsz=640, half=False, device=0)
+        ```
+
+[Benchmark Examples](../modes/benchmark.md){ .md-button .md-button--primary}
+
+## Using Trainers
+
+The `YOLO` model class is a high-level wrapper on the Trainer classes. Each YOLO task has its own trainer that inherits
+from `BaseTrainer`.
+
+!!! tip "Detection Trainer Example"
+
+    ```python
+    from ultralytics.yolo.v8.detect import DetectionTrainer, DetectionValidator, DetectionPredictor
+
+    # trainer
+    trainer = DetectionTrainer(overrides={})
+    trainer.train()
+    trained_model = trainer.best
+
+    # Validator
+    val = DetectionValidator(args=...)
+    val(model=trained_model)
+
+    # predictor
+    pred = DetectionPredictor(overrides={})
+    pred(source=SOURCE, model=trained_model)
+
+    # resume from last weight
+    overrides = {"resume": trainer.last}
+    trainer = DetectionTrainer(overrides=overrides)
+    ```
+
+You can easily customize Trainers to support custom tasks or explore R&D ideas.
+Learn more about Customizing `Trainers`, `Validators` and `Predictors` to suit your project needs in the Customization
+Section.
+
+[Customization tutorials](engine.md){ .md-button .md-button--primary}
diff --git a/docs/yolov5/architecture.md b/docs/yolov5/architecture.md
new file mode 100644
index 0000000..d8a05fb
--- /dev/null
+++ b/docs/yolov5/architecture.md
@@ -0,0 +1,209 @@
+## 1. Model Structure
+
+YOLOv5 (v6.0/6.1) consists of:
+- **Backbone**: `New CSP-Darknet53`
+- **Neck**: `SPPF`, `New CSP-PAN`
+- **Head**: `YOLOv3 Head`
+
+Model structure (`yolov5l.yaml`):
+
+![yolov5](https://user-images.githubusercontent.com/31005897/172404576-c260dcf9-76bb-4bc8-b6a9-f2d987792583.png)
+
+
+Some minor changes compared to previous versions:
+
+1. Replace the `Focus` structure with `6x6 Conv2d` (more efficient, refer to #4825)
+2. Replace the `SPP` structure with `SPPF` (more than double the speed)
+
+
+test code + +```python +import time +import torch +import torch.nn as nn + + +class SPP(nn.Module): + def __init__(self): + super().__init__() + self.maxpool1 = nn.MaxPool2d(5, 1, padding=2) + self.maxpool2 = nn.MaxPool2d(9, 1, padding=4) + self.maxpool3 = nn.MaxPool2d(13, 1, padding=6) + + def forward(self, x): + o1 = self.maxpool1(x) + o2 = self.maxpool2(x) + o3 = self.maxpool3(x) + return torch.cat([x, o1, o2, o3], dim=1) + + +class SPPF(nn.Module): + def __init__(self): + super().__init__() + self.maxpool = nn.MaxPool2d(5, 1, padding=2) + + def forward(self, x): + o1 = self.maxpool(x) + o2 = self.maxpool(o1) + o3 = self.maxpool(o2) + return torch.cat([x, o1, o2, o3], dim=1) + + +def main(): + input_tensor = torch.rand(8, 32, 16, 16) + spp = SPP() + sppf = SPPF() + output1 = spp(input_tensor) + output2 = sppf(input_tensor) + + print(torch.equal(output1, output2)) + + t_start = time.time() + for _ in range(100): + spp(input_tensor) + print(f"spp time: {time.time() - t_start}") + + t_start = time.time() + for _ in range(100): + sppf(input_tensor) + print(f"sppf time: {time.time() - t_start}") + + +if __name__ == '__main__': + main() +``` + +result: +``` +True +spp time: 0.5373051166534424 +sppf time: 0.20780706405639648 +``` + +
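+
+Change 1 can be sanity-checked the same way. The sketch below is only an illustration (it is not the actual YOLOv5 `Focus` module, which also wraps the convolution in BatchNorm and an activation, and the channel counts here are arbitrary); it compares a slice-and-concat block followed by a `3x3` conv against a single `6x6` stride-2 conv for output shape and speed:
+
+```python
+import time
+import torch
+import torch.nn as nn
+
+
+class FocusLike(nn.Module):
+    # Slice the input into 4 sub-grids, concatenate along channels, then fuse with a 3x3 conv
+    def __init__(self, c1=3, c2=32):
+        super().__init__()
+        self.conv = nn.Conv2d(c1 * 4, c2, 3, 1, 1)
+
+    def forward(self, x):
+        return self.conv(torch.cat([x[..., ::2, ::2], x[..., 1::2, ::2],
+                                    x[..., ::2, 1::2], x[..., 1::2, 1::2]], dim=1))
+
+
+def main():
+    x = torch.rand(8, 3, 64, 64)
+    focus = FocusLike(3, 32)
+    conv6x6 = nn.Conv2d(3, 32, 6, 2, 2)  # the 6x6 stride-2 replacement
+
+    # Both downsample 64x64 -> 32x32 with 32 output channels
+    print(focus(x).shape, conv6x6(x).shape)
+
+    t_start = time.time()
+    for _ in range(100):
+        focus(x)
+    print(f"focus-like time: {time.time() - t_start}")
+
+    t_start = time.time()
+    for _ in range(100):
+        conv6x6(x)
+    print(f"6x6 conv time: {time.time() - t_start}")
+
+
+if __name__ == '__main__':
+    main()
+```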
+ + + +## 2. Data Augmentation + +- Mosaic + + +- Copy paste + + +- Random affine(Rotation, Scale, Translation and Shear) + + +- MixUp + + +- Albumentations +- Augment HSV(Hue, Saturation, Value) + + +- Random horizontal flip + + + + +## 3. Training Strategies + +- Multi-scale training(0.5~1.5x) +- AutoAnchor(For training custom data) +- Warmup and Cosine LR scheduler +- EMA(Exponential Moving Average) +- Mixed precision +- Evolve hyper-parameters + + + +## 4. Others + +### 4.1 Compute Losses + +The YOLOv5 loss consists of three parts: + +- Classes loss(BCE loss) +- Objectness loss(BCE loss) +- Location loss(CIoU loss) + +![loss](https://latex.codecogs.com/svg.image?Loss=\lambda_1L_{cls}+\lambda_2L_{obj}+\lambda_3L_{loc}) + +### 4.2 Balance Losses +The objectness losses of the three prediction layers(`P3`, `P4`, `P5`) are weighted differently. The balance weights are `[4.0, 1.0, 0.4]` respectively. + +![obj_loss](https://latex.codecogs.com/svg.image?L_{obj}=4.0\cdot&space;L_{obj}^{small}+1.0\cdot&space;L_{obj}^{medium}+0.4\cdot&space;L_{obj}^{large}) + +### 4.3 Eliminate Grid Sensitivity +In YOLOv2 and YOLOv3, the formula for calculating the predicted target information is: + +![b_x](https://latex.codecogs.com/svg.image?b_x=\sigma(t_x)+c_x) +![b_y](https://latex.codecogs.com/svg.image?b_y=\sigma(t_y)+c_y) +![b_w](https://latex.codecogs.com/svg.image?b_w=p_w\cdot&space;e^{t_w}) +![b_h](https://latex.codecogs.com/svg.image?b_h=p_h\cdot&space;e^{t_h}) + + + + + +In YOLOv5, the formula is: + +![bx](https://latex.codecogs.com/svg.image?b_x=(2\cdot\sigma(t_x)-0.5)+c_x) +![by](https://latex.codecogs.com/svg.image?b_y=(2\cdot\sigma(t_y)-0.5)+c_y) +![bw](https://latex.codecogs.com/svg.image?b_w=p_w\cdot(2\cdot\sigma(t_w))^2) +![bh](https://latex.codecogs.com/svg.image?b_h=p_h\cdot(2\cdot\sigma(t_h))^2) + +Compare the center point offset before and after scaling. The center point offset range is adjusted from (0, 1) to (-0.5, 1.5). +Therefore, offset can easily get 0 or 1. + + + +Compare the height and width scaling ratio(relative to anchor) before and after adjustment. The original yolo/darknet box equations have a serious flaw. Width and Height are completely unbounded as they are simply out=exp(in), which is dangerous, as it can lead to runaway gradients, instabilities, NaN losses and ultimately a complete loss of training. [refer this issue](https://github.com/ultralytics/yolov5/issues/471#issuecomment-662009779) + + + + +### 4.4 Build Targets +Match positive samples: + +- Calculate the aspect ratio of GT and Anchor Templates + +![rw](https://latex.codecogs.com/svg.image?r_w=w_{gt}/w_{at}) + +![rh](https://latex.codecogs.com/svg.image?r_h=h_{gt}/h_{at}) + +![rwmax](https://latex.codecogs.com/svg.image?r_w^{max}=max(r_w,1/r_w)) + +![rhmax](https://latex.codecogs.com/svg.image?r_h^{max}=max(r_h,1/r_h)) + +![rmax](https://latex.codecogs.com/svg.image?r^{max}=max(r_w^{max},r_h^{max})) + +![match](https://latex.codecogs.com/svg.image?r^{max}<{\rm&space;anchor_t}) + + + +- Assign the successfully matched Anchor Templates to the corresponding cells + + + +- Because the center point offset range is adjusted from (0, 1) to (-0.5, 1.5). GT Box can be assigned to more anchors. 
+ + + +## Environments + +YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled): + +- **Notebooks** with free GPU: Run on Gradient Open In Colab Open In Kaggle +- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart) +- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart) +- **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) Docker Pulls + + +## Status + +YOLOv5 CI + +If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 [training](https://github.com/ultralytics/yolov5/blob/master/train.py), [validation](https://github.com/ultralytics/yolov5/blob/master/val.py), [inference](https://github.com/ultralytics/yolov5/blob/master/detect.py), [export](https://github.com/ultralytics/yolov5/blob/master/export.py) and [benchmarks](https://github.com/ultralytics/yolov5/blob/master/benchmarks.py) on macOS, Windows, and Ubuntu every 24 hours and on every commit. diff --git a/docs/yolov5/clearml.md b/docs/yolov5/clearml.md new file mode 100644 index 0000000..5c24ca4 --- /dev/null +++ b/docs/yolov5/clearml.md @@ -0,0 +1,237 @@ +# ClearML Integration + +Clear|MLClear|ML + +## About ClearML + +[ClearML](https://cutt.ly/yolov5-tutorial-clearml) is an [open-source](https://github.com/allegroai/clearml) toolbox designed to save you time ⏱️. + +🔨 Track every YOLOv5 training run in the experiment manager + +🔧 Version and easily access your custom training data with the integrated ClearML Data Versioning Tool + +🔦 Remotely train and monitor your YOLOv5 training runs using ClearML Agent + +🔬 Get the very best mAP using ClearML Hyperparameter Optimization + +🔭 Turn your newly trained YOLOv5 model into an API with just a few commands using ClearML Serving + +
+And so much more. It's up to you how many of these tools you want to use: you can stick to the experiment manager, or chain them all together into an impressive pipeline!
+
+
+ +![ClearML scalars dashboard](https://github.com/thepycoder/clearml_screenshots/raw/main/experiment_manager_with_compare.gif) + +
+
+ +## 🦾 Setting Things Up + +To keep track of your experiments and/or data, ClearML needs to communicate to a server. You have 2 options to get one: + +Either sign up for free to the [ClearML Hosted Service](https://cutt.ly/yolov5-tutorial-clearml) or you can set up your own server, see [here](https://clear.ml/docs/latest/docs/deploying_clearml/clearml_server). Even the server is open-source, so even if you're dealing with sensitive data, you should be good to go! + +1. Install the `clearml` python package: + + ```bash + pip install clearml + ``` + +2. Connect the ClearML SDK to the server by [creating credentials](https://app.clear.ml/settings/workspace-configuration) (go right top to Settings -> Workspace -> Create new credentials), then execute the command below and follow the instructions: + + ```bash + clearml-init + ``` + +That's it! You're done 😎 + +
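+
+To quickly verify that the SDK can reach the server, you can create and immediately close a task from Python. This is just a sanity check, and the project and task names below are placeholders; if it works, a new experiment shows up in the ClearML web UI:
+
+```python
+from clearml import Task
+
+# Creates an experiment in the ClearML UI if the credentials from `clearml-init` are valid
+task = Task.init(project_name="YOLOv5", task_name="connectivity-check")
+task.close()
+```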
+
+## 🚀 Training YOLOv5 With ClearML
+
+To enable ClearML experiment tracking, simply install the ClearML pip package.
+
+```bash
+pip install clearml>=1.2.0
+```
+
+This will enable integration with the YOLOv5 training script. Every training run from now on will be captured and stored by the ClearML experiment manager.
+
+If you want to change the `project_name` or `task_name`, use the `--project` and `--name` arguments of the `train.py` script; by default the project will be called `YOLOv5` and the task `Training`.
+PLEASE NOTE: ClearML uses `/` as a delimiter for subprojects, so be careful when using `/` in your project name!
+
+```bash
+python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache
+```
+
+or with custom project and task name:
+
+```bash
+python train.py --project my_project --name my_training --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache
+```
+
+This will capture:
+
+- Source code + uncommitted changes
+- Installed packages
+- (Hyper)parameters
+- Model files (use `--save-period n` to save a checkpoint every n epochs)
+- Console output
+- Scalars (mAP_0.5, mAP_0.5:0.95, precision, recall, losses, learning rates, ...)
+- General info such as machine details, runtime, creation date etc.
+- All produced plots such as label correlogram and confusion matrix
+- Images with bounding boxes per epoch
+- Mosaic per epoch
+- Validation images per epoch
+- ...
+
+That's a lot, right? 🤯
+Now, we can visualize all of this information in the ClearML UI to get an overview of our training progress. Add custom columns to the table view (e.g. mAP_0.5) so you can easily sort on the best performing model. Or select multiple experiments and directly compare them!
+
+There's even more we can do with all of this information, like hyperparameter optimization and remote execution, so keep reading if you want to see how that works!
+
+
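+
+If you also want to pull logged results back out of the experiment manager programmatically (for example to compare runs in a notebook), a minimal sketch with the ClearML SDK could look like this; the project and task names are the illustrative defaults mentioned above:
+
+```python
+from clearml import Task
+
+# Fetch an existing (e.g. finished) training task by project and task name
+task = Task.get_task(project_name="YOLOv5", task_name="Training")
+
+# Dictionary with the last/min/max reported value of every logged scalar
+print(task.get_last_scalar_metrics())
+```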
+ +## 🔗 Dataset Version Management + +Versioning your data separately from your code is generally a good idea and makes it easy to acquire the latest version too. This repository supports supplying a dataset version ID, and it will make sure to get the data if it's not there yet. Next to that, this workflow also saves the used dataset ID as part of the task parameters, so you will always know for sure which data was used in which experiment! + +![ClearML Dataset Interface](https://github.com/thepycoder/clearml_screenshots/raw/main/clearml_data.gif) + +### Prepare Your Dataset + +The YOLOv5 repository supports a number of different datasets by using yaml files containing their information. By default datasets are downloaded to the `../datasets` folder in relation to the repository root folder. So if you downloaded the `coco128` dataset using the link in the yaml or with the scripts provided by yolov5, you get this folder structure: + +``` +.. +|_ yolov5 +|_ datasets + |_ coco128 + |_ images + |_ labels + |_ LICENSE + |_ README.txt +``` + +But this can be any dataset you wish. Feel free to use your own, as long as you keep to this folder structure. + +Next, ⚠️**copy the corresponding yaml file to the root of the dataset folder**⚠️. This yaml files contains the information ClearML will need to properly use the dataset. You can make this yourself too, of course, just follow the structure of the example yamls. + +Basically we need the following keys: `path`, `train`, `test`, `val`, `nc`, `names`. + +``` +.. +|_ yolov5 +|_ datasets + |_ coco128 + |_ images + |_ labels + |_ coco128.yaml # <---- HERE! + |_ LICENSE + |_ README.txt +``` + +### Upload Your Dataset + +To get this dataset into ClearML as a versioned dataset, go to the dataset root folder and run the following command: + +```bash +cd coco128 +clearml-data sync --project YOLOv5 --name coco128 --folder . +``` + +The command `clearml-data sync` is actually a shorthand command. You could also run these commands one after the other: + +```bash +# Optionally add --parent if you want to base +# this version on another dataset version, so no duplicate files are uploaded! +clearml-data create --name coco128 --project YOLOv5 +clearml-data add --files . +clearml-data close +``` + +### Run Training Using A ClearML Dataset + +Now that you have a ClearML dataset, you can very simply use it to train custom YOLOv5 🚀 models! + +```bash +python train.py --img 640 --batch 16 --epochs 3 --data clearml:// --weights yolov5s.pt --cache +``` + +
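+
+If you prefer the Python SDK over the `clearml-data` CLI, a rough equivalent of the upload and retrieval steps above might look like the sketch below (same illustrative project and dataset names):
+
+```python
+from clearml import Dataset
+
+# Create a new dataset version and upload the local folder (roughly what `clearml-data sync` does)
+ds = Dataset.create(dataset_name="coco128", dataset_project="YOLOv5")
+ds.add_files(path=".")
+ds.upload()
+ds.finalize()
+
+# Later, fetch a read-only local copy of that dataset by project and name
+local_path = Dataset.get(dataset_project="YOLOv5", dataset_name="coco128").get_local_copy()
+print(local_path)
+```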
+ +## 👀 Hyperparameter Optimization + +Now that we have our experiments and data versioned, it's time to take a look at what we can build on top! + +Using the code information, installed packages and environment details, the experiment itself is now **completely reproducible**. In fact, ClearML allows you to clone an experiment and even change its parameters. We can then just rerun it with these new parameters automatically, this is basically what HPO does! + +To **run hyperparameter optimization locally**, we've included a pre-made script for you. Just make sure a training task has been run at least once, so it is in the ClearML experiment manager, we will essentially clone it and change its hyperparameters. + +You'll need to fill in the ID of this `template task` in the script found at `utils/loggers/clearml/hpo.py` and then just run it :) You can change `task.execute_locally()` to `task.execute()` to put it in a ClearML queue and have a remote agent work on it instead. + +```bash +# To use optuna, install it first, otherwise you can change the optimizer to just be RandomSearch +pip install optuna +python utils/loggers/clearml/hpo.py +``` + +![HPO](https://github.com/thepycoder/clearml_screenshots/raw/main/hpo.png) + +## 🤯 Remote Execution (advanced) + +Running HPO locally is really handy, but what if we want to run our experiments on a remote machine instead? Maybe you have access to a very powerful GPU machine on-site, or you have some budget to use cloud GPUs. +This is where the ClearML Agent comes into play. Check out what the agent can do here: + +- [YouTube video](https://youtu.be/MX3BrXnaULs) +- [Documentation](https://clear.ml/docs/latest/docs/clearml_agent) + +In short: every experiment tracked by the experiment manager contains enough information to reproduce it on a different machine (installed packages, uncommitted changes etc.). So a ClearML agent does just that: it listens to a queue for incoming tasks and when it finds one, it recreates the environment and runs it while still reporting scalars, plots etc. to the experiment manager. + +You can turn any machine (a cloud VM, a local GPU machine, your own laptop ... ) into a ClearML agent by simply running: + +```bash +clearml-agent daemon --queue [--docker] +``` + +### Cloning, Editing And Enqueuing + +With our agent running, we can give it some work. Remember from the HPO section that we can clone a task and edit the hyperparameters? We can do that from the interface too! + +🪄 Clone the experiment by right-clicking it + +🎯 Edit the hyperparameters to what you wish them to be + +⏳ Enqueue the task to any of the queues by right-clicking it + +![Enqueue a task from the UI](https://github.com/thepycoder/clearml_screenshots/raw/main/enqueue.gif) + +### Executing A Task Remotely + +Now you can clone a task like we explained above, or simply mark your current script by adding `task.execute_remotely()` and on execution it will be put into a queue, for the agent to start working on! + +To run the YOLOv5 training script remotely, all you have to do is add this line to the training.py script after the clearml logger has been instantiated: + +```python +# ... +# Loggers +data_dict = None +if RANK in {-1, 0}: + loggers = Loggers(save_dir, weights, opt, hyp, LOGGER) # loggers instance + if loggers.clearml: + loggers.clearml.task.execute_remotely(queue="my_queue") # <------ ADD THIS LINE + # Data_dict is either None is user did not choose for ClearML dataset or is filled in by ClearML + data_dict = loggers.clearml.data_dict +# ... 
+``` + +When running the training script after this change, python will run the script up until that line, after which it will package the code and send it to the queue instead! + +### Autoscaling workers + +ClearML comes with autoscalers too! This tool will automatically spin up new remote machines in the cloud of your choice (AWS, GCP, Azure) and turn them into ClearML agents for you whenever there are experiments detected in the queue. Once the tasks are processed, the autoscaler will automatically shut down the remote machines, and you stop paying! + +Check out the autoscalers getting started video below. + +[![Watch the video](https://img.youtube.com/vi/j4XVMAaUt3E/0.jpg)](https://youtu.be/j4XVMAaUt3E) diff --git a/docs/yolov5/comet.md b/docs/yolov5/comet.md new file mode 100644 index 0000000..2a08906 --- /dev/null +++ b/docs/yolov5/comet.md @@ -0,0 +1,258 @@ + + +# YOLOv5 with Comet + +This guide will cover how to use YOLOv5 with [Comet](https://bit.ly/yolov5-readme-comet2) + +# About Comet + +Comet builds tools that help data scientists, engineers, and team leaders accelerate and optimize machine learning and deep learning models. + +Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://www.comet.com/docs/v2/guides/comet-dashboard/code-panels/about-panels/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=github)! +Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes! + +# Getting Started + +## Install Comet + +```shell +pip install comet_ml +``` + +## Configure Comet Credentials + +There are two ways to configure Comet with YOLOv5. + +You can either set your credentials through environment variables + +**Environment Variables** + +```shell +export COMET_API_KEY= +export COMET_PROJECT_NAME= # This will default to 'yolov5' +``` + +Or create a `.comet.config` file in your working directory and set your credentials there. + +**Comet Configuration File** + +``` +[comet] +api_key= +project_name= # This will default to 'yolov5' +``` + +## Run the Training Script + +```shell +# Train YOLOv5s on COCO128 for 5 epochs +python train.py --img 640 --batch 16 --epochs 5 --data coco128.yaml --weights yolov5s.pt +``` + +That's it! Comet will automatically log your hyperparameters, command line arguments, training and validation metrics. You can visualize and analyze your runs in the Comet UI + +yolo-ui + +# Try out an Example! + +Check out an example of a [completed run here](https://www.comet.com/examples/comet-example-yolov5/a0e29e0e9b984e4a822db2a62d0cb357?experiment-tab=chart&showOutliers=true&smoothing=0&transformY=smoothing&xAxis=step&utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=github) + +Or better yet, try it out yourself in this Colab Notebook + +[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing) + +# Log automatically + +By default, Comet will log the following items + +## Metrics + +- Box Loss, Object Loss, Classification Loss for the training and validation data +- mAP_0.5, mAP_0.5:0.95 metrics for the validation data. 
+- Precision and Recall for the validation data + +## Parameters + +- Model Hyperparameters +- All parameters passed through the command line options + +## Visualizations + +- Confusion Matrix of the model predictions on the validation data +- Plots for the PR and F1 curves across all classes +- Correlogram of the Class Labels + +# Configure Comet Logging + +Comet can be configured to log additional data either through command line flags passed to the training script +or through environment variables. + +```shell +export COMET_MODE=online # Set whether to run Comet in 'online' or 'offline' mode. Defaults to online +export COMET_MODEL_NAME= #Set the name for the saved model. Defaults to yolov5 +export COMET_LOG_CONFUSION_MATRIX=false # Set to disable logging a Comet Confusion Matrix. Defaults to true +export COMET_MAX_IMAGE_UPLOADS= # Controls how many total image predictions to log to Comet. Defaults to 100. +export COMET_LOG_PER_CLASS_METRICS=true # Set to log evaluation metrics for each detected class at the end of training. Defaults to false +export COMET_DEFAULT_CHECKPOINT_FILENAME= # Set this if you would like to resume training from a different checkpoint. Defaults to 'last.pt' +export COMET_LOG_BATCH_LEVEL_METRICS=true # Set this if you would like to log training metrics at the batch level. Defaults to false. +export COMET_LOG_PREDICTIONS=true # Set this to false to disable logging model predictions +``` + +## Logging Checkpoints with Comet + +Logging Models to Comet is disabled by default. To enable it, pass the `save-period` argument to the training script. This will save the +logged checkpoints to Comet based on the interval value provided by `save-period` + +```shell +python train.py \ +--img 640 \ +--batch 16 \ +--epochs 5 \ +--data coco128.yaml \ +--weights yolov5s.pt \ +--save-period 1 +``` + +## Logging Model Predictions + +By default, model predictions (images, ground truth labels and bounding boxes) will be logged to Comet. + +You can control the frequency of logged predictions and the associated images by passing the `bbox_interval` command line argument. Predictions can be visualized using Comet's Object Detection Custom Panel. This frequency corresponds to every Nth batch of data per epoch. In the example below, we are logging every 2nd batch of data for each epoch. + +**Note:** The YOLOv5 validation dataloader will default to a batch size of 32, so you will have to set the logging frequency accordingly. + +Here is an [example project using the Panel](https://www.comet.com/examples/comet-example-yolov5?shareable=YcwMiJaZSXfcEXpGOHDD12vA1&utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=github) + +```shell +python train.py \ +--img 640 \ +--batch 16 \ +--epochs 5 \ +--data coco128.yaml \ +--weights yolov5s.pt \ +--bbox_interval 2 +``` + +### Controlling the number of Prediction Images logged to Comet + +When logging predictions from YOLOv5, Comet will log the images associated with each set of predictions. By default a maximum of 100 validation images are logged. You can increase or decrease this number using the `COMET_MAX_IMAGE_UPLOADS` environment variable. + +```shell +env COMET_MAX_IMAGE_UPLOADS=200 python train.py \ +--img 640 \ +--batch 16 \ +--epochs 5 \ +--data coco128.yaml \ +--weights yolov5s.pt \ +--bbox_interval 1 +``` + +### Logging Class Level Metrics + +Use the `COMET_LOG_PER_CLASS_METRICS` environment variable to log mAP, precision, recall, f1 for each class. 
+ +```shell +env COMET_LOG_PER_CLASS_METRICS=true python train.py \ +--img 640 \ +--batch 16 \ +--epochs 5 \ +--data coco128.yaml \ +--weights yolov5s.pt +``` + +## Uploading a Dataset to Comet Artifacts + +If you would like to store your data using [Comet Artifacts](https://www.comet.com/docs/v2/guides/data-management/using-artifacts/#learn-more?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=github), you can do so using the `upload_dataset` flag. + +The dataset be organized in the way described in the [YOLOv5 documentation](train_custom_data.md). The dataset config `yaml` file must follow the same format as that of the `coco128.yaml` file. + +```shell +python train.py \ +--img 640 \ +--batch 16 \ +--epochs 5 \ +--data coco128.yaml \ +--weights yolov5s.pt \ +--upload_dataset +``` + +You can find the uploaded dataset in the Artifacts tab in your Comet Workspace +artifact-1 + +You can preview the data directly in the Comet UI. +artifact-2 + +Artifacts are versioned and also support adding metadata about the dataset. Comet will automatically log the metadata from your dataset `yaml` file +artifact-3 + +### Using a saved Artifact + +If you would like to use a dataset from Comet Artifacts, set the `path` variable in your dataset `yaml` file to point to the following Artifact resource URL. + +``` +# contents of artifact.yaml file +path: "comet:///:" +``` + +Then pass this file to your training script in the following way + +```shell +python train.py \ +--img 640 \ +--batch 16 \ +--epochs 5 \ +--data artifact.yaml \ +--weights yolov5s.pt +``` + +Artifacts also allow you to track the lineage of data as it flows through your Experimentation workflow. Here you can see a graph that shows you all the experiments that have used your uploaded dataset. +artifact-4 + +## Resuming a Training Run + +If your training run is interrupted for any reason, e.g. disrupted internet connection, you can resume the run using the `resume` flag and the Comet Run Path. + +The Run Path has the following format `comet:////`. + +This will restore the run to its state before the interruption, which includes restoring the model from a checkpoint, restoring all hyperparameters and training arguments and downloading Comet dataset Artifacts if they were used in the original run. The resumed run will continue logging to the existing Experiment in the Comet UI + +```shell +python train.py \ +--resume "comet://" +``` + +## Hyperparameter Search with the Comet Optimizer + +YOLOv5 is also integrated with Comet's Optimizer, making is simple to visualize hyperparameter sweeps in the Comet UI. + +### Configuring an Optimizer Sweep + +To configure the Comet Optimizer, you will have to create a JSON file with the information about the sweep. An example file has been provided in `utils/loggers/comet/optimizer_config.json` + +```shell +python utils/loggers/comet/hpo.py \ + --comet_optimizer_config "utils/loggers/comet/optimizer_config.json" +``` + +The `hpo.py` script accepts the same arguments as `train.py`. If you wish to pass additional arguments to your sweep simply add them after +the script. + +```shell +python utils/loggers/comet/hpo.py \ + --comet_optimizer_config "utils/loggers/comet/optimizer_config.json" \ + --save-period 1 \ + --bbox_interval 1 +``` + +### Running a Sweep in Parallel + +```shell +comet optimizer -j utils/loggers/comet/hpo.py \ + utils/loggers/comet/optimizer_config.json" +``` + +### Visualizing Results + +Comet provides a number of ways to visualize the results of your sweep. 
Take a look at a [project with a completed sweep here](https://www.comet.com/examples/comet-example-yolov5/view/PrlArHGuuhDTKC1UuBmTtOSXD/panels?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=github) + +hyperparameter-yolo diff --git a/docs/yolov5/ensemble.md b/docs/yolov5/ensemble.md new file mode 100644 index 0000000..7303a0b --- /dev/null +++ b/docs/yolov5/ensemble.md @@ -0,0 +1,137 @@ +📚 This guide explains how to use YOLOv5 🚀 **model ensembling** during testing and inference for improved mAP and Recall. +UPDATED 25 September 2022. + +From [https://en.wikipedia.org/wiki/Ensemble_learning](https://en.wikipedia.org/wiki/Ensemble_learning): +> Ensemble modeling is a process where multiple diverse models are created to predict an outcome, either by using many different modeling algorithms or using different training data sets. The ensemble model then aggregates the prediction of each base model and results in once final prediction for the unseen data. The motivation for using ensemble models is to reduce the generalization error of the prediction. As long as the base models are diverse and independent, the prediction error of the model decreases when the ensemble approach is used. The approach seeks the wisdom of crowds in making a prediction. Even though the ensemble model has multiple base models within the model, it acts and performs as a single model. + + +## Before You Start + +Clone repo and install [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) in a [**Python>=3.7.0**](https://www.python.org/) environment, including [**PyTorch>=1.7**](https://pytorch.org/get-started/locally/). [Models](https://github.com/ultralytics/yolov5/tree/master/models) and [datasets](https://github.com/ultralytics/yolov5/tree/master/data) download automatically from the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases). + +```bash +git clone https://github.com/ultralytics/yolov5 # clone +cd yolov5 +pip install -r requirements.txt # install +``` + +## Test Normally + +Before ensembling we want to establish the baseline performance of a single model. This command tests YOLOv5x on COCO val2017 at image size 640 pixels. `yolov5x.pt` is the largest and most accurate model available. Other options are `yolov5s.pt`, `yolov5m.pt` and `yolov5l.pt`, or you own checkpoint from training a custom dataset `./weights/best.pt`. For details on all available models please see our README [table](https://github.com/ultralytics/yolov5#pretrained-checkpoints). +```bash +python val.py --weights yolov5x.pt --data coco.yaml --img 640 --half +``` + +Output: +```shell +val: data=./data/coco.yaml, weights=['yolov5x.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.65, task=val, device=, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=True, project=runs/val, name=exp, exist_ok=False, half=True +YOLOv5 🚀 v5.0-267-g6a3ee7c torch 1.9.0+cu102 CUDA:0 (Tesla P100-PCIE-16GB, 16280.875MB) + +Fusing layers... 
+Model Summary: 476 layers, 87730285 parameters, 0 gradients + +val: Scanning '../datasets/coco/val2017' images and labels...4952 found, 48 missing, 0 empty, 0 corrupted: 100% 5000/5000 [00:01<00:00, 2846.03it/s] +val: New cache created: ../datasets/coco/val2017.cache + Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 157/157 [02:30<00:00, 1.05it/s] + all 5000 36335 0.746 0.626 0.68 0.49 +Speed: 0.1ms pre-process, 22.4ms inference, 1.4ms NMS per image at shape (32, 3, 640, 640) # <--- baseline speed + +Evaluating pycocotools mAP... saving runs/val/exp/yolov5x_predictions.json... +... + Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.504 # <--- baseline mAP + Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.688 + Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.546 + Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.351 + Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.551 + Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.644 + Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.382 + Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.628 + Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.681 # <--- baseline mAR + Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.524 + Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.735 + Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.826 +``` + +## Ensemble Test + +Multiple pretrained models may be ensembled together at test and inference time by simply appending extra models to the `--weights` argument in any existing val.py or detect.py command. This example tests an ensemble of 2 models together: +- YOLOv5x +- YOLOv5l6 + +```bash +python val.py --weights yolov5x.pt yolov5l6.pt --data coco.yaml --img 640 --half +``` + +Output: +```shell +val: data=./data/coco.yaml, weights=['yolov5x.pt', 'yolov5l6.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, task=val, device=, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=True, project=runs/val, name=exp, exist_ok=False, half=True +YOLOv5 🚀 v5.0-267-g6a3ee7c torch 1.9.0+cu102 CUDA:0 (Tesla P100-PCIE-16GB, 16280.875MB) + +Fusing layers... +Model Summary: 476 layers, 87730285 parameters, 0 gradients # Model 1 +Fusing layers... +Model Summary: 501 layers, 77218620 parameters, 0 gradients # Model 2 +Ensemble created with ['yolov5x.pt', 'yolov5l6.pt'] # Ensemble notice + +val: Scanning '../datasets/coco/val2017.cache' images and labels... 4952 found, 48 missing, 0 empty, 0 corrupted: 100% 5000/5000 [00:00<00:00, 49695545.02it/s] + Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 157/157 [03:58<00:00, 1.52s/it] + all 5000 36335 0.747 0.637 0.692 0.502 +Speed: 0.1ms pre-process, 39.5ms inference, 2.0ms NMS per image at shape (32, 3, 640, 640) # <--- ensemble speed + +Evaluating pycocotools mAP... saving runs/val/exp3/yolov5x_predictions.json... +... 
+ Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.515 # <--- ensemble mAP + Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.699 + Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.557 + Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.356 + Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.563 + Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.668 + Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.387 + Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.638 + Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.689 # <--- ensemble mAR + Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.526 + Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.743 + Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.844 +``` + +## Ensemble Inference + +Append extra models to the `--weights` argument to run ensemble inference: +```bash +python detect.py --weights yolov5x.pt yolov5l6.pt --img 640 --source data/images +``` + +Output: +```bash +detect: weights=['yolov5x.pt', 'yolov5l6.pt'], source=data/images, imgsz=640, conf_thres=0.25, iou_thres=0.45, max_det=1000, device=, view_img=False, save_txt=False, save_conf=False, save_crop=False, nosave=False, classes=None, agnostic_nms=False, augment=False, update=False, project=runs/detect, name=exp, exist_ok=False, line_thickness=3, hide_labels=False, hide_conf=False, half=False +YOLOv5 🚀 v5.0-267-g6a3ee7c torch 1.9.0+cu102 CUDA:0 (Tesla P100-PCIE-16GB, 16280.875MB) + +Fusing layers... +Model Summary: 476 layers, 87730285 parameters, 0 gradients +Fusing layers... +Model Summary: 501 layers, 77218620 parameters, 0 gradients +Ensemble created with ['yolov5x.pt', 'yolov5l6.pt'] + +image 1/2 /content/yolov5/data/images/bus.jpg: 640x512 4 persons, 1 bus, 1 tie, Done. (0.063s) +image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 3 persons, 2 ties, Done. (0.056s) +Results saved to runs/detect/exp2 +Done. (0.223s) +``` + + + +## Environments + +YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled): + +- **Notebooks** with free GPU: Run on Gradient Open In Colab Open In Kaggle +- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart) +- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart) +- **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) Docker Pulls + + +## Status + +YOLOv5 CI + +If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 [training](https://github.com/ultralytics/yolov5/blob/master/train.py), [validation](https://github.com/ultralytics/yolov5/blob/master/val.py), [inference](https://github.com/ultralytics/yolov5/blob/master/detect.py), [export](https://github.com/ultralytics/yolov5/blob/master/export.py) and [benchmarks](https://github.com/ultralytics/yolov5/blob/master/benchmarks.py) on macOS, Windows, and Ubuntu every 24 hours and on every commit. 
\ No newline at end of file diff --git a/docs/yolov5/export.md b/docs/yolov5/export.md new file mode 100644 index 0000000..c1dbbaa --- /dev/null +++ b/docs/yolov5/export.md @@ -0,0 +1,236 @@ +# TFLite, ONNX, CoreML, TensorRT Export + +📚 This guide explains how to export a trained YOLOv5 🚀 model from PyTorch to ONNX and TorchScript formats. +UPDATED 8 December 2022. + +## Before You Start + +Clone repo and install [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) in a [**Python>=3.7.0**](https://www.python.org/) environment, including [**PyTorch>=1.7**](https://pytorch.org/get-started/locally/). [Models](https://github.com/ultralytics/yolov5/tree/master/models) and [datasets](https://github.com/ultralytics/yolov5/tree/master/data) download automatically from the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases). + +```bash +git clone https://github.com/ultralytics/yolov5 # clone +cd yolov5 +pip install -r requirements.txt # install +``` + +For [TensorRT](https://developer.nvidia.com/tensorrt) export example (requires GPU) see our Colab [notebook](https://colab.research.google.com/github/ultralytics/yolov5/blob/master/tutorial.ipynb#scrollTo=VTRwsvA9u7ln&line=2&uniqifier=1) appendix section. Open In Colab + +## Formats + +YOLOv5 inference is officially supported in 11 formats: + +💡 ProTip: Export to ONNX or OpenVINO for up to 3x CPU speedup. See [CPU Benchmarks](https://github.com/ultralytics/yolov5/pull/6613). +💡 ProTip: Export to TensorRT for up to 5x GPU speedup. See [GPU Benchmarks](https://github.com/ultralytics/yolov5/pull/6963). + +| Format | `export.py --include` | Model | +|:---------------------------------------------------------------------------|:----------------------|:--------------------------| +| [PyTorch](https://pytorch.org/) | - | `yolov5s.pt` | +| [TorchScript](https://pytorch.org/docs/stable/jit.html) | `torchscript` | `yolov5s.torchscript` | +| [ONNX](https://onnx.ai/) | `onnx` | `yolov5s.onnx` | +| [OpenVINO](https://docs.openvino.ai/latest/index.html) | `openvino` | `yolov5s_openvino_model/` | +| [TensorRT](https://developer.nvidia.com/tensorrt) | `engine` | `yolov5s.engine` | +| [CoreML](https://github.com/apple/coremltools) | `coreml` | `yolov5s.mlmodel` | +| [TensorFlow SavedModel](https://www.tensorflow.org/guide/saved_model) | `saved_model` | `yolov5s_saved_model/` | +| [TensorFlow GraphDef](https://www.tensorflow.org/api_docs/python/tf/Graph) | `pb` | `yolov5s.pb` | +| [TensorFlow Lite](https://www.tensorflow.org/lite) | `tflite` | `yolov5s.tflite` | +| [TensorFlow Edge TPU](https://coral.ai/docs/edgetpu/models-intro/) | `edgetpu` | `yolov5s_edgetpu.tflite` | +| [TensorFlow.js](https://www.tensorflow.org/js) | `tfjs` | `yolov5s_web_model/` | +| [PaddlePaddle](https://github.com/PaddlePaddle) | `paddle` | `yolov5s_paddle_model/` | + + +## Benchmarks + +Benchmarks below run on a Colab Pro with the YOLOv5 tutorial notebook Open In Colab. To reproduce: +```bash +python benchmarks.py --weights yolov5s.pt --imgsz 640 --device 0 +``` + +### Colab Pro V100 GPU + +``` +benchmarks: weights=/content/yolov5/yolov5s.pt, imgsz=640, batch_size=1, data=/content/yolov5/data/coco128.yaml, device=0, half=False, test=False +Checking setup... 
+YOLOv5 🚀 v6.1-135-g7926afc torch 1.10.0+cu111 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB) +Setup complete ✅ (8 CPUs, 51.0 GB RAM, 46.7/166.8 GB disk) + +Benchmarks complete (458.07s) + Format mAP@0.5:0.95 Inference time (ms) +0 PyTorch 0.4623 10.19 +1 TorchScript 0.4623 6.85 +2 ONNX 0.4623 14.63 +3 OpenVINO NaN NaN +4 TensorRT 0.4617 1.89 +5 CoreML NaN NaN +6 TensorFlow SavedModel 0.4623 21.28 +7 TensorFlow GraphDef 0.4623 21.22 +8 TensorFlow Lite NaN NaN +9 TensorFlow Edge TPU NaN NaN +10 TensorFlow.js NaN NaN +``` + +### Colab Pro CPU + +``` +benchmarks: weights=/content/yolov5/yolov5s.pt, imgsz=640, batch_size=1, data=/content/yolov5/data/coco128.yaml, device=cpu, half=False, test=False +Checking setup... +YOLOv5 🚀 v6.1-135-g7926afc torch 1.10.0+cu111 CPU +Setup complete ✅ (8 CPUs, 51.0 GB RAM, 41.5/166.8 GB disk) + +Benchmarks complete (241.20s) + Format mAP@0.5:0.95 Inference time (ms) +0 PyTorch 0.4623 127.61 +1 TorchScript 0.4623 131.23 +2 ONNX 0.4623 69.34 +3 OpenVINO 0.4623 66.52 +4 TensorRT NaN NaN +5 CoreML NaN NaN +6 TensorFlow SavedModel 0.4623 123.79 +7 TensorFlow GraphDef 0.4623 121.57 +8 TensorFlow Lite 0.4623 316.61 +9 TensorFlow Edge TPU NaN NaN +10 TensorFlow.js NaN NaN +``` + +## Export a Trained YOLOv5 Model + +This command exports a pretrained YOLOv5s model to TorchScript and ONNX formats. `yolov5s.pt` is the 'small' model, the second-smallest model available. Other options are `yolov5n.pt`, `yolov5m.pt`, `yolov5l.pt` and `yolov5x.pt`, along with their P6 counterparts i.e. `yolov5s6.pt` or you own custom training checkpoint i.e. `runs/exp/weights/best.pt`. For details on all available models please see our README [table](https://github.com/ultralytics/yolov5#pretrained-checkpoints). +```bash +python export.py --weights yolov5s.pt --include torchscript onnx +``` + +💡 ProTip: Add `--half` to export models at FP16 half precision for smaller file sizes + +Output: +```bash +export: data=data/coco128.yaml, weights=['yolov5s.pt'], imgsz=[640, 640], batch_size=1, device=cpu, half=False, inplace=False, train=False, keras=False, optimize=False, int8=False, dynamic=False, simplify=False, opset=12, verbose=False, workspace=4, nms=False, agnostic_nms=False, topk_per_class=100, topk_all=100, iou_thres=0.45, conf_thres=0.25, include=['torchscript', 'onnx'] +YOLOv5 🚀 v6.2-104-ge3e5122 Python-3.7.13 torch-1.12.1+cu113 CPU + +Downloading https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s.pt to yolov5s.pt... +100% 14.1M/14.1M [00:00<00:00, 274MB/s] + +Fusing layers... +YOLOv5s summary: 213 layers, 7225885 parameters, 0 gradients + +PyTorch: starting from yolov5s.pt with output shape (1, 25200, 85) (14.1 MB) + +TorchScript: starting export with torch 1.12.1+cu113... +TorchScript: export success ✅ 1.7s, saved as yolov5s.torchscript (28.1 MB) + +ONNX: starting export with onnx 1.12.0... +ONNX: export success ✅ 2.3s, saved as yolov5s.onnx (28.0 MB) + +Export complete (5.5s) +Results saved to /content/yolov5 +Detect: python detect.py --weights yolov5s.onnx +Validate: python val.py --weights yolov5s.onnx +PyTorch Hub: model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5s.onnx') +Visualize: https://netron.app/ +``` + +The 3 exported models will be saved alongside the original PyTorch model: +

+ +[Netron Viewer](https://github.com/lutzroeder/netron) is recommended for visualizing exported models: +

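Before moving on, a quick way to sanity-check an export is to load the ONNX file with `onnxruntime` and confirm the output shape matches the `(1, 25200, 85)` printed by `export.py`. This is an illustrative sketch (it assumes `onnxruntime` and `numpy` are installed and that `yolov5s.onnx` was exported at the default 640x640); it is not part of the repository scripts:

```python
import numpy as np
import onnxruntime as ort

# Load the exported model and run a dummy 640x640 image through it
session = ort.InferenceSession("yolov5s.onnx", providers=["CPUExecutionProvider"])
input_name = session.get_inputs()[0].name
dummy = np.zeros((1, 3, 640, 640), dtype=np.float32)

outputs = session.run(None, {input_name: dummy})
print(outputs[0].shape)  # expected: (1, 25200, 85) for yolov5s at 640x640
```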
+ + +## Exported Model Usage Examples + +`detect.py` runs inference on exported models: +```bash +python detect.py --weights yolov5s.pt # PyTorch + yolov5s.torchscript # TorchScript + yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn + yolov5s_openvino_model # OpenVINO + yolov5s.engine # TensorRT + yolov5s.mlmodel # CoreML (macOS only) + yolov5s_saved_model # TensorFlow SavedModel + yolov5s.pb # TensorFlow GraphDef + yolov5s.tflite # TensorFlow Lite + yolov5s_edgetpu.tflite # TensorFlow Edge TPU + yolov5s_paddle_model # PaddlePaddle +``` + +`val.py` runs validation on exported models: +```bash +python val.py --weights yolov5s.pt # PyTorch + yolov5s.torchscript # TorchScript + yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn + yolov5s_openvino_model # OpenVINO + yolov5s.engine # TensorRT + yolov5s.mlmodel # CoreML (macOS Only) + yolov5s_saved_model # TensorFlow SavedModel + yolov5s.pb # TensorFlow GraphDef + yolov5s.tflite # TensorFlow Lite + yolov5s_edgetpu.tflite # TensorFlow Edge TPU + yolov5s_paddle_model # PaddlePaddle +``` + +Use PyTorch Hub with exported YOLOv5 models: +``` python +import torch + +# Model +model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5s.pt') + 'yolov5s.torchscript ') # TorchScript + 'yolov5s.onnx') # ONNX Runtime + 'yolov5s_openvino_model') # OpenVINO + 'yolov5s.engine') # TensorRT + 'yolov5s.mlmodel') # CoreML (macOS Only) + 'yolov5s_saved_model') # TensorFlow SavedModel + 'yolov5s.pb') # TensorFlow GraphDef + 'yolov5s.tflite') # TensorFlow Lite + 'yolov5s_edgetpu.tflite') # TensorFlow Edge TPU + 'yolov5s_paddle_model') # PaddlePaddle + +# Images +img = 'https://ultralytics.com/images/zidane.jpg' # or file, Path, PIL, OpenCV, numpy, list + +# Inference +results = model(img) + +# Results +results.print() # or .show(), .save(), .crop(), .pandas(), etc. +``` + +## OpenCV DNN inference + +OpenCV inference with ONNX models: +```bash +python export.py --weights yolov5s.pt --include onnx + +python detect.py --weights yolov5s.onnx --dnn # detect +python val.py --weights yolov5s.onnx --dnn # validate +``` + +## C++ Inference + +YOLOv5 OpenCV DNN C++ inference on exported ONNX model examples: + +- [https://github.com/Hexmagic/ONNX-yolov5/blob/master/src/test.cpp](https://github.com/Hexmagic/ONNX-yolov5/blob/master/src/test.cpp) +- [https://github.com/doleron/yolov5-opencv-cpp-python](https://github.com/doleron/yolov5-opencv-cpp-python) + +YOLOv5 OpenVINO C++ inference examples: + +- [https://github.com/dacquaviva/yolov5-openvino-cpp-python](https://github.com/dacquaviva/yolov5-openvino-cpp-python) +- [https://github.com/UNeedCryDear/yolov5-seg-opencv-dnn-cpp](https://github.com/UNeedCryDear/yolov5-seg-opencv-dnn-cpp) + +## TensorFlow.js Web Browser Inference + +- [https://aukerul-shuvo.github.io/YOLOv5_TensorFlow-JS/](https://aukerul-shuvo.github.io/YOLOv5_TensorFlow-JS/) + +## Environments + +YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled): + +- **Notebooks** with free GPU: Run on Gradient Open In Colab Open In Kaggle +- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart) +- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart) +- **Docker Image**. 
See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) Docker Pulls + + +## Status + +YOLOv5 CI + +If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 [training](https://github.com/ultralytics/yolov5/blob/master/train.py), [validation](https://github.com/ultralytics/yolov5/blob/master/val.py), [inference](https://github.com/ultralytics/yolov5/blob/master/detect.py), [export](https://github.com/ultralytics/yolov5/blob/master/export.py) and [benchmarks](https://github.com/ultralytics/yolov5/blob/master/benchmarks.py) on macOS, Windows, and Ubuntu every 24 hours and on every commit. \ No newline at end of file diff --git a/docs/yolov5/hyp_evolution.md b/docs/yolov5/hyp_evolution.md new file mode 100644 index 0000000..9fe3094 --- /dev/null +++ b/docs/yolov5/hyp_evolution.md @@ -0,0 +1,161 @@ +📚 This guide explains **hyperparameter evolution** for YOLOv5 🚀. Hyperparameter evolution is a method of [Hyperparameter Optimization](https://en.wikipedia.org/wiki/Hyperparameter_optimization) using a [Genetic Algorithm](https://en.wikipedia.org/wiki/Genetic_algorithm) (GA) for optimization. UPDATED 25 September 2022. + +Hyperparameters in ML control various aspects of training, and finding optimal values for them can be a challenge. Traditional methods like grid searches can quickly become intractable due to 1) the high dimensional search space 2) unknown correlations among the dimensions, and 3) expensive nature of evaluating the fitness at each point, making GA a suitable candidate for hyperparameter searches. + + +## Before You Start + +Clone repo and install [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) in a [**Python>=3.7.0**](https://www.python.org/) environment, including [**PyTorch>=1.7**](https://pytorch.org/get-started/locally/). [Models](https://github.com/ultralytics/yolov5/tree/master/models) and [datasets](https://github.com/ultralytics/yolov5/tree/master/data) download automatically from the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases). + +```bash +git clone https://github.com/ultralytics/yolov5 # clone +cd yolov5 +pip install -r requirements.txt # install +``` + + +## 1. Initialize Hyperparameters + +YOLOv5 has about 30 hyperparameters used for various training settings. These are defined in `*.yaml` files in the `/data/hyps` directory. Better initial guesses will produce better final results, so it is important to initialize these values properly before evolving. If in doubt, simply use the default values, which are optimized for YOLOv5 COCO training from scratch. 
+ +```yaml +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# Hyperparameters for low-augmentation COCO training from scratch +# python train.py --batch 64 --cfg yolov5n6.yaml --weights '' --data coco.yaml --img 640 --epochs 300 --linear +# See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials + +lr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3) +lrf: 0.01 # final OneCycleLR learning rate (lr0 * lrf) +momentum: 0.937 # SGD momentum/Adam beta1 +weight_decay: 0.0005 # optimizer weight decay 5e-4 +warmup_epochs: 3.0 # warmup epochs (fractions ok) +warmup_momentum: 0.8 # warmup initial momentum +warmup_bias_lr: 0.1 # warmup initial bias lr +box: 0.05 # box loss gain +cls: 0.5 # cls loss gain +cls_pw: 1.0 # cls BCELoss positive_weight +obj: 1.0 # obj loss gain (scale with pixels) +obj_pw: 1.0 # obj BCELoss positive_weight +iou_t: 0.20 # IoU training threshold +anchor_t: 4.0 # anchor-multiple threshold +# anchors: 3 # anchors per output layer (0 to ignore) +fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5) +hsv_h: 0.015 # image HSV-Hue augmentation (fraction) +hsv_s: 0.7 # image HSV-Saturation augmentation (fraction) +hsv_v: 0.4 # image HSV-Value augmentation (fraction) +degrees: 0.0 # image rotation (+/- deg) +translate: 0.1 # image translation (+/- fraction) +scale: 0.5 # image scale (+/- gain) +shear: 0.0 # image shear (+/- deg) +perspective: 0.0 # image perspective (+/- fraction), range 0-0.001 +flipud: 0.0 # image flip up-down (probability) +fliplr: 0.5 # image flip left-right (probability) +mosaic: 1.0 # image mosaic (probability) +mixup: 0.0 # image mixup (probability) +copy_paste: 0.0 # segment copy-paste (probability) +``` + +## 2. Define Fitness + +Fitness is the value we seek to maximize. In YOLOv5 we define a default fitness function as a weighted combination of metrics: `mAP@0.5` contributes 10% of the weight and `mAP@0.5:0.95` contributes the remaining 90%, with [Precision `P` and Recall `R`](https://en.wikipedia.org/wiki/Precision_and_recall) absent. You may adjust these as you see fit or use the default fitness definition in utils/metrics.py (recommended). + +```python +def fitness(x): + # Model fitness as a weighted combination of metrics + w = [0.0, 0.0, 0.1, 0.9] # weights for [P, R, mAP@0.5, mAP@0.5:0.95] + return (x[:, :4] * w).sum(1) +``` + +## 3. Evolve + +Evolution is performed about a base scenario which we seek to improve upon. The base scenario in this example is finetuning COCO128 for 10 epochs using pretrained YOLOv5s. The base scenario training command is: +```bash +python train.py --epochs 10 --data coco128.yaml --weights yolov5s.pt --cache +``` +To evolve hyperparameters **specific to this scenario**, starting from our initial values defined in **Section 1.**, and maximizing the fitness defined in **Section 2.**, append `--evolve`: +```bash +# Single-GPU +python train.py --epochs 10 --data coco128.yaml --weights yolov5s.pt --cache --evolve + +# Multi-GPU +for i in 0 1 2 3 4 5 6 7; do + sleep $(expr 30 \* $i) && # 30-second delay (optional) + echo 'Starting GPU '$i'...' && + nohup python train.py --epochs 10 --data coco128.yaml --weights yolov5s.pt --cache --device $i --evolve > evolve_gpu_$i.log & +done + +# Multi-GPU bash-while (not recommended) +for i in 0 1 2 3 4 5 6 7; do + sleep $(expr 30 \* $i) && # 30-second delay (optional) + echo 'Starting GPU '$i'...' && + "$(while true; do nohup python train.py... 
--device $i --evolve 1 > evolve_gpu_$i.log; done)" & +done +``` + +The default evolution settings will run the base scenario 300 times, i.e. for 300 generations. You can modify generations via the `--evolve` argument, i.e. `python train.py --evolve 1000`. +https://github.com/ultralytics/yolov5/blob/6a3ee7cf03efb17fbffde0e68b1a854e80fe3213/train.py#L608 + +The main genetic operators are **crossover** and **mutation**. In this work mutation is used, with an 80% probability and a 0.04 variance to create new offspring based on a combination of the best parents from all previous generations. Results are logged to `runs/evolve/exp/evolve.csv`, and the highest fitness offspring is saved every generation as `runs/evolve/hyp_evolved.yaml`: +```yaml +# YOLOv5 Hyperparameter Evolution Results +# Best generation: 287 +# Last generation: 300 +# metrics/precision, metrics/recall, metrics/mAP_0.5, metrics/mAP_0.5:0.95, val/box_loss, val/obj_loss, val/cls_loss +# 0.54634, 0.55625, 0.58201, 0.33665, 0.056451, 0.042892, 0.013441 + +lr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3) +lrf: 0.2 # final OneCycleLR learning rate (lr0 * lrf) +momentum: 0.937 # SGD momentum/Adam beta1 +weight_decay: 0.0005 # optimizer weight decay 5e-4 +warmup_epochs: 3.0 # warmup epochs (fractions ok) +warmup_momentum: 0.8 # warmup initial momentum +warmup_bias_lr: 0.1 # warmup initial bias lr +box: 0.05 # box loss gain +cls: 0.5 # cls loss gain +cls_pw: 1.0 # cls BCELoss positive_weight +obj: 1.0 # obj loss gain (scale with pixels) +obj_pw: 1.0 # obj BCELoss positive_weight +iou_t: 0.20 # IoU training threshold +anchor_t: 4.0 # anchor-multiple threshold +# anchors: 3 # anchors per output layer (0 to ignore) +fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5) +hsv_h: 0.015 # image HSV-Hue augmentation (fraction) +hsv_s: 0.7 # image HSV-Saturation augmentation (fraction) +hsv_v: 0.4 # image HSV-Value augmentation (fraction) +degrees: 0.0 # image rotation (+/- deg) +translate: 0.1 # image translation (+/- fraction) +scale: 0.5 # image scale (+/- gain) +shear: 0.0 # image shear (+/- deg) +perspective: 0.0 # image perspective (+/- fraction), range 0-0.001 +flipud: 0.0 # image flip up-down (probability) +fliplr: 0.5 # image flip left-right (probability) +mosaic: 1.0 # image mosaic (probability) +mixup: 0.0 # image mixup (probability) +copy_paste: 0.0 # segment copy-paste (probability) +``` + +We recommend a minimum of 300 generations of evolution for best results. Note that **evolution is generally expensive and time-consuming**, as the base scenario is trained hundreds of times, possibly requiring hundreds or thousands of GPU hours. + + +## 4. Visualize + +`evolve.csv` is plotted as `evolve.png` by `utils.plots.plot_evolve()` after evolution finishes with one subplot per hyperparameter showing fitness (y-axis) vs hyperparameter values (x-axis). Yellow indicates higher concentrations. Vertical distributions indicate that a parameter has been disabled and does not mutate. This is user selectable in the `meta` dictionary in train.py, and is useful for fixing parameters and preventing them from evolving. 
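To make the role of `meta` concrete, the sketch below approximates the mutation step described above: each hyperparameter of the best parent is perturbed with 80% probability using Gaussian noise (a variance of 0.04 corresponds to a standard deviation of 0.2) and clipped to per-parameter limits. The limits dictionary and update rule here are illustrative placeholders, not the real `meta` dictionary or mutation code in train.py:

```python
import random

import numpy as np

# Illustrative limits only -- the real per-hyperparameter bounds live in the `meta` dict in train.py
limits = {
    "lr0": (1e-5, 1e-1),
    "momentum": (0.6, 0.98),
    "weight_decay": (0.0, 0.001),
}


def mutate(parent, prob=0.8, sigma=0.2):  # variance 0.04 -> standard deviation 0.2
    """Return a mutated copy of `parent`, perturbing each hyperparameter with probability `prob`."""
    child = dict(parent)
    for k, (low, high) in limits.items():
        if random.random() < prob:
            child[k] = float(np.clip(child[k] * (1 + random.gauss(0, sigma)), low, high))
    return child


best_parent = {"lr0": 0.01, "momentum": 0.937, "weight_decay": 0.0005}
print(mutate(best_parent))
```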
+ +![evolve](https://user-images.githubusercontent.com/26833433/89130469-f43e8e00-d4b9-11ea-9e28-f8ae3622516d.png) + + +## Environments + +YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled): + +- **Notebooks** with free GPU: Run on Gradient Open In Colab Open In Kaggle +- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart) +- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart) +- **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) Docker Pulls + + +## Status + +YOLOv5 CI + +If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 [training](https://github.com/ultralytics/yolov5/blob/master/train.py), [validation](https://github.com/ultralytics/yolov5/blob/master/val.py), [inference](https://github.com/ultralytics/yolov5/blob/master/detect.py), [export](https://github.com/ultralytics/yolov5/blob/master/export.py) and [benchmarks](https://github.com/ultralytics/yolov5/blob/master/benchmarks.py) on macOS, Windows, and Ubuntu every 24 hours and on every commit. \ No newline at end of file diff --git a/docs/yolov5/index.md b/docs/yolov5/index.md new file mode 100644 index 0000000..ef96143 --- /dev/null +++ b/docs/yolov5/index.md @@ -0,0 +1,87 @@ +# YOLOv5 Docs + +
+

+ + +

+ + YOLOv5 CI + YOLOv5 Citation + Docker Pulls +
+ Run on Gradient + Open In Colab + Open In Kaggle +
+
+ +Welcome to the Ultralytics YOLOv5 🚀 Docs! YOLOv5, or You Only Look Once version 5, is an Ultralytics object detection model designed to deliver fast and accurate real-time results. +

+This powerful deep learning framework is built on the PyTorch platform and has gained immense popularity due to its ease of use, high performance, and versatility. In this documentation, we will guide you through the installation process, explain the model's architecture, showcase various use-cases, and provide detailed tutorials to help you harness the full potential of YOLOv5 for your computer vision projects. Let's dive in! + +
+ +## Tutorials + +* [Train Custom Data](train_custom_data.md) 🚀 RECOMMENDED +* [Tips for Best Training Results](tips_for_best_training_results.md) ☘️ +* [Multi-GPU Training](multi_gpu_training.md) +* [PyTorch Hub](pytorch_hub.md) 🌟 NEW +* [TFLite, ONNX, CoreML, TensorRT Export](export.md) 🚀 +* [NVIDIA Jetson platform Deployment](jetson_nano.md) 🌟 NEW +* [Test-Time Augmentation (TTA)](tta.md) +* [Model Ensembling](ensemble.md) +* [Model Pruning/Sparsity](pruning_sparsity.md) +* [Hyperparameter Evolution](hyp_evolution.md) +* [Transfer Learning with Frozen Layers](transfer_learn_frozen.md) +* [Architecture Summary](architecture.md) 🌟 NEW +* [Roboflow for Datasets, Labeling, and Active Learning](roboflow.md) +* [ClearML Logging](clearml.md) 🌟 NEW +* [YOLOv5 with Neural Magic's Deepsparse](neural_magic.md) 🌟 NEW +* [Comet Logging](comet.md) 🌟 NEW + +## Environments + +YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies +including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) +and [PyTorch](https://pytorch.org/) preinstalled): + +- **Notebooks** with free + GPU: Run on Gradient Open In Colab Open In Kaggle +- **Google Cloud** Deep Learning VM. + See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart) +- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart) +- **Docker Image**. + See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) Docker Pulls + +## Status + +YOLOv5 CI + +If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous +Integration (CI) tests are currently passing. CI tests verify correct operation of +YOLOv5 [training](https://github.com/ultralytics/yolov5/blob/master/train.py), [validation](https://github.com/ultralytics/yolov5/blob/master/val.py), [inference](https://github.com/ultralytics/yolov5/blob/master/detect.py), [export](https://github.com/ultralytics/yolov5/blob/master/export.py) +and [benchmarks](https://github.com/ultralytics/yolov5/blob/master/benchmarks.py) on macOS, Windows, and Ubuntu every 24 +hours and on every commit. + +
+
\ No newline at end of file diff --git a/docs/yolov5/jetson_nano.md b/docs/yolov5/jetson_nano.md new file mode 100644 index 0000000..43f6047 --- /dev/null +++ b/docs/yolov5/jetson_nano.md @@ -0,0 +1,316 @@ +# Deploy on NVIDIA Jetson using TensorRT and DeepStream SDK + +📚 This guide explains how to deploy a trained model into NVIDIA Jetson Platform and perform inference using TensorRT and DeepStream SDK. Here we use TensorRT to maximize the inference performance on the Jetson platform. +UPDATED 18 November 2022. + +## Hardware Verification + +We have tested and verified this guide on the following Jetson devices + +- [Seeed reComputer J1010 built with Jetson Nano module](https://www.seeedstudio.com/Jetson-10-1-A0-p-5336.html) +- [Seeed reComputer J2021 built with Jetson Xavier NX module](https://www.seeedstudio.com/reComputer-J2021-p-5438.html) + +## Before You Start + +Make sure you have properly installed **JetPack SDK** with all the **SDK Components** and **DeepStream SDK** on the Jetson device as this includes CUDA, TensorRT and DeepStream SDK which are needed for this guide. + +JetPack SDK provides a full development environment for hardware-accelerated AI-at-the-edge development. All Jetson modules and developer kits are supported by JetPack SDK. + +There are two major installation methods including, + +1. SD Card Image Method +2. NVIDIA SDK Manager Method + +You can find a very detailed installation guide from NVIDIA [official website](https://developer.nvidia.com/jetpack-sdk-461). You can also find guides corresponding to the above-mentioned [reComputer J1010](https://wiki.seeedstudio.com/reComputer_J1010_J101_Flash_Jetpack) and [reComputer J2021](https://wiki.seeedstudio.com/reComputer_J2021_J202_Flash_Jetpack). + + +## Install Necessary Packages + +- **Step 1.** Access the terminal of Jetson device, install pip and upgrade it + +```sh +sudo apt update +sudo apt install -y python3-pip +pip3 install --upgrade pip +``` + +- **Step 2.** Clone the following repo + +```sh +git clone https://github.com/ultralytics/yolov5 +``` + +- **Step 3.** Open **requirements.txt** + +```sh +cd yolov5 +vi requirements.txt +``` + +- **Step 5.** Edit the following lines. Here you need to press **i** first to enter editing mode. Press **ESC**, then type **:wq** to save and quit + +```sh +# torch>=1.7.0 +# torchvision>=0.8.1 +``` + +**Note:** torch and torchvision are excluded for now because they will be installed later. + +- **Step 6.** install the below dependency + +```sh +sudo apt install -y libfreetype6-dev +``` + +- **Step 7.** Install the necessary packages + +```sh +pip3 install -r requirements.txt +``` + +## Install PyTorch and Torchvision + +We cannot install PyTorch and Torchvision from pip because they are not compatible to run on Jetson platform which is based on **ARM aarch64 architecture**. Therefore, we need to manually install pre-built PyTorch pip wheel and compile/ install Torchvision from source. + +Visit [this page](https://forums.developer.nvidia.com/t/pytorch-for-jetson) to access all the PyTorch and Torchvision links. + +Here are some of the versions supported by JetPack 4.6 and above. 
+ +**PyTorch v1.10.0** + +Supported by JetPack 4.4 (L4T R32.4.3) / JetPack 4.4.1 (L4T R32.4.4) / JetPack 4.5 (L4T R32.5.0) / JetPack 4.5.1 (L4T R32.5.1) / JetPack 4.6 (L4T R32.6.1) with Python 3.6 + +**file_name:** torch-1.10.0-cp36-cp36m-linux_aarch64.whl +**URL:** [https://nvidia.box.com/shared/static/fjtbno0vpo676a25cgvuqc1wty0fkkg6.whl](https://nvidia.box.com/shared/static/fjtbno0vpo676a25cgvuqc1wty0fkkg6.whl) + +**PyTorch v1.12.0** + +Supported by JetPack 5.0 (L4T R34.1.0) / JetPack 5.0.1 (L4T R34.1.1) / JetPack 5.0.2 (L4T R35.1.0) with Python 3.8 + +**file_name:** torch-1.12.0a0+2c916ef.nv22.3-cp38-cp38-linux_aarch64.whl +**URL:** [https://developer.download.nvidia.com/compute/redist/jp/v50/pytorch/torch-1.12.0a0+2c916ef.nv22.3-cp38-cp38-linux_aarch64.whl](https://developer.download.nvidia.com/compute/redist/jp/v50/pytorch/torch-1.12.0a0+2c916ef.nv22.3-cp38-cp38-linux_aarch64.whl) + +- **Step 1.** Install torch according to your JetPack version in the following format + +```sh +wget -O +pip3 install +``` + +For example, here we are running **JP4.6.1**, and therefore we choose **PyTorch v1.10.0** + +```sh +cd ~ +sudo apt-get install -y libopenblas-base libopenmpi-dev +wget https://nvidia.box.com/shared/static/fjtbno0vpo676a25cgvuqc1wty0fkkg6.whl -O torch-1.10.0-cp36-cp36m-linux_aarch64.whl +pip3 install torch-1.10.0-cp36-cp36m-linux_aarch64.whl +``` + +- **Step 2.** Install torchvision depending on the version of PyTorch that you have installed. For example, we chose **PyTorch v1.10.0**, which means, we need to choose **Torchvision v0.11.1** + +```sh +sudo apt install -y libjpeg-dev zlib1g-dev +git clone --branch v0.11.1 https://github.com/pytorch/vision torchvision +cd torchvision +sudo python3 setup.py install +``` + +Here a list of the corresponding torchvision version that you need to install according to the PyTorch version: + +- PyTorch v1.10 - torchvision v0.11.1 +- PyTorch v1.12 - torchvision v0.13.0 + +## DeepStream Configuration for YOLOv5 + +- **Step 1.** Clone the following repo + +```sh +cd ~ +git clone https://github.com/marcoslucianops/DeepStream-Yolo +``` + +- **Step 2.** Copy **gen_wts_yoloV5.py** from **DeepStream-Yolo/utils** into **yolov5** directory + +```sh +cp DeepStream-Yolo/utils/gen_wts_yoloV5.py yolov5 +``` + +- **Step 3.** Inside the yolov5 repo, download **pt file** from YOLOv5 releases (example for YOLOv5s 6.1) + +```sh +cd yolov5 +wget https://github.com/ultralytics/yolov5/releases/download/v6.1/yolov5s.pt +``` + +- **Step 4.** Generate the **cfg** and **wts** files + +```sh +python3 gen_wts_yoloV5.py -w yolov5s.pt +``` + +**Note**: To change the inference size (default: 640) + +```sh +-s SIZE +--size SIZE +-s HEIGHT WIDTH +--size HEIGHT WIDTH + +Example for 1280: + +-s 1280 +or +-s 1280 1280 +``` + +- **Step 5.** Copy the generated **cfg** and **wts** files into the **DeepStream-Yolo** folder + +```sh +cp yolov5s.cfg ~/DeepStream-Yolo +cp yolov5s.wts ~/DeepStream-Yolo +``` + +- **Step 6.** Open the **DeepStream-Yolo** folder and compile the library + +```sh +cd ~/DeepStream-Yolo +CUDA_VER=11.4 make -C nvdsinfer_custom_impl_Yolo # for DeepStream 6.1 +CUDA_VER=10.2 make -C nvdsinfer_custom_impl_Yolo # for DeepStream 6.0.1 / 6.0 +``` + +- **Step 7.** Edit the **config_infer_primary_yoloV5.txt** file according to your model + +```sh +[property] +... +custom-network-config=yolov5s.cfg +model-file=yolov5s.wts +... +``` + +- **Step 8.** Edit the **deepstream_app_config** file + +```sh +... +[primary-gie] +... 
+config-file=config_infer_primary_yoloV5.txt
+```
+
+- **Step 9.** Change the video source in the **deepstream_app_config** file. A sample video file is loaded by default, as shown below
+
+```sh
+...
+[source0]
+...
+uri=file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h264.mp4
+```
+
+## Run the Inference
+
+```sh
+deepstream-app -c deepstream_app_config.txt
+```
+
+ +The above result is running on **Jetson Xavier NX** with **FP32** and **YOLOv5s 640x640**. We can see that the **FPS** is around **30**. + +## INT8 Calibration + +If you want to use INT8 precision for inference, you need to follow the steps below + +- **Step 1.** Install OpenCV + +```sh +sudo apt-get install libopencv-dev +``` + +- **Step 2.** Compile/recompile the **nvdsinfer_custom_impl_Yolo** library with OpenCV support + +```sh +cd ~/DeepStream-Yolo +CUDA_VER=11.4 OPENCV=1 make -C nvdsinfer_custom_impl_Yolo # for DeepStream 6.1 +CUDA_VER=10.2 OPENCV=1 make -C nvdsinfer_custom_impl_Yolo # for DeepStream 6.0.1 / 6.0 +``` + +- **Step 3.** For COCO dataset, download the [val2017](https://drive.google.com/file/d/1gbvfn7mcsGDRZ_luJwtITL-ru2kK99aK/view?usp=sharing), extract, and move to **DeepStream-Yolo** folder + +- **Step 4.** Make a new directory for calibration images + +```sh +mkdir calibration +``` + +- **Step 5.** Run the following to select 1000 random images from COCO dataset to run calibration + +```sh +for jpg in $(ls -1 val2017/*.jpg | sort -R | head -1000); do \ + cp ${jpg} calibration/; \ +done +``` + +**Note:** NVIDIA recommends at least 500 images to get a good accuracy. On this example, 1000 images are chosen to get better accuracy (more images = more accuracy). Higher INT8_CALIB_BATCH_SIZE values will result in more accuracy and faster calibration speed. Set it according to you GPU memory. You can set it from **head -1000**. For example, for 2000 images, **head -2000**. This process can take a long time. + +- **Step 6.** Create the **calibration.txt** file with all selected images + +```sh +realpath calibration/*jpg > calibration.txt +``` + +- **Step 7.** Set environment variables + +```sh +export INT8_CALIB_IMG_PATH=calibration.txt +export INT8_CALIB_BATCH_SIZE=1 +``` + +- **Step 8.** Update the **config_infer_primary_yoloV5.txt** file + +From + +```sh +... +model-engine-file=model_b1_gpu0_fp32.engine +#int8-calib-file=calib.table +... +network-mode=0 +... +``` + +To + +```sh +... +model-engine-file=model_b1_gpu0_int8.engine +int8-calib-file=calib.table +... +network-mode=1 +... +``` + +- **Step 9.** Run the inference + +```sh +deepstream-app -c deepstream_app_config.txt +``` + +
+ +The above result is running on **Jetson Xavier NX** with **INT8** and **YOLOv5s 640x640**. We can see that the **FPS** is around **60**. + +## Benchmark results + +The following table summarizes how different models perform on **Jetson Xavier NX**. + +| Model Name | Precision | Inference Size | Inference Time (ms) | FPS | +|------------|-----------|----------------|---------------------|-----| +| YOLOv5s | FP32 | 320x320 | 16.66 | 60 | +| | FP32 | 640x640 | 33.33 | 30 | +| | INT8 | 640x640 | 16.66 | 60 | +| YOLOv5n | FP32 | 640x640 | 16.66 | 60 | + + +### Additional + +This tutorial is written by our friends at seeed @lakshanthad and Elaine diff --git a/docs/yolov5/multi_gpu_training.md b/docs/yolov5/multi_gpu_training.md new file mode 100644 index 0000000..28ac865 --- /dev/null +++ b/docs/yolov5/multi_gpu_training.md @@ -0,0 +1,186 @@ +📚 This guide explains how to properly use **multiple** GPUs to train a dataset with YOLOv5 🚀 on single or multiple machine(s). +UPDATED 25 December 2022. + +## Before You Start + +Clone repo and install [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) in a [**Python>=3.7.0**](https://www.python.org/) environment, including [**PyTorch>=1.7**](https://pytorch.org/get-started/locally/). [Models](https://github.com/ultralytics/yolov5/tree/master/models) and [datasets](https://github.com/ultralytics/yolov5/tree/master/data) download automatically from the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases). + +```bash +git clone https://github.com/ultralytics/yolov5 # clone +cd yolov5 +pip install -r requirements.txt # install +``` + +💡 ProTip! **Docker Image** is recommended for all Multi-GPU trainings. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) Docker Pulls + +💡 ProTip! `torch.distributed.run` replaces `torch.distributed.launch` in **PyTorch>=1.9**. See [docs](https://pytorch.org/docs/stable/distributed.html) for details. + +## Training + +Select a pretrained model to start training from. Here we select [YOLOv5s](https://github.com/ultralytics/yolov5/blob/master/models/yolov5s.yaml), the smallest and fastest model available. See our README [table](https://github.com/ultralytics/yolov5#pretrained-checkpoints) for a full comparison of all models. We will train this model with Multi-GPU on the [COCO](https://github.com/ultralytics/yolov5/blob/master/data/scripts/get_coco.sh) dataset. + +

YOLOv5 Models

+
+
+### Single GPU
+
+```bash
+python train.py --batch 64 --data coco.yaml --weights yolov5s.pt --device 0
+```
+
+### Multi-GPU [DataParallel](https://pytorch.org/docs/stable/nn.html#torch.nn.DataParallel) Mode (⚠️ not recommended)
+
+You can pass additional GPU ids to `--device` to use multiple GPUs in DataParallel mode.
+```bash
+python train.py --batch 64 --data coco.yaml --weights yolov5s.pt --device 0,1
+```
+
+This method is slow and barely speeds up training compared to using just 1 GPU.
+
+### Multi-GPU [DistributedDataParallel](https://pytorch.org/docs/stable/nn.html#torch.nn.parallel.DistributedDataParallel) Mode (✅ recommended)
+
+You will have to pass `python -m torch.distributed.run --nproc_per_node`, followed by the usual arguments.
+
+```bash
+python -m torch.distributed.run --nproc_per_node 2 train.py --batch 64 --data coco.yaml --weights yolov5s.pt --device 0,1
+```
+
+`--nproc_per_node` specifies how many GPUs you would like to use. In the example above, it is 2.
+`--batch` is the total batch size; it is divided evenly across the GPUs. In the example above, it is 64/2=32 per GPU.
+
+The code above will use GPUs `0... (N-1)`.
+
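Before launching, it can help to confirm how the total batch will be split across the visible GPUs. The snippet below is a small illustrative check, not part of the repository:

```python
import torch

total_batch = 64                    # value passed to --batch
n_gpus = torch.cuda.device_count()  # GPUs visible to this process, e.g. 2
assert total_batch % n_gpus == 0, "--batch must be a multiple of the number of GPUs"
print(f"{n_gpus} GPUs -> {total_batch // n_gpus} images per GPU")
```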
+ Use specific GPUs (click to expand) + +You can do so by simply passing `--device` followed by your specific GPUs. For example, in the code below, we will use GPUs `2,3`. + +```bash +python -m torch.distributed.run --nproc_per_node 2 train.py --batch 64 --data coco.yaml --cfg yolov5s.yaml --weights '' --device 2,3 +``` + +
+ +
+ Use SyncBatchNorm (click to expand) + +[SyncBatchNorm](https://pytorch.org/docs/master/generated/torch.nn.SyncBatchNorm.html) could increase accuracy for multiple gpu training, however, it will slow down training by a significant factor. It is **only** available for Multiple GPU DistributedDataParallel training. + +It is best used when the batch-size on **each** GPU is small (<= 8). + +To use SyncBatchNorm, simple pass `--sync-bn` to the command like below, + +```bash +python -m torch.distributed.run --nproc_per_node 2 train.py --batch 64 --data coco.yaml --cfg yolov5s.yaml --weights '' --sync-bn +``` +
+ +
+ Use Multiple machines (click to expand) + +This is **only** available for Multiple GPU DistributedDataParallel training. + +Before we continue, make sure the files on all machines are the same, dataset, codebase, etc. Afterwards, make sure the machines can communicate to each other. + +You will have to choose a master machine(the machine that the others will talk to). Note down its address(`master_addr`) and choose a port(`master_port`). I will use `master_addr = 192.168.1.1` and `master_port = 1234` for the example below. + +To use it, you can do as the following, + +```bash +# On master machine 0 +python -m torch.distributed.run --nproc_per_node G --nnodes N --node_rank 0 --master_addr "192.168.1.1" --master_port 1234 train.py --batch 64 --data coco.yaml --cfg yolov5s.yaml --weights '' +``` +```bash +# On machine R +python -m torch.distributed.run --nproc_per_node G --nnodes N --node_rank R --master_addr "192.168.1.1" --master_port 1234 train.py --batch 64 --data coco.yaml --cfg yolov5s.yaml --weights '' +``` +where `G` is number of GPU per machine, `N` is the number of machines, and `R` is the machine number from `0...(N-1)`. +Let's say I have two machines with two GPUs each, it would be `G = 2` , `N = 2`, and `R = 1` for the above. + +Training will not start until all `N` machines are connected. Output will only be shown on master machine! + +
+ + +### Notes + +- Windows support is untested, Linux is recommended. +- `--batch ` must be a multiple of the number of GPUs. +- GPU 0 will take slightly more memory than the other GPUs as it maintains EMA and is responsible for checkpointing etc. +- If you get `RuntimeError: Address already in use`, it could be because you are running multiple trainings at a time. To fix this, simply use a different port number by adding `--master_port` like below, + +```bash +python -m torch.distributed.run --master_port 1234 --nproc_per_node 2 ... +``` + +## Results + +DDP profiling results on an [AWS EC2 P4d instance](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart) with 8x A100 SXM4-40GB for YOLOv5l for 1 COCO epoch. + +
+ Profiling code + +```bash +# prepare +t=ultralytics/yolov5:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all -v "$(pwd)"/coco:/usr/src/coco $t +pip3 install torch==1.9.0+cu111 torchvision==0.10.0+cu111 -f https://download.pytorch.org/whl/torch_stable.html +cd .. && rm -rf app && git clone https://github.com/ultralytics/yolov5 -b master app && cd app +cp data/coco.yaml data/coco_profile.yaml + +# profile +python train.py --batch-size 16 --data coco_profile.yaml --weights yolov5l.pt --epochs 1 --device 0 +python -m torch.distributed.run --nproc_per_node 2 train.py --batch-size 32 --data coco_profile.yaml --weights yolov5l.pt --epochs 1 --device 0,1 +python -m torch.distributed.run --nproc_per_node 4 train.py --batch-size 64 --data coco_profile.yaml --weights yolov5l.pt --epochs 1 --device 0,1,2,3 +python -m torch.distributed.run --nproc_per_node 8 train.py --batch-size 128 --data coco_profile.yaml --weights yolov5l.pt --epochs 1 --device 0,1,2,3,4,5,6,7 +``` + +
+
+| GPUs<br>A100 | batch-size | CUDA_mem<br>device0 (G) | COCO<br>train | COCO<br>val |
+|--------------|------------|-------------------------|---------------|-------------|
+| 1x           | 16         | 26GB                    | 20:39         | 0:55        |
+| 2x           | 32         | 26GB                    | 11:43         | 0:57        |
+| 4x           | 64         | 26GB                    | 5:57          | 0:55        |
+| 8x           | 128        | 26GB                    | 3:09          | 0:57        |
+
+## FAQ
+
+If an error occurs, please read the checklist below first! (It could save you time)
+
+ Checklist (click to expand)
+
+- Have you properly read this post?
+- Have you tried to reclone the codebase? The code changes daily.
+- Have you tried to search for your error? Someone may have already encountered it in this repo or in another and may already have the solution.
+- Have you installed all the requirements listed on top (including the correct Python and PyTorch versions)?
+- Have you tried the other environments listed in the "Environments" section below?
+- Have you tried with another dataset like coco128 or coco2017? It will make it easier to find the root cause.
+ +If you went through all the above, feel free to raise an Issue by giving as much detail as possible following the template. + +
+ + +## Environments + +YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled): + +- **Notebooks** with free GPU: Run on Gradient Open In Colab Open In Kaggle +- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart) +- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart) +- **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) Docker Pulls + + +## Status + +YOLOv5 CI + +If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 [training](https://github.com/ultralytics/yolov5/blob/master/train.py), [validation](https://github.com/ultralytics/yolov5/blob/master/val.py), [inference](https://github.com/ultralytics/yolov5/blob/master/detect.py), [export](https://github.com/ultralytics/yolov5/blob/master/export.py) and [benchmarks](https://github.com/ultralytics/yolov5/blob/master/benchmarks.py) on macOS, Windows, and Ubuntu every 24 hours and on every commit. + + +## Credits + +I would like to thank @MagicFrogSJTU, who did all the heavy lifting, and @glenn-jocher for guiding us along the way. \ No newline at end of file diff --git a/docs/yolov5/neural_magic.md b/docs/yolov5/neural_magic.md new file mode 100644 index 0000000..23283f7 --- /dev/null +++ b/docs/yolov5/neural_magic.md @@ -0,0 +1,260 @@ + + +Welcome to software-delivered AI. + +This guide explains how to deploy YOLOv5 with Neural Magic's DeepSparse. + +DeepSparse is an inference runtime with exceptional performance on CPUs. For instance, compared to the ONNX Runtime baseline, DeepSparse offers a 5.8x speed-up for YOLOv5s, running on the same machine! + +

+ +

+
+For the first time, your deep learning workloads can meet the performance demands of production without the complexity and costs of hardware accelerators.
+Put simply, DeepSparse gives you the performance of GPUs and the simplicity of software:
+- **Flexible Deployments**: Run consistently across cloud, data center, and edge with any hardware provider from Intel to AMD to ARM
+- **Infinite Scalability**: Scale vertically to 100s of cores, scale out with standard Kubernetes, or go fully abstracted with Serverless
+- **Easy Integration**: Clean APIs for integrating your model into an application and monitoring it in production
+
+**[Start your 90-day Free Trial](https://neuralmagic.com/deepsparse-free-trial/?utm_campaign=free_trial&utm_source=ultralytics_github).**
+
+### How Does DeepSparse Achieve GPU-Class Performance?
+
+DeepSparse takes advantage of model sparsity to gain its performance speedup.
+
+Sparsification through pruning and quantization is a broadly studied technique, allowing order-of-magnitude reductions in the size and compute needed to execute a network while maintaining high accuracy. DeepSparse is sparsity-aware, meaning it skips the zeroed-out parameters, shrinking the amount of compute in a forward pass. Since the sparse computation is now memory-bound, DeepSparse executes the network depth-wise, breaking the problem into Tensor Columns, vertical stripes of computation that fit in cache.
+

+ +

+ +Sparse networks with compressed computation, executed depth-wise in cache, allows DeepSparse to deliver GPU-class performance on CPUs! + +### How Do I Create A Sparse Version of YOLOv5 Trained on My Data? + +Neural Magic's open-source model repository, SparseZoo, contains pre-sparsified checkpoints of each YOLOv5 model. Using SparseML, which is integrated with Ultralytics, you can fine-tune a sparse checkpoint onto your data with a single CLI command. + +[Checkout Neural Magic's YOLOv5 documentation for more details](https://docs.neuralmagic.com/use-cases/object-detection/sparsifying). + +## DeepSparse Usage + +We will walk through an example benchmarking and deploying a sparse version of YOLOv5s with DeepSparse. + +### Install DeepSparse + +Run the following to install DeepSparse. We recommend you use a virtual environment with Python. + +```bash +pip install deepsparse[server,yolo,onnxruntime] +``` + +### Collect an ONNX File + +DeepSparse accepts a model in the ONNX format, passed either as: +- A SparseZoo stub which identifies an ONNX file in the SparseZoo +- A local path to an ONNX model in a filesystem + +The examples below use the standard dense and pruned-quantized YOLOv5s checkpoints, identified by the following SparseZoo stubs: +```bash +zoo:cv/detection/yolov5-s/pytorch/ultralytics/coco/base-none +zoo:cv/detection/yolov5-s/pytorch/ultralytics/coco/pruned65_quant-none +``` + +### Deploy a Model + +DeepSparse offers convenient APIs for integrating your model into an application. + +To try the deployment examples below, pull down a sample image and save it as `basilica.jpg` with the following: +```bash +wget -O basilica.jpg https://raw.githubusercontent.com/neuralmagic/deepsparse/main/src/deepsparse/yolo/sample_images/basilica.jpg +``` + +#### Python API + +`Pipelines` wrap pre-processing and output post-processing around the runtime, providing a clean interface for adding DeepSparse to an application. +The DeepSparse-Ultralytics integration includes an out-of-the-box `Pipeline` that accepts raw images and outputs the bounding boxes. + +Create a `Pipeline` and run inference: + +```python +from deepsparse import Pipeline + +# list of images in local filesystem +images = ["basilica.jpg"] + +# create Pipeline +model_stub = "zoo:cv/detection/yolov5-s/pytorch/ultralytics/coco/pruned65_quant-none" +yolo_pipeline = Pipeline.create( + task="yolo", + model_path=model_stub, +) + +# run inference on images, receive bounding boxes + classes +pipeline_outputs = yolo_pipeline(images=images, iou_thres=0.6, conf_thres=0.001) +print(pipeline_outputs) +``` + +If you are running in the cloud, you may get an error that open-cv cannot find `libGL.so.1`. Running the following on Ubuntu installs it: + +``` +apt-get install libgl1-mesa-glx +``` + +#### HTTP Server + +DeepSparse Server runs on top of the popular FastAPI web framework and Uvicorn web server. With just a single CLI command, you can easily setup a model +service endpoint with DeepSparse. The Server supports any Pipeline from DeepSparse, including object detection with YOLOv5, enabling you to send raw +images to the endpoint and receive the bounding boxes. 
+ +Spin up the Server with the pruned-quantized YOLOv5s: + +```bash +deepsparse.server \ + --task yolo \ + --model_path zoo:cv/detection/yolov5-s/pytorch/ultralytics/coco/pruned65_quant-none +``` + +An example request, using Python's `requests` package: +```python +import requests, json + +# list of images for inference (local files on client side) +path = ['basilica.jpg'] +files = [('request', open(img, 'rb')) for img in path] + +# send request over HTTP to /predict/from_files endpoint +url = 'http://0.0.0.0:5543/predict/from_files' +resp = requests.post(url=url, files=files) + +# response is returned in JSON +annotations = json.loads(resp.text) # dictionary of annotation results +bounding_boxes = annotations["boxes"] +labels = annotations["labels"] +``` + +#### Annotate CLI +You can also use the annotate command to have the engine save an annotated photo on disk. Try --source 0 to annotate your live webcam feed! +```bash +deepsparse.object_detection.annotate --model_filepath zoo:cv/detection/yolov5-s/pytorch/ultralytics/coco/pruned65_quant-none --source basilica.jpg +``` + +Running the above command will create an `annotation-results` folder and save the annotated image inside. + +

+annotated +

+ +## Benchmarking Performance + +We will compare DeepSparse's throughput to ONNX Runtime's throughput on YOLOv5s, using DeepSparse's benchmarking script. + +The benchmarks were run on an AWS `c6i.8xlarge` instance (16 cores). + +### Batch 32 Performance Comparison + +#### ONNX Runtime Baseline + +At batch 32, ONNX Runtime achieves 42 images/sec with the standard dense YOLOv5s: + +```bash +deepsparse.benchmark zoo:cv/detection/yolov5-s/pytorch/ultralytics/coco/base-none -s sync -b 32 -nstreams 1 -e onnxruntime + +> Original Model Path: zoo:cv/detection/yolov5-s/pytorch/ultralytics/coco/base-none +> Batch Size: 32 +> Scenario: sync +> Throughput (items/sec): 41.9025 +``` + +#### DeepSparse Dense Performance + +While DeepSparse offers its best performance with optimized sparse models, it also performs well with the standard dense YOLOv5s. + +At batch 32, DeepSparse achieves 70 images/sec with the standard dense YOLOv5s, a **1.7x performance improvement over ORT**! + +```bash +deepsparse.benchmark zoo:cv/detection/yolov5-s/pytorch/ultralytics/coco/base-none -s sync -b 32 -nstreams 1 + +> Original Model Path: zoo:cv/detection/yolov5-s/pytorch/ultralytics/coco/base-none +> Batch Size: 32 +> Scenario: sync +> Throughput (items/sec): 69.5546 +``` +#### DeepSparse Sparse Performance + +When sparsity is applied to the model, DeepSparse's performance gains over ONNX Runtime is even stronger. + +At batch 32, DeepSparse achieves 241 images/sec with the pruned-quantized YOLOv5s, a **5.8x performance improvement over ORT**! + +```bash +deepsparse.benchmark zoo:cv/detection/yolov5-s/pytorch/ultralytics/coco/pruned65_quant-none -s sync -b 32 -nstreams 1 + +> Original Model Path: zoo:cv/detection/yolov5-s/pytorch/ultralytics/coco/pruned65_quant-none +> Batch Size: 32 +> Scenario: sync +> Throughput (items/sec): 241.2452 +``` + +### Batch 1 Performance Comparison + +DeepSparse is also able to gain a speed-up over ONNX Runtime for the latency-sensitive, batch 1 scenario. + +#### ONNX Runtime Baseline +At batch 1, ONNX Runtime achieves 48 images/sec with the standard, dense YOLOv5s. + +```bash +deepsparse.benchmark zoo:cv/detection/yolov5-s/pytorch/ultralytics/coco/base-none -s sync -b 1 -nstreams 1 -e onnxruntime + +> Original Model Path: zoo:cv/detection/yolov5-s/pytorch/ultralytics/coco/base-none +> Batch Size: 1 +> Scenario: sync +> Throughput (items/sec): 48.0921 +``` + +#### DeepSparse Sparse Performance + +At batch 1, DeepSparse achieves 135 items/sec with a pruned-quantized YOLOv5s, **a 2.8x performance gain over ONNX Runtime!** + +```bash +deepsparse.benchmark zoo:cv/detection/yolov5-s/pytorch/ultralytics/coco/pruned65_quant-none -s sync -b 1 -nstreams 1 + +> Original Model Path: zoo:cv/detection/yolov5-s/pytorch/ultralytics/coco/pruned65_quant-none +> Batch Size: 1 +> Scenario: sync +> Throughput (items/sec): 134.9468 +``` + +Since `c6i.8xlarge` instances have VNNI instructions, DeepSparse's throughput can be pushed further if weights are pruned in blocks of 4. 
+ +At batch 1, DeepSparse achieves 180 items/sec with a 4-block pruned-quantized YOLOv5s, a **3.7x performance gain over ONNX Runtime!** + +```bash +deepsparse.benchmark zoo:cv/detection/yolov5-s/pytorch/ultralytics/coco/pruned35_quant-none-vnni -s sync -b 1 -nstreams 1 + +> Original Model Path: zoo:cv/detection/yolov5-s/pytorch/ultralytics/coco/pruned35_quant-none-vnni +> Batch Size: 1 +> Scenario: sync +> Throughput (items/sec): 179.7375 +``` + +## Get Started With DeepSparse + +**Research or Testing?** DeepSparse Community is free for research and testing. Get started with our [Documentation](https://docs.neuralmagic.com/). + +**Want to Try DeepSparse Enterprise?** [Start your 90 day free trial](https://neuralmagic.com/deepsparse-free-trial/?utm_campaign=free_trial&utm_source=ultralytics_github). diff --git a/docs/yolov5/pruning_sparsity.md b/docs/yolov5/pruning_sparsity.md new file mode 100644 index 0000000..42d225d --- /dev/null +++ b/docs/yolov5/pruning_sparsity.md @@ -0,0 +1,103 @@ +📚 This guide explains how to apply **pruning** to YOLOv5 🚀 models. +UPDATED 25 September 2022. + +## Before You Start + +Clone repo and install [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) in a [**Python>=3.7.0**](https://www.python.org/) environment, including [**PyTorch>=1.7**](https://pytorch.org/get-started/locally/). [Models](https://github.com/ultralytics/yolov5/tree/master/models) and [datasets](https://github.com/ultralytics/yolov5/tree/master/data) download automatically from the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases). + +```bash +git clone https://github.com/ultralytics/yolov5 # clone +cd yolov5 +pip install -r requirements.txt # install +``` + +## Test Normally + +Before pruning we want to establish a baseline performance to compare to. This command tests YOLOv5x on COCO val2017 at image size 640 pixels. `yolov5x.pt` is the largest and most accurate model available. Other options are `yolov5s.pt`, `yolov5m.pt` and `yolov5l.pt`, or you own checkpoint from training a custom dataset `./weights/best.pt`. For details on all available models please see our README [table](https://github.com/ultralytics/yolov5#pretrained-checkpoints). +```bash +python val.py --weights yolov5x.pt --data coco.yaml --img 640 --half +``` + +Output: +```shell +val: data=/content/yolov5/data/coco.yaml, weights=['yolov5x.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.65, task=val, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=True, project=runs/val, name=exp, exist_ok=False, half=True, dnn=False +YOLOv5 🚀 v6.0-224-g4c40933 torch 1.10.0+cu111 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB) + +Fusing layers... +Model Summary: 444 layers, 86705005 parameters, 0 gradients +val: Scanning '/content/datasets/coco/val2017.cache' images and labels... 4952 found, 48 missing, 0 empty, 0 corrupt: 100% 5000/5000 [00:00 + +30% pruned output: +```bash +val: data=/content/yolov5/data/coco.yaml, weights=['yolov5x.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.65, task=val, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=True, project=runs/val, name=exp, exist_ok=False, half=True, dnn=False +YOLOv5 🚀 v6.0-224-g4c40933 torch 1.10.0+cu111 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB) + +Fusing layers... +Model Summary: 444 layers, 86705005 parameters, 0 gradients +Pruning model... 
0.3 global sparsity +val: Scanning '/content/datasets/coco/val2017.cache' images and labels... 4952 found, 48 missing, 0 empty, 0 corrupt: 100% 5000/5000 [00:00Run on Gradient Open In Colab Open In Kaggle +- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart) +- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart) +- **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) Docker Pulls + + +## Status + +YOLOv5 CI + +If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 [training](https://github.com/ultralytics/yolov5/blob/master/train.py), [validation](https://github.com/ultralytics/yolov5/blob/master/val.py), [inference](https://github.com/ultralytics/yolov5/blob/master/detect.py), [export](https://github.com/ultralytics/yolov5/blob/master/export.py) and [benchmarks](https://github.com/ultralytics/yolov5/blob/master/benchmarks.py) on macOS, Windows, and Ubuntu every 24 hours and on every commit. \ No newline at end of file diff --git a/docs/yolov5/pytorch_hub.md b/docs/yolov5/pytorch_hub.md new file mode 100644 index 0000000..b752eec --- /dev/null +++ b/docs/yolov5/pytorch_hub.md @@ -0,0 +1,290 @@ +📚 This guide explains how to load YOLOv5 🚀 from PyTorch Hub at [https://pytorch.org/hub/ultralytics_yolov5](https://pytorch.org/hub/ultralytics_yolov5). +UPDATED 26 March 2023. + +## Before You Start + +Install [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) in a [**Python>=3.7.0**](https://www.python.org/) environment, including [**PyTorch>=1.7**](https://pytorch.org/get-started/locally/). [Models](https://github.com/ultralytics/yolov5/tree/master/models) and [datasets](https://github.com/ultralytics/yolov5/tree/master/data) download automatically from the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases). + +```bash +pip install -r https://raw.githubusercontent.com/ultralytics/yolov5/master/requirements.txt +``` + +💡 ProTip: Cloning [https://github.com/ultralytics/yolov5](https://github.com/ultralytics/yolov5) is **not** required 😃 + +## Load YOLOv5 with PyTorch Hub + +### Simple Example + +This example loads a pretrained YOLOv5s model from PyTorch Hub as `model` and passes an image for inference. `'yolov5s'` is the lightest and fastest YOLOv5 model. For details on all available models please see the [README](https://github.com/ultralytics/yolov5#pretrained-checkpoints). +```python +import torch + +# Model +model = torch.hub.load('ultralytics/yolov5', 'yolov5s') + +# Image +im = 'https://ultralytics.com/images/zidane.jpg' + +# Inference +results = model(im) + +results.pandas().xyxy[0] +# xmin ymin xmax ymax confidence class name +# 0 749.50 43.50 1148.0 704.5 0.874023 0 person +# 1 433.50 433.50 517.5 714.5 0.687988 27 tie +# 2 114.75 195.75 1095.0 708.0 0.624512 0 person +# 3 986.00 304.00 1028.0 420.0 0.286865 27 tie +``` + + +### Detailed Example + +This example shows **batched inference** with **PIL** and **OpenCV** image sources. `results` can be **printed** to console, **saved** to `runs/hub`, **showed** to screen on supported environments, and returned as **tensors** or **pandas** dataframes. 
+```python +import cv2 +import torch +from PIL import Image + +# Model +model = torch.hub.load('ultralytics/yolov5', 'yolov5s') + +# Images +for f in 'zidane.jpg', 'bus.jpg': + torch.hub.download_url_to_file('https://ultralytics.com/images/' + f, f) # download 2 images +im1 = Image.open('zidane.jpg') # PIL image +im2 = cv2.imread('bus.jpg')[..., ::-1] # OpenCV image (BGR to RGB) + +# Inference +results = model([im1, im2], size=640) # batch of images + +# Results +results.print() +results.save() # or .show() + +results.xyxy[0] # im1 predictions (tensor) +results.pandas().xyxy[0] # im1 predictions (pandas) +# xmin ymin xmax ymax confidence class name +# 0 749.50 43.50 1148.0 704.5 0.874023 0 person +# 1 433.50 433.50 517.5 714.5 0.687988 27 tie +# 2 114.75 195.75 1095.0 708.0 0.624512 0 person +# 3 986.00 304.00 1028.0 420.0 0.286865 27 tie +``` + + +For all inference options see YOLOv5 `AutoShape()` forward [method](https://github.com/ultralytics/yolov5/blob/30e4c4f09297b67afedf8b2bcd851833ddc9dead/models/common.py#L243-L252). + +### Inference Settings +YOLOv5 models contain various inference attributes such as **confidence threshold**, **IoU threshold**, etc. which can be set by: +```python +model.conf = 0.25 # NMS confidence threshold + iou = 0.45 # NMS IoU threshold + agnostic = False # NMS class-agnostic + multi_label = False # NMS multiple labels per box + classes = None # (optional list) filter by class, i.e. = [0, 15, 16] for COCO persons, cats and dogs + max_det = 1000 # maximum number of detections per image + amp = False # Automatic Mixed Precision (AMP) inference + +results = model(im, size=320) # custom inference size +``` + + +### Device +Models can be transferred to any device after creation: +```python +model.cpu() # CPU +model.cuda() # GPU +model.to(device) # i.e. device=torch.device(0) +``` + +Models can also be created directly on any `device`: +```python +model = torch.hub.load('ultralytics/yolov5', 'yolov5s', device='cpu') # load on CPU +``` + +💡 ProTip: Input images are automatically transferred to the correct model device before inference. + +### Silence Outputs +Models can be loaded silently with `_verbose=False`: +```python +model = torch.hub.load('ultralytics/yolov5', 'yolov5s', _verbose=False) # load silently +``` + +### Input Channels +To load a pretrained YOLOv5s model with 4 input channels rather than the default 3: +```python +model = torch.hub.load('ultralytics/yolov5', 'yolov5s', channels=4) +``` +In this case the model will be composed of pretrained weights **except for** the very first input layer, which is no longer the same shape as the pretrained input layer. The input layer will remain initialized by random weights. + +### Number of Classes +To load a pretrained YOLOv5s model with 10 output classes rather than the default 80: +```python +model = torch.hub.load('ultralytics/yolov5', 'yolov5s', classes=10) +``` +In this case the model will be composed of pretrained weights **except for** the output layers, which are no longer the same shape as the pretrained output layers. The output layers will remain initialized by random weights. + +### Force Reload +If you run into problems with the above steps, setting `force_reload=True` may help by discarding the existing cache and force a fresh download of the latest YOLOv5 version from PyTorch Hub. 
+```python +model = torch.hub.load('ultralytics/yolov5', 'yolov5s', force_reload=True) # force reload +``` + +### Screenshot Inference +To run inference on your desktop screen: +```python +import torch +from PIL import ImageGrab + +# Model +model = torch.hub.load('ultralytics/yolov5', 'yolov5s') + +# Image +im = ImageGrab.grab() # take a screenshot + +# Inference +results = model(im) +``` + +### Multi-GPU Inference + +YOLOv5 models can be loaded to multiple GPUs in parallel with threaded inference: + +```python +import torch +import threading + +def run(model, im): + results = model(im) + results.save() + +# Models +model0 = torch.hub.load('ultralytics/yolov5', 'yolov5s', device=0) +model1 = torch.hub.load('ultralytics/yolov5', 'yolov5s', device=1) + +# Inference +threading.Thread(target=run, args=[model0, 'https://ultralytics.com/images/zidane.jpg'], daemon=True).start() +threading.Thread(target=run, args=[model1, 'https://ultralytics.com/images/bus.jpg'], daemon=True).start() +``` + +### Training +To load a YOLOv5 model for training rather than inference, set `autoshape=False`. To load a model with randomly initialized weights (to train from scratch) use `pretrained=False`. You must provide your own training script in this case. Alternatively see our YOLOv5 [Train Custom Data Tutorial](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data) for model training. +```python +model = torch.hub.load('ultralytics/yolov5', 'yolov5s', autoshape=False) # load pretrained +model = torch.hub.load('ultralytics/yolov5', 'yolov5s', autoshape=False, pretrained=False) # load scratch +``` + +### Base64 Results +For use with API services. See https://github.com/ultralytics/yolov5/pull/2291 and [Flask REST API](https://github.com/ultralytics/yolov5/tree/master/utils/flask_rest_api) example for details. +```python +results = model(im) # inference + +results.ims # array of original images (as np array) passed to model for inference +results.render() # updates results.ims with boxes and labels +for im in results.ims: + buffered = BytesIO() + im_base64 = Image.fromarray(im) + im_base64.save(buffered, format="JPEG") + print(base64.b64encode(buffered.getvalue()).decode('utf-8')) # base64 encoded image with results +``` + +### Cropped Results +Results can be returned and saved as detection crops: +```python +results = model(im) # inference +crops = results.crop(save=True) # cropped detections dictionary +``` + +### Pandas Results +Results can be returned as [Pandas DataFrames](https://pandas.pydata.org/): +```python +results = model(im) # inference +results.pandas().xyxy[0] # Pandas DataFrame +``` +
+ Pandas Output (click to expand) + +```python +print(results.pandas().xyxy[0]) +# xmin ymin xmax ymax confidence class name +# 0 749.50 43.50 1148.0 704.5 0.874023 0 person +# 1 433.50 433.50 517.5 714.5 0.687988 27 tie +# 2 114.75 195.75 1095.0 708.0 0.624512 0 person +# 3 986.00 304.00 1028.0 420.0 0.286865 27 tie +``` +
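The returned object is a regular [pandas](https://pandas.pydata.org/) DataFrame, so standard DataFrame operations apply directly. A minimal sketch of filtering and counting the detections shown above (nothing here is YOLOv5-specific beyond `results`):

```python
df = results.pandas().xyxy[0]       # detections for the first image as a DataFrame
print(df['name'].value_counts())    # per-class counts, e.g. 2 person / 2 tie for the example above
print(df[df['confidence'] > 0.5])   # keep only higher-confidence detections
```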
+ +### Sorted Results +Results can be sorted by column, i.e. to sort license plate digit detection left-to-right (x-axis): +```python +results = model(im) # inference +results.pandas().xyxy[0].sort_values('xmin') # sorted left-right +``` + +### Box-Cropped Results +Results can be returned and saved as detection crops: +```python +results = model(im) # inference +crops = results.crop(save=True) # cropped detections dictionary +``` + +### JSON Results +Results can be returned in JSON format once converted to `.pandas()` dataframes using the `.to_json()` method. The JSON format can be modified using the `orient` argument. See pandas `.to_json()` [documentation](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_json.html) for details. +```python +results = model(ims) # inference +results.pandas().xyxy[0].to_json(orient="records") # JSON img1 predictions +``` + +
+ JSON Output (click to expand) + +```json +[ +{"xmin":749.5,"ymin":43.5,"xmax":1148.0,"ymax":704.5,"confidence":0.8740234375,"class":0,"name":"person"}, +{"xmin":433.5,"ymin":433.5,"xmax":517.5,"ymax":714.5,"confidence":0.6879882812,"class":27,"name":"tie"}, +{"xmin":115.25,"ymin":195.75,"xmax":1096.0,"ymax":708.0,"confidence":0.6254882812,"class":0,"name":"person"}, +{"xmin":986.0,"ymin":304.0,"xmax":1028.0,"ymax":420.0,"confidence":0.2873535156,"class":27,"name":"tie"} +] +``` + +
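Because `.to_json()` returns a plain JSON string, it can be parsed back into native Python objects, for example when building an API response. A minimal sketch using only the standard library:

```python
import json

json_str = results.pandas().xyxy[0].to_json(orient="records")  # JSON string of detections
for det in json.loads(json_str):  # list of dicts: xmin, ymin, xmax, ymax, confidence, class, name
    print(det["name"], round(det["confidence"], 2))
```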
+ +## Custom Models +This example loads a custom 20-class [VOC](https://github.com/ultralytics/yolov5/blob/master/data/VOC.yaml)-trained YOLOv5s model `'best.pt'` with PyTorch Hub. +```python +model = torch.hub.load('ultralytics/yolov5', 'custom', path='path/to/best.pt') # local model +model = torch.hub.load('path/to/yolov5', 'custom', path='path/to/best.pt', source='local') # local repo +``` + +## TensorRT, ONNX and OpenVINO Models + +PyTorch Hub supports inference on most YOLOv5 export formats, including custom trained models. See [TFLite, ONNX, CoreML, TensorRT Export tutorial](https://github.com/ultralytics/yolov5/issues/251) for details on exporting models. + +💡 ProTip: **TensorRT** may be up to 2-5X faster than PyTorch on [**GPU benchmarks**](https://github.com/ultralytics/yolov5/pull/6963) +💡 ProTip: **ONNX** and **OpenVINO** may be up to 2-3X faster than PyTorch on [**CPU benchmarks**](https://github.com/ultralytics/yolov5/pull/6613) + +```python +model = torch.hub.load('ultralytics/yolov5', 'custom', path='yolov5s.pt') # PyTorch + 'yolov5s.torchscript') # TorchScript + 'yolov5s.onnx') # ONNX + 'yolov5s_openvino_model/') # OpenVINO + 'yolov5s.engine') # TensorRT + 'yolov5s.mlmodel') # CoreML (macOS-only) + 'yolov5s.tflite') # TFLite + 'yolov5s_paddle_model/') # PaddlePaddle +``` + +## Environments + +YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled): + +- **Notebooks** with free GPU: Run on Gradient Open In Colab Open In Kaggle +- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart) +- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart) +- **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) Docker Pulls + + +## Status + +YOLOv5 CI + +If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 [training](https://github.com/ultralytics/yolov5/blob/master/train.py), [validation](https://github.com/ultralytics/yolov5/blob/master/val.py), [inference](https://github.com/ultralytics/yolov5/blob/master/detect.py), [export](https://github.com/ultralytics/yolov5/blob/master/export.py) and [benchmarks](https://github.com/ultralytics/yolov5/blob/master/benchmarks.py) on macOS, Windows, and Ubuntu every 24 hours and on every commit. \ No newline at end of file diff --git a/docs/yolov5/roboflow.md b/docs/yolov5/roboflow.md new file mode 100644 index 0000000..d73963c --- /dev/null +++ b/docs/yolov5/roboflow.md @@ -0,0 +1,37 @@ +# Roboflow Datasets + +You can now use Roboflow to organize, label, prepare, version, and host your datasets for training YOLOv5 🚀 models. Roboflow is free to use with YOLOv5 if you make your workspace public. +UPDATED 30 September 2021. + +## Upload +You can upload your data to Roboflow via [web UI](https://docs.roboflow.com/adding-data), [rest API](https://docs.roboflow.com/adding-data/upload-api), or [python](https://docs.roboflow.com/python). + +## Labeling +After uploading data to Roboflow, you can label your data and review previous labels. 
+ +[![Roboflow Annotate](https://roboflow-darknet.s3.us-east-2.amazonaws.com/roboflow-annotate.gif)](https://roboflow.com/annotate) + +## Versioning +You can make versions of your dataset with different preprocessing and offline augmentation options. YOLOv5 does online augmentations natively, so be intentional when layering Roboflow's offline augmentations on top. + +![Roboflow Preprocessing](https://roboflow-darknet.s3.us-east-2.amazonaws.com/robolfow-preprocessing.png) + +## Exporting Data +You can download your data in YOLOv5 format to quickly begin training. + +```python +from roboflow import Roboflow +rf = Roboflow(api_key="YOUR API KEY HERE") +project = rf.workspace().project("YOUR PROJECT") +dataset = project.version("YOUR VERSION").download("yolov5") +``` + +## Custom Training +We have released a custom training tutorial demonstrating all of the above capabilities. You can access the code here: + +[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/roboflow-ai/yolov5-custom-training-tutorial/blob/main/yolov5-custom-training.ipynb) + +## Active Learning +The real world is messy and your model will invariably encounter situations your dataset didn't anticipate. Using [active learning](https://blog.roboflow.com/what-is-active-learning/) is an important strategy to iteratively improve your dataset and model. With the Roboflow and YOLOv5 integration, you can quickly make improvements on your model deployments by using a battle-tested machine learning pipeline. + +
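As a rough follow-on sketch: recent versions of the `roboflow` package expose the extracted folder via a `location` attribute on the object returned by `download()` (treat the attribute name and folder layout as assumptions), and that folder contains the `data.yaml` you can point YOLOv5 training at:

```python
# `dataset` is the object returned by the export snippet above;
# `dataset.location` (path to the extracted folder) is assumed here
data_yaml = f"{dataset.location}/data.yaml"  # pass this to train.py via --data
print(data_yaml)
```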

diff --git a/docs/yolov5/tips_for_best_training_results.md b/docs/yolov5/tips_for_best_training_results.md new file mode 100644 index 0000000..9598143 --- /dev/null +++ b/docs/yolov5/tips_for_best_training_results.md @@ -0,0 +1,59 @@ +📚 This guide explains how to produce the best mAP and training results with YOLOv5 🚀. +UPDATED 25 May 2022. + +Most of the time good results can be obtained with no changes to the models or training settings, **provided your dataset is sufficiently large and well labelled**. If at first you don't get good results, there are steps you might be able to take to improve, but we always recommend users **first train with all default settings** before considering any changes. This helps establish a performance baseline and spot areas for improvement. + +If you have questions about your training results **we recommend you provide the maximum amount of information possible** if you expect a helpful response, including results plots (train losses, val losses, P, R, mAP), PR curve, confusion matrix, training mosaics, test results and dataset statistics images such as labels.png. All of these are located in your `project/name` directory, typically `yolov5/runs/train/exp`. + +We've put together a full guide for users looking to get the best results on their YOLOv5 trainings below. + +## Dataset + +- **Images per class.** ≥ 1500 images per class recommended +- **Instances per class.** ≥ 10000 instances (labeled objects) per class recommended +- **Image variety.** Must be representative of deployed environment. For real-world use cases we recommend images from different times of day, different seasons, different weather, different lighting, different angles, different sources (scraped online, collected locally, different cameras) etc. +- **Label consistency.** All instances of all classes in all images must be labelled. Partial labelling will not work. +- **Label accuracy.** Labels must closely enclose each object. No space should exist between an object and it's bounding box. No objects should be missing a label. +- **Label verification.** View `train_batch*.jpg` on train start to verify your labels appear correct, i.e. see [example](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data#local-logging) mosaic. +- **Background images.** Background images are images with no objects that are added to a dataset to reduce False Positives (FP). We recommend about 0-10% background images to help reduce FPs (COCO has 1000 background images for reference, 1% of the total). No labels are required for background images. + +COCO Analysis + + +## Model Selection + +Larger models like YOLOv5x and [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/tag/v5.0) will produce better results in nearly all cases, but have more parameters, require more CUDA memory to train, and are slower to run. For **mobile** deployments we recommend YOLOv5s/m, for **cloud** deployments we recommend YOLOv5l/x. See our README [table](https://github.com/ultralytics/yolov5#pretrained-checkpoints) for a full comparison of all models. + +

YOLOv5 Models

+ +- **Start from Pretrained weights.** Recommended for small to medium-sized datasets (i.e. [VOC](https://github.com/ultralytics/yolov5/blob/master/data/VOC.yaml), [VisDrone](https://github.com/ultralytics/yolov5/blob/master/data/VisDrone.yaml), [GlobalWheat](https://github.com/ultralytics/yolov5/blob/master/data/GlobalWheat2020.yaml)). Pass the name of the model to the `--weights` argument. Models download automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases). +```shell +python train.py --data custom.yaml --weights yolov5s.pt + yolov5m.pt + yolov5l.pt + yolov5x.pt + custom_pretrained.pt +``` +- **Start from Scratch.** Recommended for large datasets (i.e. [COCO](https://github.com/ultralytics/yolov5/blob/master/data/coco.yaml), [Objects365](https://github.com/ultralytics/yolov5/blob/master/data/Objects365.yaml), [OIv6](https://storage.googleapis.com/openimages/web/index.html)). Pass the model architecture yaml you are interested in, along with an empty `--weights ''` argument: +```bash +python train.py --data custom.yaml --weights '' --cfg yolov5s.yaml + yolov5m.yaml + yolov5l.yaml + yolov5x.yaml +``` + + +## Training Settings + +Before modifying anything, **first train with default settings to establish a performance baseline**. A full list of train.py settings can be found in the [train.py](https://github.com/ultralytics/yolov5/blob/master/train.py) argparser. + +- **Epochs.** Start with 300 epochs. If this overfits early then you can reduce epochs. If overfitting does not occur after 300 epochs, train longer, i.e. 600, 1200 etc epochs. +- **Image size.** COCO trains at native resolution of `--img 640`, though due to the high amount of small objects in the dataset it can benefit from training at higher resolutions such as `--img 1280`. If there are many small objects then custom datasets will benefit from training at native or higher resolution. Best inference results are obtained at the same `--img` as the training was run at, i.e. if you train at `--img 1280` you should also test and detect at `--img 1280`. +- **Batch size.** Use the largest `--batch-size` that your hardware allows for. Small batch sizes produce poor batchnorm statistics and should be avoided. +- **Hyperparameters.** Default hyperparameters are in [hyp.scratch-low.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-low.yaml). We recommend you train with default hyperparameters first before thinking of modifying any. In general, increasing augmentation hyperparameters will reduce and delay overfitting, allowing for longer trainings and higher final mAP. Reduction in loss component gain hyperparameters like `hyp['obj']` will help reduce overfitting in those specific loss components. For an automated method of optimizing these hyperparameters, see our [Hyperparameter Evolution Tutorial](https://github.com/ultralytics/yolov5/issues/607). + +## Further Reading + +If you'd like to know more, a good place to start is Karpathy's 'Recipe for Training Neural Networks', which has great ideas for training that apply broadly across all ML domains: [http://karpathy.github.io/2019/04/25/recipe/](http://karpathy.github.io/2019/04/25/recipe/) + +Good luck 🍀 and let us know if you have any other questions! 
\ No newline at end of file diff --git a/docs/yolov5/train_custom_data.md b/docs/yolov5/train_custom_data.md new file mode 100644 index 0000000..e249861 --- /dev/null +++ b/docs/yolov5/train_custom_data.md @@ -0,0 +1,229 @@ +📚 This guide explains how to train your own **custom dataset** with [YOLOv5](https://github.com/ultralytics/yolov5) 🚀. +UPDATED 26 March 2023. + +## Before You Start + +Clone repo and install [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) in a [**Python>=3.7.0**](https://www.python.org/) environment, including [**PyTorch>=1.7**](https://pytorch.org/get-started/locally/). [Models](https://github.com/ultralytics/yolov5/tree/master/models) and [datasets](https://github.com/ultralytics/yolov5/tree/master/data) download automatically from the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases). + +```bash +git clone https://github.com/ultralytics/yolov5 # clone +cd yolov5 +pip install -r requirements.txt # install +``` + +## Train On Custom Data + + + +
+
+ +Creating a custom model to detect your objects is an iterative process of collecting and organizing images, labeling your objects of interest, training a model, deploying it into the wild to make predictions, and then using that deployed model to collect examples of edge cases to repeat and improve. + +### 1. Create Dataset + +YOLOv5 models must be trained on labelled data in order to learn classes of objects in that data. There are two options for creating your dataset before you start training: + +
+Use Roboflow to manage your dataset in YOLO format + +### 1.1 Collect Images + +Your model will learn by example. Training on images similar to the ones it will see in the wild is of the utmost importance. Ideally, you will collect a wide variety of images from the same configuration (camera, angle, lighting, etc.) as you will ultimately deploy your project. + +If this is not possible, you can start from [a public dataset](https://universe.roboflow.com/?ref=ultralytics) to train your initial model and then [sample images from the wild during inference](https://blog.roboflow.com/computer-vision-active-learning-tips/?ref=ultralytics) to improve your dataset and model iteratively. + +### 1.2 Create Labels + +Once you have collected images, you will need to annotate the objects of interest to create a ground truth for your model to learn from. + +

+ +[Roboflow Annotate](https://roboflow.com/annotate?ref=ultralytics) is a simple +web-based tool for managing and labeling your images with your team and exporting +them in [YOLOv5's annotation format](https://roboflow.com/formats/yolov5-pytorch-txt?ref=ultralytics). + +### 1.3 Prepare Dataset for YOLOv5 + +Whether you [label your images with Roboflow](https://roboflow.com/annotate?ref=ultralytics) or not, you can use it to convert your dataset into YOLO format, create a YOLOv5 YAML configuration file, and host it for importing into your training script. + +[Create a free Roboflow account](https://app.roboflow.com/?model=yolov5&ref=ultralytics) +and upload your dataset to a `Public` workspace, label any unannotated images, +then generate and export a version of your dataset in `YOLOv5 Pytorch` format. + +Note: YOLOv5 does online augmentation during training, so we do not recommend +applying any augmentation steps in Roboflow for training with YOLOv5. But we +recommend applying the following preprocessing steps: + +

+ +* **Auto-Orient** - to strip EXIF orientation from your images. +* **Resize (Stretch)** - to the square input size of your model (640x640 is the YOLOv5 default). + +Generating a version will give you a point in time snapshot of your dataset so +you can always go back and compare your future model training runs against it, +even if you add more images or change its configuration later. + +

+ +Export in `YOLOv5 Pytorch` format, then copy the snippet into your training +script or notebook to download your dataset. + +
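The snippet generated by Roboflow typically follows the pattern below (a sketch with placeholder values; your API key, project and version come from the export dialog):

```python
from roboflow import Roboflow

rf = Roboflow(api_key="YOUR API KEY HERE")                    # key shown in the Roboflow export dialog
project = rf.workspace().project("YOUR PROJECT")
dataset = project.version("YOUR VERSION").download("yolov5")  # downloads in YOLOv5 PyTorch format
```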

+ +Now continue with `2. Select a Model`. +
+ +
+Or manually prepare your dataset + +### 1.1 Create dataset.yaml + +[COCO128](https://www.kaggle.com/ultralytics/coco128) is an example small tutorial dataset composed of the first 128 images in [COCO](http://cocodataset.org/#home) train2017. These same 128 images are used for both training and validation to verify our training pipeline is capable of overfitting. [data/coco128.yaml](https://github.com/ultralytics/yolov5/blob/master/data/coco128.yaml), shown below, is the dataset config file that defines 1) the dataset root directory `path` and relative paths to `train` / `val` / `test` image directories (or *.txt files with image paths) and 2) a class `names` dictionary: +```yaml +# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] +path: ../datasets/coco128 # dataset root dir +train: images/train2017 # train images (relative to 'path') 128 images +val: images/train2017 # val images (relative to 'path') 128 images +test: # test images (optional) + +# Classes (80 COCO classes) +names: + 0: person + 1: bicycle + 2: car + ... + 77: teddy bear + 78: hair drier + 79: toothbrush +``` + + +### 1.2 Create Labels + +After using an annotation tool to label your images, export your labels to **YOLO format**, with one `*.txt` file per image (if no objects in image, no `*.txt` file is required). The `*.txt` file specifications are: + +- One row per object +- Each row is `class x_center y_center width height` format. +- Box coordinates must be in **normalized xywh** format (from 0 - 1). If your boxes are in pixels, divide `x_center` and `width` by image width, and `y_center` and `height` by image height. +- Class numbers are zero-indexed (start from 0). + +
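As a rough illustration of that conversion (the numbers below are made up, not taken from any real image), a pixel-space box can be written as a YOLO label row like this:

```python
def to_yolo_row(cls, x1, y1, x2, y2, img_w, img_h):
    """Convert a pixel-space box (x1, y1, x2, y2) into a normalized YOLO label row."""
    x_center = (x1 + x2) / 2 / img_w
    y_center = (y1 + y2) / 2 / img_h
    width = (x2 - x1) / img_w
    height = (y2 - y1) / img_h
    return f"{cls} {x_center:.6f} {y_center:.6f} {width:.6f} {height:.6f}"

print(to_yolo_row(0, 100, 200, 300, 400, 640, 480))  # -> 0 0.312500 0.625000 0.312500 0.416667
```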

+ +The label file corresponding to the above image contains 2 persons (class `0`) and a tie (class `27`): + +

+ + +### 1.3 Organize Directories + +Organize your train and val images and labels according to the example below. YOLOv5 assumes `/coco128` is inside a `/datasets` directory **next to** the `/yolov5` directory. **YOLOv5 locates labels automatically for each image** by replacing the last instance of `/images/` in each image path with `/labels/`. For example: +```bash +../datasets/coco128/images/im0.jpg # image +../datasets/coco128/labels/im0.txt # label +``` + +
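A minimal sketch of that path convention (just an illustration of the rule above, not the actual YOLOv5 helper):

```python
def img2label_path(img_path: str) -> str:
    """Replace the last '/images/' with '/labels/' and the image extension with '.txt'."""
    head, _, tail = img_path.rpartition('/images/')
    stem = tail.rsplit('.', 1)[0]
    return f'{head}/labels/{stem}.txt'

print(img2label_path('../datasets/coco128/images/im0.jpg'))  # ../datasets/coco128/labels/im0.txt
```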

+
+ + +### 2. Select a Model + +Select a pretrained model to start training from. Here we select [YOLOv5s](https://github.com/ultralytics/yolov5/blob/master/models/yolov5s.yaml), the second-smallest and fastest model available. See our README [table](https://github.com/ultralytics/yolov5#pretrained-checkpoints) for a full comparison of all models. + +

YOLOv5 Models

+ +### 3. Train + +Train a YOLOv5s model on COCO128 by specifying dataset, batch-size, image size and either pretrained `--weights yolov5s.pt` (recommended), or randomly initialized `--weights '' --cfg yolov5s.yaml` (not recommended). Pretrained weights are auto-downloaded from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases). + +```bash +python train.py --img 640 --epochs 3 --data coco128.yaml --weights yolov5s.pt +``` +!!! tip "Tip" + + 💡 Add `--cache ram` or `--cache disk` to speed up training (requires significant RAM/disk resources). + +!!! tip "Tip" + + 💡 Always train from a local dataset. Mounted or network drives like Google Drive will be very slow. + +All training results are saved to `runs/train/` with incrementing run directories, i.e. `runs/train/exp2`, `runs/train/exp3` etc. For more details see the Training section of our tutorial notebook. Open In Colab Open In Kaggle + +### 4. Visualize + +#### Comet Logging and Visualization 🌟 NEW + +[Comet](https://bit.ly/yolov5-readme-comet) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://bit.ly/yolov5-colab-comet-panels)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes! + +Getting started is easy: +```shell +pip install comet_ml # 1. install +export COMET_API_KEY= # 2. paste API key +python train.py --img 640 --epochs 3 --data coco128.yaml --weights yolov5s.pt # 3. train +``` + +To learn more about all the supported Comet features for this integration, check out the [Comet Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet). If you'd like to learn more about Comet, head over to our [documentation](https://bit.ly/yolov5-colab-comet-docs). Get started by trying out the Comet Colab Notebook: +[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing) + +yolo-ui + +#### ClearML Logging and Automation 🌟 NEW + +[ClearML](https://cutt.ly/yolov5-notebook-clearml) is completely integrated into YOLOv5 to track your experimentation, manage dataset versions and even remotely execute training runs. To enable ClearML: + +- `pip install clearml` +- run `clearml-init` to connect to a ClearML server (**deploy your own open-source server [here](https://github.com/allegroai/clearml-server)**, or use our free hosted server [here](https://cutt.ly/yolov5-notebook-clearml)) + +You'll get all the great expected features from an experiment manager: live updates, model upload, experiment comparison etc. but ClearML also tracks uncommitted changes and installed packages for example. Thanks to that ClearML Tasks (which is what we call experiments) are also reproducible on different machines! With only 1 extra line, we can schedule a YOLOv5 training task on a queue to be executed by any number of ClearML Agents (workers). + +You can use ClearML Data to version your dataset and then pass it to YOLOv5 simply using its unique ID. This will help you keep track of your data without adding extra hassle. Explore the [ClearML Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml) for details! 
+ + +ClearML Experiment Management UI + + +#### Local Logging + +Training results are automatically logged with [Tensorboard](https://www.tensorflow.org/tensorboard) and [CSV](https://github.com/ultralytics/yolov5/pull/4148) loggers to `runs/train`, with a new experiment directory created for each new training as `runs/train/exp2`, `runs/train/exp3`, etc. + +This directory contains train and val statistics, mosaics, labels, predictions and augmented mosaics, as well as metrics and charts including precision-recall (PR) curves and confusion matrices. + +Local logging results + +Results file `results.csv` is updated after each epoch, and then plotted as `results.png` (below) after training completes. You can also plot any `results.csv` file manually: + +```python +from utils.plots import plot_results +plot_results('path/to/results.csv') # plot 'results.csv' as 'results.png' +``` + +

results.png

+ + + +## Next Steps + +Once your model is trained you can use your best checkpoint `best.pt` to: +* Run [CLI](https://github.com/ultralytics/yolov5#quick-start-examples) or [Python](https://github.com/ultralytics/yolov5/issues/36) inference on new images and videos +* [Validate](https://github.com/ultralytics/yolov5/blob/master/val.py) accuracy on train, val and test splits +* [Export](https://github.com/ultralytics/yolov5/issues/251) to TensorFlow, Keras, ONNX, TFlite, TF.js, CoreML and TensorRT formats +* [Evolve](https://github.com/ultralytics/yolov5/issues/607) hyperparameters to improve performance +* [Improve](https://docs.roboflow.com/adding-data/upload-api?ref=ultralytics) your model by sampling real-world images and adding them to your dataset + + +## Environments + +YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled): + +- **Notebooks** with free GPU: Run on Gradient Open In Colab Open In Kaggle +- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart) +- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart) +- **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) Docker Pulls + + +## Status + +YOLOv5 CI + +If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 [training](https://github.com/ultralytics/yolov5/blob/master/train.py), [validation](https://github.com/ultralytics/yolov5/blob/master/val.py), [inference](https://github.com/ultralytics/yolov5/blob/master/detect.py), [export](https://github.com/ultralytics/yolov5/blob/master/export.py) and [benchmarks](https://github.com/ultralytics/yolov5/blob/master/benchmarks.py) on macOS, Windows, and Ubuntu every 24 hours and on every commit. diff --git a/docs/yolov5/transfer_learn_frozen.md b/docs/yolov5/transfer_learn_frozen.md new file mode 100644 index 0000000..5e1e6d0 --- /dev/null +++ b/docs/yolov5/transfer_learn_frozen.md @@ -0,0 +1,146 @@ +📚 This guide explains how to **freeze** YOLOv5 🚀 layers when **transfer learning**. Transfer learning is a useful way to quickly retrain a model on new data without having to retrain the entire network. Instead, part of the initial weights are frozen in place, and the rest of the weights are used to compute loss and are updated by the optimizer. This requires less resources than normal training and allows for faster training times, though it may also result in reductions to final trained accuracy. +UPDATED 25 September 2022. + + +## Before You Start + +Clone repo and install [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) in a [**Python>=3.7.0**](https://www.python.org/) environment, including [**PyTorch>=1.7**](https://pytorch.org/get-started/locally/). [Models](https://github.com/ultralytics/yolov5/tree/master/models) and [datasets](https://github.com/ultralytics/yolov5/tree/master/data) download automatically from the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases). 
+ +```bash +git clone https://github.com/ultralytics/yolov5 # clone +cd yolov5 +pip install -r requirements.txt # install +``` + +## Freeze Backbone + +All layers that match the train.py `freeze` list in train.py will be frozen by setting their gradients to zero before training starts. +```python + # Freeze + freeze = [f'model.{x}.' for x in range(freeze)] # layers to freeze + for k, v in model.named_parameters(): + v.requires_grad = True # train all layers + if any(x in k for x in freeze): + print(f'freezing {k}') + v.requires_grad = False +``` + +To see a list of module names: +```python +for k, v in model.named_parameters(): + print(k) + +# Output +model.0.conv.conv.weight +model.0.conv.bn.weight +model.0.conv.bn.bias +model.1.conv.weight +model.1.bn.weight +model.1.bn.bias +model.2.cv1.conv.weight +model.2.cv1.bn.weight +... +model.23.m.0.cv2.bn.weight +model.23.m.0.cv2.bn.bias +model.24.m.0.weight +model.24.m.0.bias +model.24.m.1.weight +model.24.m.1.bias +model.24.m.2.weight +model.24.m.2.bias +``` + +Looking at the model architecture we can see that the model backbone is layers 0-9: +```yaml +# YOLOv5 backbone + backbone: + # [from, number, module, args] + [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, BottleneckCSP, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 9, BottleneckCSP, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, BottleneckCSP, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 1, SPP, [1024, [5, 9, 13]]], + [-1, 3, BottleneckCSP, [1024, False]], # 9 + ] + + # YOLOv5 head + head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, BottleneckCSP, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, BottleneckCSP, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, BottleneckCSP, [512, False]], # 20 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, BottleneckCSP, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] +``` + +so we can define the freeze list to contain all modules with 'model.0.' - 'model.9.' in their names: +```bash +python train.py --freeze 10 +``` + +## Freeze All Layers + +To freeze the full model except for the final output convolution layers in Detect(), we set freeze list to contain all modules with 'model.0.' - 'model.23.' in their names: +```bash +python train.py --freeze 24 +``` + +## Results + +We train YOLOv5m on VOC on both of the above scenarios, along with a default model (no freezing), starting from the official COCO pretrained `--weights yolov5m.pt`: +```python +train.py --batch 48 --weights yolov5m.pt --data voc.yaml --epochs 50 --cache --img 512 --hyp hyp.finetune.yaml +``` + +### Accuracy Comparison + +The results show that freezing speeds up training, but reduces final accuracy slightly. + +![](https://user-images.githubusercontent.com/26833433/98394454-11579f80-205b-11eb-8e57-d8318e1cc2f8.png) + +![](https://user-images.githubusercontent.com/26833433/98394459-13216300-205b-11eb-871b-49e20691a423.png) + +Screenshot 2020-11-06 at 18 08 13 + +### GPU Utilization Comparison + +Interestingly, the more modules are frozen the less GPU memory is required to train, and the lower GPU utilization. 
This indicates that larger models, or models trained at larger --image-size may benefit from freezing in order to train faster. + +![](https://user-images.githubusercontent.com/26833433/98394920-c2f6d080-205b-11eb-9611-fd68522b4e0e.png) + +![](https://user-images.githubusercontent.com/26833433/98394918-bf634980-205b-11eb-948d-311036ef9325.png) + + +## Environments + +YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled): + +- **Notebooks** with free GPU: Run on Gradient Open In Colab Open In Kaggle +- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart) +- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart) +- **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) Docker Pulls + + +## Status + +YOLOv5 CI + +If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 [training](https://github.com/ultralytics/yolov5/blob/master/train.py), [validation](https://github.com/ultralytics/yolov5/blob/master/val.py), [inference](https://github.com/ultralytics/yolov5/blob/master/detect.py), [export](https://github.com/ultralytics/yolov5/blob/master/export.py) and [benchmarks](https://github.com/ultralytics/yolov5/blob/master/benchmarks.py) on macOS, Windows, and Ubuntu every 24 hours and on every commit. \ No newline at end of file diff --git a/docs/yolov5/tta.md b/docs/yolov5/tta.md new file mode 100644 index 0000000..0d39ce3 --- /dev/null +++ b/docs/yolov5/tta.md @@ -0,0 +1,154 @@ +# Test-Time Augmentation (TTA) + +📚 This guide explains how to use Test Time Augmentation (TTA) during testing and inference for improved mAP and Recall with YOLOv5 🚀. +UPDATED 25 September 2022. + +## Before You Start + +Clone repo and install [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) in a [**Python>=3.7.0**](https://www.python.org/) environment, including [**PyTorch>=1.7**](https://pytorch.org/get-started/locally/). [Models](https://github.com/ultralytics/yolov5/tree/master/models) and [datasets](https://github.com/ultralytics/yolov5/tree/master/data) download automatically from the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases). + +```bash +git clone https://github.com/ultralytics/yolov5 # clone +cd yolov5 +pip install -r requirements.txt # install +``` + +## Test Normally + +Before trying TTA we want to establish a baseline performance to compare to. This command tests YOLOv5x on COCO val2017 at image size 640 pixels. `yolov5x.pt` is the largest and most accurate model available. Other options are `yolov5s.pt`, `yolov5m.pt` and `yolov5l.pt`, or you own checkpoint from training a custom dataset `./weights/best.pt`. For details on all available models please see our README [table](https://github.com/ultralytics/yolov5#pretrained-checkpoints). 
+```bash +python val.py --weights yolov5x.pt --data coco.yaml --img 640 --half +``` + +Output: +```shell +val: data=./data/coco.yaml, weights=['yolov5x.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.65, task=val, device=, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=True, project=runs/val, name=exp, exist_ok=False, half=True +YOLOv5 🚀 v5.0-267-g6a3ee7c torch 1.9.0+cu102 CUDA:0 (Tesla P100-PCIE-16GB, 16280.875MB) + +Fusing layers... +Model Summary: 476 layers, 87730285 parameters, 0 gradients + +val: Scanning '../datasets/coco/val2017' images and labels...4952 found, 48 missing, 0 empty, 0 corrupted: 100% 5000/5000 [00:01<00:00, 2846.03it/s] +val: New cache created: ../datasets/coco/val2017.cache + Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 157/157 [02:30<00:00, 1.05it/s] + all 5000 36335 0.746 0.626 0.68 0.49 +Speed: 0.1ms pre-process, 22.4ms inference, 1.4ms NMS per image at shape (32, 3, 640, 640) # <--- baseline speed + +Evaluating pycocotools mAP... saving runs/val/exp/yolov5x_predictions.json... +... + Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.504 # <--- baseline mAP + Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.688 + Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.546 + Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.351 + Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.551 + Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.644 + Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.382 + Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.628 + Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.681 # <--- baseline mAR + Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.524 + Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.735 + Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.826 +``` + +## Test with TTA +Append `--augment` to any existing `val.py` command to enable TTA, and increase the image size by about 30% for improved results. Note that inference with TTA enabled will typically take about 2-3X the time of normal inference as the images are being left-right flipped and processed at 3 different resolutions, with the outputs merged before NMS. Part of the speed decrease is simply due to larger image sizes (832 vs 640), while part is due to the actual TTA operations. +```bash +python val.py --weights yolov5x.pt --data coco.yaml --img 832 --augment --half +``` + +Output: +```shell +val: data=./data/coco.yaml, weights=['yolov5x.pt'], batch_size=32, imgsz=832, conf_thres=0.001, iou_thres=0.6, task=val, device=, single_cls=False, augment=True, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=True, project=runs/val, name=exp, exist_ok=False, half=True +YOLOv5 🚀 v5.0-267-g6a3ee7c torch 1.9.0+cu102 CUDA:0 (Tesla P100-PCIE-16GB, 16280.875MB) + +Fusing layers... +/usr/local/lib/python3.7/dist-packages/torch/nn/functional.py:718: UserWarning: Named tensors and all their associated APIs are an experimental feature and subject to change. Please do not use them for anything important until they are released as stable. (Triggered internally at /pytorch/c10/core/TensorImpl.h:1156.) 
+ return torch.max_pool2d(input, kernel_size, stride, padding, dilation, ceil_mode) +Model Summary: 476 layers, 87730285 parameters, 0 gradients +val: Scanning '../datasets/coco/val2017' images and labels...4952 found, 48 missing, 0 empty, 0 corrupted: 100% 5000/5000 [00:01<00:00, 2885.61it/s] +val: New cache created: ../datasets/coco/val2017.cache + Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 157/157 [07:29<00:00, 2.86s/it] + all 5000 36335 0.718 0.656 0.695 0.503 +Speed: 0.2ms pre-process, 80.6ms inference, 2.7ms NMS per image at shape (32, 3, 832, 832) # <--- TTA speed + +Evaluating pycocotools mAP... saving runs/val/exp2/yolov5x_predictions.json... +... + Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.516 # <--- TTA mAP + Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.701 + Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.562 + Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.361 + Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.564 + Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.656 + Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.388 + Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.640 + Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.696 # <--- TTA mAR + Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.553 + Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.744 + Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.833 +``` + +## Inference with TTA + +`detect.py` TTA inference operates identically to `val.py` TTA: simply append `--augment` to any existing `detect.py` command: +```bash +python detect.py --weights yolov5s.pt --img 832 --source data/images --augment +``` + +Output: +```bash +detect: weights=['yolov5s.pt'], source=data/images, imgsz=832, conf_thres=0.25, iou_thres=0.45, max_det=1000, device=, view_img=False, save_txt=False, save_conf=False, save_crop=False, nosave=False, classes=None, agnostic_nms=False, augment=True, update=False, project=runs/detect, name=exp, exist_ok=False, line_thickness=3, hide_labels=False, hide_conf=False, half=False +YOLOv5 🚀 v5.0-267-g6a3ee7c torch 1.9.0+cu102 CUDA:0 (Tesla P100-PCIE-16GB, 16280.875MB) + +Downloading https://github.com/ultralytics/yolov5/releases/download/v5.0/yolov5s.pt to yolov5s.pt... +100% 14.1M/14.1M [00:00<00:00, 81.9MB/s] + +Fusing layers... +Model Summary: 224 layers, 7266973 parameters, 0 gradients +image 1/2 /content/yolov5/data/images/bus.jpg: 832x640 4 persons, 1 bus, 1 fire hydrant, Done. (0.029s) +image 2/2 /content/yolov5/data/images/zidane.jpg: 480x832 3 persons, 3 ties, Done. (0.024s) +Results saved to runs/detect/exp +Done. (0.156s) +``` + + + + +### PyTorch Hub TTA + +TTA is automatically integrated into all [YOLOv5 PyTorch Hub](https://pytorch.org/hub/ultralytics_yolov5) models, and can be accessed by passing `augment=True` at inference time. +```python +import torch + +# Model +model = torch.hub.load('ultralytics/yolov5', 'yolov5s') # or yolov5m, yolov5x, custom + +# Images +img = 'https://ultralytics.com/images/zidane.jpg' # or file, PIL, OpenCV, numpy, multiple + +# Inference +results = model(img, augment=True) # <--- TTA inference + +# Results +results.print() # or .show(), .save(), .crop(), .pandas(), etc. 
+``` + +### Customize + +You can customize the TTA ops applied in the YOLOv5 `forward_augment()` method [here](https://github.com/ultralytics/yolov5/blob/8c6f9e15bfc0000d18b976a95b9d7c17d407ec91/models/yolo.py#L125-L137). + + +## Environments + +YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled): + +- **Notebooks** with free GPU: Run on Gradient Open In Colab Open In Kaggle +- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart) +- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart) +- **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) Docker Pulls + + +## Status + +YOLOv5 CI + +If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 [training](https://github.com/ultralytics/yolov5/blob/master/train.py), [validation](https://github.com/ultralytics/yolov5/blob/master/val.py), [inference](https://github.com/ultralytics/yolov5/blob/master/detect.py), [export](https://github.com/ultralytics/yolov5/blob/master/export.py) and [benchmarks](https://github.com/ultralytics/yolov5/blob/master/benchmarks.py) on macOS, Windows, and Ubuntu every 24 hours and on every commit. \ No newline at end of file diff --git a/examples/README.md b/examples/README.md new file mode 100644 index 0000000..6fad2b7 --- /dev/null +++ b/examples/README.md @@ -0,0 +1,24 @@ +## Ultralytics YOLOv8 Example Applications + +This repository features a collection of real-world applications and walkthroughs, provided as either Python files or notebooks. Explore the examples below to see how YOLOv8 can be integrated into various applications. + +### Ultralytics YOLO Example Applications + +| Title | Format | Contributor | +| ------------------------------------------------------------------------ | ------------------ | --------------------------------------------------- | +| [YOLO ONNX Detection Inference with C++](./YOLOv8-CPP-Inference) | C++/ONNX | [Justas Bartnykas](https://github.com/JustasBart) | +| [YOLO OpenCV ONNX Detection Python](./YOLOv8-OpenCV-ONNX-Python) | OpenCV/Python/ONNX | [Farid Inawan](https://github.com/frdteknikelektro) | +| [YOLO .Net ONNX Detection C#](https://www.nuget.org/packages/Yolov8.Net) | C# .Net | [Samuel Stainback](https://github.com/sstainba) | + +### How to Contribute + +We welcome contributions from the community in the form of examples, applications, and guides. To contribute, please follow these steps: + +1. Create a pull request (PR) with the `[Example]` prefix in the title, adding your project folder to the `examples/` directory in the repository. +1. Ensure that your project meets the following criteria: + - Utilizes the `ultralytics` package. + - Includes a `README.md` file with instructions on how to run the project. + - Avoids adding large assets or dependencies unless absolutely necessary. + - The contributor is expected to provide support for issues related to their examples. + +If you have any questions or concerns about these requirements, please submit a PR, and we will be more than happy to guide you. 
diff --git a/examples/YOLOv8-CPP-Inference/CMakeLists.txt b/examples/YOLOv8-CPP-Inference/CMakeLists.txt new file mode 100644 index 0000000..bc2f33f --- /dev/null +++ b/examples/YOLOv8-CPP-Inference/CMakeLists.txt @@ -0,0 +1,28 @@ +cmake_minimum_required(VERSION 3.5) + +project(Yolov8CPPInference VERSION 0.1) + +set(CMAKE_INCLUDE_CURRENT_DIR ON) + +# CUDA +set(CUDA_TOOLKIT_ROOT_DIR "/usr/local/cuda") +find_package(CUDA 11 REQUIRED) + +set(CMAKE_CUDA_STANDARD 11) +set(CMAKE_CUDA_STANDARD_REQUIRED ON) +# !CUDA + +# OpenCV +find_package(OpenCV REQUIRED) +include_directories(${OpenCV_INCLUDE_DIRS}) +# !OpenCV + +set(PROJECT_SOURCES + main.cpp + + inference.h + inference.cpp +) + +add_executable(Yolov8CPPInference ${PROJECT_SOURCES}) +target_link_libraries(Yolov8CPPInference ${OpenCV_LIBS}) diff --git a/examples/YOLOv8-CPP-Inference/README.md b/examples/YOLOv8-CPP-Inference/README.md new file mode 100644 index 0000000..548f9b8 --- /dev/null +++ b/examples/YOLOv8-CPP-Inference/README.md @@ -0,0 +1,50 @@ +# YOLOv8/YOLOv5 Inference C++ + +This example demonstrates how to perform inference using YOLOv8 and YOLOv5 models in C++ with OpenCV's DNN API. + +## Usage + +```commandline +git clone ultralytics +cd ultralytics +pip install . +cd examples/cpp_ + +# Add a **yolov8\_.onnx** and/or **yolov5\_.onnx** model(s) to the ultralytics folder. +# Edit the **main.cpp** to change the **projectBasePath** to match your user. + +# Note that by default the CMake file will try and import the CUDA library to be used with the OpenCVs dnn (cuDNN) GPU Inference. +# If your OpenCV build does not use CUDA/cuDNN you can remove that import call and run the example on CPU. + +mkdir build +cd build +cmake .. +make +./Yolov8CPPInference +``` + +## Exporting YOLOv8 and YOLOv5 Models + +To export YOLOv8 models: + +```commandline +yolo export model=yolov8s.pt imgsz=480,640 format=onnx opset=12 +``` + +To export YOLOv5 models: + +```commandline +python3 export.py --weights yolov5s.pt --img 480 640 --include onnx --opset 12 +``` + +yolov8s.onnx: + +![image](https://user-images.githubusercontent.com/40023722/217356132-a4cecf2e-2729-4acb-b80a-6559022d7707.png) + +yolov5s.onnx: + +![image](https://user-images.githubusercontent.com/40023722/217357005-07464492-d1da-42e3-98a7-fc753f87d5e6.png) + +This repository utilizes OpenCV's DNN API to run ONNX exported models of YOLOv5 and YOLOv8. In theory, it should work for YOLOv6 and YOLOv7 as well, but they have not been tested. Note that the example networks are exported with rectangular (640x480) resolutions, but any exported resolution will work. You may want to use the letterbox approach for square images, depending on your use case. + +The **main** branch version uses Qt as a GUI wrapper. The primary focus here is the **Inference** class file, which demonstrates how to transpose YOLOv8 models to work as YOLOv5 models. 
diff --git a/examples/YOLOv8-CPP-Inference/inference.cpp b/examples/YOLOv8-CPP-Inference/inference.cpp new file mode 100644 index 0000000..12c2607 --- /dev/null +++ b/examples/YOLOv8-CPP-Inference/inference.cpp @@ -0,0 +1,185 @@ +#include "inference.h" + +Inference::Inference(const std::string &onnxModelPath, const cv::Size &modelInputShape, const std::string &classesTxtFile, const bool &runWithCuda) +{ + modelPath = onnxModelPath; + modelShape = modelInputShape; + classesPath = classesTxtFile; + cudaEnabled = runWithCuda; + + loadOnnxNetwork(); + // loadClassesFromFile(); The classes are hard-coded for this example +} + +std::vector Inference::runInference(const cv::Mat &input) +{ + cv::Mat modelInput = input; + if (letterBoxForSquare && modelShape.width == modelShape.height) + modelInput = formatToSquare(modelInput); + + cv::Mat blob; + cv::dnn::blobFromImage(modelInput, blob, 1.0/255.0, modelShape, cv::Scalar(), true, false); + net.setInput(blob); + + std::vector outputs; + net.forward(outputs, net.getUnconnectedOutLayersNames()); + + int rows = outputs[0].size[1]; + int dimensions = outputs[0].size[2]; + + bool yolov8 = false; + // yolov5 has an output of shape (batchSize, 25200, 85) (Num classes + box[x,y,w,h] + confidence[c]) + // yolov8 has an output of shape (batchSize, 84, 8400) (Num classes + box[x,y,w,h]) + if (dimensions > rows) // Check if the shape[2] is more than shape[1] (yolov8) + { + yolov8 = true; + rows = outputs[0].size[2]; + dimensions = outputs[0].size[1]; + + outputs[0] = outputs[0].reshape(1, dimensions); + cv::transpose(outputs[0], outputs[0]); + } + float *data = (float *)outputs[0].data; + + float x_factor = modelInput.cols / modelShape.width; + float y_factor = modelInput.rows / modelShape.height; + + std::vector class_ids; + std::vector confidences; + std::vector boxes; + + for (int i = 0; i < rows; ++i) + { + if (yolov8) + { + float *classes_scores = data+4; + + cv::Mat scores(1, classes.size(), CV_32FC1, classes_scores); + cv::Point class_id; + double maxClassScore; + + minMaxLoc(scores, 0, &maxClassScore, 0, &class_id); + + if (maxClassScore > modelScoreThreshold) + { + confidences.push_back(maxClassScore); + class_ids.push_back(class_id.x); + + float x = data[0]; + float y = data[1]; + float w = data[2]; + float h = data[3]; + + int left = int((x - 0.5 * w) * x_factor); + int top = int((y - 0.5 * h) * y_factor); + + int width = int(w * x_factor); + int height = int(h * y_factor); + + boxes.push_back(cv::Rect(left, top, width, height)); + } + } + else // yolov5 + { + float confidence = data[4]; + + if (confidence >= modelConfidenceThreshold) + { + float *classes_scores = data+5; + + cv::Mat scores(1, classes.size(), CV_32FC1, classes_scores); + cv::Point class_id; + double max_class_score; + + minMaxLoc(scores, 0, &max_class_score, 0, &class_id); + + if (max_class_score > modelScoreThreshold) + { + confidences.push_back(confidence); + class_ids.push_back(class_id.x); + + float x = data[0]; + float y = data[1]; + float w = data[2]; + float h = data[3]; + + int left = int((x - 0.5 * w) * x_factor); + int top = int((y - 0.5 * h) * y_factor); + + int width = int(w * x_factor); + int height = int(h * y_factor); + + boxes.push_back(cv::Rect(left, top, width, height)); + } + } + } + + data += dimensions; + } + + std::vector nms_result; + cv::dnn::NMSBoxes(boxes, confidences, modelScoreThreshold, modelNMSThreshold, nms_result); + + std::vector detections{}; + for (unsigned long i = 0; i < nms_result.size(); ++i) + { + int idx = nms_result[i]; + + Detection result; 
+ result.class_id = class_ids[idx]; + result.confidence = confidences[idx]; + + std::random_device rd; + std::mt19937 gen(rd()); + std::uniform_int_distribution dis(100, 255); + result.color = cv::Scalar(dis(gen), + dis(gen), + dis(gen)); + + result.className = classes[result.class_id]; + result.box = boxes[idx]; + + detections.push_back(result); + } + + return detections; +} + +void Inference::loadClassesFromFile() +{ + std::ifstream inputFile(classesPath); + if (inputFile.is_open()) + { + std::string classLine; + while (std::getline(inputFile, classLine)) + classes.push_back(classLine); + inputFile.close(); + } +} + +void Inference::loadOnnxNetwork() +{ + net = cv::dnn::readNetFromONNX(modelPath); + if (cudaEnabled) + { + std::cout << "\nRunning on CUDA" << std::endl; + net.setPreferableBackend(cv::dnn::DNN_BACKEND_CUDA); + net.setPreferableTarget(cv::dnn::DNN_TARGET_CUDA); + } + else + { + std::cout << "\nRunning on CPU" << std::endl; + net.setPreferableBackend(cv::dnn::DNN_BACKEND_OPENCV); + net.setPreferableTarget(cv::dnn::DNN_TARGET_CPU); + } +} + +cv::Mat Inference::formatToSquare(const cv::Mat &source) +{ + int col = source.cols; + int row = source.rows; + int _max = MAX(col, row); + cv::Mat result = cv::Mat::zeros(_max, _max, CV_8UC3); + source.copyTo(result(cv::Rect(0, 0, col, row))); + return result; +} diff --git a/examples/YOLOv8-CPP-Inference/inference.h b/examples/YOLOv8-CPP-Inference/inference.h new file mode 100644 index 0000000..dc6149f --- /dev/null +++ b/examples/YOLOv8-CPP-Inference/inference.h @@ -0,0 +1,52 @@ +#ifndef INFERENCE_H +#define INFERENCE_H + +// Cpp native +#include +#include +#include +#include + +// OpenCV / DNN / Inference +#include +#include +#include + +struct Detection +{ + int class_id{0}; + std::string className{}; + float confidence{0.0}; + cv::Scalar color{}; + cv::Rect box{}; +}; + +class Inference +{ +public: + Inference(const std::string &onnxModelPath, const cv::Size &modelInputShape = {640, 640}, const std::string &classesTxtFile = "", const bool &runWithCuda = true); + std::vector runInference(const cv::Mat &input); + +private: + void loadClassesFromFile(); + void loadOnnxNetwork(); + cv::Mat formatToSquare(const cv::Mat &source); + + std::string modelPath{}; + std::string classesPath{}; + bool cudaEnabled{}; + + std::vector classes{"person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat", "traffic light", "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", "sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard", "tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple", "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair", "couch", "potted plant", "bed", "dining table", "toilet", "tv", "laptop", "mouse", "remote", "keyboard", "cell phone", "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors", "teddy bear", "hair drier", "toothbrush"}; + + cv::Size2f modelShape{}; + + float modelConfidenceThreshold {0.25}; + float modelScoreThreshold {0.45}; + float modelNMSThreshold {0.50}; + + bool letterBoxForSquare = true; + + cv::dnn::Net net; +}; + +#endif // INFERENCE_H diff --git a/examples/YOLOv8-CPP-Inference/main.cpp b/examples/YOLOv8-CPP-Inference/main.cpp new file mode 100644 index 0000000..6d1ba98 --- 
+++ b/examples/YOLOv8-CPP-Inference/main.cpp
@@ -0,0 +1,70 @@
+#include <iostream>
+#include <vector>
+#include <getopt.h>
+
+#include <opencv2/opencv.hpp>
+
+#include "inference.h"
+
+using namespace std;
+using namespace cv;
+
+int main(int argc, char **argv)
+{
+    std::string projectBasePath = "/home/user/ultralytics"; // Set your ultralytics base path
+
+    bool runOnGPU = true;
+
+    //
+    // Pass in either:
+    //
+    // "yolov8s.onnx" or "yolov5s.onnx"
+    //
+    // To run Inference with yolov8/yolov5 (ONNX)
+    //
+
+    // Note that in this example the classes are hard-coded and 'classes.txt' is a placeholder.
+    Inference inf(projectBasePath + "/yolov8s.onnx", cv::Size(640, 480), "classes.txt", runOnGPU);
+
+    std::vector<std::string> imageNames;
+    imageNames.push_back(projectBasePath + "/ultralytics/assets/bus.jpg");
+    imageNames.push_back(projectBasePath + "/ultralytics/assets/zidane.jpg");
+
+    for (int i = 0; i < imageNames.size(); ++i)
+    {
+        cv::Mat frame = cv::imread(imageNames[i]);
+
+        // Inference starts here...
+        std::vector<Detection> output = inf.runInference(frame);
+
+        int detections = output.size();
+        std::cout << "Number of detections:" << detections << std::endl;
+
+        for (int i = 0; i < detections; ++i)
+        {
+            Detection detection = output[i];
+
+            cv::Rect box = detection.box;
+            cv::Scalar color = detection.color;
+
+            // Detection box
+            cv::rectangle(frame, box, color, 2);
+
+            // Detection box text
+            std::string classString = detection.className + ' ' + std::to_string(detection.confidence).substr(0, 4);
+            cv::Size textSize = cv::getTextSize(classString, cv::FONT_HERSHEY_DUPLEX, 1, 2, 0);
+            cv::Rect textBox(box.x, box.y - 40, textSize.width + 10, textSize.height + 20);
+
+            cv::rectangle(frame, textBox, color, cv::FILLED);
+            cv::putText(frame, classString, cv::Point(box.x + 5, box.y - 10), cv::FONT_HERSHEY_DUPLEX, 1, cv::Scalar(0, 0, 0), 2, 0);
+        }
+        // Inference ends here...
+
+        // This is only for preview purposes
+        float scale = 0.8;
+        cv::resize(frame, frame, cv::Size(frame.cols*scale, frame.rows*scale));
+        cv::imshow("Inference", frame);
+
+        cv::waitKey(-1);
+    }
+}
diff --git a/examples/YOLOv8-OpenCV-ONNX-Python/README.md b/examples/YOLOv8-OpenCV-ONNX-Python/README.md
new file mode 100644
index 0000000..c9076fa
--- /dev/null
+++ b/examples/YOLOv8-OpenCV-ONNX-Python/README.md
@@ -0,0 +1,19 @@
+# YOLOv8 - OpenCV
+
+Implementation of YOLOv8 inference with OpenCV using the ONNX format.
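+
+For reference, a standard YOLOv8 detection export at `imgsz=640` produces a single output tensor of shape `(1, 4 + num_classes, 8400)`, which is why `main.py` transposes the output before looping over candidate rows. Below is a minimal sketch of that first decoding step, assuming a COCO-trained `yolov8n.onnx` in the working directory (the file name and sizes are illustrative, not part of this example):
+
+```python
+import cv2
+import numpy as np
+
+net = cv2.dnn.readNetFromONNX("yolov8n.onnx")
+blob = cv2.dnn.blobFromImage(np.zeros((640, 640, 3), np.uint8),
+                             scalefactor=1 / 255, size=(640, 640), swapRB=True)
+net.setInput(blob)
+out = net.forward()         # shape (1, 84, 8400): 4 box coords + 80 class scores per candidate
+preds = out[0].T            # shape (8400, 84): one candidate per row, as iterated in main.py
+boxes, scores = preds[:, :4], preds[:, 4:]
+print(out.shape, boxes.shape, scores.shape)
+```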
+ +Just simply clone and run + +```bash +pip install -r requirements.txt +python main.py --model yolov8n.onnx --img image.jpg +``` + +If you start from scratch: + +```bash +pip install ultralytics +yolo export model=yolov8n.pt imgsz=640 format=onnx opset=12 +``` + +_\*Make sure to include "opset=12"_ diff --git a/examples/YOLOv8-OpenCV-ONNX-Python/main.py b/examples/YOLOv8-OpenCV-ONNX-Python/main.py new file mode 100644 index 0000000..d1f635c --- /dev/null +++ b/examples/YOLOv8-OpenCV-ONNX-Python/main.py @@ -0,0 +1,80 @@ +import argparse + +import cv2.dnn +import numpy as np + +from ultralytics.yolo.utils import ROOT, yaml_load +from ultralytics.yolo.utils.checks import check_yaml + +CLASSES = yaml_load(check_yaml('coco128.yaml'))['names'] + +colors = np.random.uniform(0, 255, size=(len(CLASSES), 3)) + + +def draw_bounding_box(img, class_id, confidence, x, y, x_plus_w, y_plus_h): + label = f'{CLASSES[class_id]} ({confidence:.2f})' + color = colors[class_id] + cv2.rectangle(img, (x, y), (x_plus_w, y_plus_h), color, 2) + cv2.putText(img, label, (x - 10, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2) + + +def main(onnx_model, input_image): + model: cv2.dnn.Net = cv2.dnn.readNetFromONNX(onnx_model) + original_image: np.ndarray = cv2.imread(input_image) + [height, width, _] = original_image.shape + length = max((height, width)) + image = np.zeros((length, length, 3), np.uint8) + image[0:height, 0:width] = original_image + scale = length / 640 + + blob = cv2.dnn.blobFromImage(image, scalefactor=1 / 255, size=(640, 640), swapRB=True) + model.setInput(blob) + outputs = model.forward() + + outputs = np.array([cv2.transpose(outputs[0])]) + rows = outputs.shape[1] + + boxes = [] + scores = [] + class_ids = [] + + for i in range(rows): + classes_scores = outputs[0][i][4:] + (minScore, maxScore, minClassLoc, (x, maxClassIndex)) = cv2.minMaxLoc(classes_scores) + if maxScore >= 0.25: + box = [ + outputs[0][i][0] - (0.5 * outputs[0][i][2]), outputs[0][i][1] - (0.5 * outputs[0][i][3]), + outputs[0][i][2], outputs[0][i][3]] + boxes.append(box) + scores.append(maxScore) + class_ids.append(maxClassIndex) + + result_boxes = cv2.dnn.NMSBoxes(boxes, scores, 0.25, 0.45, 0.5) + + detections = [] + for i in range(len(result_boxes)): + index = result_boxes[i] + box = boxes[index] + detection = { + 'class_id': class_ids[index], + 'class_name': CLASSES[class_ids[index]], + 'confidence': scores[index], + 'box': box, + 'scale': scale} + detections.append(detection) + draw_bounding_box(original_image, class_ids[index], scores[index], round(box[0] * scale), round(box[1] * scale), + round((box[0] + box[2]) * scale), round((box[1] + box[3]) * scale)) + + cv2.imshow('image', original_image) + cv2.waitKey(0) + cv2.destroyAllWindows() + + return detections + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--model', default='yolov8n.onnx', help='Input your onnx model.') + parser.add_argument('--img', default=str(ROOT / 'assets/bus.jpg'), help='Path to input image.') + args = parser.parse_args() + main(args.model, args.img) diff --git a/fonts/cv_puttext.py b/fonts/cv_puttext.py new file mode 100644 index 0000000..cc88ab8 --- /dev/null +++ b/fonts/cv_puttext.py @@ -0,0 +1,22 @@ +import cv2 +import numpy as np +from PIL import Image, ImageDraw, ImageFont + +def cv2ImgAddText(img, text, left, top, textColor=(0, 255, 0), textSize=20): + if (isinstance(img, np.ndarray)): #判断是否OpenCV图片类型 + img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB)) + draw = ImageDraw.Draw(img) + fontText = 
ImageFont.truetype( + "fonts/platech.ttf", textSize, encoding="utf-8") + draw.text((left, top), text, textColor, font=fontText) + return cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR) + +if __name__ == '__main__': + imgPath = "result.jpg" + img = cv2.imread(imgPath) + + saveImg = cv2ImgAddText(img, '中国加油!', 50, 100, (255, 0, 0), 50) + + # cv2.imshow('display',saveImg) + cv2.imwrite('save.jpg',saveImg) + # cv2.waitKey() \ No newline at end of file diff --git a/fonts/platech.ttf b/fonts/platech.ttf new file mode 100644 index 0000000..d66a970 Binary files /dev/null and b/fonts/platech.ttf differ diff --git a/imgs/Quicker_20220930_180856.png b/imgs/Quicker_20220930_180856.png new file mode 100644 index 0000000..eed5017 Binary files /dev/null and b/imgs/Quicker_20220930_180856.png differ diff --git a/imgs/Quicker_20220930_180919.png b/imgs/Quicker_20220930_180919.png new file mode 100644 index 0000000..42abc0f Binary files /dev/null and b/imgs/Quicker_20220930_180919.png differ diff --git a/imgs/Quicker_20220930_180938.png b/imgs/Quicker_20220930_180938.png new file mode 100644 index 0000000..1e34d8c Binary files /dev/null and b/imgs/Quicker_20220930_180938.png differ diff --git a/imgs/Quicker_20220930_181044.png b/imgs/Quicker_20220930_181044.png new file mode 100644 index 0000000..cde51c1 Binary files /dev/null and b/imgs/Quicker_20220930_181044.png differ diff --git a/imgs/double_yellow.jpg b/imgs/double_yellow.jpg new file mode 100644 index 0000000..b42b61c Binary files /dev/null and b/imgs/double_yellow.jpg differ diff --git a/imgs/hongkang1.jpg b/imgs/hongkang1.jpg new file mode 100644 index 0000000..23099b1 Binary files /dev/null and b/imgs/hongkang1.jpg differ diff --git a/imgs/police.jpg b/imgs/police.jpg new file mode 100644 index 0000000..9ac0bb1 Binary files /dev/null and b/imgs/police.jpg differ diff --git a/imgs/shi_lin_guan.jpg b/imgs/shi_lin_guan.jpg new file mode 100644 index 0000000..f7c216d Binary files /dev/null and b/imgs/shi_lin_guan.jpg differ diff --git a/imgs/single_blue.jpg b/imgs/single_blue.jpg new file mode 100644 index 0000000..28abf64 Binary files /dev/null and b/imgs/single_blue.jpg differ diff --git a/imgs/single_green.jpg b/imgs/single_green.jpg new file mode 100644 index 0000000..0a0e0a9 Binary files /dev/null and b/imgs/single_green.jpg differ diff --git a/imgs/single_yellow.jpg b/imgs/single_yellow.jpg new file mode 100644 index 0000000..ebe3a26 Binary files /dev/null and b/imgs/single_yellow.jpg differ diff --git a/imgs/tmpA5E3.png b/imgs/tmpA5E3.png new file mode 100644 index 0000000..b75872b Binary files /dev/null and b/imgs/tmpA5E3.png differ diff --git a/imgs/xue.jpg b/imgs/xue.jpg new file mode 100644 index 0000000..d583ac7 Binary files /dev/null and b/imgs/xue.jpg differ diff --git a/plate_recognition/double_plate_split_merge.py b/plate_recognition/double_plate_split_merge.py new file mode 100644 index 0000000..24c6537 --- /dev/null +++ b/plate_recognition/double_plate_split_merge.py @@ -0,0 +1,15 @@ +import os +import cv2 +import numpy as np +def get_split_merge(img): + h,w,c = img.shape + img_upper = img[0:int(5/12*h),:] + img_lower = img[int(1/3*h):,:] + img_upper = cv2.resize(img_upper,(img_lower.shape[1],img_lower.shape[0])) + new_img = np.hstack((img_upper,img_lower)) + return new_img + +if __name__=="__main__": + img = cv2.imread("double_plate/tmp8078.png") + new_img =get_split_merge(img) + cv2.imwrite("double_plate/new.jpg",new_img) diff --git a/plate_recognition/plateNet.py b/plate_recognition/plateNet.py new file mode 100644 index 
0000000..ce9a982 --- /dev/null +++ b/plate_recognition/plateNet.py @@ -0,0 +1,203 @@ +import torch.nn as nn +import torch + + +class myNet_ocr(nn.Module): + def __init__(self,cfg=None,num_classes=78,export=False): + super(myNet_ocr, self).__init__() + if cfg is None: + cfg =[32,32,64,64,'M',128,128,'M',196,196,'M',256,256] + # cfg =[32,32,'M',64,64,'M',128,128,'M',256,256] + self.feature = self.make_layers(cfg, True) + self.export = export + # self.classifier = nn.Linear(cfg[-1], num_classes) + # self.loc = nn.MaxPool2d((2, 2), (5, 1), (0, 1),ceil_mode=True) + # self.loc = nn.AvgPool2d((2, 2), (5, 2), (0, 1),ceil_mode=False) + self.loc = nn.MaxPool2d((5, 2), (1, 1),(0,1),ceil_mode=False) + self.newCnn=nn.Conv2d(cfg[-1],num_classes,1,1) + # self.newBn=nn.BatchNorm2d(num_classes) + def make_layers(self, cfg, batch_norm=False): + layers = [] + in_channels = 3 + for i in range(len(cfg)): + if i == 0: + conv2d =nn.Conv2d(in_channels, cfg[i], kernel_size=5,stride =1) + if batch_norm: + layers += [conv2d, nn.BatchNorm2d(cfg[i]), nn.ReLU(inplace=True)] + else: + layers += [conv2d, nn.ReLU(inplace=True)] + in_channels = cfg[i] + else : + if cfg[i] == 'M': + layers += [nn.MaxPool2d(kernel_size=3, stride=2,ceil_mode=True)] + else: + conv2d = nn.Conv2d(in_channels, cfg[i], kernel_size=3, padding=(1,1),stride =1) + if batch_norm: + layers += [conv2d, nn.BatchNorm2d(cfg[i]), nn.ReLU(inplace=True)] + else: + layers += [conv2d, nn.ReLU(inplace=True)] + in_channels = cfg[i] + return nn.Sequential(*layers) + + def forward(self, x): + x = self.feature(x) + x=self.loc(x) + x=self.newCnn(x) + # x=self.newBn(x) + if self.export: + conv = x.squeeze(2) # b *512 * width + conv = conv.transpose(2,1) # [w, b, c] + # conv =conv.argmax(dim=2) + return conv + else: + b, c, h, w = x.size() + assert h == 1, "the height of conv must be 1" + conv = x.squeeze(2) # b *512 * width + conv = conv.permute(2, 0, 1) # [w, b, c] + # output = F.log_softmax(self.rnn(conv), dim=2) + output = torch.softmax(conv, dim=2) + return output + +myCfg = [32,'M',64,'M',96,'M',128,'M',256] +class myNet(nn.Module): + def __init__(self,cfg=None,num_classes=3): + super(myNet, self).__init__() + if cfg is None: + cfg = myCfg + self.feature = self.make_layers(cfg, True) + self.classifier = nn.Linear(cfg[-1], num_classes) + def make_layers(self, cfg, batch_norm=False): + layers = [] + in_channels = 3 + for i in range(len(cfg)): + if i == 0: + conv2d =nn.Conv2d(in_channels, cfg[i], kernel_size=5,stride =1) + if batch_norm: + layers += [conv2d, nn.BatchNorm2d(cfg[i]), nn.ReLU(inplace=True)] + else: + layers += [conv2d, nn.ReLU(inplace=True)] + in_channels = cfg[i] + else : + if cfg[i] == 'M': + layers += [nn.MaxPool2d(kernel_size=3, stride=2,ceil_mode=True)] + else: + conv2d = nn.Conv2d(in_channels, cfg[i], kernel_size=3, padding=1,stride =1) + if batch_norm: + layers += [conv2d, nn.BatchNorm2d(cfg[i]), nn.ReLU(inplace=True)] + else: + layers += [conv2d, nn.ReLU(inplace=True)] + in_channels = cfg[i] + return nn.Sequential(*layers) + + def forward(self, x): + x = self.feature(x) + x = nn.AvgPool2d(kernel_size=3, stride=1)(x) + x = x.view(x.size(0), -1) + y = self.classifier(x) + return y + + +class MyNet_color(nn.Module): + def __init__(self, class_num=6): + super(MyNet_color, self).__init__() + self.class_num = class_num + self.backbone = nn.Sequential( + nn.Conv2d(in_channels=3, out_channels=16, kernel_size=(5, 5), stride=(1, 1)), # 0 + torch.nn.BatchNorm2d(16), + nn.ReLU(), + nn.MaxPool2d(kernel_size=(2, 2)), + nn.Dropout(0), + nn.Flatten(), + 
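+            # NOTE: 480 = 16 output channels x the flattened spatial size after the
+            # 5x5 conv and 2x2 max-pool, so this classifier head assumes a fixed input resolution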
nn.Linear(480, 64), + nn.Dropout(0), + nn.ReLU(), + nn.Linear(64, class_num), + nn.Dropout(0), + nn.Softmax(1) + ) + + def forward(self, x): + logits = self.backbone(x) + + return logits + + +class myNet_ocr_color(nn.Module): + def __init__(self,cfg=None,num_classes=78,export=False,color_num=None): + super(myNet_ocr_color, self).__init__() + if cfg is None: + cfg =[32,32,64,64,'M',128,128,'M',196,196,'M',256,256] + # cfg =[32,32,'M',64,64,'M',128,128,'M',256,256] + self.feature = self.make_layers(cfg, True) + self.export = export + self.color_num=color_num + self.conv_out_num=12 #颜色第一个卷积层输出通道12 + if self.color_num: + self.conv1=nn.Conv2d(cfg[-1],self.conv_out_num,kernel_size=3,stride=2) + self.bn1=nn.BatchNorm2d(self.conv_out_num) + self.relu1=nn.ReLU(inplace=True) + self.gap =nn.AdaptiveAvgPool2d(output_size=1) + self.color_classifier=nn.Conv2d(self.conv_out_num,self.color_num,kernel_size=1,stride=1) + self.color_bn = nn.BatchNorm2d(self.color_num) + self.flatten = nn.Flatten() + self.loc = nn.MaxPool2d((5, 2), (1, 1),(0,1),ceil_mode=False) + self.newCnn=nn.Conv2d(cfg[-1],num_classes,1,1) + # self.newBn=nn.BatchNorm2d(num_classes) + def make_layers(self, cfg, batch_norm=False): + layers = [] + in_channels = 3 + for i in range(len(cfg)): + if i == 0: + conv2d =nn.Conv2d(in_channels, cfg[i], kernel_size=5,stride =1) + if batch_norm: + layers += [conv2d, nn.BatchNorm2d(cfg[i]), nn.ReLU(inplace=True)] + else: + layers += [conv2d, nn.ReLU(inplace=True)] + in_channels = cfg[i] + else : + if cfg[i] == 'M': + layers += [nn.MaxPool2d(kernel_size=3, stride=2,ceil_mode=True)] + else: + conv2d = nn.Conv2d(in_channels, cfg[i], kernel_size=3, padding=(1,1),stride =1) + if batch_norm: + layers += [conv2d, nn.BatchNorm2d(cfg[i]), nn.ReLU(inplace=True)] + else: + layers += [conv2d, nn.ReLU(inplace=True)] + in_channels = cfg[i] + return nn.Sequential(*layers) + + def forward(self, x): + x = self.feature(x) + if self.color_num: + x_color=self.conv1(x) + x_color=self.bn1(x_color) + x_color =self.relu1(x_color) + x_color = self.color_classifier(x_color) + x_color = self.color_bn(x_color) + x_color =self.gap(x_color) + x_color = self.flatten(x_color) + x=self.loc(x) + x=self.newCnn(x) + + if self.export: + conv = x.squeeze(2) # b *512 * width + conv = conv.transpose(2,1) # [w, b, c] + if self.color_num: + return conv,x_color + return conv + else: + b, c, h, w = x.size() + assert h == 1, "the height of conv must be 1" + conv = x.squeeze(2) # b *512 * width + conv = conv.permute(2, 0, 1) # [w, b, c] + output = F.log_softmax(conv, dim=2) + if self.color_num: + return output,x_color + return output + + +if __name__ == '__main__': + x = torch.randn(1,3,48,216) + model = myNet_ocr(num_classes=78,export=True) + out = model(x) + print(out.shape) \ No newline at end of file diff --git a/plate_recognition/plate_rec.py b/plate_recognition/plate_rec.py new file mode 100644 index 0000000..7231e77 --- /dev/null +++ b/plate_recognition/plate_rec.py @@ -0,0 +1,119 @@ +from plate_recognition.plateNet import myNet_ocr,myNet_ocr_color +import torch +import torch.nn as nn +import cv2 +import numpy as np +import os +import time +import sys + +def cv_imread(path): #可以读取中文路径的图片 + img=cv2.imdecode(np.fromfile(path,dtype=np.uint8),-1) + return img + +def allFilePath(rootPath,allFIleList): + fileList = os.listdir(rootPath) + for temp in fileList: + if os.path.isfile(os.path.join(rootPath,temp)): + if temp.endswith('.jpg') or temp.endswith('.png') or temp.endswith('.JPG'): + allFIleList.append(os.path.join(rootPath,temp)) + else: + 
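+            # not a file: recurse into the subdirectory and keep collecting image paths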
allFilePath(os.path.join(rootPath,temp),allFIleList) +device = torch.device('cuda') if torch.cuda.is_available() else torch.device("cpu") +color=['黑色','蓝色','绿色','白色','黄色'] +plateName=r"#京沪津渝冀晋蒙辽吉黑苏浙皖闽赣鲁豫鄂湘粤桂琼川贵云藏陕甘青宁新学警港澳挂使领民航危0123456789ABCDEFGHJKLMNPQRSTUVWXYZ险品" +mean_value,std_value=(0.588,0.193) +def decodePlate(preds): + pre=0 + newPreds=[] + index=[] + for i in range(len(preds)): + if preds[i]!=0 and preds[i]!=pre: + newPreds.append(preds[i]) + index.append(i) + pre=preds[i] + return newPreds,index + +def image_processing(img,device): + img = cv2.resize(img, (168,48)) + img = np.reshape(img, (48, 168, 3)) + + # normalize + img = img.astype(np.float32) + img = (img / 255. - mean_value) / std_value + img = img.transpose([2, 0, 1]) + img = torch.from_numpy(img) + + img = img.to(device) + img = img.view(1, *img.size()) + return img + +def get_plate_result(img,device,model,is_color=False): + input = image_processing(img,device) + if is_color: #是否识别颜色 + preds,color_preds = model(input) + color_preds = torch.softmax(color_preds,dim=-1) + color_conf,color_index = torch.max(color_preds,dim=-1) + color_conf=color_conf.item() + else: + preds = model(input) + preds=torch.softmax(preds,dim=-1) + prob,index=preds.max(dim=-1) + index = index.view(-1).detach().cpu().numpy() + prob=prob.view(-1).detach().cpu().numpy() + + + # preds=preds.view(-1).detach().cpu().numpy() + newPreds,new_index=decodePlate(index) + prob=prob[new_index] + plate="" + for i in newPreds: + plate+=plateName[i] + # if not (plate[0] in plateName[1:44] ): + # return "" + if is_color: + return plate,prob,color[color_index],color_conf #返回车牌号以及每个字符的概率,以及颜色,和颜色的概率 + else: + return plate,prob + +def init_model(device,model_path,is_color = False): + # print( print(sys.path)) + # model_path ="plate_recognition/model/checkpoint_61_acc_0.9715.pth" + check_point = torch.load(model_path,map_location=device) + model_state=check_point['state_dict'] + cfg=check_point['cfg'] + color_classes=0 + if is_color: + color_classes=5 #颜色类别数 + model = myNet_ocr_color(num_classes=len(plateName),export=True,cfg=cfg,color_num=color_classes) + + model.load_state_dict(model_state,strict=False) + model.to(device) + model.eval() + return model + +# model = init_model(device) +if __name__ == '__main__': + model_path = r"weights/plate_rec_color.pth" + image_path ="images/tmp2424.png" + testPath = r"/mnt/Gpan/Mydata/pytorchPorject/CRNN/crnn_plate_recognition/images" + fileList=[] + allFilePath(testPath,fileList) +# result = get_plate_result(image_path,device) +# print(result) + is_color = False + model = init_model(device,model_path,is_color=is_color) + right=0 + begin = time.time() + + for imge_path in fileList: + img=cv2.imread(imge_path) + if is_color: + plate,_,plate_color,_=get_plate_result(img,device,model,is_color=is_color) + print(plate) + else: + plate,_=get_plate_result(img,device,model,is_color=is_color) + print(plate,imge_path) + + + diff --git a/predict.py b/predict.py new file mode 100644 index 0000000..a8c8dfe --- /dev/null +++ b/predict.py @@ -0,0 +1,16 @@ +from ultralytics import YOLO +from PIL import Image +from ultralytics.nn.tasks import attempt_load_weights + +# Load a model +model = YOLO('runs/pose/train4/weights/best.pt') # load an official model +# model = YOLO('path/to/best.pt') # load a custom model + +# Predict with the model +results = model('h_0_008396.jpg') # predict on an image +for r in results: + print(r.boxes) + im_array = r.plot() # plot a BGR numpy array of predictions + im = Image.fromarray(im_array[..., ::-1]) # RGB PIL image 
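+    # for a pose checkpoint like this one, the per-box keypoints (the four plate
+    # corners in this repo) are also available via r.keypoints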
+    # im.show()  # show image
+    im.save('result.jpg')  # save image
\ No newline at end of file
diff --git a/readme/README.md b/readme/README.md
new file mode 100644
index 0000000..496bdff
--- /dev/null
+++ b/readme/README.md
@@ -0,0 +1,32 @@
+### **License plate detection training**
+
+1. **Download the dataset:** [datasets](https://pan.baidu.com/s/1xa6zvOGjU02j8_lqHGVf0A) extraction code: pi6c. The data was selected and converted from the CCPD and CRPD datasets.
+   The dataset is in YOLO format:
+
+   ```
+   label x y w h pt1x pt1y pt2x pt2y pt3x pt3y pt4x pt4y
+   ```
+
+   The keypoints are, in order, (top-left, top-right, bottom-right, bottom-left).
+   All coordinates are normalized: x, y are the box center divided by the image width and height, w, h are the box width and height divided by the image width and height, and ptx, pty are the keypoint coordinates divided by the image width and height.
+
+   **For your own dataset**, annotate the four plate corners with the labelme tool ("create polygons"), then convert the annotations to YOLO format with json2yolo.py; the data is then ready for training.
+2. **Edit the train and val paths in ultralytics/datasets/yolov8-plate.yaml to point to your own data**
+
+   ```
+   train: /your/train/path  # change to your training set path
+   val: /your/val/path  # change to your validation set path
+   # number of classes
+   nc: 2  # 2 classes are used here: 0 single-layer plate, 1 double-layer plate
+
+   # class names
+   names: [ 'single','double']
+
+   ```
+3. **Training**
+
+   ```
+   python3 train.py --data data/widerface.yaml --cfg models/yolov5n-0.5.yaml --weights weights/plate_detect.pt --epoch 120
+   ```
+
+   Results are saved in the run folder.
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..099fdcb
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,43 @@
+# Ultralytics requirements
+# Usage: pip install -r requirements.txt
+
+# Base ----------------------------------------
+matplotlib>=3.2.2
+numpy>=1.21.6
+opencv-python>=4.6.0
+Pillow>=7.1.2
+PyYAML>=5.3.1
+requests>=2.23.0
+scipy>=1.4.1
+torch>=1.7.0
+torchvision>=0.8.1
+tqdm>=4.64.0
+
+# Logging -------------------------------------
+# tensorboard>=2.4.1
+# clearml
+# comet
+
+# Plotting ------------------------------------
+pandas>=1.1.4
+seaborn>=0.11.0
+
+# Export --------------------------------------
+# coremltools>=6.0  # CoreML export
+# onnx>=1.12.0  # ONNX export
+# onnxsim>=0.4.1  # ONNX simplifier
+# nvidia-pyindex  # TensorRT export
+# nvidia-tensorrt  # TensorRT export
+# scikit-learn==0.19.2  # CoreML quantization
+# tensorflow>=2.4.1  # TF exports (-cpu, -aarch64, -macos)
+# tflite-support
+# tensorflowjs>=3.9.0  # TF.js export
+# openvino-dev>=2022.3  # OpenVINO export
+
+# Extras --------------------------------------
+psutil  # system utilization
+thop>=0.1.1  # FLOPs computation
+# ipython  # interactive notebook
+# albumentations>=1.0.3
+# pycocotools>=2.0.6  # COCO mAP
+# roboflow
diff --git a/setup.py b/setup.py
new file mode 100644
index 0000000..ba0296e
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,65 @@
+# Ultralytics YOLO 🚀, GPL-3.0 license
+
+import re
+from pathlib import Path
+
+import pkg_resources as pkg
+from setuptools import find_packages, setup
+
+# Settings
+FILE = Path(__file__).resolve()
+PARENT = FILE.parent  # root directory
+README = (PARENT / 'README.md').read_text(encoding='utf-8')
+REQUIREMENTS = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements((PARENT / 'requirements.txt').read_text())]
+PKG_REQUIREMENTS = ['sentry_sdk']  # pip-only requirements
+
+
+def get_version():
+    file = PARENT / 'ultralytics/__init__.py'
+    return re.search(r'^__version__ = [\'"]([^\'"]*)[\'"]', file.read_text(encoding='utf-8'), re.M)[1]
+
+
+setup(
+    name='ultralytics',  # name of pypi package
+    version=get_version(),  # version of pypi package
+    python_requires='>=3.7',
+    license='GPL-3.0',
+    description='Ultralytics YOLOv8',
+    long_description=README,
+    long_description_content_type='text/markdown',
+    url='https://github.com/ultralytics/ultralytics',
+    project_urls={
+        'Bug Reports': 'https://github.com/ultralytics/ultralytics/issues',
+        'Funding': 'https://ultralytics.com',
+        'Source':
'https://github.com/ultralytics/ultralytics'}, + author='Ultralytics', + author_email='hello@ultralytics.com', + packages=find_packages(), # required + include_package_data=True, + install_requires=REQUIREMENTS + PKG_REQUIREMENTS, + extras_require={ + 'dev': ['check-manifest', 'pytest', 'pytest-cov', 'coverage', 'mkdocs-material', 'mkdocstrings[python]'], + 'export': ['coremltools>=6.0', 'onnx', 'onnxsim', 'onnxruntime', 'openvino-dev>=2022.3'], + 'tf': ['onnx2tf', 'sng4onnx', 'tflite_support', 'tensorflow']}, + classifiers=[ + 'Development Status :: 4 - Beta', + 'Intended Audience :: Developers', + 'Intended Audience :: Education', + 'Intended Audience :: Science/Research', + 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)', + 'Programming Language :: Python :: 3', + 'Programming Language :: Python :: 3.7', + 'Programming Language :: Python :: 3.8', + 'Programming Language :: Python :: 3.9', + 'Programming Language :: Python :: 3.10', + 'Programming Language :: Python :: 3.11', + 'Topic :: Software Development', + 'Topic :: Scientific/Engineering', + 'Topic :: Scientific/Engineering :: Artificial Intelligence', + 'Topic :: Scientific/Engineering :: Image Recognition', + 'Operating System :: POSIX :: Linux', + 'Operating System :: MacOS', + 'Operating System :: Microsoft :: Windows', ], + keywords='machine-learning, deep-learning, vision, ML, DL, AI, YOLO, YOLOv3, YOLOv5, YOLOv8, HUB, Ultralytics', + entry_points={ + 'console_scripts': ['yolo = ultralytics.yolo.cfg:entrypoint', 'ultralytics = ultralytics.yolo.cfg:entrypoint']}) diff --git a/test_widerface.py b/test_widerface.py new file mode 100644 index 0000000..d6e62f6 --- /dev/null +++ b/test_widerface.py @@ -0,0 +1,48 @@ +import os +import argparse +from ultralytics import YOLO + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--weights', nargs='+', type=str, default='runs/pose/yolov8n-face/weights/best.pt', help='model.pt path(s)') + parser.add_argument('--img-size', nargs= '+', type=int, default=640, help='inference size (pixels)') + parser.add_argument('--conf-thres', type=float, default=0.01, help='object confidence threshold') + parser.add_argument('--iou-thres', type=float, default=0.5, help='IOU threshold for NMS') + parser.add_argument('--device', type=str, default='cpu', help='augmented inference') + parser.add_argument('--augment', action='store_true', help='augmented inference') + parser.add_argument('--save_folder', default='./widerface_evaluate/widerface_txt/', type=str, help='Dir to save txt results') + parser.add_argument('--dataset_folder', default='./data/widerface/val/images/', type=str, help='dataset path') + opt = parser.parse_args() + print(opt) + + model = YOLO(opt.weights) + + # testing dataset + testset_folder = opt.dataset_folder + testset_list = opt.dataset_folder[:-7] + "wider_val.txt" + with open(testset_list, 'r') as fr: + test_dataset = fr.read().split() + num_images = len(test_dataset) + for img_name in test_dataset: + image_path = testset_folder + img_name + results = model.predict(source=image_path, imgsz=opt.img_size, conf=opt.conf_thres, iou=opt.iou_thres, augment=opt.augment, device=opt.device) + + save_name = opt.save_folder + img_name[:-4] + ".txt" + dirname = os.path.dirname(save_name) + if not os.path.isdir(dirname): + os.makedirs(dirname) + with open(save_name, "w") as fd: + result = results[0].cpu().numpy() + file_name = os.path.basename(save_name)[:-4] + "\n" + bboxs_num = str(result.boxes.shape[0]) + '\n' + fd.write(file_name) + 
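+            # WiderFace-style txt: image name, number of boxes, then one "x y w h conf" line per box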
fd.write(bboxs_num) + for box in result.boxes: + conf = box.conf[0] + cls = box.cls[0] + xyxy = box.xyxy[0] + x1 = int(xyxy[0] + 0.5) + y1 = int(xyxy[1] + 0.5) + x2 = int(xyxy[2] + 0.5) + y2 = int(xyxy[3] + 0.5) + fd.write('%d %d %d %d %.03f' % (x1, y1, x2-x1, y2-y1, conf if conf <= 1 else 1) + '\n') diff --git a/tests/test_cli.py b/tests/test_cli.py new file mode 100644 index 0000000..2dca8a8 --- /dev/null +++ b/tests/test_cli.py @@ -0,0 +1,97 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license + +import subprocess +from pathlib import Path + +from ultralytics.yolo.utils import LINUX, ONLINE, ROOT, SETTINGS + +MODEL = Path(SETTINGS['weights_dir']) / 'yolov8n' +CFG = 'yolov8n' + + +def run(cmd): + # Run a subprocess command with check=True + subprocess.run(cmd.split(), check=True) + + +def test_special_modes(): + run('yolo checks') + run('yolo settings') + run('yolo help') + + +# Train checks --------------------------------------------------------------------------------------------------------- +def test_train_det(): + run(f'yolo train detect model={CFG}.yaml data=coco8.yaml imgsz=32 epochs=1 v5loader') + + +def test_train_seg(): + run(f'yolo train segment model={CFG}-seg.yaml data=coco8-seg.yaml imgsz=32 epochs=1') + + +def test_train_cls(): + run(f'yolo train classify model={CFG}-cls.yaml data=imagenet10 imgsz=32 epochs=1') + + +def test_train_pose(): + run(f'yolo train pose model={CFG}-pose.yaml data=coco8-pose.yaml imgsz=32 epochs=1') + + +# Val checks ----------------------------------------------------------------------------------------------------------- +def test_val_detect(): + run(f'yolo val detect model={MODEL}.pt data=coco8.yaml imgsz=32') + + +def test_val_segment(): + run(f'yolo val segment model={MODEL}-seg.pt data=coco8-seg.yaml imgsz=32') + + +def test_val_classify(): + run(f'yolo val classify model={MODEL}-cls.pt data=imagenet10 imgsz=32') + + +def test_val_pose(): + run(f'yolo val pose model={MODEL}-pose.pt data=coco8-pose.yaml imgsz=32') + + +# Predict checks ------------------------------------------------------------------------------------------------------- +def test_predict_detect(): + run(f"yolo predict model={MODEL}.pt source={ROOT / 'assets'} imgsz=32 save save_crop save_txt") + if ONLINE: + run(f'yolo predict model={MODEL}.pt source=https://ultralytics.com/images/bus.jpg imgsz=32') + run(f'yolo predict model={MODEL}.pt source=https://ultralytics.com/assets/decelera_landscape_min.mov imgsz=32') + run(f'yolo predict model={MODEL}.pt source=https://ultralytics.com/assets/decelera_portrait_min.mov imgsz=32') + + +def test_predict_segment(): + run(f"yolo predict model={MODEL}-seg.pt source={ROOT / 'assets'} imgsz=32 save save_txt") + + +def test_predict_classify(): + run(f"yolo predict model={MODEL}-cls.pt source={ROOT / 'assets'} imgsz=32 save save_txt") + + +def test_predict_pose(): + run(f"yolo predict model={MODEL}-pose.pt source={ROOT / 'assets'} imgsz=32 save save_txt") + + +# Export checks -------------------------------------------------------------------------------------------------------- +def test_export_detect_torchscript(): + run(f'yolo export model={MODEL}.pt format=torchscript') + + +def test_export_segment_torchscript(): + run(f'yolo export model={MODEL}-seg.pt format=torchscript') + + +def test_export_classify_torchscript(): + run(f'yolo export model={MODEL}-cls.pt format=torchscript') + + +def test_export_classify_pose(): + run(f'yolo export model={MODEL}-pose.pt format=torchscript') + + +def test_export_detect_edgetpu(enabled=False): + if enabled and 
LINUX: + run(f'yolo export model={MODEL}.pt format=edgetpu') diff --git a/tests/test_engine.py b/tests/test_engine.py new file mode 100644 index 0000000..c20edc1 --- /dev/null +++ b/tests/test_engine.py @@ -0,0 +1,93 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license + +from pathlib import Path + +from ultralytics.yolo.cfg import get_cfg +from ultralytics.yolo.utils import DEFAULT_CFG, ROOT, SETTINGS +from ultralytics.yolo.v8 import classify, detect, segment + +CFG_DET = 'yolov8n.yaml' +CFG_SEG = 'yolov8n-seg.yaml' +CFG_CLS = 'squeezenet1_0' +CFG = get_cfg(DEFAULT_CFG) +MODEL = Path(SETTINGS['weights_dir']) / 'yolov8n' +SOURCE = ROOT / 'assets' + + +def test_detect(): + overrides = {'data': 'coco8.yaml', 'model': CFG_DET, 'imgsz': 32, 'epochs': 1, 'save': False} + CFG.data = 'coco8.yaml' + + # Trainer + trainer = detect.DetectionTrainer(overrides=overrides) + trainer.train() + + # Validator + val = detect.DetectionValidator(args=CFG) + val(model=trainer.best) # validate best.pt + + # Predictor + pred = detect.DetectionPredictor(overrides={'imgsz': [64, 64]}) + result = pred(source=SOURCE, model=f'{MODEL}.pt') + assert len(result), 'predictor test failed' + + overrides['resume'] = trainer.last + trainer = detect.DetectionTrainer(overrides=overrides) + try: + trainer.train() + except Exception as e: + print(f'Expected exception caught: {e}') + return + + Exception('Resume test failed!') + + +def test_segment(): + overrides = {'data': 'coco8-seg.yaml', 'model': CFG_SEG, 'imgsz': 32, 'epochs': 1, 'save': False} + CFG.data = 'coco8-seg.yaml' + CFG.v5loader = False + # YOLO(CFG_SEG).train(**overrides) # works + + # trainer + trainer = segment.SegmentationTrainer(overrides=overrides) + trainer.train() + + # Validator + val = segment.SegmentationValidator(args=CFG) + val(model=trainer.best) # validate best.pt + + # Predictor + pred = segment.SegmentationPredictor(overrides={'imgsz': [64, 64]}) + result = pred(source=SOURCE, model=f'{MODEL}-seg.pt') + assert len(result), 'predictor test failed' + + # Test resume + overrides['resume'] = trainer.last + trainer = segment.SegmentationTrainer(overrides=overrides) + try: + trainer.train() + except Exception as e: + print(f'Expected exception caught: {e}') + return + + Exception('Resume test failed!') + + +def test_classify(): + overrides = {'data': 'imagenet10', 'model': 'yolov8n-cls.yaml', 'imgsz': 32, 'epochs': 1, 'save': False} + CFG.data = 'imagenet10' + CFG.imgsz = 32 + # YOLO(CFG_SEG).train(**overrides) # works + + # Trainer + trainer = classify.ClassificationTrainer(overrides=overrides) + trainer.train() + + # Validator + val = classify.ClassificationValidator(args=CFG) + val(model=trainer.best) + + # Predictor + pred = classify.ClassificationPredictor(overrides={'imgsz': [64, 64]}) + result = pred(source=SOURCE, model=trainer.best) + assert len(result), 'predictor test failed' diff --git a/tests/test_python.py b/tests/test_python.py new file mode 100644 index 0000000..ee2a190 --- /dev/null +++ b/tests/test_python.py @@ -0,0 +1,222 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license + +from pathlib import Path + +import cv2 +import numpy as np +import torch +from PIL import Image + +from ultralytics import YOLO +from ultralytics.yolo.data.build import load_inference_source +from ultralytics.yolo.utils import LINUX, ONLINE, ROOT, SETTINGS + +MODEL = Path(SETTINGS['weights_dir']) / 'yolov8n.pt' +CFG = 'yolov8n.yaml' +SOURCE = ROOT / 'assets/bus.jpg' +SOURCE_GREYSCALE = Path(f'{SOURCE.parent / SOURCE.stem}_greyscale.jpg') +SOURCE_RGBA = Path(f'{SOURCE.parent / 
SOURCE.stem}_4ch.png') + +# Convert SOURCE to greyscale and 4-ch +im = Image.open(SOURCE) +im.convert('L').save(SOURCE_GREYSCALE) # greyscale +im.convert('RGBA').save(SOURCE_RGBA) # 4-ch PNG with alpha + + +def test_model_forward(): + model = YOLO(CFG) + model(SOURCE) + + +def test_model_info(): + model = YOLO(CFG) + model.info() + model = YOLO(MODEL) + model.info(verbose=True) + + +def test_model_fuse(): + model = YOLO(CFG) + model.fuse() + model = YOLO(MODEL) + model.fuse() + + +def test_predict_dir(): + model = YOLO(MODEL) + model(source=ROOT / 'assets') + + +def test_predict_img(): + model = YOLO(MODEL) + seg_model = YOLO('yolov8n-seg.pt') + cls_model = YOLO('yolov8n-cls.pt') + im = cv2.imread(str(SOURCE)) + assert len(model(source=Image.open(SOURCE), save=True, verbose=True)) == 1 # PIL + assert len(model(source=im, save=True, save_txt=True)) == 1 # ndarray + assert len(model(source=[im, im], save=True, save_txt=True)) == 2 # batch + assert len(list(model(source=[im, im], save=True, stream=True))) == 2 # stream + assert len(model(torch.zeros(320, 640, 3).numpy())) == 1 # tensor to numpy + batch = [ + str(SOURCE), # filename + Path(SOURCE), # Path + 'https://ultralytics.com/images/zidane.jpg' if ONLINE else SOURCE, # URI + cv2.imread(str(SOURCE)), # OpenCV + Image.open(SOURCE), # PIL + np.zeros((320, 640, 3))] # numpy + assert len(model(batch)) == len(batch) # multiple sources in a batch + + # Test tensor inference + im = cv2.imread(str(SOURCE)) # OpenCV + t = cv2.resize(im, (32, 32)) + t = torch.from_numpy(t.transpose((2, 0, 1))) + t = torch.stack([t, t, t, t]) + results = model(t) + assert len(results) == t.shape[0] + results = seg_model(t) + assert len(results) == t.shape[0] + results = cls_model(t) + assert len(results) == t.shape[0] + + +def test_predict_grey_and_4ch(): + model = YOLO(MODEL) + for f in SOURCE_RGBA, SOURCE_GREYSCALE: + for source in Image.open(f), cv2.imread(str(f)), f: + model(source, save=True, verbose=True) + + +def test_val(): + model = YOLO(MODEL) + model.val(data='coco8.yaml', imgsz=32) + + +def test_val_scratch(): + model = YOLO(CFG) + model.val(data='coco8.yaml', imgsz=32) + + +def test_amp(): + if torch.cuda.is_available(): + from ultralytics.yolo.engine.trainer import check_amp + model = YOLO(MODEL).model.cuda() + assert check_amp(model) + + +def test_train_scratch(): + model = YOLO(CFG) + model.train(data='coco8.yaml', epochs=1, imgsz=32) + model(SOURCE) + + +def test_train_pretrained(): + model = YOLO(MODEL) + model.train(data='coco8.yaml', epochs=1, imgsz=32) + model(SOURCE) + + +def test_export_torchscript(): + model = YOLO(MODEL) + f = model.export(format='torchscript') + YOLO(f)(SOURCE) # exported model inference + + +def test_export_torchscript_scratch(): + model = YOLO(CFG) + f = model.export(format='torchscript') + YOLO(f)(SOURCE) # exported model inference + + +def test_export_onnx(): + model = YOLO(MODEL) + f = model.export(format='onnx') + YOLO(f)(SOURCE) # exported model inference + + +def test_export_openvino(): + model = YOLO(MODEL) + f = model.export(format='openvino') + YOLO(f)(SOURCE) # exported model inference + + +def test_export_coreml(): # sourcery skip: move-assign + model = YOLO(MODEL) + model.export(format='coreml') + # if MACOS: + # YOLO(f)(SOURCE) # model prediction only supported on macOS + + +def test_export_tflite(enabled=False): + # TF suffers from install conflicts on Windows and macOS + if enabled and LINUX: + model = YOLO(MODEL) + f = model.export(format='tflite') + YOLO(f)(SOURCE) + + +def test_export_pb(enabled=False): 
+ # TF suffers from install conflicts on Windows and macOS + if enabled and LINUX: + model = YOLO(MODEL) + f = model.export(format='pb') + YOLO(f)(SOURCE) + + +def test_export_paddle(enabled=False): + # Paddle protobuf requirements conflicting with onnx protobuf requirements + if enabled: + model = YOLO(MODEL) + model.export(format='paddle') + + +def test_all_model_yamls(): + for m in list((ROOT / 'models').rglob('*.yaml')): + YOLO(m.name) + + +def test_workflow(): + model = YOLO(MODEL) + model.train(data='coco8.yaml', epochs=1, imgsz=32) + model.val() + model.predict(SOURCE) + model.export(format='onnx') # export a model to ONNX format + + +def test_predict_callback_and_setup(): + # test callback addition for prediction + def on_predict_batch_end(predictor): # results -> List[batch_size] + path, _, im0s, _, _ = predictor.batch + # print('on_predict_batch_end', im0s[0].shape) + im0s = im0s if isinstance(im0s, list) else [im0s] + bs = [predictor.dataset.bs for _ in range(len(path))] + predictor.results = zip(predictor.results, im0s, bs) + + model = YOLO(MODEL) + model.add_callback('on_predict_batch_end', on_predict_batch_end) + + dataset = load_inference_source(source=SOURCE, transforms=model.transforms) + bs = dataset.bs # noqa access predictor properties + results = model.predict(dataset, stream=True) # source already setup + for _, (result, im0, bs) in enumerate(results): + print('test_callback', im0.shape) + print('test_callback', bs) + boxes = result.boxes # Boxes object for bbox outputs + print(boxes) + + +def test_result(): + model = YOLO('yolov8n-seg.pt') + res = model([SOURCE, SOURCE]) + res[0].plot(show_conf=False) # raises warning + res[0].plot(conf=True, boxes=False, masks=True) + res[0] = res[0].cpu().numpy() + print(res[0].path, res[0].masks.masks) + model = YOLO('yolov8n.pt') + res = model(SOURCE) + res[0].plot() + print(res[0].path) + + model = YOLO('yolov8n-cls.pt') + res = model(SOURCE) + res[0].plot(probs=False) + print(res[0].path) diff --git a/train.py b/train.py new file mode 100644 index 0000000..cc891ff --- /dev/null +++ b/train.py @@ -0,0 +1,10 @@ +import os +# os.environ["OMP_NUM_THREADS"]='2' + +from ultralytics import YOLO +# Load a model +model = YOLO('ultralytics/models/v8/yolov8-lite-t-pose.yaml') # build a new model from YAML +model = YOLO('yolov8-lite-t.pt') # load a pretrained model (recommended for training) + +# Train the model +model.train(data='yolov8-plate.yaml', epochs=100, imgsz=320, batch=16, device=[0]) diff --git a/ultralytics/__init__.py b/ultralytics/__init__.py new file mode 100644 index 0000000..a4d8652 --- /dev/null +++ b/ultralytics/__init__.py @@ -0,0 +1,14 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +__version__ = '8.0.134' + +from ultralytics.hub import start +from ultralytics.vit.rtdetr import RTDETR +from ultralytics.vit.sam import SAM +from ultralytics.yolo.engine.model import YOLO +from ultralytics.yolo.fastsam import FastSAM +from ultralytics.yolo.nas import NAS +from ultralytics.yolo.utils.checks import check_yolo as checks +from ultralytics.yolo.utils.downloads import download + +__all__ = '__version__', 'YOLO', 'NAS', 'SAM', 'FastSAM', 'RTDETR', 'checks', 'download', 'start' # allow simpler import diff --git a/ultralytics/assets/bus.jpg b/ultralytics/assets/bus.jpg new file mode 100644 index 0000000..40eaaf5 Binary files /dev/null and b/ultralytics/assets/bus.jpg differ diff --git a/ultralytics/assets/zidane.jpg b/ultralytics/assets/zidane.jpg new file mode 100644 index 0000000..eeab1cd Binary files /dev/null and 
b/ultralytics/assets/zidane.jpg differ diff --git a/ultralytics/datasets/Argoverse.yaml b/ultralytics/datasets/Argoverse.yaml new file mode 100644 index 0000000..e78758a --- /dev/null +++ b/ultralytics/datasets/Argoverse.yaml @@ -0,0 +1,73 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +# Argoverse-HD dataset (ring-front-center camera) http://www.cs.cmu.edu/~mengtial/proj/streaming/ by Argo AI +# Example usage: yolo train data=Argoverse.yaml +# parent +# ├── ultralytics +# └── datasets +# └── Argoverse ← downloads here (31.3 GB) + + +# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] +path: ../datasets/Argoverse # dataset root dir +train: Argoverse-1.1/images/train/ # train images (relative to 'path') 39384 images +val: Argoverse-1.1/images/val/ # val images (relative to 'path') 15062 images +test: Argoverse-1.1/images/test/ # test images (optional) https://eval.ai/web/challenges/challenge-page/800/overview + +# Classes +names: + 0: person + 1: bicycle + 2: car + 3: motorcycle + 4: bus + 5: truck + 6: traffic_light + 7: stop_sign + + +# Download script/URL (optional) --------------------------------------------------------------------------------------- +download: | + import json + from tqdm import tqdm + from ultralytics.yolo.utils.downloads import download + from pathlib import Path + + def argoverse2yolo(set): + labels = {} + a = json.load(open(set, "rb")) + for annot in tqdm(a['annotations'], desc=f"Converting {set} to YOLOv5 format..."): + img_id = annot['image_id'] + img_name = a['images'][img_id]['name'] + img_label_name = f'{img_name[:-3]}txt' + + cls = annot['category_id'] # instance class id + x_center, y_center, width, height = annot['bbox'] + x_center = (x_center + width / 2) / 1920.0 # offset and scale + y_center = (y_center + height / 2) / 1200.0 # offset and scale + width /= 1920.0 # scale + height /= 1200.0 # scale + + img_dir = set.parents[2] / 'Argoverse-1.1' / 'labels' / a['seq_dirs'][a['images'][annot['image_id']]['sid']] + if not img_dir.exists(): + img_dir.mkdir(parents=True, exist_ok=True) + + k = str(img_dir / img_label_name) + if k not in labels: + labels[k] = [] + labels[k].append(f"{cls} {x_center} {y_center} {width} {height}\n") + + for k in labels: + with open(k, "w") as f: + f.writelines(labels[k]) + + + # Download + dir = Path(yaml['path']) # dataset root dir + urls = ['https://argoverse-hd.s3.us-east-2.amazonaws.com/Argoverse-HD-Full.zip'] + download(urls, dir=dir) + + # Convert + annotations_dir = 'Argoverse-HD/annotations/' + (dir / 'Argoverse-1.1' / 'tracking').rename(dir / 'Argoverse-1.1' / 'images') # rename 'tracking' to 'images' + for d in "train.json", "val.json": + argoverse2yolo(dir / annotations_dir / d) # convert VisDrone annotations to YOLO labels diff --git a/ultralytics/datasets/GlobalWheat2020.yaml b/ultralytics/datasets/GlobalWheat2020.yaml new file mode 100644 index 0000000..de29aa7 --- /dev/null +++ b/ultralytics/datasets/GlobalWheat2020.yaml @@ -0,0 +1,54 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +# Global Wheat 2020 dataset http://www.global-wheat.com/ by University of Saskatchewan +# Example usage: yolo train data=GlobalWheat2020.yaml +# parent +# ├── ultralytics +# └── datasets +# └── GlobalWheat2020 ← downloads here (7.0 GB) + + +# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] 
+path: ../datasets/GlobalWheat2020 # dataset root dir +train: # train images (relative to 'path') 3422 images + - images/arvalis_1 + - images/arvalis_2 + - images/arvalis_3 + - images/ethz_1 + - images/rres_1 + - images/inrae_1 + - images/usask_1 +val: # val images (relative to 'path') 748 images (WARNING: train set contains ethz_1) + - images/ethz_1 +test: # test images (optional) 1276 images + - images/utokyo_1 + - images/utokyo_2 + - images/nau_1 + - images/uq_1 + +# Classes +names: + 0: wheat_head + + +# Download script/URL (optional) --------------------------------------------------------------------------------------- +download: | + from ultralytics.yolo.utils.downloads import download + from pathlib import Path + + # Download + dir = Path(yaml['path']) # dataset root dir + urls = ['https://zenodo.org/record/4298502/files/global-wheat-codalab-official.zip', + 'https://github.com/ultralytics/yolov5/releases/download/v1.0/GlobalWheat2020_labels.zip'] + download(urls, dir=dir) + + # Make Directories + for p in 'annotations', 'images', 'labels': + (dir / p).mkdir(parents=True, exist_ok=True) + + # Move + for p in 'arvalis_1', 'arvalis_2', 'arvalis_3', 'ethz_1', 'rres_1', 'inrae_1', 'usask_1', \ + 'utokyo_1', 'utokyo_2', 'nau_1', 'uq_1': + (dir / p).rename(dir / 'images' / p) # move to /images + f = (dir / p).with_suffix('.json') # json file + if f.exists(): + f.rename((dir / 'annotations' / p).with_suffix('.json')) # move to /annotations diff --git a/ultralytics/datasets/ImageNet.yaml b/ultralytics/datasets/ImageNet.yaml new file mode 100644 index 0000000..c1aa155 --- /dev/null +++ b/ultralytics/datasets/ImageNet.yaml @@ -0,0 +1,2025 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +# ImageNet-1k dataset https://www.image-net.org/index.php by Stanford University +# Simplified class names from https://github.com/anishathalye/imagenet-simple-labels +# Example usage: yolo train task=classify data=imagenet +# parent +# ├── ultralytics +# └── datasets +# └── imagenet ← downloads here (144 GB) + + +# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] 
+path: ../datasets/imagenet # dataset root dir +train: train # train images (relative to 'path') 1281167 images +val: val # val images (relative to 'path') 50000 images +test: # test images (optional) + +# Classes +names: + 0: tench + 1: goldfish + 2: great white shark + 3: tiger shark + 4: hammerhead shark + 5: electric ray + 6: stingray + 7: cock + 8: hen + 9: ostrich + 10: brambling + 11: goldfinch + 12: house finch + 13: junco + 14: indigo bunting + 15: American robin + 16: bulbul + 17: jay + 18: magpie + 19: chickadee + 20: American dipper + 21: kite + 22: bald eagle + 23: vulture + 24: great grey owl + 25: fire salamander + 26: smooth newt + 27: newt + 28: spotted salamander + 29: axolotl + 30: American bullfrog + 31: tree frog + 32: tailed frog + 33: loggerhead sea turtle + 34: leatherback sea turtle + 35: mud turtle + 36: terrapin + 37: box turtle + 38: banded gecko + 39: green iguana + 40: Carolina anole + 41: desert grassland whiptail lizard + 42: agama + 43: frilled-necked lizard + 44: alligator lizard + 45: Gila monster + 46: European green lizard + 47: chameleon + 48: Komodo dragon + 49: Nile crocodile + 50: American alligator + 51: triceratops + 52: worm snake + 53: ring-necked snake + 54: eastern hog-nosed snake + 55: smooth green snake + 56: kingsnake + 57: garter snake + 58: water snake + 59: vine snake + 60: night snake + 61: boa constrictor + 62: African rock python + 63: Indian cobra + 64: green mamba + 65: sea snake + 66: Saharan horned viper + 67: eastern diamondback rattlesnake + 68: sidewinder + 69: trilobite + 70: harvestman + 71: scorpion + 72: yellow garden spider + 73: barn spider + 74: European garden spider + 75: southern black widow + 76: tarantula + 77: wolf spider + 78: tick + 79: centipede + 80: black grouse + 81: ptarmigan + 82: ruffed grouse + 83: prairie grouse + 84: peacock + 85: quail + 86: partridge + 87: grey parrot + 88: macaw + 89: sulphur-crested cockatoo + 90: lorikeet + 91: coucal + 92: bee eater + 93: hornbill + 94: hummingbird + 95: jacamar + 96: toucan + 97: duck + 98: red-breasted merganser + 99: goose + 100: black swan + 101: tusker + 102: echidna + 103: platypus + 104: wallaby + 105: koala + 106: wombat + 107: jellyfish + 108: sea anemone + 109: brain coral + 110: flatworm + 111: nematode + 112: conch + 113: snail + 114: slug + 115: sea slug + 116: chiton + 117: chambered nautilus + 118: Dungeness crab + 119: rock crab + 120: fiddler crab + 121: red king crab + 122: American lobster + 123: spiny lobster + 124: crayfish + 125: hermit crab + 126: isopod + 127: white stork + 128: black stork + 129: spoonbill + 130: flamingo + 131: little blue heron + 132: great egret + 133: bittern + 134: crane (bird) + 135: limpkin + 136: common gallinule + 137: American coot + 138: bustard + 139: ruddy turnstone + 140: dunlin + 141: common redshank + 142: dowitcher + 143: oystercatcher + 144: pelican + 145: king penguin + 146: albatross + 147: grey whale + 148: killer whale + 149: dugong + 150: sea lion + 151: Chihuahua + 152: Japanese Chin + 153: Maltese + 154: Pekingese + 155: Shih Tzu + 156: King Charles Spaniel + 157: Papillon + 158: toy terrier + 159: Rhodesian Ridgeback + 160: Afghan Hound + 161: Basset Hound + 162: Beagle + 163: Bloodhound + 164: Bluetick Coonhound + 165: Black and Tan Coonhound + 166: Treeing Walker Coonhound + 167: English foxhound + 168: Redbone Coonhound + 169: borzoi + 170: Irish Wolfhound + 171: Italian Greyhound + 172: Whippet + 173: Ibizan Hound + 174: Norwegian Elkhound + 175: Otterhound + 176: Saluki + 177: Scottish 
Deerhound + 178: Weimaraner + 179: Staffordshire Bull Terrier + 180: American Staffordshire Terrier + 181: Bedlington Terrier + 182: Border Terrier + 183: Kerry Blue Terrier + 184: Irish Terrier + 185: Norfolk Terrier + 186: Norwich Terrier + 187: Yorkshire Terrier + 188: Wire Fox Terrier + 189: Lakeland Terrier + 190: Sealyham Terrier + 191: Airedale Terrier + 192: Cairn Terrier + 193: Australian Terrier + 194: Dandie Dinmont Terrier + 195: Boston Terrier + 196: Miniature Schnauzer + 197: Giant Schnauzer + 198: Standard Schnauzer + 199: Scottish Terrier + 200: Tibetan Terrier + 201: Australian Silky Terrier + 202: Soft-coated Wheaten Terrier + 203: West Highland White Terrier + 204: Lhasa Apso + 205: Flat-Coated Retriever + 206: Curly-coated Retriever + 207: Golden Retriever + 208: Labrador Retriever + 209: Chesapeake Bay Retriever + 210: German Shorthaired Pointer + 211: Vizsla + 212: English Setter + 213: Irish Setter + 214: Gordon Setter + 215: Brittany + 216: Clumber Spaniel + 217: English Springer Spaniel + 218: Welsh Springer Spaniel + 219: Cocker Spaniels + 220: Sussex Spaniel + 221: Irish Water Spaniel + 222: Kuvasz + 223: Schipperke + 224: Groenendael + 225: Malinois + 226: Briard + 227: Australian Kelpie + 228: Komondor + 229: Old English Sheepdog + 230: Shetland Sheepdog + 231: collie + 232: Border Collie + 233: Bouvier des Flandres + 234: Rottweiler + 235: German Shepherd Dog + 236: Dobermann + 237: Miniature Pinscher + 238: Greater Swiss Mountain Dog + 239: Bernese Mountain Dog + 240: Appenzeller Sennenhund + 241: Entlebucher Sennenhund + 242: Boxer + 243: Bullmastiff + 244: Tibetan Mastiff + 245: French Bulldog + 246: Great Dane + 247: St. Bernard + 248: husky + 249: Alaskan Malamute + 250: Siberian Husky + 251: Dalmatian + 252: Affenpinscher + 253: Basenji + 254: pug + 255: Leonberger + 256: Newfoundland + 257: Pyrenean Mountain Dog + 258: Samoyed + 259: Pomeranian + 260: Chow Chow + 261: Keeshond + 262: Griffon Bruxellois + 263: Pembroke Welsh Corgi + 264: Cardigan Welsh Corgi + 265: Toy Poodle + 266: Miniature Poodle + 267: Standard Poodle + 268: Mexican hairless dog + 269: grey wolf + 270: Alaskan tundra wolf + 271: red wolf + 272: coyote + 273: dingo + 274: dhole + 275: African wild dog + 276: hyena + 277: red fox + 278: kit fox + 279: Arctic fox + 280: grey fox + 281: tabby cat + 282: tiger cat + 283: Persian cat + 284: Siamese cat + 285: Egyptian Mau + 286: cougar + 287: lynx + 288: leopard + 289: snow leopard + 290: jaguar + 291: lion + 292: tiger + 293: cheetah + 294: brown bear + 295: American black bear + 296: polar bear + 297: sloth bear + 298: mongoose + 299: meerkat + 300: tiger beetle + 301: ladybug + 302: ground beetle + 303: longhorn beetle + 304: leaf beetle + 305: dung beetle + 306: rhinoceros beetle + 307: weevil + 308: fly + 309: bee + 310: ant + 311: grasshopper + 312: cricket + 313: stick insect + 314: cockroach + 315: mantis + 316: cicada + 317: leafhopper + 318: lacewing + 319: dragonfly + 320: damselfly + 321: red admiral + 322: ringlet + 323: monarch butterfly + 324: small white + 325: sulphur butterfly + 326: gossamer-winged butterfly + 327: starfish + 328: sea urchin + 329: sea cucumber + 330: cottontail rabbit + 331: hare + 332: Angora rabbit + 333: hamster + 334: porcupine + 335: fox squirrel + 336: marmot + 337: beaver + 338: guinea pig + 339: common sorrel + 340: zebra + 341: pig + 342: wild boar + 343: warthog + 344: hippopotamus + 345: ox + 346: water buffalo + 347: bison + 348: ram + 349: bighorn sheep + 350: Alpine ibex + 351: hartebeest 
+ 352: impala + 353: gazelle + 354: dromedary + 355: llama + 356: weasel + 357: mink + 358: European polecat + 359: black-footed ferret + 360: otter + 361: skunk + 362: badger + 363: armadillo + 364: three-toed sloth + 365: orangutan + 366: gorilla + 367: chimpanzee + 368: gibbon + 369: siamang + 370: guenon + 371: patas monkey + 372: baboon + 373: macaque + 374: langur + 375: black-and-white colobus + 376: proboscis monkey + 377: marmoset + 378: white-headed capuchin + 379: howler monkey + 380: titi + 381: Geoffroy's spider monkey + 382: common squirrel monkey + 383: ring-tailed lemur + 384: indri + 385: Asian elephant + 386: African bush elephant + 387: red panda + 388: giant panda + 389: snoek + 390: eel + 391: coho salmon + 392: rock beauty + 393: clownfish + 394: sturgeon + 395: garfish + 396: lionfish + 397: pufferfish + 398: abacus + 399: abaya + 400: academic gown + 401: accordion + 402: acoustic guitar + 403: aircraft carrier + 404: airliner + 405: airship + 406: altar + 407: ambulance + 408: amphibious vehicle + 409: analog clock + 410: apiary + 411: apron + 412: waste container + 413: assault rifle + 414: backpack + 415: bakery + 416: balance beam + 417: balloon + 418: ballpoint pen + 419: Band-Aid + 420: banjo + 421: baluster + 422: barbell + 423: barber chair + 424: barbershop + 425: barn + 426: barometer + 427: barrel + 428: wheelbarrow + 429: baseball + 430: basketball + 431: bassinet + 432: bassoon + 433: swimming cap + 434: bath towel + 435: bathtub + 436: station wagon + 437: lighthouse + 438: beaker + 439: military cap + 440: beer bottle + 441: beer glass + 442: bell-cot + 443: bib + 444: tandem bicycle + 445: bikini + 446: ring binder + 447: binoculars + 448: birdhouse + 449: boathouse + 450: bobsleigh + 451: bolo tie + 452: poke bonnet + 453: bookcase + 454: bookstore + 455: bottle cap + 456: bow + 457: bow tie + 458: brass + 459: bra + 460: breakwater + 461: breastplate + 462: broom + 463: bucket + 464: buckle + 465: bulletproof vest + 466: high-speed train + 467: butcher shop + 468: taxicab + 469: cauldron + 470: candle + 471: cannon + 472: canoe + 473: can opener + 474: cardigan + 475: car mirror + 476: carousel + 477: tool kit + 478: carton + 479: car wheel + 480: automated teller machine + 481: cassette + 482: cassette player + 483: castle + 484: catamaran + 485: CD player + 486: cello + 487: mobile phone + 488: chain + 489: chain-link fence + 490: chain mail + 491: chainsaw + 492: chest + 493: chiffonier + 494: chime + 495: china cabinet + 496: Christmas stocking + 497: church + 498: movie theater + 499: cleaver + 500: cliff dwelling + 501: cloak + 502: clogs + 503: cocktail shaker + 504: coffee mug + 505: coffeemaker + 506: coil + 507: combination lock + 508: computer keyboard + 509: confectionery store + 510: container ship + 511: convertible + 512: corkscrew + 513: cornet + 514: cowboy boot + 515: cowboy hat + 516: cradle + 517: crane (machine) + 518: crash helmet + 519: crate + 520: infant bed + 521: Crock Pot + 522: croquet ball + 523: crutch + 524: cuirass + 525: dam + 526: desk + 527: desktop computer + 528: rotary dial telephone + 529: diaper + 530: digital clock + 531: digital watch + 532: dining table + 533: dishcloth + 534: dishwasher + 535: disc brake + 536: dock + 537: dog sled + 538: dome + 539: doormat + 540: drilling rig + 541: drum + 542: drumstick + 543: dumbbell + 544: Dutch oven + 545: electric fan + 546: electric guitar + 547: electric locomotive + 548: entertainment center + 549: envelope + 550: espresso machine + 551: face powder + 552: 
feather boa + 553: filing cabinet + 554: fireboat + 555: fire engine + 556: fire screen sheet + 557: flagpole + 558: flute + 559: folding chair + 560: football helmet + 561: forklift + 562: fountain + 563: fountain pen + 564: four-poster bed + 565: freight car + 566: French horn + 567: frying pan + 568: fur coat + 569: garbage truck + 570: gas mask + 571: gas pump + 572: goblet + 573: go-kart + 574: golf ball + 575: golf cart + 576: gondola + 577: gong + 578: gown + 579: grand piano + 580: greenhouse + 581: grille + 582: grocery store + 583: guillotine + 584: barrette + 585: hair spray + 586: half-track + 587: hammer + 588: hamper + 589: hair dryer + 590: hand-held computer + 591: handkerchief + 592: hard disk drive + 593: harmonica + 594: harp + 595: harvester + 596: hatchet + 597: holster + 598: home theater + 599: honeycomb + 600: hook + 601: hoop skirt + 602: horizontal bar + 603: horse-drawn vehicle + 604: hourglass + 605: iPod + 606: clothes iron + 607: jack-o'-lantern + 608: jeans + 609: jeep + 610: T-shirt + 611: jigsaw puzzle + 612: pulled rickshaw + 613: joystick + 614: kimono + 615: knee pad + 616: knot + 617: lab coat + 618: ladle + 619: lampshade + 620: laptop computer + 621: lawn mower + 622: lens cap + 623: paper knife + 624: library + 625: lifeboat + 626: lighter + 627: limousine + 628: ocean liner + 629: lipstick + 630: slip-on shoe + 631: lotion + 632: speaker + 633: loupe + 634: sawmill + 635: magnetic compass + 636: mail bag + 637: mailbox + 638: tights + 639: tank suit + 640: manhole cover + 641: maraca + 642: marimba + 643: mask + 644: match + 645: maypole + 646: maze + 647: measuring cup + 648: medicine chest + 649: megalith + 650: microphone + 651: microwave oven + 652: military uniform + 653: milk can + 654: minibus + 655: miniskirt + 656: minivan + 657: missile + 658: mitten + 659: mixing bowl + 660: mobile home + 661: Model T + 662: modem + 663: monastery + 664: monitor + 665: moped + 666: mortar + 667: square academic cap + 668: mosque + 669: mosquito net + 670: scooter + 671: mountain bike + 672: tent + 673: computer mouse + 674: mousetrap + 675: moving van + 676: muzzle + 677: nail + 678: neck brace + 679: necklace + 680: nipple + 681: notebook computer + 682: obelisk + 683: oboe + 684: ocarina + 685: odometer + 686: oil filter + 687: organ + 688: oscilloscope + 689: overskirt + 690: bullock cart + 691: oxygen mask + 692: packet + 693: paddle + 694: paddle wheel + 695: padlock + 696: paintbrush + 697: pajamas + 698: palace + 699: pan flute + 700: paper towel + 701: parachute + 702: parallel bars + 703: park bench + 704: parking meter + 705: passenger car + 706: patio + 707: payphone + 708: pedestal + 709: pencil case + 710: pencil sharpener + 711: perfume + 712: Petri dish + 713: photocopier + 714: plectrum + 715: Pickelhaube + 716: picket fence + 717: pickup truck + 718: pier + 719: piggy bank + 720: pill bottle + 721: pillow + 722: ping-pong ball + 723: pinwheel + 724: pirate ship + 725: pitcher + 726: hand plane + 727: planetarium + 728: plastic bag + 729: plate rack + 730: plow + 731: plunger + 732: Polaroid camera + 733: pole + 734: police van + 735: poncho + 736: billiard table + 737: soda bottle + 738: pot + 739: potter's wheel + 740: power drill + 741: prayer rug + 742: printer + 743: prison + 744: projectile + 745: projector + 746: hockey puck + 747: punching bag + 748: purse + 749: quill + 750: quilt + 751: race car + 752: racket + 753: radiator + 754: radio + 755: radio telescope + 756: rain barrel + 757: recreational vehicle + 758: reel + 759: 
reflex camera + 760: refrigerator + 761: remote control + 762: restaurant + 763: revolver + 764: rifle + 765: rocking chair + 766: rotisserie + 767: eraser + 768: rugby ball + 769: ruler + 770: running shoe + 771: safe + 772: safety pin + 773: salt shaker + 774: sandal + 775: sarong + 776: saxophone + 777: scabbard + 778: weighing scale + 779: school bus + 780: schooner + 781: scoreboard + 782: CRT screen + 783: screw + 784: screwdriver + 785: seat belt + 786: sewing machine + 787: shield + 788: shoe store + 789: shoji + 790: shopping basket + 791: shopping cart + 792: shovel + 793: shower cap + 794: shower curtain + 795: ski + 796: ski mask + 797: sleeping bag + 798: slide rule + 799: sliding door + 800: slot machine + 801: snorkel + 802: snowmobile + 803: snowplow + 804: soap dispenser + 805: soccer ball + 806: sock + 807: solar thermal collector + 808: sombrero + 809: soup bowl + 810: space bar + 811: space heater + 812: space shuttle + 813: spatula + 814: motorboat + 815: spider web + 816: spindle + 817: sports car + 818: spotlight + 819: stage + 820: steam locomotive + 821: through arch bridge + 822: steel drum + 823: stethoscope + 824: scarf + 825: stone wall + 826: stopwatch + 827: stove + 828: strainer + 829: tram + 830: stretcher + 831: couch + 832: stupa + 833: submarine + 834: suit + 835: sundial + 836: sunglass + 837: sunglasses + 838: sunscreen + 839: suspension bridge + 840: mop + 841: sweatshirt + 842: swimsuit + 843: swing + 844: switch + 845: syringe + 846: table lamp + 847: tank + 848: tape player + 849: teapot + 850: teddy bear + 851: television + 852: tennis ball + 853: thatched roof + 854: front curtain + 855: thimble + 856: threshing machine + 857: throne + 858: tile roof + 859: toaster + 860: tobacco shop + 861: toilet seat + 862: torch + 863: totem pole + 864: tow truck + 865: toy store + 866: tractor + 867: semi-trailer truck + 868: tray + 869: trench coat + 870: tricycle + 871: trimaran + 872: tripod + 873: triumphal arch + 874: trolleybus + 875: trombone + 876: tub + 877: turnstile + 878: typewriter keyboard + 879: umbrella + 880: unicycle + 881: upright piano + 882: vacuum cleaner + 883: vase + 884: vault + 885: velvet + 886: vending machine + 887: vestment + 888: viaduct + 889: violin + 890: volleyball + 891: waffle iron + 892: wall clock + 893: wallet + 894: wardrobe + 895: military aircraft + 896: sink + 897: washing machine + 898: water bottle + 899: water jug + 900: water tower + 901: whiskey jug + 902: whistle + 903: wig + 904: window screen + 905: window shade + 906: Windsor tie + 907: wine bottle + 908: wing + 909: wok + 910: wooden spoon + 911: wool + 912: split-rail fence + 913: shipwreck + 914: yawl + 915: yurt + 916: website + 917: comic book + 918: crossword + 919: traffic sign + 920: traffic light + 921: dust jacket + 922: menu + 923: plate + 924: guacamole + 925: consomme + 926: hot pot + 927: trifle + 928: ice cream + 929: ice pop + 930: baguette + 931: bagel + 932: pretzel + 933: cheeseburger + 934: hot dog + 935: mashed potato + 936: cabbage + 937: broccoli + 938: cauliflower + 939: zucchini + 940: spaghetti squash + 941: acorn squash + 942: butternut squash + 943: cucumber + 944: artichoke + 945: bell pepper + 946: cardoon + 947: mushroom + 948: Granny Smith + 949: strawberry + 950: orange + 951: lemon + 952: fig + 953: pineapple + 954: banana + 955: jackfruit + 956: custard apple + 957: pomegranate + 958: hay + 959: carbonara + 960: chocolate syrup + 961: dough + 962: meatloaf + 963: pizza + 964: pot pie + 965: burrito + 966: red wine + 967: 
espresso + 968: cup + 969: eggnog + 970: alp + 971: bubble + 972: cliff + 973: coral reef + 974: geyser + 975: lakeshore + 976: promontory + 977: shoal + 978: seashore + 979: valley + 980: volcano + 981: baseball player + 982: bridegroom + 983: scuba diver + 984: rapeseed + 985: daisy + 986: yellow lady's slipper + 987: corn + 988: acorn + 989: rose hip + 990: horse chestnut seed + 991: coral fungus + 992: agaric + 993: gyromitra + 994: stinkhorn mushroom + 995: earth star + 996: hen-of-the-woods + 997: bolete + 998: ear + 999: toilet paper + +# Imagenet class codes to human-readable names +map: + n01440764: tench + n01443537: goldfish + n01484850: great_white_shark + n01491361: tiger_shark + n01494475: hammerhead + n01496331: electric_ray + n01498041: stingray + n01514668: cock + n01514859: hen + n01518878: ostrich + n01530575: brambling + n01531178: goldfinch + n01532829: house_finch + n01534433: junco + n01537544: indigo_bunting + n01558993: robin + n01560419: bulbul + n01580077: jay + n01582220: magpie + n01592084: chickadee + n01601694: water_ouzel + n01608432: kite + n01614925: bald_eagle + n01616318: vulture + n01622779: great_grey_owl + n01629819: European_fire_salamander + n01630670: common_newt + n01631663: eft + n01632458: spotted_salamander + n01632777: axolotl + n01641577: bullfrog + n01644373: tree_frog + n01644900: tailed_frog + n01664065: loggerhead + n01665541: leatherback_turtle + n01667114: mud_turtle + n01667778: terrapin + n01669191: box_turtle + n01675722: banded_gecko + n01677366: common_iguana + n01682714: American_chameleon + n01685808: whiptail + n01687978: agama + n01688243: frilled_lizard + n01689811: alligator_lizard + n01692333: Gila_monster + n01693334: green_lizard + n01694178: African_chameleon + n01695060: Komodo_dragon + n01697457: African_crocodile + n01698640: American_alligator + n01704323: triceratops + n01728572: thunder_snake + n01728920: ringneck_snake + n01729322: hognose_snake + n01729977: green_snake + n01734418: king_snake + n01735189: garter_snake + n01737021: water_snake + n01739381: vine_snake + n01740131: night_snake + n01742172: boa_constrictor + n01744401: rock_python + n01748264: Indian_cobra + n01749939: green_mamba + n01751748: sea_snake + n01753488: horned_viper + n01755581: diamondback + n01756291: sidewinder + n01768244: trilobite + n01770081: harvestman + n01770393: scorpion + n01773157: black_and_gold_garden_spider + n01773549: barn_spider + n01773797: garden_spider + n01774384: black_widow + n01774750: tarantula + n01775062: wolf_spider + n01776313: tick + n01784675: centipede + n01795545: black_grouse + n01796340: ptarmigan + n01797886: ruffed_grouse + n01798484: prairie_chicken + n01806143: peacock + n01806567: quail + n01807496: partridge + n01817953: African_grey + n01818515: macaw + n01819313: sulphur-crested_cockatoo + n01820546: lorikeet + n01824575: coucal + n01828970: bee_eater + n01829413: hornbill + n01833805: hummingbird + n01843065: jacamar + n01843383: toucan + n01847000: drake + n01855032: red-breasted_merganser + n01855672: goose + n01860187: black_swan + n01871265: tusker + n01872401: echidna + n01873310: platypus + n01877812: wallaby + n01882714: koala + n01883070: wombat + n01910747: jellyfish + n01914609: sea_anemone + n01917289: brain_coral + n01924916: flatworm + n01930112: nematode + n01943899: conch + n01944390: snail + n01945685: slug + n01950731: sea_slug + n01955084: chiton + n01968897: chambered_nautilus + n01978287: Dungeness_crab + n01978455: rock_crab + n01980166: fiddler_crab + n01981276: king_crab 
+ n01983481: American_lobster + n01984695: spiny_lobster + n01985128: crayfish + n01986214: hermit_crab + n01990800: isopod + n02002556: white_stork + n02002724: black_stork + n02006656: spoonbill + n02007558: flamingo + n02009229: little_blue_heron + n02009912: American_egret + n02011460: bittern + n02012849: crane_(bird) + n02013706: limpkin + n02017213: European_gallinule + n02018207: American_coot + n02018795: bustard + n02025239: ruddy_turnstone + n02027492: red-backed_sandpiper + n02028035: redshank + n02033041: dowitcher + n02037110: oystercatcher + n02051845: pelican + n02056570: king_penguin + n02058221: albatross + n02066245: grey_whale + n02071294: killer_whale + n02074367: dugong + n02077923: sea_lion + n02085620: Chihuahua + n02085782: Japanese_spaniel + n02085936: Maltese_dog + n02086079: Pekinese + n02086240: Shih-Tzu + n02086646: Blenheim_spaniel + n02086910: papillon + n02087046: toy_terrier + n02087394: Rhodesian_ridgeback + n02088094: Afghan_hound + n02088238: basset + n02088364: beagle + n02088466: bloodhound + n02088632: bluetick + n02089078: black-and-tan_coonhound + n02089867: Walker_hound + n02089973: English_foxhound + n02090379: redbone + n02090622: borzoi + n02090721: Irish_wolfhound + n02091032: Italian_greyhound + n02091134: whippet + n02091244: Ibizan_hound + n02091467: Norwegian_elkhound + n02091635: otterhound + n02091831: Saluki + n02092002: Scottish_deerhound + n02092339: Weimaraner + n02093256: Staffordshire_bullterrier + n02093428: American_Staffordshire_terrier + n02093647: Bedlington_terrier + n02093754: Border_terrier + n02093859: Kerry_blue_terrier + n02093991: Irish_terrier + n02094114: Norfolk_terrier + n02094258: Norwich_terrier + n02094433: Yorkshire_terrier + n02095314: wire-haired_fox_terrier + n02095570: Lakeland_terrier + n02095889: Sealyham_terrier + n02096051: Airedale + n02096177: cairn + n02096294: Australian_terrier + n02096437: Dandie_Dinmont + n02096585: Boston_bull + n02097047: miniature_schnauzer + n02097130: giant_schnauzer + n02097209: standard_schnauzer + n02097298: Scotch_terrier + n02097474: Tibetan_terrier + n02097658: silky_terrier + n02098105: soft-coated_wheaten_terrier + n02098286: West_Highland_white_terrier + n02098413: Lhasa + n02099267: flat-coated_retriever + n02099429: curly-coated_retriever + n02099601: golden_retriever + n02099712: Labrador_retriever + n02099849: Chesapeake_Bay_retriever + n02100236: German_short-haired_pointer + n02100583: vizsla + n02100735: English_setter + n02100877: Irish_setter + n02101006: Gordon_setter + n02101388: Brittany_spaniel + n02101556: clumber + n02102040: English_springer + n02102177: Welsh_springer_spaniel + n02102318: cocker_spaniel + n02102480: Sussex_spaniel + n02102973: Irish_water_spaniel + n02104029: kuvasz + n02104365: schipperke + n02105056: groenendael + n02105162: malinois + n02105251: briard + n02105412: kelpie + n02105505: komondor + n02105641: Old_English_sheepdog + n02105855: Shetland_sheepdog + n02106030: collie + n02106166: Border_collie + n02106382: Bouvier_des_Flandres + n02106550: Rottweiler + n02106662: German_shepherd + n02107142: Doberman + n02107312: miniature_pinscher + n02107574: Greater_Swiss_Mountain_dog + n02107683: Bernese_mountain_dog + n02107908: Appenzeller + n02108000: EntleBucher + n02108089: boxer + n02108422: bull_mastiff + n02108551: Tibetan_mastiff + n02108915: French_bulldog + n02109047: Great_Dane + n02109525: Saint_Bernard + n02109961: Eskimo_dog + n02110063: malamute + n02110185: Siberian_husky + n02110341: dalmatian + n02110627: 
affenpinscher + n02110806: basenji + n02110958: pug + n02111129: Leonberg + n02111277: Newfoundland + n02111500: Great_Pyrenees + n02111889: Samoyed + n02112018: Pomeranian + n02112137: chow + n02112350: keeshond + n02112706: Brabancon_griffon + n02113023: Pembroke + n02113186: Cardigan + n02113624: toy_poodle + n02113712: miniature_poodle + n02113799: standard_poodle + n02113978: Mexican_hairless + n02114367: timber_wolf + n02114548: white_wolf + n02114712: red_wolf + n02114855: coyote + n02115641: dingo + n02115913: dhole + n02116738: African_hunting_dog + n02117135: hyena + n02119022: red_fox + n02119789: kit_fox + n02120079: Arctic_fox + n02120505: grey_fox + n02123045: tabby + n02123159: tiger_cat + n02123394: Persian_cat + n02123597: Siamese_cat + n02124075: Egyptian_cat + n02125311: cougar + n02127052: lynx + n02128385: leopard + n02128757: snow_leopard + n02128925: jaguar + n02129165: lion + n02129604: tiger + n02130308: cheetah + n02132136: brown_bear + n02133161: American_black_bear + n02134084: ice_bear + n02134418: sloth_bear + n02137549: mongoose + n02138441: meerkat + n02165105: tiger_beetle + n02165456: ladybug + n02167151: ground_beetle + n02168699: long-horned_beetle + n02169497: leaf_beetle + n02172182: dung_beetle + n02174001: rhinoceros_beetle + n02177972: weevil + n02190166: fly + n02206856: bee + n02219486: ant + n02226429: grasshopper + n02229544: cricket + n02231487: walking_stick + n02233338: cockroach + n02236044: mantis + n02256656: cicada + n02259212: leafhopper + n02264363: lacewing + n02268443: dragonfly + n02268853: damselfly + n02276258: admiral + n02277742: ringlet + n02279972: monarch + n02280649: cabbage_butterfly + n02281406: sulphur_butterfly + n02281787: lycaenid + n02317335: starfish + n02319095: sea_urchin + n02321529: sea_cucumber + n02325366: wood_rabbit + n02326432: hare + n02328150: Angora + n02342885: hamster + n02346627: porcupine + n02356798: fox_squirrel + n02361337: marmot + n02363005: beaver + n02364673: guinea_pig + n02389026: sorrel + n02391049: zebra + n02395406: hog + n02396427: wild_boar + n02397096: warthog + n02398521: hippopotamus + n02403003: ox + n02408429: water_buffalo + n02410509: bison + n02412080: ram + n02415577: bighorn + n02417914: ibex + n02422106: hartebeest + n02422699: impala + n02423022: gazelle + n02437312: Arabian_camel + n02437616: llama + n02441942: weasel + n02442845: mink + n02443114: polecat + n02443484: black-footed_ferret + n02444819: otter + n02445715: skunk + n02447366: badger + n02454379: armadillo + n02457408: three-toed_sloth + n02480495: orangutan + n02480855: gorilla + n02481823: chimpanzee + n02483362: gibbon + n02483708: siamang + n02484975: guenon + n02486261: patas + n02486410: baboon + n02487347: macaque + n02488291: langur + n02488702: colobus + n02489166: proboscis_monkey + n02490219: marmoset + n02492035: capuchin + n02492660: howler_monkey + n02493509: titi + n02493793: spider_monkey + n02494079: squirrel_monkey + n02497673: Madagascar_cat + n02500267: indri + n02504013: Indian_elephant + n02504458: African_elephant + n02509815: lesser_panda + n02510455: giant_panda + n02514041: barracouta + n02526121: eel + n02536864: coho + n02606052: rock_beauty + n02607072: anemone_fish + n02640242: sturgeon + n02641379: gar + n02643566: lionfish + n02655020: puffer + n02666196: abacus + n02667093: abaya + n02669723: academic_gown + n02672831: accordion + n02676566: acoustic_guitar + n02687172: aircraft_carrier + n02690373: airliner + n02692877: airship + n02699494: altar + n02701002: ambulance + 
n02704792: amphibian + n02708093: analog_clock + n02727426: apiary + n02730930: apron + n02747177: ashcan + n02749479: assault_rifle + n02769748: backpack + n02776631: bakery + n02777292: balance_beam + n02782093: balloon + n02783161: ballpoint + n02786058: Band_Aid + n02787622: banjo + n02788148: bannister + n02790996: barbell + n02791124: barber_chair + n02791270: barbershop + n02793495: barn + n02794156: barometer + n02795169: barrel + n02797295: barrow + n02799071: baseball + n02802426: basketball + n02804414: bassinet + n02804610: bassoon + n02807133: bathing_cap + n02808304: bath_towel + n02808440: bathtub + n02814533: beach_wagon + n02814860: beacon + n02815834: beaker + n02817516: bearskin + n02823428: beer_bottle + n02823750: beer_glass + n02825657: bell_cote + n02834397: bib + n02835271: bicycle-built-for-two + n02837789: bikini + n02840245: binder + n02841315: binoculars + n02843684: birdhouse + n02859443: boathouse + n02860847: bobsled + n02865351: bolo_tie + n02869837: bonnet + n02870880: bookcase + n02871525: bookshop + n02877765: bottlecap + n02879718: bow + n02883205: bow_tie + n02892201: brass + n02892767: brassiere + n02894605: breakwater + n02895154: breastplate + n02906734: broom + n02909870: bucket + n02910353: buckle + n02916936: bulletproof_vest + n02917067: bullet_train + n02927161: butcher_shop + n02930766: cab + n02939185: caldron + n02948072: candle + n02950826: cannon + n02951358: canoe + n02951585: can_opener + n02963159: cardigan + n02965783: car_mirror + n02966193: carousel + n02966687: carpenter's_kit + n02971356: carton + n02974003: car_wheel + n02977058: cash_machine + n02978881: cassette + n02979186: cassette_player + n02980441: castle + n02981792: catamaran + n02988304: CD_player + n02992211: cello + n02992529: cellular_telephone + n02999410: chain + n03000134: chainlink_fence + n03000247: chain_mail + n03000684: chain_saw + n03014705: chest + n03016953: chiffonier + n03017168: chime + n03018349: china_cabinet + n03026506: Christmas_stocking + n03028079: church + n03032252: cinema + n03041632: cleaver + n03042490: cliff_dwelling + n03045698: cloak + n03047690: clog + n03062245: cocktail_shaker + n03063599: coffee_mug + n03063689: coffeepot + n03065424: coil + n03075370: combination_lock + n03085013: computer_keyboard + n03089624: confectionery + n03095699: container_ship + n03100240: convertible + n03109150: corkscrew + n03110669: cornet + n03124043: cowboy_boot + n03124170: cowboy_hat + n03125729: cradle + n03126707: crane_(machine) + n03127747: crash_helmet + n03127925: crate + n03131574: crib + n03133878: Crock_Pot + n03134739: croquet_ball + n03141823: crutch + n03146219: cuirass + n03160309: dam + n03179701: desk + n03180011: desktop_computer + n03187595: dial_telephone + n03188531: diaper + n03196217: digital_clock + n03197337: digital_watch + n03201208: dining_table + n03207743: dishrag + n03207941: dishwasher + n03208938: disk_brake + n03216828: dock + n03218198: dogsled + n03220513: dome + n03223299: doormat + n03240683: drilling_platform + n03249569: drum + n03250847: drumstick + n03255030: dumbbell + n03259280: Dutch_oven + n03271574: electric_fan + n03272010: electric_guitar + n03272562: electric_locomotive + n03290653: entertainment_center + n03291819: envelope + n03297495: espresso_maker + n03314780: face_powder + n03325584: feather_boa + n03337140: file + n03344393: fireboat + n03345487: fire_engine + n03347037: fire_screen + n03355925: flagpole + n03372029: flute + n03376595: folding_chair + n03379051: football_helmet + n03384352: 
forklift + n03388043: fountain + n03388183: fountain_pen + n03388549: four-poster + n03393912: freight_car + n03394916: French_horn + n03400231: frying_pan + n03404251: fur_coat + n03417042: garbage_truck + n03424325: gasmask + n03425413: gas_pump + n03443371: goblet + n03444034: go-kart + n03445777: golf_ball + n03445924: golfcart + n03447447: gondola + n03447721: gong + n03450230: gown + n03452741: grand_piano + n03457902: greenhouse + n03459775: grille + n03461385: grocery_store + n03467068: guillotine + n03476684: hair_slide + n03476991: hair_spray + n03478589: half_track + n03481172: hammer + n03482405: hamper + n03483316: hand_blower + n03485407: hand-held_computer + n03485794: handkerchief + n03492542: hard_disc + n03494278: harmonica + n03495258: harp + n03496892: harvester + n03498962: hatchet + n03527444: holster + n03529860: home_theater + n03530642: honeycomb + n03532672: hook + n03534580: hoopskirt + n03535780: horizontal_bar + n03538406: horse_cart + n03544143: hourglass + n03584254: iPod + n03584829: iron + n03590841: jack-o'-lantern + n03594734: jean + n03594945: jeep + n03595614: jersey + n03598930: jigsaw_puzzle + n03599486: jinrikisha + n03602883: joystick + n03617480: kimono + n03623198: knee_pad + n03627232: knot + n03630383: lab_coat + n03633091: ladle + n03637318: lampshade + n03642806: laptop + n03649909: lawn_mower + n03657121: lens_cap + n03658185: letter_opener + n03661043: library + n03662601: lifeboat + n03666591: lighter + n03670208: limousine + n03673027: liner + n03676483: lipstick + n03680355: Loafer + n03690938: lotion + n03691459: loudspeaker + n03692522: loupe + n03697007: lumbermill + n03706229: magnetic_compass + n03709823: mailbag + n03710193: mailbox + n03710637: maillot_(tights) + n03710721: maillot_(tank_suit) + n03717622: manhole_cover + n03720891: maraca + n03721384: marimba + n03724870: mask + n03729826: matchstick + n03733131: maypole + n03733281: maze + n03733805: measuring_cup + n03742115: medicine_chest + n03743016: megalith + n03759954: microphone + n03761084: microwave + n03763968: military_uniform + n03764736: milk_can + n03769881: minibus + n03770439: miniskirt + n03770679: minivan + n03773504: missile + n03775071: mitten + n03775546: mixing_bowl + n03776460: mobile_home + n03777568: Model_T + n03777754: modem + n03781244: monastery + n03782006: monitor + n03785016: moped + n03786901: mortar + n03787032: mortarboard + n03788195: mosque + n03788365: mosquito_net + n03791053: motor_scooter + n03792782: mountain_bike + n03792972: mountain_tent + n03793489: mouse + n03794056: mousetrap + n03796401: moving_van + n03803284: muzzle + n03804744: nail + n03814639: neck_brace + n03814906: necklace + n03825788: nipple + n03832673: notebook + n03837869: obelisk + n03838899: oboe + n03840681: ocarina + n03841143: odometer + n03843555: oil_filter + n03854065: organ + n03857828: oscilloscope + n03866082: overskirt + n03868242: oxcart + n03868863: oxygen_mask + n03871628: packet + n03873416: paddle + n03874293: paddlewheel + n03874599: padlock + n03876231: paintbrush + n03877472: pajama + n03877845: palace + n03884397: panpipe + n03887697: paper_towel + n03888257: parachute + n03888605: parallel_bars + n03891251: park_bench + n03891332: parking_meter + n03895866: passenger_car + n03899768: patio + n03902125: pay-phone + n03903868: pedestal + n03908618: pencil_box + n03908714: pencil_sharpener + n03916031: perfume + n03920288: Petri_dish + n03924679: photocopier + n03929660: pick + n03929855: pickelhaube + n03930313: picket_fence + n03930630: pickup + 
n03933933: pier + n03935335: piggy_bank + n03937543: pill_bottle + n03938244: pillow + n03942813: ping-pong_ball + n03944341: pinwheel + n03947888: pirate + n03950228: pitcher + n03954731: plane + n03956157: planetarium + n03958227: plastic_bag + n03961711: plate_rack + n03967562: plow + n03970156: plunger + n03976467: Polaroid_camera + n03976657: pole + n03977966: police_van + n03980874: poncho + n03982430: pool_table + n03983396: pop_bottle + n03991062: pot + n03992509: potter's_wheel + n03995372: power_drill + n03998194: prayer_rug + n04004767: printer + n04005630: prison + n04008634: projectile + n04009552: projector + n04019541: puck + n04023962: punching_bag + n04026417: purse + n04033901: quill + n04033995: quilt + n04037443: racer + n04039381: racket + n04040759: radiator + n04041544: radio + n04044716: radio_telescope + n04049303: rain_barrel + n04065272: recreational_vehicle + n04067472: reel + n04069434: reflex_camera + n04070727: refrigerator + n04074963: remote_control + n04081281: restaurant + n04086273: revolver + n04090263: rifle + n04099969: rocking_chair + n04111531: rotisserie + n04116512: rubber_eraser + n04118538: rugby_ball + n04118776: rule + n04120489: running_shoe + n04125021: safe + n04127249: safety_pin + n04131690: saltshaker + n04133789: sandal + n04136333: sarong + n04141076: sax + n04141327: scabbard + n04141975: scale + n04146614: school_bus + n04147183: schooner + n04149813: scoreboard + n04152593: screen + n04153751: screw + n04154565: screwdriver + n04162706: seat_belt + n04179913: sewing_machine + n04192698: shield + n04200800: shoe_shop + n04201297: shoji + n04204238: shopping_basket + n04204347: shopping_cart + n04208210: shovel + n04209133: shower_cap + n04209239: shower_curtain + n04228054: ski + n04229816: ski_mask + n04235860: sleeping_bag + n04238763: slide_rule + n04239074: sliding_door + n04243546: slot + n04251144: snorkel + n04252077: snowmobile + n04252225: snowplow + n04254120: soap_dispenser + n04254680: soccer_ball + n04254777: sock + n04258138: solar_dish + n04259630: sombrero + n04263257: soup_bowl + n04264628: space_bar + n04265275: space_heater + n04266014: space_shuttle + n04270147: spatula + n04273569: speedboat + n04275548: spider_web + n04277352: spindle + n04285008: sports_car + n04286575: spotlight + n04296562: stage + n04310018: steam_locomotive + n04311004: steel_arch_bridge + n04311174: steel_drum + n04317175: stethoscope + n04325704: stole + n04326547: stone_wall + n04328186: stopwatch + n04330267: stove + n04332243: strainer + n04335435: streetcar + n04336792: stretcher + n04344873: studio_couch + n04346328: stupa + n04347754: submarine + n04350905: suit + n04355338: sundial + n04355933: sunglass + n04356056: sunglasses + n04357314: sunscreen + n04366367: suspension_bridge + n04367480: swab + n04370456: sweatshirt + n04371430: swimming_trunks + n04371774: swing + n04372370: switch + n04376876: syringe + n04380533: table_lamp + n04389033: tank + n04392985: tape_player + n04398044: teapot + n04399382: teddy + n04404412: television + n04409515: tennis_ball + n04417672: thatch + n04418357: theater_curtain + n04423845: thimble + n04428191: thresher + n04429376: throne + n04435653: tile_roof + n04442312: toaster + n04443257: tobacco_shop + n04447861: toilet_seat + n04456115: torch + n04458633: totem_pole + n04461696: tow_truck + n04462240: toyshop + n04465501: tractor + n04467665: trailer_truck + n04476259: tray + n04479046: trench_coat + n04482393: tricycle + n04483307: trimaran + n04485082: tripod + n04486054: triumphal_arch + 
n04487081: trolleybus + n04487394: trombone + n04493381: tub + n04501370: turnstile + n04505470: typewriter_keyboard + n04507155: umbrella + n04509417: unicycle + n04515003: upright + n04517823: vacuum + n04522168: vase + n04523525: vault + n04525038: velvet + n04525305: vending_machine + n04532106: vestment + n04532670: viaduct + n04536866: violin + n04540053: volleyball + n04542943: waffle_iron + n04548280: wall_clock + n04548362: wallet + n04550184: wardrobe + n04552348: warplane + n04553703: washbasin + n04554684: washer + n04557648: water_bottle + n04560804: water_jug + n04562935: water_tower + n04579145: whiskey_jug + n04579432: whistle + n04584207: wig + n04589890: window_screen + n04590129: window_shade + n04591157: Windsor_tie + n04591713: wine_bottle + n04592741: wing + n04596742: wok + n04597913: wooden_spoon + n04599235: wool + n04604644: worm_fence + n04606251: wreck + n04612504: yawl + n04613696: yurt + n06359193: web_site + n06596364: comic_book + n06785654: crossword_puzzle + n06794110: street_sign + n06874185: traffic_light + n07248320: book_jacket + n07565083: menu + n07579787: plate + n07583066: guacamole + n07584110: consomme + n07590611: hot_pot + n07613480: trifle + n07614500: ice_cream + n07615774: ice_lolly + n07684084: French_loaf + n07693725: bagel + n07695742: pretzel + n07697313: cheeseburger + n07697537: hotdog + n07711569: mashed_potato + n07714571: head_cabbage + n07714990: broccoli + n07715103: cauliflower + n07716358: zucchini + n07716906: spaghetti_squash + n07717410: acorn_squash + n07717556: butternut_squash + n07718472: cucumber + n07718747: artichoke + n07720875: bell_pepper + n07730033: cardoon + n07734744: mushroom + n07742313: Granny_Smith + n07745940: strawberry + n07747607: orange + n07749582: lemon + n07753113: fig + n07753275: pineapple + n07753592: banana + n07754684: jackfruit + n07760859: custard_apple + n07768694: pomegranate + n07802026: hay + n07831146: carbonara + n07836838: chocolate_sauce + n07860988: dough + n07871810: meat_loaf + n07873807: pizza + n07875152: potpie + n07880968: burrito + n07892512: red_wine + n07920052: espresso + n07930864: cup + n07932039: eggnog + n09193705: alp + n09229709: bubble + n09246464: cliff + n09256479: coral_reef + n09288635: geyser + n09332890: lakeside + n09399592: promontory + n09421951: sandbar + n09428293: seashore + n09468604: valley + n09472597: volcano + n09835506: ballplayer + n10148035: groom + n10565667: scuba_diver + n11879895: rapeseed + n11939491: daisy + n12057211: yellow_lady's_slipper + n12144580: corn + n12267677: acorn + n12620546: hip + n12768682: buckeye + n12985857: coral_fungus + n12998815: agaric + n13037406: gyromitra + n13040303: stinkhorn + n13044778: earthstar + n13052670: hen-of-the-woods + n13054560: bolete + n13133613: ear + n15075141: toilet_tissue + + +# Download script/URL (optional) +download: yolo/data/scripts/get_imagenet.sh diff --git a/ultralytics/datasets/Objects365.yaml b/ultralytics/datasets/Objects365.yaml new file mode 100644 index 0000000..8065432 --- /dev/null +++ b/ultralytics/datasets/Objects365.yaml @@ -0,0 +1,443 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +# Objects365 dataset https://www.objects365.org/ by Megvii +# Example usage: yolo train data=Objects365.yaml +# parent +# ├── ultralytics +# └── datasets +# └── Objects365 ← downloads here (712 GB = 367G data + 345G zips) + + +# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] 
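The ImageNet yaml above stores two parallel lookups: `names` maps the integer class indices 0–999 to human-readable labels, and `map` maps the original WordNet synset IDs (wnids) to their underscore-style names. A minimal sketch of reading both back with PyYAML (the local filename `ImageNet.yaml` is an assumption, not part of the commit):

```python
# Minimal sketch: read the ImageNet dataset yaml above and look up classes two ways.
# Assumes the file has been saved locally as 'ImageNet.yaml'; requires PyYAML.
import yaml

with open("ImageNet.yaml", encoding="utf-8") as f:
    data = yaml.safe_load(f)

names = data["names"]         # {0: 'tench', ..., 207: 'Golden Retriever', ...}
wnid_map = data["map"]        # {'n01440764': 'tench', ..., 'n02099601': 'golden_retriever', ...}

print(names[207])             # 'Golden Retriever'  (human-readable label by class index)
print(wnid_map["n02099601"])  # 'golden_retriever'  (underscore-style name by synset id)
```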
+path: ../datasets/Objects365 # dataset root dir +train: images/train # train images (relative to 'path') 1742289 images +val: images/val # val images (relative to 'path') 80000 images +test: # test images (optional) + +# Classes +names: + 0: Person + 1: Sneakers + 2: Chair + 3: Other Shoes + 4: Hat + 5: Car + 6: Lamp + 7: Glasses + 8: Bottle + 9: Desk + 10: Cup + 11: Street Lights + 12: Cabinet/shelf + 13: Handbag/Satchel + 14: Bracelet + 15: Plate + 16: Picture/Frame + 17: Helmet + 18: Book + 19: Gloves + 20: Storage box + 21: Boat + 22: Leather Shoes + 23: Flower + 24: Bench + 25: Potted Plant + 26: Bowl/Basin + 27: Flag + 28: Pillow + 29: Boots + 30: Vase + 31: Microphone + 32: Necklace + 33: Ring + 34: SUV + 35: Wine Glass + 36: Belt + 37: Monitor/TV + 38: Backpack + 39: Umbrella + 40: Traffic Light + 41: Speaker + 42: Watch + 43: Tie + 44: Trash bin Can + 45: Slippers + 46: Bicycle + 47: Stool + 48: Barrel/bucket + 49: Van + 50: Couch + 51: Sandals + 52: Basket + 53: Drum + 54: Pen/Pencil + 55: Bus + 56: Wild Bird + 57: High Heels + 58: Motorcycle + 59: Guitar + 60: Carpet + 61: Cell Phone + 62: Bread + 63: Camera + 64: Canned + 65: Truck + 66: Traffic cone + 67: Cymbal + 68: Lifesaver + 69: Towel + 70: Stuffed Toy + 71: Candle + 72: Sailboat + 73: Laptop + 74: Awning + 75: Bed + 76: Faucet + 77: Tent + 78: Horse + 79: Mirror + 80: Power outlet + 81: Sink + 82: Apple + 83: Air Conditioner + 84: Knife + 85: Hockey Stick + 86: Paddle + 87: Pickup Truck + 88: Fork + 89: Traffic Sign + 90: Balloon + 91: Tripod + 92: Dog + 93: Spoon + 94: Clock + 95: Pot + 96: Cow + 97: Cake + 98: Dinning Table + 99: Sheep + 100: Hanger + 101: Blackboard/Whiteboard + 102: Napkin + 103: Other Fish + 104: Orange/Tangerine + 105: Toiletry + 106: Keyboard + 107: Tomato + 108: Lantern + 109: Machinery Vehicle + 110: Fan + 111: Green Vegetables + 112: Banana + 113: Baseball Glove + 114: Airplane + 115: Mouse + 116: Train + 117: Pumpkin + 118: Soccer + 119: Skiboard + 120: Luggage + 121: Nightstand + 122: Tea pot + 123: Telephone + 124: Trolley + 125: Head Phone + 126: Sports Car + 127: Stop Sign + 128: Dessert + 129: Scooter + 130: Stroller + 131: Crane + 132: Remote + 133: Refrigerator + 134: Oven + 135: Lemon + 136: Duck + 137: Baseball Bat + 138: Surveillance Camera + 139: Cat + 140: Jug + 141: Broccoli + 142: Piano + 143: Pizza + 144: Elephant + 145: Skateboard + 146: Surfboard + 147: Gun + 148: Skating and Skiing shoes + 149: Gas stove + 150: Donut + 151: Bow Tie + 152: Carrot + 153: Toilet + 154: Kite + 155: Strawberry + 156: Other Balls + 157: Shovel + 158: Pepper + 159: Computer Box + 160: Toilet Paper + 161: Cleaning Products + 162: Chopsticks + 163: Microwave + 164: Pigeon + 165: Baseball + 166: Cutting/chopping Board + 167: Coffee Table + 168: Side Table + 169: Scissors + 170: Marker + 171: Pie + 172: Ladder + 173: Snowboard + 174: Cookies + 175: Radiator + 176: Fire Hydrant + 177: Basketball + 178: Zebra + 179: Grape + 180: Giraffe + 181: Potato + 182: Sausage + 183: Tricycle + 184: Violin + 185: Egg + 186: Fire Extinguisher + 187: Candy + 188: Fire Truck + 189: Billiards + 190: Converter + 191: Bathtub + 192: Wheelchair + 193: Golf Club + 194: Briefcase + 195: Cucumber + 196: Cigar/Cigarette + 197: Paint Brush + 198: Pear + 199: Heavy Truck + 200: Hamburger + 201: Extractor + 202: Extension Cord + 203: Tong + 204: Tennis Racket + 205: Folder + 206: American Football + 207: earphone + 208: Mask + 209: Kettle + 210: Tennis + 211: Ship + 212: Swing + 213: Coffee Machine + 214: Slide + 215: Carriage + 
216: Onion + 217: Green beans + 218: Projector + 219: Frisbee + 220: Washing Machine/Drying Machine + 221: Chicken + 222: Printer + 223: Watermelon + 224: Saxophone + 225: Tissue + 226: Toothbrush + 227: Ice cream + 228: Hot-air balloon + 229: Cello + 230: French Fries + 231: Scale + 232: Trophy + 233: Cabbage + 234: Hot dog + 235: Blender + 236: Peach + 237: Rice + 238: Wallet/Purse + 239: Volleyball + 240: Deer + 241: Goose + 242: Tape + 243: Tablet + 244: Cosmetics + 245: Trumpet + 246: Pineapple + 247: Golf Ball + 248: Ambulance + 249: Parking meter + 250: Mango + 251: Key + 252: Hurdle + 253: Fishing Rod + 254: Medal + 255: Flute + 256: Brush + 257: Penguin + 258: Megaphone + 259: Corn + 260: Lettuce + 261: Garlic + 262: Swan + 263: Helicopter + 264: Green Onion + 265: Sandwich + 266: Nuts + 267: Speed Limit Sign + 268: Induction Cooker + 269: Broom + 270: Trombone + 271: Plum + 272: Rickshaw + 273: Goldfish + 274: Kiwi fruit + 275: Router/modem + 276: Poker Card + 277: Toaster + 278: Shrimp + 279: Sushi + 280: Cheese + 281: Notepaper + 282: Cherry + 283: Pliers + 284: CD + 285: Pasta + 286: Hammer + 287: Cue + 288: Avocado + 289: Hamimelon + 290: Flask + 291: Mushroom + 292: Screwdriver + 293: Soap + 294: Recorder + 295: Bear + 296: Eggplant + 297: Board Eraser + 298: Coconut + 299: Tape Measure/Ruler + 300: Pig + 301: Showerhead + 302: Globe + 303: Chips + 304: Steak + 305: Crosswalk Sign + 306: Stapler + 307: Camel + 308: Formula 1 + 309: Pomegranate + 310: Dishwasher + 311: Crab + 312: Hoverboard + 313: Meat ball + 314: Rice Cooker + 315: Tuba + 316: Calculator + 317: Papaya + 318: Antelope + 319: Parrot + 320: Seal + 321: Butterfly + 322: Dumbbell + 323: Donkey + 324: Lion + 325: Urinal + 326: Dolphin + 327: Electric Drill + 328: Hair Dryer + 329: Egg tart + 330: Jellyfish + 331: Treadmill + 332: Lighter + 333: Grapefruit + 334: Game board + 335: Mop + 336: Radish + 337: Baozi + 338: Target + 339: French + 340: Spring Rolls + 341: Monkey + 342: Rabbit + 343: Pencil Case + 344: Yak + 345: Red Cabbage + 346: Binoculars + 347: Asparagus + 348: Barbell + 349: Scallop + 350: Noddles + 351: Comb + 352: Dumpling + 353: Oyster + 354: Table Tennis paddle + 355: Cosmetics Brush/Eyeliner Pencil + 356: Chainsaw + 357: Eraser + 358: Lobster + 359: Durian + 360: Okra + 361: Lipstick + 362: Cosmetics Mirror + 363: Curling + 364: Table Tennis + + +# Download script/URL (optional) --------------------------------------------------------------------------------------- +download: | + from tqdm import tqdm + + from ultralytics.yolo.utils.checks import check_requirements + from ultralytics.yolo.utils.downloads import download + from ultralytics.yolo.utils.ops import xyxy2xywhn + + import numpy as np + from pathlib import Path + + check_requirements(('pycocotools>=2.0',)) + from pycocotools.coco import COCO + + # Make Directories + dir = Path(yaml['path']) # dataset root dir + for p in 'images', 'labels': + (dir / p).mkdir(parents=True, exist_ok=True) + for q in 'train', 'val': + (dir / p / q).mkdir(parents=True, exist_ok=True) + + # Train, Val Splits + for split, patches in [('train', 50 + 1), ('val', 43 + 1)]: + print(f"Processing {split} in {patches} patches ...") + images, labels = dir / 'images' / split, dir / 'labels' / split + + # Download + url = f"https://dorc.ks3-cn-beijing.ksyun.com/data-set/2020Objects365%E6%95%B0%E6%8D%AE%E9%9B%86/{split}/" + if split == 'train': + download([f'{url}zhiyuan_objv2_{split}.tar.gz'], dir=dir) # annotations json + download([f'{url}patch{i}.tar.gz' for i in 
range(patches)], dir=images, curl=True, threads=8) + elif split == 'val': + download([f'{url}zhiyuan_objv2_{split}.json'], dir=dir) # annotations json + download([f'{url}images/v1/patch{i}.tar.gz' for i in range(15 + 1)], dir=images, curl=True, threads=8) + download([f'{url}images/v2/patch{i}.tar.gz' for i in range(16, patches)], dir=images, curl=True, threads=8) + + # Move + for f in tqdm(images.rglob('*.jpg'), desc=f'Moving {split} images'): + f.rename(images / f.name) # move to /images/{split} + + # Labels + coco = COCO(dir / f'zhiyuan_objv2_{split}.json') + names = [x["name"] for x in coco.loadCats(coco.getCatIds())] + for cid, cat in enumerate(names): + catIds = coco.getCatIds(catNms=[cat]) + imgIds = coco.getImgIds(catIds=catIds) + for im in tqdm(coco.loadImgs(imgIds), desc=f'Class {cid + 1}/{len(names)} {cat}'): + width, height = im["width"], im["height"] + path = Path(im["file_name"]) # image filename + try: + with open(labels / path.with_suffix('.txt').name, 'a') as file: + annIds = coco.getAnnIds(imgIds=im["id"], catIds=catIds, iscrowd=None) + for a in coco.loadAnns(annIds): + x, y, w, h = a['bbox'] # bounding box in xywh (xy top-left corner) + xyxy = np.array([x, y, x + w, y + h])[None] # pixels(1,4) + x, y, w, h = xyxy2xywhn(xyxy, w=width, h=height, clip=True)[0] # normalized and clipped + file.write(f"{cid} {x:.5f} {y:.5f} {w:.5f} {h:.5f}\n") + except Exception as e: + print(e) diff --git a/ultralytics/datasets/SKU-110K.yaml b/ultralytics/datasets/SKU-110K.yaml new file mode 100644 index 0000000..ced2e08 --- /dev/null +++ b/ultralytics/datasets/SKU-110K.yaml @@ -0,0 +1,58 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +# SKU-110K retail items dataset https://github.com/eg4000/SKU110K_CVPR19 by Trax Retail +# Example usage: yolo train data=SKU-110K.yaml +# parent +# ├── ultralytics +# └── datasets +# └── SKU-110K ← downloads here (13.6 GB) + + +# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] 
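In the Objects365 download block above, each COCO-style annotation box `[x, y, w, h]` (top-left corner plus size, in pixels) is converted into the normalized, centre-based `x y w h` form that YOLO label files use, via `xyxy2xywhn`. A standalone sketch of that conversion, using plain NumPy as an illustrative stand-in (the block itself relies on `ultralytics.yolo.utils.ops.xyxy2xywhn`):

```python
# Illustration only: COCO bbox (top-left x, y, width, height in pixels) -> one YOLO label line
# "cls cx cy w h" with all coordinates normalized to [0, 1] and clipped, as written above.
import numpy as np

def coco_box_to_yolo(box, img_w, img_h):
    x, y, w, h = box
    cx = (x + w / 2) / img_w                      # normalized box-centre x
    cy = (y + h / 2) / img_h                      # normalized box-centre y
    return np.clip([cx, cy, w / img_w, h / img_h], 0.0, 1.0)

cx, cy, w, h = coco_box_to_yolo([50, 100, 200, 80], img_w=640, img_h=480)
print(f"0 {cx:.5f} {cy:.5f} {w:.5f} {h:.5f}")     # class id 0 followed by normalized xywh
```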
+path: ../datasets/SKU-110K # dataset root dir +train: train.txt # train images (relative to 'path') 8219 images +val: val.txt # val images (relative to 'path') 588 images +test: test.txt # test images (optional) 2936 images + +# Classes +names: + 0: object + + +# Download script/URL (optional) --------------------------------------------------------------------------------------- +download: | + import shutil + from pathlib import Path + + import numpy as np + import pandas as pd + from tqdm import tqdm + + from ultralytics.yolo.utils.downloads import download + from ultralytics.yolo.utils.ops import xyxy2xywh + + # Download + dir = Path(yaml['path']) # dataset root dir + parent = Path(dir.parent) # download dir + urls = ['http://trax-geometry.s3.amazonaws.com/cvpr_challenge/SKU110K_fixed.tar.gz'] + download(urls, dir=parent) + + # Rename directories + if dir.exists(): + shutil.rmtree(dir) + (parent / 'SKU110K_fixed').rename(dir) # rename dir + (dir / 'labels').mkdir(parents=True, exist_ok=True) # create labels dir + + # Convert labels + names = 'image', 'x1', 'y1', 'x2', 'y2', 'class', 'image_width', 'image_height' # column names + for d in 'annotations_train.csv', 'annotations_val.csv', 'annotations_test.csv': + x = pd.read_csv(dir / 'annotations' / d, names=names).values # annotations + images, unique_images = x[:, 0], np.unique(x[:, 0]) + with open((dir / d).with_suffix('.txt').__str__().replace('annotations_', ''), 'w') as f: + f.writelines(f'./images/{s}\n' for s in unique_images) + for im in tqdm(unique_images, desc=f'Converting {dir / d}'): + cls = 0 # single-class dataset + with open((dir / 'labels' / im).with_suffix('.txt'), 'a') as f: + for r in x[images == im]: + w, h = r[6], r[7] # image width, height + xywh = xyxy2xywh(np.array([[r[1] / w, r[2] / h, r[3] / w, r[4] / h]]))[0] # instance + f.write(f"{cls} {xywh[0]:.5f} {xywh[1]:.5f} {xywh[2]:.5f} {xywh[3]:.5f}\n") # write label diff --git a/ultralytics/datasets/VOC.yaml b/ultralytics/datasets/VOC.yaml new file mode 100644 index 0000000..98b935d --- /dev/null +++ b/ultralytics/datasets/VOC.yaml @@ -0,0 +1,100 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +# PASCAL VOC dataset http://host.robots.ox.ac.uk/pascal/VOC by University of Oxford +# Example usage: yolo train data=VOC.yaml +# parent +# ├── ultralytics +# └── datasets +# └── VOC ← downloads here (2.8 GB) + + +# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] +path: /ssd2t/derron/datasets/VOC +train: # train images (relative to 'path') 16551 images + - images/train2012 + - images/train2007 + - images/val2012 + - images/val2007 +val: # val images (relative to 'path') 4952 images + - images/test2007 +test: # test images (optional) + - images/test2007 + +# Classes +names: + 0: aeroplane + 1: bicycle + 2: bird + 3: boat + 4: bottle + 5: bus + 6: car + 7: cat + 8: chair + 9: cow + 10: diningtable + 11: dog + 12: horse + 13: motorbike + 14: person + 15: pottedplant + 16: sheep + 17: sofa + 18: train + 19: tvmonitor + + +# Download script/URL (optional) --------------------------------------------------------------------------------------- +download: | + import xml.etree.ElementTree as ET + + from tqdm import tqdm + from ultralytics.yolo.utils.downloads import download + from pathlib import Path + + def convert_label(path, lb_path, year, image_id): + def convert_box(size, box): + dw, dh = 1. / size[0], 1. 
/ size[1] + x, y, w, h = (box[0] + box[1]) / 2.0 - 1, (box[2] + box[3]) / 2.0 - 1, box[1] - box[0], box[3] - box[2] + return x * dw, y * dh, w * dw, h * dh + + in_file = open(path / f'VOC{year}/Annotations/{image_id}.xml') + out_file = open(lb_path, 'w') + tree = ET.parse(in_file) + root = tree.getroot() + size = root.find('size') + w = int(size.find('width').text) + h = int(size.find('height').text) + + names = list(yaml['names'].values()) # names list + for obj in root.iter('object'): + cls = obj.find('name').text + if cls in names and int(obj.find('difficult').text) != 1: + xmlbox = obj.find('bndbox') + bb = convert_box((w, h), [float(xmlbox.find(x).text) for x in ('xmin', 'xmax', 'ymin', 'ymax')]) + cls_id = names.index(cls) # class id + out_file.write(" ".join([str(a) for a in (cls_id, *bb)]) + '\n') + + + # Download + dir = Path(yaml['path']) # dataset root dir + url = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/' + urls = [f'{url}VOCtrainval_06-Nov-2007.zip', # 446MB, 5012 images + f'{url}VOCtest_06-Nov-2007.zip', # 438MB, 4953 images + f'{url}VOCtrainval_11-May-2012.zip'] # 1.95GB, 17126 images + download(urls, dir=dir / 'images', curl=True, threads=3) + + # Convert + path = dir / 'images/VOCdevkit' + for year, image_set in ('2012', 'train'), ('2012', 'val'), ('2007', 'train'), ('2007', 'val'), ('2007', 'test'): + imgs_path = dir / 'images' / f'{image_set}{year}' + lbs_path = dir / 'labels' / f'{image_set}{year}' + imgs_path.mkdir(exist_ok=True, parents=True) + lbs_path.mkdir(exist_ok=True, parents=True) + + with open(path / f'VOC{year}/ImageSets/Main/{image_set}.txt') as f: + image_ids = f.read().strip().split() + for id in tqdm(image_ids, desc=f'{image_set}{year}'): + f = path / f'VOC{year}/JPEGImages/{id}.jpg' # old img path + lb_path = (lbs_path / f.name).with_suffix('.txt') # new label path + f.rename(imgs_path / f.name) # move image + convert_label(path, lb_path, year, id) # convert labels to YOLO format diff --git a/ultralytics/datasets/VisDrone.yaml b/ultralytics/datasets/VisDrone.yaml new file mode 100644 index 0000000..a37782f --- /dev/null +++ b/ultralytics/datasets/VisDrone.yaml @@ -0,0 +1,73 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +# VisDrone2019-DET dataset https://github.com/VisDrone/VisDrone-Dataset by Tianjin University +# Example usage: yolo train data=VisDrone.yaml +# parent +# ├── ultralytics +# └── datasets +# └── VisDrone ← downloads here (2.3 GB) + + +# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] +path: ../datasets/VisDrone # dataset root dir +train: VisDrone2019-DET-train/images # train images (relative to 'path') 6471 images +val: VisDrone2019-DET-val/images # val images (relative to 'path') 548 images +test: VisDrone2019-DET-test-dev/images # test images (optional) 1610 images + +# Classes +names: + 0: pedestrian + 1: people + 2: bicycle + 3: car + 4: van + 5: truck + 6: tricycle + 7: awning-tricycle + 8: bus + 9: motor + + +# Download script/URL (optional) --------------------------------------------------------------------------------------- +download: | + import os + from pathlib import Path + + from ultralytics.yolo.utils.downloads import download + + def visdrone2yolo(dir): + from PIL import Image + from tqdm import tqdm + + def convert_box(size, box): + # Convert VisDrone box to YOLO xywh box + dw = 1. / size[0] + dh = 1. 
/ size[1] + return (box[0] + box[2] / 2) * dw, (box[1] + box[3] / 2) * dh, box[2] * dw, box[3] * dh + + (dir / 'labels').mkdir(parents=True, exist_ok=True) # make labels directory + pbar = tqdm((dir / 'annotations').glob('*.txt'), desc=f'Converting {dir}') + for f in pbar: + img_size = Image.open((dir / 'images' / f.name).with_suffix('.jpg')).size + lines = [] + with open(f, 'r') as file: # read annotation.txt + for row in [x.split(',') for x in file.read().strip().splitlines()]: + if row[4] == '0': # VisDrone 'ignored regions' class 0 + continue + cls = int(row[5]) - 1 + box = convert_box(img_size, tuple(map(int, row[:4]))) + lines.append(f"{cls} {' '.join(f'{x:.6f}' for x in box)}\n") + with open(str(f).replace(f'{os.sep}annotations{os.sep}', f'{os.sep}labels{os.sep}'), 'w') as fl: + fl.writelines(lines) # write label.txt + + + # Download + dir = Path(yaml['path']) # dataset root dir + urls = ['https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-train.zip', + 'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-val.zip', + 'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-test-dev.zip', + 'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-test-challenge.zip'] + download(urls, dir=dir, curl=True, threads=4) + + # Convert + for d in 'VisDrone2019-DET-train', 'VisDrone2019-DET-val', 'VisDrone2019-DET-test-dev': + visdrone2yolo(dir / d) # convert VisDrone annotations to YOLO labels diff --git a/ultralytics/datasets/body-pose.yaml b/ultralytics/datasets/body-pose.yaml new file mode 100644 index 0000000..e418bb7 --- /dev/null +++ b/ultralytics/datasets/body-pose.yaml @@ -0,0 +1,12 @@ + +# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] +path: /ssd2t/derron/datasets/body # dataset root dir +train: train # train images (relative to 'path') +val: test # val images (relative to 'path') + +# Classes +names: + 0: front + 1: side + 2: back + 3: other diff --git a/ultralytics/datasets/celeba-human.yaml b/ultralytics/datasets/celeba-human.yaml new file mode 100644 index 0000000..a44f754 --- /dev/null +++ b/ultralytics/datasets/celeba-human.yaml @@ -0,0 +1,23 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license +# CelebA-human dataset (person detection with 5 facial keypoints) +# Example usage: yolo train data=celeba-human.yaml +# parent +# ├── ultralytics +# └── datasets +# └── CelebA-human ← dataset location + + +# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
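The VOC and VisDrone download blocks above share the same final step: a small `convert_box` helper rescales each source-format box into the normalized, centre-based xywh that goes into the label files. For reference, here is the VisDrone variant (left, top, width, height in pixels) pulled out as a self-contained snippet:

```python
# VisDrone-style convert_box, as used in the block above:
# (left, top, width, height) in pixels -> normalized (cx, cy, w, h) for a YOLO label line.
def convert_box(size, box):
    dw, dh = 1.0 / size[0], 1.0 / size[1]          # size = (image_width, image_height)
    cx = (box[0] + box[2] / 2) * dw                # left edge + half width, normalized
    cy = (box[1] + box[3] / 2) * dh                # top edge + half height, normalized
    return cx, cy, box[2] * dw, box[3] * dh

# One annotation row with left, top, width, height = 100, 40, 60, 120 in a 1360x765 image:
print(" ".join(f"{x:.6f}" for x in convert_box((1360, 765), (100, 40, 60, 120))))
```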
+path: /ssd2t/derron/datasets/ # dataset root dir +train: CelebA-human/train # train images (relative to 'path') 4 images +val: CelebA-human/val # val images (relative to 'path') 4 images +test: # test images (optional) + +# Keypoints +kpt_shape: [5, 3] # number of keypoints, number of dims (2 for x,y or 3 for x,y,visible) +flip_idx: [1, 0, 2, 4, 3] + +# Classes +names: + 0: person + diff --git a/ultralytics/datasets/coco-pose.yaml b/ultralytics/datasets/coco-pose.yaml new file mode 100644 index 0000000..f2e5f1f --- /dev/null +++ b/ultralytics/datasets/coco-pose.yaml @@ -0,0 +1,38 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +# COCO 2017 dataset http://cocodataset.org by Microsoft +# Example usage: yolo train data=coco-pose.yaml +# parent +# ├── ultralytics +# └── datasets +# └── coco-pose ← downloads here (20.1 GB) + + +# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] +path: ../datasets/coco-pose # dataset root dir +train: train2017.txt # train images (relative to 'path') 118287 images +val: val2017.txt # val images (relative to 'path') 5000 images +test: test-dev2017.txt # 20288 of 40670 images, submit to https://competitions.codalab.org/competitions/20794 + +# Keypoints +kpt_shape: [17, 3] # number of keypoints, number of dims (2 for x,y or 3 for x,y,visible) +flip_idx: [0, 2, 1, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15] + +# Classes +names: + 0: person + +# Download script/URL (optional) +download: | + from ultralytics.yolo.utils.downloads import download + from pathlib import Path + + # Download labels + dir = Path(yaml['path']) # dataset root dir + url = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/' + urls = [url + 'coco2017labels-pose.zip'] # labels + #download(urls, dir=dir.parent) + # Download data + urls = ['http://images.cocodataset.org/zips/train2017.zip', # 19G, 118k images + 'http://images.cocodataset.org/zips/val2017.zip', # 1G, 5k images + 'http://images.cocodataset.org/zips/test2017.zip'] # 7G, 41k images (optional) + #download(urls, dir=dir / 'images', threads=3) diff --git a/ultralytics/datasets/coco.yaml b/ultralytics/datasets/coco.yaml new file mode 100644 index 0000000..0e6edf7 --- /dev/null +++ b/ultralytics/datasets/coco.yaml @@ -0,0 +1,115 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +# COCO 2017 dataset http://cocodataset.org by Microsoft +# Example usage: yolo train data=coco.yaml +# parent +# ├── ultralytics +# └── datasets +# └── coco ← downloads here (20.1 GB) + + +# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] 
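In the pose yamls above (celeba-human and coco-pose), `kpt_shape` gives the number of keypoints and the values stored per keypoint (x, y, visibility), while `flip_idx` is the permutation that swaps left/right keypoints when an image is mirrored horizontally. A hedged sketch of how such a permutation might be applied during augmentation (the actual augmentation code is not part of this commit):

```python
# Illustrative use of flip_idx: after a horizontal flip, left/right keypoints must trade places
# so that, e.g., the "left eye" entry still refers to the subject's left eye.
import numpy as np

flip_idx = [0, 2, 1, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15]  # from coco-pose.yaml

kpts = np.random.rand(17, 3)          # kpt_shape [17, 3]: (x, y, visible), x/y normalized to [0, 1]
flipped = kpts[flip_idx].copy()       # reorder rows so each left/right pair is swapped
flipped[:, 0] = 1.0 - flipped[:, 0]   # mirror the x coordinate
print(flipped.shape)                  # (17, 3)
```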
+path: ../datasets/coco # dataset root dir +train: train2017.txt # train images (relative to 'path') 118287 images +val: val2017.txt # val images (relative to 'path') 5000 images +test: test-dev2017.txt # 20288 of 40670 images, submit to https://competitions.codalab.org/competitions/20794 + +# Classes +names: + 0: person + 1: bicycle + 2: car + 3: motorcycle + 4: airplane + 5: bus + 6: train + 7: truck + 8: boat + 9: traffic light + 10: fire hydrant + 11: stop sign + 12: parking meter + 13: bench + 14: bird + 15: cat + 16: dog + 17: horse + 18: sheep + 19: cow + 20: elephant + 21: bear + 22: zebra + 23: giraffe + 24: backpack + 25: umbrella + 26: handbag + 27: tie + 28: suitcase + 29: frisbee + 30: skis + 31: snowboard + 32: sports ball + 33: kite + 34: baseball bat + 35: baseball glove + 36: skateboard + 37: surfboard + 38: tennis racket + 39: bottle + 40: wine glass + 41: cup + 42: fork + 43: knife + 44: spoon + 45: bowl + 46: banana + 47: apple + 48: sandwich + 49: orange + 50: broccoli + 51: carrot + 52: hot dog + 53: pizza + 54: donut + 55: cake + 56: chair + 57: couch + 58: potted plant + 59: bed + 60: dining table + 61: toilet + 62: tv + 63: laptop + 64: mouse + 65: remote + 66: keyboard + 67: cell phone + 68: microwave + 69: oven + 70: toaster + 71: sink + 72: refrigerator + 73: book + 74: clock + 75: vase + 76: scissors + 77: teddy bear + 78: hair drier + 79: toothbrush + + +# Download script/URL (optional) +download: | + from ultralytics.yolo.utils.downloads import download + from pathlib import Path + + # Download labels + segments = True # segment or box labels + dir = Path(yaml['path']) # dataset root dir + url = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/' + urls = [url + ('coco2017labels-segments.zip' if segments else 'coco2017labels.zip')] # labels + download(urls, dir=dir.parent) + # Download data + urls = ['http://images.cocodataset.org/zips/train2017.zip', # 19G, 118k images + 'http://images.cocodataset.org/zips/val2017.zip', # 1G, 5k images + 'http://images.cocodataset.org/zips/test2017.zip'] # 7G, 41k images (optional) + download(urls, dir=dir / 'images', threads=3) diff --git a/ultralytics/datasets/coco128-seg.yaml b/ultralytics/datasets/coco128-seg.yaml new file mode 100644 index 0000000..8c2e3da --- /dev/null +++ b/ultralytics/datasets/coco128-seg.yaml @@ -0,0 +1,101 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +# COCO128-seg dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) by Ultralytics +# Example usage: yolo train data=coco128.yaml +# parent +# ├── ultralytics +# └── datasets +# └── coco128-seg ← downloads here (7 MB) + + +# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] 
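The `download:` values in these yamls are either a plain URL or a Python snippet that expects a variable named `yaml` holding the parsed file (hence the repeated `Path(yaml['path'])`). A hedged sketch of running such a snippet by hand, assuming the coco.yaml above has been saved locally under that name (note that running it really does start the ~20 GB COCO download):

```python
# Rough sketch (assumption, not part of the commit): execute a yaml's embedded download snippet,
# exposing the parsed file under the name 'yaml' that the snippet references.
from pathlib import Path
import yaml as pyyaml  # aliased to avoid clashing with the 'yaml' name the snippet expects

cfg = pyyaml.safe_load(Path("coco.yaml").read_text())
snippet = cfg["download"]          # the literal text under 'download: |'
exec(snippet, {"yaml": cfg})       # caution: downloads the full COCO labels and images
```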
+path: ../datasets/coco128-seg # dataset root dir +train: images/train2017 # train images (relative to 'path') 128 images +val: images/train2017 # val images (relative to 'path') 128 images +test: # test images (optional) + +# Classes +names: + 0: person + 1: bicycle + 2: car + 3: motorcycle + 4: airplane + 5: bus + 6: train + 7: truck + 8: boat + 9: traffic light + 10: fire hydrant + 11: stop sign + 12: parking meter + 13: bench + 14: bird + 15: cat + 16: dog + 17: horse + 18: sheep + 19: cow + 20: elephant + 21: bear + 22: zebra + 23: giraffe + 24: backpack + 25: umbrella + 26: handbag + 27: tie + 28: suitcase + 29: frisbee + 30: skis + 31: snowboard + 32: sports ball + 33: kite + 34: baseball bat + 35: baseball glove + 36: skateboard + 37: surfboard + 38: tennis racket + 39: bottle + 40: wine glass + 41: cup + 42: fork + 43: knife + 44: spoon + 45: bowl + 46: banana + 47: apple + 48: sandwich + 49: orange + 50: broccoli + 51: carrot + 52: hot dog + 53: pizza + 54: donut + 55: cake + 56: chair + 57: couch + 58: potted plant + 59: bed + 60: dining table + 61: toilet + 62: tv + 63: laptop + 64: mouse + 65: remote + 66: keyboard + 67: cell phone + 68: microwave + 69: oven + 70: toaster + 71: sink + 72: refrigerator + 73: book + 74: clock + 75: vase + 76: scissors + 77: teddy bear + 78: hair drier + 79: toothbrush + + +# Download script/URL (optional) +download: https://ultralytics.com/assets/coco128-seg.zip diff --git a/ultralytics/datasets/coco128.yaml b/ultralytics/datasets/coco128.yaml new file mode 100644 index 0000000..9749ab6 --- /dev/null +++ b/ultralytics/datasets/coco128.yaml @@ -0,0 +1,101 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +# COCO128 dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) by Ultralytics +# Example usage: yolo train data=coco128.yaml +# parent +# ├── ultralytics +# └── datasets +# └── coco128 ← downloads here (7 MB) + + +# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] 
+path: ../datasets/coco128 # dataset root dir +train: images/train2017 # train images (relative to 'path') 128 images +val: images/train2017 # val images (relative to 'path') 128 images +test: # test images (optional) + +# Classes +names: + 0: person + 1: bicycle + 2: car + 3: motorcycle + 4: airplane + 5: bus + 6: train + 7: truck + 8: boat + 9: traffic light + 10: fire hydrant + 11: stop sign + 12: parking meter + 13: bench + 14: bird + 15: cat + 16: dog + 17: horse + 18: sheep + 19: cow + 20: elephant + 21: bear + 22: zebra + 23: giraffe + 24: backpack + 25: umbrella + 26: handbag + 27: tie + 28: suitcase + 29: frisbee + 30: skis + 31: snowboard + 32: sports ball + 33: kite + 34: baseball bat + 35: baseball glove + 36: skateboard + 37: surfboard + 38: tennis racket + 39: bottle + 40: wine glass + 41: cup + 42: fork + 43: knife + 44: spoon + 45: bowl + 46: banana + 47: apple + 48: sandwich + 49: orange + 50: broccoli + 51: carrot + 52: hot dog + 53: pizza + 54: donut + 55: cake + 56: chair + 57: couch + 58: potted plant + 59: bed + 60: dining table + 61: toilet + 62: tv + 63: laptop + 64: mouse + 65: remote + 66: keyboard + 67: cell phone + 68: microwave + 69: oven + 70: toaster + 71: sink + 72: refrigerator + 73: book + 74: clock + 75: vase + 76: scissors + 77: teddy bear + 78: hair drier + 79: toothbrush + + +# Download script/URL (optional) +download: https://ultralytics.com/assets/coco128.zip diff --git a/ultralytics/datasets/coco8-pose.yaml b/ultralytics/datasets/coco8-pose.yaml new file mode 100644 index 0000000..e6fab8b --- /dev/null +++ b/ultralytics/datasets/coco8-pose.yaml @@ -0,0 +1,25 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +# COCO8-pose dataset (first 8 images from COCO train2017) by Ultralytics +# Example usage: yolo train data=coco8-pose.yaml +# parent +# ├── ultralytics +# └── datasets +# └── coco8-pose ← downloads here (1 MB) + + +# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] +path: ../datasets/coco8-pose # dataset root dir +train: images/train # train images (relative to 'path') 4 images +val: images/val # val images (relative to 'path') 4 images +test: # test images (optional) + +# Keypoints +kpt_shape: [17, 3] # number of keypoints, number of dims (2 for x,y or 3 for x,y,visible) +flip_idx: [0, 2, 1, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15] + +# Classes +names: + 0: person + +# Download script/URL (optional) +download: https://ultralytics.com/assets/coco8-pose.zip diff --git a/ultralytics/datasets/coco8-seg.yaml b/ultralytics/datasets/coco8-seg.yaml new file mode 100644 index 0000000..e6faca1 --- /dev/null +++ b/ultralytics/datasets/coco8-seg.yaml @@ -0,0 +1,101 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +# COCO8-seg dataset (first 8 images from COCO train2017) by Ultralytics +# Example usage: yolo train data=coco8-seg.yaml +# parent +# ├── ultralytics +# └── datasets +# └── coco8-seg ← downloads here (1 MB) + + +# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] 
+path: ../datasets/coco8-seg # dataset root dir +train: images/train # train images (relative to 'path') 4 images +val: images/val # val images (relative to 'path') 4 images +test: # test images (optional) + +# Classes +names: + 0: person + 1: bicycle + 2: car + 3: motorcycle + 4: airplane + 5: bus + 6: train + 7: truck + 8: boat + 9: traffic light + 10: fire hydrant + 11: stop sign + 12: parking meter + 13: bench + 14: bird + 15: cat + 16: dog + 17: horse + 18: sheep + 19: cow + 20: elephant + 21: bear + 22: zebra + 23: giraffe + 24: backpack + 25: umbrella + 26: handbag + 27: tie + 28: suitcase + 29: frisbee + 30: skis + 31: snowboard + 32: sports ball + 33: kite + 34: baseball bat + 35: baseball glove + 36: skateboard + 37: surfboard + 38: tennis racket + 39: bottle + 40: wine glass + 41: cup + 42: fork + 43: knife + 44: spoon + 45: bowl + 46: banana + 47: apple + 48: sandwich + 49: orange + 50: broccoli + 51: carrot + 52: hot dog + 53: pizza + 54: donut + 55: cake + 56: chair + 57: couch + 58: potted plant + 59: bed + 60: dining table + 61: toilet + 62: tv + 63: laptop + 64: mouse + 65: remote + 66: keyboard + 67: cell phone + 68: microwave + 69: oven + 70: toaster + 71: sink + 72: refrigerator + 73: book + 74: clock + 75: vase + 76: scissors + 77: teddy bear + 78: hair drier + 79: toothbrush + + +# Download script/URL (optional) +download: https://ultralytics.com/assets/coco8-seg.zip diff --git a/ultralytics/datasets/coco8.yaml b/ultralytics/datasets/coco8.yaml new file mode 100644 index 0000000..eeb5d9d --- /dev/null +++ b/ultralytics/datasets/coco8.yaml @@ -0,0 +1,101 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +# COCO8 dataset (first 8 images from COCO train2017) by Ultralytics +# Example usage: yolo train data=coco8.yaml +# parent +# ├── ultralytics +# └── datasets +# └── coco8 ← downloads here (1 MB) + + +# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] 
+path: ../datasets/coco8  # dataset root dir
+train: images/train  # train images (relative to 'path') 4 images
+val: images/val  # val images (relative to 'path') 4 images
+test:  # test images (optional)
+
+# Classes
+names:
+  0: person
+  1: bicycle
+  2: car
+  3: motorcycle
+  4: airplane
+  5: bus
+  6: train
+  7: truck
+  8: boat
+  9: traffic light
+  10: fire hydrant
+  11: stop sign
+  12: parking meter
+  13: bench
+  14: bird
+  15: cat
+  16: dog
+  17: horse
+  18: sheep
+  19: cow
+  20: elephant
+  21: bear
+  22: zebra
+  23: giraffe
+  24: backpack
+  25: umbrella
+  26: handbag
+  27: tie
+  28: suitcase
+  29: frisbee
+  30: skis
+  31: snowboard
+  32: sports ball
+  33: kite
+  34: baseball bat
+  35: baseball glove
+  36: skateboard
+  37: surfboard
+  38: tennis racket
+  39: bottle
+  40: wine glass
+  41: cup
+  42: fork
+  43: knife
+  44: spoon
+  45: bowl
+  46: banana
+  47: apple
+  48: sandwich
+  49: orange
+  50: broccoli
+  51: carrot
+  52: hot dog
+  53: pizza
+  54: donut
+  55: cake
+  56: chair
+  57: couch
+  58: potted plant
+  59: bed
+  60: dining table
+  61: toilet
+  62: tv
+  63: laptop
+  64: mouse
+  65: remote
+  66: keyboard
+  67: cell phone
+  68: microwave
+  69: oven
+  70: toaster
+  71: sink
+  72: refrigerator
+  73: book
+  74: clock
+  75: vase
+  76: scissors
+  77: teddy bear
+  78: hair drier
+  79: toothbrush
+
+
+# Download script/URL (optional)
+download: https://ultralytics.com/assets/coco8.zip
diff --git a/ultralytics/datasets/fire.yaml b/ultralytics/datasets/fire.yaml
new file mode 100644
index 0000000..6395ad4
--- /dev/null
+++ b/ultralytics/datasets/fire.yaml
@@ -0,0 +1,19 @@
+# Ultralytics YOLO 🚀, GPL-3.0 license
+# Fire detection dataset (custom, single 'fire' class)
+# Example usage: yolo train data=fire.yaml
+# parent
+# ├── ultralytics
+# └── datasets
+#     └── fire  ← dataset location (see 'path' below)
+
+
+# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
+path: /ssd2t/derron/datasets/  # dataset root dir
+train: fire/train  # train images (relative to 'path')
+val: fire/val  # val images (relative to 'path')
+test:  # test images (optional)
+
+# Classes
+names:
+  0: fire
+
diff --git a/ultralytics/datasets/plate.yaml b/ultralytics/datasets/plate.yaml
new file mode 100644
index 0000000..ef94135
--- /dev/null
+++ b/ultralytics/datasets/plate.yaml
@@ -0,0 +1,24 @@
+# Ultralytics YOLO 🚀, GPL-3.0 license
+# Custom keypoint detection dataset with 5 keypoints per object (classes: single, double)
+# Example usage: yolo train data=plate.yaml
+# parent
+# ├── ultralytics
+# └── datasets
+#     └── widerface  ← dataset location (see 'path' below)
+
+
+# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
+path: /ssd2t/derron/datasets/  # dataset root dir
+train: widerface/mixed  # train images (relative to 'path')
+val: widerface/val  # val images (relative to 'path')
+test:  # test images (optional)
+
+# Keypoints
+kpt_shape: [5, 3]  # number of keypoints, number of dims (2 for x,y or 3 for x,y,visible)
+flip_idx: [1, 0, 2, 4, 3]
+
+# Classes
+names:
+  0: single
+  1: double
+
diff --git a/ultralytics/datasets/seaships.yaml b/ultralytics/datasets/seaships.yaml
new file mode 100644
index 0000000..b945bcc
--- /dev/null
+++ b/ultralytics/datasets/seaships.yaml
@@ -0,0 +1,21 @@
+# Ultralytics YOLO 🚀, GPL-3.0 license
+# Ship detection dataset (Seaships, 6 classes; custom absolute paths)
+# Example usage: yolo train data=seaships.yaml
+# parent
+# ├── ultralytics
+# └── datasets
+#     └── Seaships  ← dataset location (absolute paths below)
+
+
+# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
+train: /ssd2t/derron/datasets/Seaships/train  # train images
+val: /ssd2t/derron/datasets/Seaships/val  # val images
+
+# Classes
+names:
+  0: ore carrier
+  1: general cargo ship
+  2: bulk cargo carrier
+  3: container ship
+  4: fishing boat
+  5: passenger ship
diff --git a/ultralytics/datasets/widerhuman.yaml b/ultralytics/datasets/widerhuman.yaml
new file mode 100644
index 0000000..b40b31e
--- /dev/null
+++ b/ultralytics/datasets/widerhuman.yaml
@@ -0,0 +1,24 @@
+# Ultralytics YOLO 🚀, GPL-3.0 license
+# Custom person detection dataset with 5 keypoints per person
+# Example usage: yolo train data=widerhuman.yaml
+# parent
+# ├── ultralytics
+# └── datasets
+#     └── widerhuman  ← dataset location (see 'path' below)
+
+
+# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
+path: /ssd2t/derron/datasets/  # dataset root dir
+# train: widerhuman/halpe  # alternative train images (uncomment to use instead of the 'train' key below)
+train: widerhuman/train  # train images (relative to 'path')
+val: widerhuman/val  # val images (relative to 'path')
+test:  # test images (optional)
+
+# Keypoints
+kpt_shape: [5, 3]  # number of keypoints, number of dims (2 for x,y or 3 for x,y,visible)
+flip_idx: [1, 0, 2, 4, 3]
+
+# Classes
+names:
+  0: person
+
diff --git a/ultralytics/datasets/xView.yaml b/ultralytics/datasets/xView.yaml
new file mode 100644
index 0000000..6049f6f
--- /dev/null
+++ b/ultralytics/datasets/xView.yaml
@@ -0,0 +1,153 @@
+# Ultralytics YOLO 🚀, AGPL-3.0 license
+# DIUx xView 2018 Challenge https://challenge.xviewdataset.org by U.S. National Geospatial-Intelligence Agency (NGA)
+# -------- DOWNLOAD DATA MANUALLY and jar xf val_images.zip to 'datasets/xView' before running train command! --------
+# Example usage: yolo train data=xView.yaml
+# parent
+# ├── ultralytics
+# └── datasets
+#     └── xView  ← downloads here (20.7 GB)
+
+
+# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
+path: ../datasets/xView # dataset root dir +train: images/autosplit_train.txt # train images (relative to 'path') 90% of 847 train images +val: images/autosplit_val.txt # train images (relative to 'path') 10% of 847 train images + +# Classes +names: + 0: Fixed-wing Aircraft + 1: Small Aircraft + 2: Cargo Plane + 3: Helicopter + 4: Passenger Vehicle + 5: Small Car + 6: Bus + 7: Pickup Truck + 8: Utility Truck + 9: Truck + 10: Cargo Truck + 11: Truck w/Box + 12: Truck Tractor + 13: Trailer + 14: Truck w/Flatbed + 15: Truck w/Liquid + 16: Crane Truck + 17: Railway Vehicle + 18: Passenger Car + 19: Cargo Car + 20: Flat Car + 21: Tank car + 22: Locomotive + 23: Maritime Vessel + 24: Motorboat + 25: Sailboat + 26: Tugboat + 27: Barge + 28: Fishing Vessel + 29: Ferry + 30: Yacht + 31: Container Ship + 32: Oil Tanker + 33: Engineering Vehicle + 34: Tower crane + 35: Container Crane + 36: Reach Stacker + 37: Straddle Carrier + 38: Mobile Crane + 39: Dump Truck + 40: Haul Truck + 41: Scraper/Tractor + 42: Front loader/Bulldozer + 43: Excavator + 44: Cement Mixer + 45: Ground Grader + 46: Hut/Tent + 47: Shed + 48: Building + 49: Aircraft Hangar + 50: Damaged Building + 51: Facility + 52: Construction Site + 53: Vehicle Lot + 54: Helipad + 55: Storage Tank + 56: Shipping container lot + 57: Shipping Container + 58: Pylon + 59: Tower + + +# Download script/URL (optional) --------------------------------------------------------------------------------------- +download: | + import json + import os + from pathlib import Path + + import numpy as np + from PIL import Image + from tqdm import tqdm + + from ultralytics.yolo.data.dataloaders.v5loader import autosplit + from ultralytics.yolo.utils.ops import xyxy2xywhn + + + def convert_labels(fname=Path('xView/xView_train.geojson')): + # Convert xView geoJSON labels to YOLO format + path = fname.parent + with open(fname) as f: + print(f'Loading {fname}...') + data = json.load(f) + + # Make dirs + labels = Path(path / 'labels' / 'train') + os.system(f'rm -rf {labels}') + labels.mkdir(parents=True, exist_ok=True) + + # xView classes 11-94 to 0-59 + xview_class2index = [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 1, 2, -1, 3, -1, 4, 5, 6, 7, 8, -1, 9, 10, 11, + 12, 13, 14, 15, -1, -1, 16, 17, 18, 19, 20, 21, 22, -1, 23, 24, 25, -1, 26, 27, -1, 28, -1, + 29, 30, 31, 32, 33, 34, 35, 36, 37, -1, 38, 39, 40, 41, 42, 43, 44, 45, -1, -1, -1, -1, 46, + 47, 48, 49, -1, 50, 51, -1, 52, -1, -1, -1, 53, 54, -1, 55, -1, -1, 56, -1, 57, -1, 58, 59] + + shapes = {} + for feature in tqdm(data['features'], desc=f'Converting {fname}'): + p = feature['properties'] + if p['bounds_imcoords']: + id = p['image_id'] + file = path / 'train_images' / id + if file.exists(): # 1395.tif missing + try: + box = np.array([int(num) for num in p['bounds_imcoords'].split(",")]) + assert box.shape[0] == 4, f'incorrect box shape {box.shape[0]}' + cls = p['type_id'] + cls = xview_class2index[int(cls)] # xView class to 0-60 + assert 59 >= cls >= 0, f'incorrect class index {cls}' + + # Write YOLO label + if id not in shapes: + shapes[id] = Image.open(file).size + box = xyxy2xywhn(box[None].astype(np.float), w=shapes[id][0], h=shapes[id][1], clip=True) + with open((labels / id).with_suffix('.txt'), 'a') as f: + f.write(f"{cls} {' '.join(f'{x:.6f}' for x in box[0])}\n") # write label.txt + except Exception as e: + print(f'WARNING: skipping one label for {file}: {e}') + + + # Download manually from https://challenge.xviewdataset.org + dir = Path(yaml['path']) # dataset root dir + # urls = 
['https://d307kc0mrhucc3.cloudfront.net/train_labels.zip', # train labels + # 'https://d307kc0mrhucc3.cloudfront.net/train_images.zip', # 15G, 847 train images + # 'https://d307kc0mrhucc3.cloudfront.net/val_images.zip'] # 5G, 282 val images (no labels) + # download(urls, dir=dir) + + # Convert labels + convert_labels(dir / 'xView_train.geojson') + + # Move images + images = Path(dir / 'images') + images.mkdir(parents=True, exist_ok=True) + Path(dir / 'train_images').rename(dir / 'images' / 'train') + Path(dir / 'val_images').rename(dir / 'images' / 'val') + + # Split + autosplit(dir / 'images' / 'train') diff --git a/ultralytics/datasets/yolov8-plate.yaml b/ultralytics/datasets/yolov8-plate.yaml new file mode 100644 index 0000000..24fcf92 --- /dev/null +++ b/ultralytics/datasets/yolov8-plate.yaml @@ -0,0 +1,24 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license +# COCO8-pose dataset (first 8 images from COCO train2017) by Ultralytics +# Example usage: yolo train data=coco8-pose.yaml +# parent +# ├── ultralytics +# └── datasets +# └── coco8-pose ← downloads here (1 MB) + + +# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] +# path: /ssd2t/derron/datasets/ # dataset root dir +train: /mnt/Gu/trainData/Detect/plate_detect/train_detect/open_datasets +val: /mnt/Gu/trainData/Detect/plate_detect/val_detect/gangao +test: # test images (optional) + +# Keypoints +kpt_shape: [4, 2] # number of keypoints, number of dims (2 for x,y or 3 for x,y,visible) +flip_idx: [1, 0, 3, 2] + +# Classes +names: + 0: single + 1: double + diff --git a/ultralytics/hub/__init__.py b/ultralytics/hub/__init__.py new file mode 100644 index 0000000..6059083 --- /dev/null +++ b/ultralytics/hub/__init__.py @@ -0,0 +1,117 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +import requests + +from ultralytics.hub.auth import Auth +from ultralytics.hub.utils import PREFIX +from ultralytics.yolo.data.utils import HUBDatasetStats +from ultralytics.yolo.utils import LOGGER, SETTINGS, USER_CONFIG_DIR, yaml_save + + +def login(api_key=''): + """ + Log in to the Ultralytics HUB API using the provided API key. + + Args: + api_key (str, optional): May be an API key or a combination API key and model ID, i.e. key_id + + Example: + from ultralytics import hub + hub.login('API_KEY') + """ + Auth(api_key, verbose=True) + + +def logout(): + """ + Log out of Ultralytics HUB by removing the API key from the settings file. To log in again, use 'yolo hub login'. + + Example: + from ultralytics import hub + hub.logout() + """ + SETTINGS['api_key'] = '' + yaml_save(USER_CONFIG_DIR / 'settings.yaml', SETTINGS) + LOGGER.info(f"{PREFIX}logged out ✅. To log in again, use 'yolo hub login'.") + + +def start(key=''): + """ + Start training models with Ultralytics HUB (DEPRECATED). + + Args: + key (str, optional): A string containing either the API key and model ID combination (apikey_modelid), + or the full model URL (https://hub.ultralytics.com/models/apikey_modelid). + """ + api_key, model_id = key.split('_') + LOGGER.warning(f""" +WARNING ⚠️ ultralytics.start() is deprecated after 8.0.60. 
Updated usage to train Ultralytics HUB models is: + +from ultralytics import YOLO, hub + +hub.login('{api_key}') +model = YOLO('https://hub.ultralytics.com/models/{model_id}') +model.train()""") + + +def reset_model(model_id=''): + """Reset a trained model to an untrained state.""" + r = requests.post('https://api.ultralytics.com/model-reset', json={'apiKey': Auth().api_key, 'modelId': model_id}) + if r.status_code == 200: + LOGGER.info(f'{PREFIX}Model reset successfully') + return + LOGGER.warning(f'{PREFIX}Model reset failure {r.status_code} {r.reason}') + + +def export_fmts_hub(): + """Returns a list of HUB-supported export formats.""" + from ultralytics.yolo.engine.exporter import export_formats + return list(export_formats()['Argument'][1:]) + ['ultralytics_tflite', 'ultralytics_coreml'] + + +def export_model(model_id='', format='torchscript'): + """Export a model to all formats.""" + assert format in export_fmts_hub(), f"Unsupported export format '{format}', valid formats are {export_fmts_hub()}" + r = requests.post(f'https://api.ultralytics.com/v1/models/{model_id}/export', + json={'format': format}, + headers={'x-api-key': Auth().api_key}) + assert r.status_code == 200, f'{PREFIX}{format} export failure {r.status_code} {r.reason}' + LOGGER.info(f'{PREFIX}{format} export started ✅') + + +def get_export(model_id='', format='torchscript'): + """Get an exported model dictionary with download URL.""" + assert format in export_fmts_hub(), f"Unsupported export format '{format}', valid formats are {export_fmts_hub()}" + r = requests.post('https://api.ultralytics.com/get-export', + json={ + 'apiKey': Auth().api_key, + 'modelId': model_id, + 'format': format}) + assert r.status_code == 200, f'{PREFIX}{format} get_export failure {r.status_code} {r.reason}' + return r.json() + + +def check_dataset(path='', task='detect'): + """ + Function for error-checking HUB dataset Zip file before upload. It checks a dataset for errors before it is + uploaded to the HUB. Usage examples are given below. + + Args: + path (str, optional): Path to data.zip (with data.yaml inside data.zip). Defaults to ''. + task (str, optional): Dataset task. Options are 'detect', 'segment', 'pose', 'classify'. Defaults to 'detect'. + + Example: + ```python + from ultralytics.hub import check_dataset + + check_dataset('path/to/coco8.zip', task='detect') # detect dataset + check_dataset('path/to/coco8-seg.zip', task='segment') # segment dataset + check_dataset('path/to/coco8-pose.zip', task='pose') # pose dataset + ``` + """ + HUBDatasetStats(path=path, task=task).get_json() + LOGGER.info('Checks completed correctly ✅. Upload this dataset to https://hub.ultralytics.com/datasets/.') + + +if __name__ == '__main__': + start() diff --git a/ultralytics/hub/auth.py b/ultralytics/hub/auth.py new file mode 100644 index 0000000..960b3dc --- /dev/null +++ b/ultralytics/hub/auth.py @@ -0,0 +1,139 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +import requests + +from ultralytics.hub.utils import HUB_API_ROOT, PREFIX, request_with_credentials +from ultralytics.yolo.utils import LOGGER, SETTINGS, emojis, is_colab, set_settings + +API_KEY_URL = 'https://hub.ultralytics.com/settings?tab=api+keys' + + +class Auth: + id_token = api_key = model_key = False + + def __init__(self, api_key='', verbose=False): + """ + Initialize the Auth class with an optional API key. + + Args: + api_key (str, optional): May be an API key or a combination API key and model ID, i.e. 
key_id + """ + # Split the input API key in case it contains a combined key_model and keep only the API key part + api_key = api_key.split('_')[0] + + # Set API key attribute as value passed or SETTINGS API key if none passed + self.api_key = api_key or SETTINGS.get('api_key', '') + + # If an API key is provided + if self.api_key: + # If the provided API key matches the API key in the SETTINGS + if self.api_key == SETTINGS.get('api_key'): + # Log that the user is already logged in + if verbose: + LOGGER.info(f'{PREFIX}Authenticated ✅') + return + else: + # Attempt to authenticate with the provided API key + success = self.authenticate() + # If the API key is not provided and the environment is a Google Colab notebook + elif is_colab(): + # Attempt to authenticate using browser cookies + success = self.auth_with_cookies() + else: + # Request an API key + success = self.request_api_key() + + # Update SETTINGS with the new API key after successful authentication + if success: + set_settings({'api_key': self.api_key}) + # Log that the new login was successful + if verbose: + LOGGER.info(f'{PREFIX}New authentication successful ✅') + elif verbose: + LOGGER.info(f'{PREFIX}Retrieve API key from {API_KEY_URL}') + + def request_api_key(self, max_attempts=3): + """ + Prompt the user to input their API key. Returns the model ID. + """ + import getpass + for attempts in range(max_attempts): + LOGGER.info(f'{PREFIX}Login. Attempt {attempts + 1} of {max_attempts}') + input_key = getpass.getpass(f'Enter API key from {API_KEY_URL} ') + self.api_key = input_key.split('_')[0] # remove model id if present + if self.authenticate(): + return True + raise ConnectionError(emojis(f'{PREFIX}Failed to authenticate ❌')) + + def authenticate(self) -> bool: + """ + Attempt to authenticate with the server using either id_token or API key. + + Returns: + bool: True if authentication is successful, False otherwise. + """ + try: + header = self.get_auth_header() + if header: + r = requests.post(f'{HUB_API_ROOT}/v1/auth', headers=header) + if not r.json().get('success', False): + raise ConnectionError('Unable to authenticate.') + return True + raise ConnectionError('User has not authenticated locally.') + except ConnectionError: + self.id_token = self.api_key = False # reset invalid + LOGGER.warning(f'{PREFIX}Invalid API key ⚠️') + return False + + def auth_with_cookies(self) -> bool: + """ + Attempt to fetch authentication via cookies and set id_token. + User must be logged in to HUB and running in a supported browser. + + Returns: + bool: True if authentication is successful, False otherwise. + """ + if not is_colab(): + return False # Currently only works with Colab + try: + authn = request_with_credentials(f'{HUB_API_ROOT}/v1/auth/auto') + if authn.get('success', False): + self.id_token = authn.get('data', {}).get('idToken', None) + self.authenticate() + return True + raise ConnectionError('Unable to fetch browser authentication details.') + except ConnectionError: + self.id_token = False # reset invalid + return False + + def get_auth_header(self): + """ + Get the authentication header for making API requests. + + Returns: + (dict): The authentication header if id_token or API key is set, None otherwise. + """ + if self.id_token: + return {'authorization': f'Bearer {self.id_token}'} + elif self.api_key: + return {'x-api-key': self.api_key} + else: + return None + + def get_state(self) -> bool: + """ + Get the authentication state. + + Returns: + bool: True if either id_token or API key is set, False otherwise. 
+ """ + return self.id_token or self.api_key + + def set_api_key(self, key: str): + """ + Set the API key for authentication. + + Args: + key (str): The API key string. + """ + self.api_key = key diff --git a/ultralytics/hub/session.py b/ultralytics/hub/session.py new file mode 100644 index 0000000..1a96d20 --- /dev/null +++ b/ultralytics/hub/session.py @@ -0,0 +1,189 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +import signal +import sys +from pathlib import Path +from time import sleep + +import requests + +from ultralytics.hub.utils import HUB_API_ROOT, PREFIX, smart_request +from ultralytics.yolo.utils import LOGGER, __version__, checks, emojis, is_colab, threaded +from ultralytics.yolo.utils.errors import HUBModelError + +AGENT_NAME = f'python-{__version__}-colab' if is_colab() else f'python-{__version__}-local' + + +class HUBTrainingSession: + """ + HUB training session for Ultralytics HUB YOLO models. Handles model initialization, heartbeats, and checkpointing. + + Args: + url (str): Model identifier used to initialize the HUB training session. + + Attributes: + agent_id (str): Identifier for the instance communicating with the server. + model_id (str): Identifier for the YOLOv5 model being trained. + model_url (str): URL for the model in Ultralytics HUB. + api_url (str): API URL for the model in Ultralytics HUB. + auth_header (dict): Authentication header for the Ultralytics HUB API requests. + rate_limits (dict): Rate limits for different API calls (in seconds). + timers (dict): Timers for rate limiting. + metrics_queue (dict): Queue for the model's metrics. + model (dict): Model data fetched from Ultralytics HUB. + alive (bool): Indicates if the heartbeat loop is active. + """ + + def __init__(self, url): + """ + Initialize the HUBTrainingSession with the provided model identifier. + + Args: + url (str): Model identifier used to initialize the HUB training session. + It can be a URL string or a model key with specific format. + + Raises: + ValueError: If the provided model identifier is invalid. + ConnectionError: If connecting with global API key is not supported. + """ + + from ultralytics.hub.auth import Auth + + # Parse input + if url.startswith('https://hub.ultralytics.com/models/'): + url = url.split('https://hub.ultralytics.com/models/')[-1] + if [len(x) for x in url.split('_')] == [42, 20]: + key, model_id = url.split('_') + elif len(url) == 20: + key, model_id = '', url + else: + raise HUBModelError(f"model='{url}' not found. Check format is correct, i.e. 
" + f"model='https://hub.ultralytics.com/models/MODEL_ID' and try again.") + + # Authorize + auth = Auth(key) + self.agent_id = None # identifies which instance is communicating with server + self.model_id = model_id + self.model_url = f'https://hub.ultralytics.com/models/{model_id}' + self.api_url = f'{HUB_API_ROOT}/v1/models/{model_id}' + self.auth_header = auth.get_auth_header() + self.rate_limits = {'metrics': 3.0, 'ckpt': 900.0, 'heartbeat': 300.0} # rate limits (seconds) + self.timers = {} # rate limit timers (seconds) + self.metrics_queue = {} # metrics queue + self.model = self._get_model() + self.alive = True + self._start_heartbeat() # start heartbeats + self._register_signal_handlers() + LOGGER.info(f'{PREFIX}View model at {self.model_url} 🚀') + + def _register_signal_handlers(self): + """Register signal handlers for SIGTERM and SIGINT signals to gracefully handle termination.""" + signal.signal(signal.SIGTERM, self._handle_signal) + signal.signal(signal.SIGINT, self._handle_signal) + + def _handle_signal(self, signum, frame): + """ + Handle kill signals and prevent heartbeats from being sent on Colab after termination. + This method does not use frame, it is included as it is passed by signal. + """ + if self.alive is True: + LOGGER.info(f'{PREFIX}Kill signal received! ❌') + self._stop_heartbeat() + sys.exit(signum) + + def _stop_heartbeat(self): + """Terminate the heartbeat loop.""" + self.alive = False + + def upload_metrics(self): + """Upload model metrics to Ultralytics HUB.""" + payload = {'metrics': self.metrics_queue.copy(), 'type': 'metrics'} + smart_request('post', self.api_url, json=payload, headers=self.auth_header, code=2) + + def _get_model(self): + """Fetch and return model data from Ultralytics HUB.""" + api_url = f'{HUB_API_ROOT}/v1/models/{self.model_id}' + + try: + response = smart_request('get', api_url, headers=self.auth_header, thread=False, code=0) + data = response.json().get('data', None) + + if data.get('status', None) == 'trained': + raise ValueError(emojis(f'Model is already trained and uploaded to {self.model_url} 🚀')) + + if not data.get('data', None): + raise ValueError('Dataset may still be processing. Please wait a minute and try again.') # RF fix + self.model_id = data['id'] + + if data['status'] == 'new': # new model to start training + self.train_args = { + # TODO: deprecate 'batch_size' key for 'batch' in 3Q23 + 'batch': data['batch' if ('batch' in data) else 'batch_size'], + 'epochs': data['epochs'], + 'imgsz': data['imgsz'], + 'patience': data['patience'], + 'device': data['device'], + 'cache': data['cache'], + 'data': data['data']} + self.model_file = data.get('cfg') or data.get('weights') # cfg for pretrained=False + self.model_file = checks.check_yolov5u_filename(self.model_file, verbose=False) # YOLOv5->YOLOv5u + elif data['status'] == 'training': # existing model to resume training + self.train_args = {'data': data['data'], 'resume': True} + self.model_file = data['resume'] + + return data + except requests.exceptions.ConnectionError as e: + raise ConnectionRefusedError('ERROR: The HUB server is not online. Please try again later.') from e + except Exception: + raise + + def upload_model(self, epoch, weights, is_best=False, map=0.0, final=False): + """ + Upload a model checkpoint to Ultralytics HUB. + + Args: + epoch (int): The current training epoch. + weights (str): Path to the model weights file. + is_best (bool): Indicates if the current model is the best one so far. + map (float): Mean average precision of the model. 
+ final (bool): Indicates if the model is the final model after training. + """ + if Path(weights).is_file(): + with open(weights, 'rb') as f: + file = f.read() + else: + LOGGER.warning(f'{PREFIX}WARNING ⚠️ Model upload issue. Missing model {weights}.') + file = None + url = f'{self.api_url}/upload' + # url = 'http://httpbin.org/post' # for debug + data = {'epoch': epoch} + if final: + data.update({'type': 'final', 'map': map}) + smart_request('post', + url, + data=data, + files={'best.pt': file}, + headers=self.auth_header, + retry=10, + timeout=3600, + thread=False, + progress=True, + code=4) + else: + data.update({'type': 'epoch', 'isBest': bool(is_best)}) + smart_request('post', url, data=data, files={'last.pt': file}, headers=self.auth_header, code=3) + + @threaded + def _start_heartbeat(self): + """Begin a threaded heartbeat loop to report the agent's status to Ultralytics HUB.""" + while self.alive: + r = smart_request('post', + f'{HUB_API_ROOT}/v1/agent/heartbeat/models/{self.model_id}', + json={ + 'agent': AGENT_NAME, + 'agentId': self.agent_id}, + headers=self.auth_header, + retry=0, + code=5, + thread=False) # already in a thread + self.agent_id = r.json().get('data', {}).get('agentId', None) + sleep(self.rate_limits['heartbeat']) diff --git a/ultralytics/hub/utils.py b/ultralytics/hub/utils.py new file mode 100644 index 0000000..ecd64a9 --- /dev/null +++ b/ultralytics/hub/utils.py @@ -0,0 +1,220 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +import os +import platform +import random +import sys +import threading +import time +from pathlib import Path + +import requests +from tqdm import tqdm + +from ultralytics.yolo.utils import (ENVIRONMENT, LOGGER, ONLINE, RANK, SETTINGS, TESTS_RUNNING, TQDM_BAR_FORMAT, + TryExcept, __version__, colorstr, get_git_origin_url, is_colab, is_git_dir, + is_pip_package) + +PREFIX = colorstr('Ultralytics HUB: ') +HELP_MSG = 'If this issue persists please visit https://github.com/ultralytics/hub/issues for assistance.' +HUB_API_ROOT = os.environ.get('ULTRALYTICS_HUB_API', 'https://api.ultralytics.com') + + +def request_with_credentials(url: str) -> any: + """ + Make an AJAX request with cookies attached in a Google Colab environment. + + Args: + url (str): The URL to make the request to. + + Returns: + (any): The response data from the AJAX request. + + Raises: + OSError: If the function is not run in a Google Colab environment. + """ + if not is_colab(): + raise OSError('request_with_credentials() must run in a Colab environment') + from google.colab import output # noqa + from IPython import display # noqa + display.display( + display.Javascript(""" + window._hub_tmp = new Promise((resolve, reject) => { + const timeout = setTimeout(() => reject("Failed authenticating existing browser session"), 5000) + fetch("%s", { + method: 'POST', + credentials: 'include' + }) + .then((response) => resolve(response.json())) + .then((json) => { + clearTimeout(timeout); + }).catch((err) => { + clearTimeout(timeout); + reject(err); + }); + }); + """ % url)) + return output.eval_js('_hub_tmp') + + +def requests_with_progress(method, url, **kwargs): + """ + Make an HTTP request using the specified method and URL, with an optional progress bar. + + Args: + method (str): The HTTP method to use (e.g. 'GET', 'POST'). + url (str): The URL to send the request to. + **kwargs (dict): Additional keyword arguments to pass to the underlying `requests.request` function. + + Returns: + (requests.Response): The response object from the HTTP request. 
+ + Note: + If 'progress' is set to True, the progress bar will display the download progress + for responses with a known content length. + """ + progress = kwargs.pop('progress', False) + if not progress: + return requests.request(method, url, **kwargs) + response = requests.request(method, url, stream=True, **kwargs) + total = int(response.headers.get('content-length', 0)) # total size + try: + pbar = tqdm(total=total, unit='B', unit_scale=True, unit_divisor=1024, bar_format=TQDM_BAR_FORMAT) + for data in response.iter_content(chunk_size=1024): + pbar.update(len(data)) + pbar.close() + except requests.exceptions.ChunkedEncodingError: # avoid 'Connection broken: IncompleteRead' warnings + response.close() + return response + + +def smart_request(method, url, retry=3, timeout=30, thread=True, code=-1, verbose=True, progress=False, **kwargs): + """ + Makes an HTTP request using the 'requests' library, with exponential backoff retries up to a specified timeout. + + Args: + method (str): The HTTP method to use for the request. Choices are 'post' and 'get'. + url (str): The URL to make the request to. + retry (int, optional): Number of retries to attempt before giving up. Default is 3. + timeout (int, optional): Timeout in seconds after which the function will give up retrying. Default is 30. + thread (bool, optional): Whether to execute the request in a separate daemon thread. Default is True. + code (int, optional): An identifier for the request, used for logging purposes. Default is -1. + verbose (bool, optional): A flag to determine whether to print out to console or not. Default is True. + progress (bool, optional): Whether to show a progress bar during the request. Default is False. + **kwargs (dict): Keyword arguments to be passed to the requests function specified in method. + + Returns: + (requests.Response): The HTTP response object. If the request is executed in a separate thread, returns None. + """ + retry_codes = (408, 500) # retry only these codes + + @TryExcept(verbose=verbose) + def func(func_method, func_url, **func_kwargs): + """Make HTTP requests with retries and timeouts, with optional progress tracking.""" + r = None # response + t0 = time.time() # initial time for timer + for i in range(retry + 1): + if (time.time() - t0) > timeout: + break + r = requests_with_progress(func_method, func_url, **func_kwargs) # i.e. get(url, data, json, files) + if r.status_code < 300: # return codes in the 2xx range are generally considered "good" or "successful" + break + try: + m = r.json().get('message', 'No JSON message.') + except AttributeError: + m = 'Unable to read JSON.' + if i == 0: + if r.status_code in retry_codes: + m += f' Retrying {retry}x for {timeout}s.' if retry else '' + elif r.status_code == 429: # rate limit + h = r.headers # response headers + m = f"Rate limit reached ({h['X-RateLimit-Remaining']}/{h['X-RateLimit-Limit']}). " \ + f"Please retry after {h['Retry-After']}s." + if verbose: + LOGGER.warning(f'{PREFIX}{m} {HELP_MSG} ({r.status_code} #{code})') + if r.status_code not in retry_codes: + return r + time.sleep(2 ** i) # exponential standoff + return r + + args = method, url + kwargs['progress'] = progress + if thread: + threading.Thread(target=func, args=args, kwargs=kwargs, daemon=True).start() + else: + return func(*args, **kwargs) + + +class Events: + """ + A class for collecting anonymous event analytics. Event analytics are enabled when sync=True in settings and + disabled when sync=False. Run 'yolo settings' to see and update settings YAML file. 
+ + Attributes: + url (str): The URL to send anonymous events. + rate_limit (float): The rate limit in seconds for sending events. + metadata (dict): A dictionary containing metadata about the environment. + enabled (bool): A flag to enable or disable Events based on certain conditions. + """ + + url = 'https://www.google-analytics.com/mp/collect?measurement_id=G-X8NCJYTQXM&api_secret=QLQrATrNSwGRFRLE-cbHJw' + + def __init__(self): + """ + Initializes the Events object with default values for events, rate_limit, and metadata. + """ + self.events = [] # events list + self.rate_limit = 60.0 # rate limit (seconds) + self.t = 0.0 # rate limit timer (seconds) + self.metadata = { + 'cli': Path(sys.argv[0]).name == 'yolo', + 'install': 'git' if is_git_dir() else 'pip' if is_pip_package() else 'other', + 'python': '.'.join(platform.python_version_tuple()[:2]), # i.e. 3.10 + 'version': __version__, + 'env': ENVIRONMENT, + 'session_id': round(random.random() * 1E15), + 'engagement_time_msec': 1000} + self.enabled = \ + SETTINGS['sync'] and \ + RANK in (-1, 0) and \ + not TESTS_RUNNING and \ + ONLINE and \ + (is_pip_package() or get_git_origin_url() == 'https://github.com/ultralytics/ultralytics.git') + + def __call__(self, cfg): + """ + Attempts to add a new event to the events list and send events if the rate limit is reached. + + Args: + cfg (IterableSimpleNamespace): The configuration object containing mode and task information. + """ + if not self.enabled: + # Events disabled, do nothing + return + + # Attempt to add to events + if len(self.events) < 25: # Events list limited to 25 events (drop any events past this) + params = {**self.metadata, **{'task': cfg.task}} + if cfg.mode == 'export': + params['format'] = cfg.format + self.events.append({'name': cfg.mode, 'params': params}) + + # Check rate limit + t = time.time() + if (t - self.t) < self.rate_limit: + # Time is under rate limiter, wait to send + return + + # Time is over rate limiter, send now + data = {'client_id': SETTINGS['uuid'], 'events': self.events} # SHA-256 anonymized UUID hash and events list + + # POST equivalent to requests.post(self.url, json=data) + smart_request('post', self.url, json=data, retry=0, verbose=False) + + # Reset events and rate limit timer + self.events = [] + self.t = t + + +# Run below code on hub/utils init ------------------------------------------------------------------------------------- +events = Events() diff --git a/ultralytics/models/README.md b/ultralytics/models/README.md new file mode 100644 index 0000000..a0edc4b --- /dev/null +++ b/ultralytics/models/README.md @@ -0,0 +1,45 @@ +## Models + +Welcome to the Ultralytics Models directory! Here you will find a wide variety of pre-configured model configuration +files (`*.yaml`s) that can be used to create custom YOLO models. The models in this directory have been expertly crafted +and fine-tuned by the Ultralytics team to provide the best performance for a wide range of object detection and image +segmentation tasks. + +These model configurations cover a wide range of scenarios, from simple object detection to more complex tasks like +instance segmentation and object tracking. They are also designed to run efficiently on a variety of hardware platforms, +from CPUs to GPUs. Whether you are a seasoned machine learning practitioner or just getting started with YOLO, this +directory provides a great starting point for your custom model development needs. 
+ +To get started, simply browse through the models in this directory and find one that best suits your needs. Once you've +selected a model, you can use the provided `*.yaml` file to train and deploy your custom YOLO model with ease. See full +details at the Ultralytics [Docs](https://docs.ultralytics.com/models), and if you need help or have any questions, feel free +to reach out to the Ultralytics team for support. So, don't wait, start creating your custom YOLO model now! + +### Usage + +Model `*.yaml` files may be used directly in the Command Line Interface (CLI) with a `yolo` command: + +```bash +yolo task=detect mode=train model=yolov8n.yaml data=coco128.yaml epochs=100 +``` + +They may also be used directly in a Python environment, and accepts the same +[arguments](https://docs.ultralytics.com/usage/cfg/) as in the CLI example above: + +```python +from ultralytics import YOLO + +model = YOLO("model.yaml") # build a YOLOv8n model from scratch +# YOLO("model.pt") use pre-trained model if available +model.info() # display model information +model.train(data="coco128.yaml", epochs=100) # train the model +``` + +## Pre-trained Model Architectures + +Ultralytics supports many model architectures. Visit https://docs.ultralytics.com/models to view detailed information +and usage. Any of these models can be used by loading their configs or pretrained checkpoints if available. + +## Contributing New Models + +If you've developed a new model architecture or have improvements for existing models that you'd like to contribute to the Ultralytics community, please submit your contribution in a new Pull Request. For more details, visit our [Contributing Guide](https://docs.ultralytics.com/help/contributing). diff --git a/ultralytics/models/rt-detr/rtdetr-l.yaml b/ultralytics/models/rt-detr/rtdetr-l.yaml new file mode 100644 index 0000000..bd20da1 --- /dev/null +++ b/ultralytics/models/rt-detr/rtdetr-l.yaml @@ -0,0 +1,50 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +# RT-DETR-l object detection model with P3-P5 outputs. For details see https://docs.ultralytics.com/models/rtdetr + +# Parameters +nc: 80 # number of classes +scales: # model compound scaling constants, i.e. 
'model=yolov8n-cls.yaml' will call yolov8-cls.yaml with scale 'n' + # [depth, width, max_channels] + l: [1.00, 1.00, 1024] + +backbone: + # [from, repeats, module, args] + - [-1, 1, HGStem, [32, 48]] # 0-P2/4 + - [-1, 6, HGBlock, [48, 128, 3]] # stage 1 + + - [-1, 1, DWConv, [128, 3, 2, 1, False]] # 2-P3/8 + - [-1, 6, HGBlock, [96, 512, 3]] # stage 2 + + - [-1, 1, DWConv, [512, 3, 2, 1, False]] # 4-P3/16 + - [-1, 6, HGBlock, [192, 1024, 5, True, False]] # cm, c2, k, light, shortcut + - [-1, 6, HGBlock, [192, 1024, 5, True, True]] + - [-1, 6, HGBlock, [192, 1024, 5, True, True]] # stage 3 + + - [-1, 1, DWConv, [1024, 3, 2, 1, False]] # 8-P4/32 + - [-1, 6, HGBlock, [384, 2048, 5, True, False]] # stage 4 + +head: + - [-1, 1, Conv, [256, 1, 1, None, 1, 1, False]] # 10 input_proj.2 + - [-1, 1, AIFI, [1024, 8]] + - [-1, 1, Conv, [256, 1, 1]] # 12, Y5, lateral_convs.0 + + - [-1, 1, nn.Upsample, [None, 2, 'nearest']] + - [7, 1, Conv, [256, 1, 1, None, 1, 1, False]] # 14 input_proj.1 + - [[-2, -1], 1, Concat, [1]] + - [-1, 3, RepC3, [256]] # 16, fpn_blocks.0 + - [-1, 1, Conv, [256, 1, 1]] # 17, Y4, lateral_convs.1 + + - [-1, 1, nn.Upsample, [None, 2, 'nearest']] + - [3, 1, Conv, [256, 1, 1, None, 1, 1, False]] # 19 input_proj.0 + - [[-2, -1], 1, Concat, [1]] # cat backbone P4 + - [-1, 3, RepC3, [256]] # X3 (21), fpn_blocks.1 + + - [-1, 1, Conv, [256, 3, 2]] # 22, downsample_convs.0 + - [[-1, 17], 1, Concat, [1]] # cat Y4 + - [-1, 3, RepC3, [256]] # F4 (24), pan_blocks.0 + + - [-1, 1, Conv, [256, 3, 2]] # 25, downsample_convs.1 + - [[-1, 12], 1, Concat, [1]] # cat Y5 + - [-1, 3, RepC3, [256]] # F5 (27), pan_blocks.1 + + - [[21, 24, 27], 1, RTDETRDecoder, [nc]] # Detect(P3, P4, P5) diff --git a/ultralytics/models/rt-detr/rtdetr-x.yaml b/ultralytics/models/rt-detr/rtdetr-x.yaml new file mode 100644 index 0000000..848cb52 --- /dev/null +++ b/ultralytics/models/rt-detr/rtdetr-x.yaml @@ -0,0 +1,54 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +# RT-DETR-x object detection model with P3-P5 outputs. For details see https://docs.ultralytics.com/models/rtdetr + +# Parameters +nc: 80 # number of classes +scales: # model compound scaling constants, i.e. 
'model=yolov8n-cls.yaml' will call yolov8-cls.yaml with scale 'n' + # [depth, width, max_channels] + x: [1.00, 1.00, 2048] + +backbone: + # [from, repeats, module, args] + - [-1, 1, HGStem, [32, 64]] # 0-P2/4 + - [-1, 6, HGBlock, [64, 128, 3]] # stage 1 + + - [-1, 1, DWConv, [128, 3, 2, 1, False]] # 2-P3/8 + - [-1, 6, HGBlock, [128, 512, 3]] + - [-1, 6, HGBlock, [128, 512, 3, False, True]] # 4-stage 2 + + - [-1, 1, DWConv, [512, 3, 2, 1, False]] # 5-P3/16 + - [-1, 6, HGBlock, [256, 1024, 5, True, False]] # cm, c2, k, light, shortcut + - [-1, 6, HGBlock, [256, 1024, 5, True, True]] + - [-1, 6, HGBlock, [256, 1024, 5, True, True]] + - [-1, 6, HGBlock, [256, 1024, 5, True, True]] + - [-1, 6, HGBlock, [256, 1024, 5, True, True]] # 10-stage 3 + + - [-1, 1, DWConv, [1024, 3, 2, 1, False]] # 11-P4/32 + - [-1, 6, HGBlock, [512, 2048, 5, True, False]] + - [-1, 6, HGBlock, [512, 2048, 5, True, True]] # 13-stage 4 + +head: + - [-1, 1, Conv, [384, 1, 1, None, 1, 1, False]] # 14 input_proj.2 + - [-1, 1, AIFI, [2048, 8]] + - [-1, 1, Conv, [384, 1, 1]] # 16, Y5, lateral_convs.0 + + - [-1, 1, nn.Upsample, [None, 2, 'nearest']] + - [10, 1, Conv, [384, 1, 1, None, 1, 1, False]] # 18 input_proj.1 + - [[-2, -1], 1, Concat, [1]] + - [-1, 3, RepC3, [384]] # 20, fpn_blocks.0 + - [-1, 1, Conv, [384, 1, 1]] # 21, Y4, lateral_convs.1 + + - [-1, 1, nn.Upsample, [None, 2, 'nearest']] + - [4, 1, Conv, [384, 1, 1, None, 1, 1, False]] # 23 input_proj.0 + - [[-2, -1], 1, Concat, [1]] # cat backbone P4 + - [-1, 3, RepC3, [384]] # X3 (25), fpn_blocks.1 + + - [-1, 1, Conv, [384, 3, 2]] # 26, downsample_convs.0 + - [[-1, 21], 1, Concat, [1]] # cat Y4 + - [-1, 3, RepC3, [384]] # F4 (28), pan_blocks.0 + + - [-1, 1, Conv, [384, 3, 2]] # 29, downsample_convs.1 + - [[-1, 16], 1, Concat, [1]] # cat Y5 + - [-1, 3, RepC3, [384]] # F5 (31), pan_blocks.1 + + - [[25, 28, 31], 1, RTDETRDecoder, [nc]] # Detect(P3, P4, P5) diff --git a/ultralytics/models/v3/yolov3-spp.yaml b/ultralytics/models/v3/yolov3-spp.yaml new file mode 100644 index 0000000..406e019 --- /dev/null +++ b/ultralytics/models/v3/yolov3-spp.yaml @@ -0,0 +1,48 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +# YOLOv3-SPP object detection model with P3-P5 outputs. 
For details see https://docs.ultralytics.com/models/yolov3 + +# Parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple + +# darknet53 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [32, 3, 1]], # 0 + [-1, 1, Conv, [64, 3, 2]], # 1-P1/2 + [-1, 1, Bottleneck, [64]], + [-1, 1, Conv, [128, 3, 2]], # 3-P2/4 + [-1, 2, Bottleneck, [128]], + [-1, 1, Conv, [256, 3, 2]], # 5-P3/8 + [-1, 8, Bottleneck, [256]], + [-1, 1, Conv, [512, 3, 2]], # 7-P4/16 + [-1, 8, Bottleneck, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 9-P5/32 + [-1, 4, Bottleneck, [1024]], # 10 + ] + +# YOLOv3-SPP head +head: + [[-1, 1, Bottleneck, [1024, False]], + [-1, 1, SPP, [512, [5, 9, 13]]], + [-1, 1, Conv, [1024, 3, 1]], + [-1, 1, Conv, [512, 1, 1]], + [-1, 1, Conv, [1024, 3, 1]], # 15 (P5/32-large) + + [-2, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 8], 1, Concat, [1]], # cat backbone P4 + [-1, 1, Bottleneck, [512, False]], + [-1, 1, Bottleneck, [512, False]], + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, Conv, [512, 3, 1]], # 22 (P4/16-medium) + + [-2, 1, Conv, [128, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P3 + [-1, 1, Bottleneck, [256, False]], + [-1, 2, Bottleneck, [256, False]], # 27 (P3/8-small) + + [[27, 22, 15], 1, Detect, [nc]], # Detect(P3, P4, P5) + ] diff --git a/ultralytics/models/v3/yolov3-tiny.yaml b/ultralytics/models/v3/yolov3-tiny.yaml new file mode 100644 index 0000000..69d8e42 --- /dev/null +++ b/ultralytics/models/v3/yolov3-tiny.yaml @@ -0,0 +1,39 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +# YOLOv3-tiny object detection model with P4-P5 outputs. For details see https://docs.ultralytics.com/models/yolov3 + +# Parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple + +# YOLOv3-tiny backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [16, 3, 1]], # 0 + [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 1-P1/2 + [-1, 1, Conv, [32, 3, 1]], + [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 3-P2/4 + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 5-P3/8 + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 7-P4/16 + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 9-P5/32 + [-1, 1, Conv, [512, 3, 1]], + [-1, 1, nn.ZeroPad2d, [[0, 1, 0, 1]]], # 11 + [-1, 1, nn.MaxPool2d, [2, 1, 0]], # 12 + ] + +# YOLOv3-tiny head +head: + [[-1, 1, Conv, [1024, 3, 1]], + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, Conv, [512, 3, 1]], # 15 (P5/32-large) + + [-2, 1, Conv, [128, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 8], 1, Concat, [1]], # cat backbone P4 + [-1, 1, Conv, [256, 3, 1]], # 19 (P4/16-medium) + + [[19, 15], 1, Detect, [nc]], # Detect(P4, P5) + ] diff --git a/ultralytics/models/v3/yolov3.yaml b/ultralytics/models/v3/yolov3.yaml new file mode 100644 index 0000000..7cc0afa --- /dev/null +++ b/ultralytics/models/v3/yolov3.yaml @@ -0,0 +1,48 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +# YOLOv3 object detection model with P3-P5 outputs. 
For details see https://docs.ultralytics.com/models/yolov3 + +# Parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple + +# darknet53 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [32, 3, 1]], # 0 + [-1, 1, Conv, [64, 3, 2]], # 1-P1/2 + [-1, 1, Bottleneck, [64]], + [-1, 1, Conv, [128, 3, 2]], # 3-P2/4 + [-1, 2, Bottleneck, [128]], + [-1, 1, Conv, [256, 3, 2]], # 5-P3/8 + [-1, 8, Bottleneck, [256]], + [-1, 1, Conv, [512, 3, 2]], # 7-P4/16 + [-1, 8, Bottleneck, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 9-P5/32 + [-1, 4, Bottleneck, [1024]], # 10 + ] + +# YOLOv3 head +head: + [[-1, 1, Bottleneck, [1024, False]], + [-1, 1, Conv, [512, 1, 1]], + [-1, 1, Conv, [1024, 3, 1]], + [-1, 1, Conv, [512, 1, 1]], + [-1, 1, Conv, [1024, 3, 1]], # 15 (P5/32-large) + + [-2, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 8], 1, Concat, [1]], # cat backbone P4 + [-1, 1, Bottleneck, [512, False]], + [-1, 1, Bottleneck, [512, False]], + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, Conv, [512, 3, 1]], # 22 (P4/16-medium) + + [-2, 1, Conv, [128, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P3 + [-1, 1, Bottleneck, [256, False]], + [-1, 2, Bottleneck, [256, False]], # 27 (P3/8-small) + + [[27, 22, 15], 1, Detect, [nc]], # Detect(P3, P4, P5) + ] diff --git a/ultralytics/models/v5/yolov5-p6.yaml b/ultralytics/models/v5/yolov5-p6.yaml new file mode 100644 index 0000000..d468377 --- /dev/null +++ b/ultralytics/models/v5/yolov5-p6.yaml @@ -0,0 +1,61 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +# YOLOv5 object detection model with P3-P6 outputs. For details see https://docs.ultralytics.com/models/yolov5 + +# Parameters +nc: 80 # number of classes +scales: # model compound scaling constants, i.e. 
'model=yolov5n-p6.yaml' will call yolov5-p6.yaml with scale 'n' + # [depth, width, max_channels] + n: [0.33, 0.25, 1024] + s: [0.33, 0.50, 1024] + m: [0.67, 0.75, 1024] + l: [1.00, 1.00, 1024] + x: [1.33, 1.25, 1024] + +# YOLOv5 v6.0 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 6, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 + [-1, 3, C3, [768]], + [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 + [-1, 3, C3, [1024]], + [-1, 1, SPPF, [1024, 5]], # 11 + ] + +# YOLOv5 v6.0 head +head: + [[-1, 1, Conv, [768, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 8], 1, Concat, [1]], # cat backbone P5 + [-1, 3, C3, [768, False]], # 15 + + [-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 19 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 23 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 20], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 26 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 16], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [768, False]], # 29 (P5/32-large) + + [-1, 1, Conv, [768, 3, 2]], + [[-1, 12], 1, Concat, [1]], # cat head P6 + [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge) + + [[23, 26, 29, 32], 1, Detect, [nc]], # Detect(P3, P4, P5, P6) + ] diff --git a/ultralytics/models/v5/yolov5.yaml b/ultralytics/models/v5/yolov5.yaml new file mode 100644 index 0000000..4a3fced --- /dev/null +++ b/ultralytics/models/v5/yolov5.yaml @@ -0,0 +1,50 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +# YOLOv5 object detection model with P3-P5 outputs. For details see https://docs.ultralytics.com/models/yolov5 + +# Parameters +nc: 80 # number of classes +scales: # model compound scaling constants, i.e. 
'model=yolov5n.yaml' will call yolov5.yaml with scale 'n' + # [depth, width, max_channels] + n: [0.33, 0.25, 1024] + s: [0.33, 0.50, 1024] + m: [0.67, 0.75, 1024] + l: [1.00, 1.00, 1024] + x: [1.33, 1.25, 1024] + +# YOLOv5 v6.0 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 6, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 3, C3, [1024]], + [-1, 1, SPPF, [1024, 5]], # 9 + ] + +# YOLOv5 v6.0 head +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 20 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Detect, [nc]], # Detect(P3, P4, P5) + ] diff --git a/ultralytics/models/v6/yolov6.yaml b/ultralytics/models/v6/yolov6.yaml new file mode 100644 index 0000000..cb5e32a --- /dev/null +++ b/ultralytics/models/v6/yolov6.yaml @@ -0,0 +1,53 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +# YOLOv6 object detection model with P3-P5 outputs. For Usage examples see https://docs.ultralytics.com/models/yolov6 + +# Parameters +nc: 80 # number of classes +activation: nn.ReLU() # (optional) model default activation function +scales: # model compound scaling constants, i.e. 'model=yolov6n.yaml' will call yolov8.yaml with scale 'n' + # [depth, width, max_channels] + n: [0.33, 0.25, 1024] + s: [0.33, 0.50, 1024] + m: [0.67, 0.75, 768] + l: [1.00, 1.00, 512] + x: [1.00, 1.25, 512] + +# YOLOv6-3.0s backbone +backbone: + # [from, repeats, module, args] + - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2 + - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4 + - [-1, 6, Conv, [128, 3, 1]] + - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8 + - [-1, 12, Conv, [256, 3, 1]] + - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16 + - [-1, 18, Conv, [512, 3, 1]] + - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32 + - [-1, 6, Conv, [1024, 3, 1]] + - [-1, 1, SPPF, [1024, 5]] # 9 + +# YOLOv6-3.0s head +head: + - [-1, 1, Conv, [256, 1, 1]] + - [-1, 1, nn.ConvTranspose2d, [256, 2, 2, 0]] + - [[-1, 6], 1, Concat, [1]] # cat backbone P4 + - [-1, 1, Conv, [256, 3, 1]] + - [-1, 9, Conv, [256, 3, 1]] # 14 + + - [-1, 1, Conv, [128, 1, 1]] + - [-1, 1, nn.ConvTranspose2d, [128, 2, 2, 0]] + - [[-1, 4], 1, Concat, [1]] # cat backbone P3 + - [-1, 1, Conv, [128, 3, 1]] + - [-1, 9, Conv, [128, 3, 1]] # 19 + + - [-1, 1, Conv, [128, 3, 2]] + - [[-1, 15], 1, Concat, [1]] # cat head P4 + - [-1, 1, Conv, [256, 3, 1]] + - [-1, 9, Conv, [256, 3, 1]] # 23 + + - [-1, 1, Conv, [256, 3, 2]] + - [[-1, 10], 1, Concat, [1]] # cat head P5 + - [-1, 1, Conv, [512, 3, 1]] + - [-1, 9, Conv, [512, 3, 1]] # 27 + + - [[19, 23, 27], 1, Detect, [nc]] # Detect(P3, P4, P5) diff --git a/ultralytics/models/v8/yolov8-cls.yaml b/ultralytics/models/v8/yolov8-cls.yaml new file mode 100644 index 0000000..5332f1d --- /dev/null +++ b/ultralytics/models/v8/yolov8-cls.yaml @@ -0,0 +1,29 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +# YOLOv8-cls image classification model. 
For Usage examples see https://docs.ultralytics.com/tasks/classify + +# Parameters +nc: 1000 # number of classes +scales: # model compound scaling constants, i.e. 'model=yolov8n-cls.yaml' will call yolov8-cls.yaml with scale 'n' + # [depth, width, max_channels] + n: [0.33, 0.25, 1024] + s: [0.33, 0.50, 1024] + m: [0.67, 0.75, 1024] + l: [1.00, 1.00, 1024] + x: [1.00, 1.25, 1024] + +# YOLOv8.0n backbone +backbone: + # [from, repeats, module, args] + - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2 + - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4 + - [-1, 3, C2f, [128, True]] + - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8 + - [-1, 6, C2f, [256, True]] + - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16 + - [-1, 6, C2f, [512, True]] + - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32 + - [-1, 3, C2f, [1024, True]] + +# YOLOv8.0n head +head: + - [-1, 1, Classify, [nc]] # Classify diff --git a/ultralytics/models/v8/yolov8-lite-s-pose.yaml b/ultralytics/models/v8/yolov8-lite-s-pose.yaml new file mode 100644 index 0000000..e2157ff --- /dev/null +++ b/ultralytics/models/v8/yolov8-lite-s-pose.yaml @@ -0,0 +1,44 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license +# YOLOv8-pose keypoints/pose estimation model. For Usage examples see https://docs.ultralytics.com/tasks/pose + +# Parameters +nc: 1 # number of classes +kpt_shape: [5, 3] # number of keypoints, number of dims (2 for x,y or 3 for x,y,visible) +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple + +# custom backbone +backbone: + # [from, number, module, args] + [ [ -1, 1, StemBlock, [32, 3, 2] ], # 0-P2/4 + [ -1, 1, Shuffle_Block, [96, 2]], # 1-P3/8 + [ -1, 3, Shuffle_Block, [96, 1]], # 2 + [ -1, 1, Shuffle_Block, [192, 2]], # 3-P4/16 + [ -1, 7, Shuffle_Block, [192, 1]], # 4 + [ -1, 1, Shuffle_Block, [384, 2]], # 5-P5/32 + [ -1, 3, Shuffle_Block, [384, 1]], # 6 + [ -1, 1, SPPF, [384, 5]], + ] + +# v5lite-e head +head: + [ [ -1, 1, Conv, [96, 1, 1]], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest']], + [[ -1, 4], 1, Concat, [1]], # cat backbone P4 + [ -1, 1, DWConvblock, [96, 3, 1]], # 11 + + [ -1, 1, Conv, [96, 1, 1]], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest']], + [[ -1, 2], 1, Concat, [1]], # cat backbone P3 + [ -1, 1, DWConvblock, [96, 3, 1] ], # 15 (P3/8-small) + + [-1, 1, DWConvblock, [96, 3, 2]], + [[ -1, 12], 1, ADD, [1]], # cat head P4 + [ -1, 1, DWConvblock, [96, 3, 1]], # 18 (P4/16-medium) + + [ -1, 1, DWConvblock, [96, 3, 2]], + [[ -1, 8], 1, ADD, [1]], # cat head P5 + [ -1, 1, DWConvblock, [96, 3, 1]], # 21 (P5/32-large) + + [[ 15, 18, 21], 1, Pose, [nc, kpt_shape]], # Detect(P3, P4, P5) + ] diff --git a/ultralytics/models/v8/yolov8-lite-t-pose.yaml b/ultralytics/models/v8/yolov8-lite-t-pose.yaml new file mode 100644 index 0000000..59abec5 --- /dev/null +++ b/ultralytics/models/v8/yolov8-lite-t-pose.yaml @@ -0,0 +1,44 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license +# YOLOv8-pose keypoints/pose estimation model. 
For Usage examples see https://docs.ultralytics.com/tasks/pose + +# Parameters +nc: 2 # number of classes +kpt_shape: [4, 2] # number of keypoints, number of dims (2 for x,y or 3 for x,y,visible) +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple + +# custom backbone +# custom backbone +backbone: + # [from, number, module, args] + [ [ -1, 1, StemBlock, [16, 3, 2] ], # 0-P2/4 + [ -1, 1, Shuffle_Block, [48, 2]], # 1-P3/8 + [ -1, 2, Shuffle_Block, [48, 1]], # 2 + [ -1, 1, Shuffle_Block, [96, 2]], # 3-P4/16 + [ -1, 5, Shuffle_Block, [96, 1]], # 4 + [ -1, 1, Shuffle_Block, [192, 2]], # 5-P5/32 + [ -1, 2, Shuffle_Block, [192, 1]], # 6 + [ -1, 1, SPPF, [192, 5]], + ] + +# v5lite-e head +head: + [ [ -1, 1, Conv, [48, 1, 1]], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest']], + [[ -1, 4], 1, Concat, [1]], # cat backbone P4 + [ -1, 1, DWConvblock, [48, 3, 1]], # 11 + + [ -1, 1, Conv, [48, 1, 1]], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest']], + [[ -1, 2], 1, Concat, [1]], # cat backbone P3 + [ -1, 1, DWConvblock, [48, 3, 1] ], # 15 (P3/8-small) + + [-1, 1, DWConvblock, [48, 3, 2]], + [[ -1, 12], 1, ADD, [1]], # cat head P4 + [ -1, 1, DWConvblock, [48, 3, 1]], # 18 (P4/16-medium) + + [ -1, 1, DWConvblock, [48, 3, 2]], + [[ -1, 8], 1, ADD, [1]], # cat head P5 + [ -1, 1, DWConvblock, [48, 3, 1]], # 21 (P5/32-large) + [[ 15, 18, 21], 1, Pose, [nc, kpt_shape]], # Detect(P3, P4, P5) + ] diff --git a/ultralytics/models/v8/yolov8-p2.yaml b/ultralytics/models/v8/yolov8-p2.yaml new file mode 100644 index 0000000..3e286aa --- /dev/null +++ b/ultralytics/models/v8/yolov8-p2.yaml @@ -0,0 +1,54 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +# YOLOv8 object detection model with P2-P5 outputs. For Usage examples see https://docs.ultralytics.com/tasks/detect + +# Parameters +nc: 80 # number of classes +scales: # model compound scaling constants, i.e. 
'model=yolov8n.yaml' will call yolov8.yaml with scale 'n' + # [depth, width, max_channels] + n: [0.33, 0.25, 1024] + s: [0.33, 0.50, 1024] + m: [0.67, 0.75, 768] + l: [1.00, 1.00, 512] + x: [1.00, 1.25, 512] + +# YOLOv8.0 backbone +backbone: + # [from, repeats, module, args] + - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2 + - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4 + - [-1, 3, C2f, [128, True]] + - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8 + - [-1, 6, C2f, [256, True]] + - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16 + - [-1, 6, C2f, [512, True]] + - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32 + - [-1, 3, C2f, [1024, True]] + - [-1, 1, SPPF, [1024, 5]] # 9 + +# YOLOv8.0-p2 head +head: + - [-1, 1, nn.Upsample, [None, 2, 'nearest']] + - [[-1, 6], 1, Concat, [1]] # cat backbone P4 + - [-1, 3, C2f, [512]] # 12 + + - [-1, 1, nn.Upsample, [None, 2, 'nearest']] + - [[-1, 4], 1, Concat, [1]] # cat backbone P3 + - [-1, 3, C2f, [256]] # 15 (P3/8-small) + + - [-1, 1, nn.Upsample, [None, 2, 'nearest']] + - [[-1, 2], 1, Concat, [1]] # cat backbone P2 + - [-1, 3, C2f, [128]] # 18 (P2/4-xsmall) + + - [-1, 1, Conv, [128, 3, 2]] + - [[-1, 15], 1, Concat, [1]] # cat head P3 + - [-1, 3, C2f, [256]] # 21 (P3/8-small) + + - [-1, 1, Conv, [256, 3, 2]] + - [[-1, 12], 1, Concat, [1]] # cat head P4 + - [-1, 3, C2f, [512]] # 24 (P4/16-medium) + + - [-1, 1, Conv, [512, 3, 2]] + - [[-1, 9], 1, Concat, [1]] # cat head P5 + - [-1, 3, C2f, [1024]] # 27 (P5/32-large) + + - [[18, 21, 24, 27], 1, Detect, [nc]] # Detect(P2, P3, P4, P5) diff --git a/ultralytics/models/v8/yolov8-p6.yaml b/ultralytics/models/v8/yolov8-p6.yaml new file mode 100644 index 0000000..3635ed9 --- /dev/null +++ b/ultralytics/models/v8/yolov8-p6.yaml @@ -0,0 +1,56 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +# YOLOv8 object detection model with P3-P6 outputs. For Usage examples see https://docs.ultralytics.com/tasks/detect + +# Parameters +nc: 80 # number of classes +scales: # model compound scaling constants, i.e. 
'model=yolov8n-p6.yaml' will call yolov8-p6.yaml with scale 'n' + # [depth, width, max_channels] + n: [0.33, 0.25, 1024] + s: [0.33, 0.50, 1024] + m: [0.67, 0.75, 768] + l: [1.00, 1.00, 512] + x: [1.00, 1.25, 512] + +# YOLOv8.0x6 backbone +backbone: + # [from, repeats, module, args] + - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2 + - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4 + - [-1, 3, C2f, [128, True]] + - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8 + - [-1, 6, C2f, [256, True]] + - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16 + - [-1, 6, C2f, [512, True]] + - [-1, 1, Conv, [768, 3, 2]] # 7-P5/32 + - [-1, 3, C2f, [768, True]] + - [-1, 1, Conv, [1024, 3, 2]] # 9-P6/64 + - [-1, 3, C2f, [1024, True]] + - [-1, 1, SPPF, [1024, 5]] # 11 + +# YOLOv8.0x6 head +head: + - [-1, 1, nn.Upsample, [None, 2, 'nearest']] + - [[-1, 8], 1, Concat, [1]] # cat backbone P5 + - [-1, 3, C2, [768, False]] # 14 + + - [-1, 1, nn.Upsample, [None, 2, 'nearest']] + - [[-1, 6], 1, Concat, [1]] # cat backbone P4 + - [-1, 3, C2, [512, False]] # 17 + + - [-1, 1, nn.Upsample, [None, 2, 'nearest']] + - [[-1, 4], 1, Concat, [1]] # cat backbone P3 + - [-1, 3, C2, [256, False]] # 20 (P3/8-small) + + - [-1, 1, Conv, [256, 3, 2]] + - [[-1, 17], 1, Concat, [1]] # cat head P4 + - [-1, 3, C2, [512, False]] # 23 (P4/16-medium) + + - [-1, 1, Conv, [512, 3, 2]] + - [[-1, 14], 1, Concat, [1]] # cat head P5 + - [-1, 3, C2, [768, False]] # 26 (P5/32-large) + + - [-1, 1, Conv, [768, 3, 2]] + - [[-1, 11], 1, Concat, [1]] # cat head P6 + - [-1, 3, C2, [1024, False]] # 29 (P6/64-xlarge) + + - [[20, 23, 26, 29], 1, Detect, [nc]] # Detect(P3, P4, P5, P6) diff --git a/ultralytics/models/v8/yolov8-pose-p6.yaml b/ultralytics/models/v8/yolov8-pose-p6.yaml new file mode 100644 index 0000000..06381fb --- /dev/null +++ b/ultralytics/models/v8/yolov8-pose-p6.yaml @@ -0,0 +1,57 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +# YOLOv8-pose keypoints/pose estimation model. For Usage examples see https://docs.ultralytics.com/tasks/pose + +# Parameters +nc: 1 # number of classes +kpt_shape: [17, 3] # number of keypoints, number of dims (2 for x,y or 3 for x,y,visible) +scales: # model compound scaling constants, i.e. 
'model=yolov8n-p6.yaml' will call yolov8-p6.yaml with scale 'n' + # [depth, width, max_channels] + n: [0.33, 0.25, 1024] + s: [0.33, 0.50, 1024] + m: [0.67, 0.75, 768] + l: [1.00, 1.00, 512] + x: [1.00, 1.25, 512] + +# YOLOv8.0x6 backbone +backbone: + # [from, repeats, module, args] + - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2 + - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4 + - [-1, 3, C2f, [128, True]] + - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8 + - [-1, 6, C2f, [256, True]] + - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16 + - [-1, 6, C2f, [512, True]] + - [-1, 1, Conv, [768, 3, 2]] # 7-P5/32 + - [-1, 3, C2f, [768, True]] + - [-1, 1, Conv, [1024, 3, 2]] # 9-P6/64 + - [-1, 3, C2f, [1024, True]] + - [-1, 1, SPPF, [1024, 5]] # 11 + +# YOLOv8.0x6 head +head: + - [-1, 1, nn.Upsample, [None, 2, 'nearest']] + - [[-1, 8], 1, Concat, [1]] # cat backbone P5 + - [-1, 3, C2, [768, False]] # 14 + + - [-1, 1, nn.Upsample, [None, 2, 'nearest']] + - [[-1, 6], 1, Concat, [1]] # cat backbone P4 + - [-1, 3, C2, [512, False]] # 17 + + - [-1, 1, nn.Upsample, [None, 2, 'nearest']] + - [[-1, 4], 1, Concat, [1]] # cat backbone P3 + - [-1, 3, C2, [256, False]] # 20 (P3/8-small) + + - [-1, 1, Conv, [256, 3, 2]] + - [[-1, 17], 1, Concat, [1]] # cat head P4 + - [-1, 3, C2, [512, False]] # 23 (P4/16-medium) + + - [-1, 1, Conv, [512, 3, 2]] + - [[-1, 14], 1, Concat, [1]] # cat head P5 + - [-1, 3, C2, [768, False]] # 26 (P5/32-large) + + - [-1, 1, Conv, [768, 3, 2]] + - [[-1, 11], 1, Concat, [1]] # cat head P6 + - [-1, 3, C2, [1024, False]] # 29 (P6/64-xlarge) + + - [[20, 23, 26, 29], 1, Pose, [nc, kpt_shape]] # Pose(P3, P4, P5, P6) diff --git a/ultralytics/models/v8/yolov8-pose.yaml b/ultralytics/models/v8/yolov8-pose.yaml new file mode 100644 index 0000000..9f48e1e --- /dev/null +++ b/ultralytics/models/v8/yolov8-pose.yaml @@ -0,0 +1,47 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +# YOLOv8-pose keypoints/pose estimation model. For Usage examples see https://docs.ultralytics.com/tasks/pose + +# Parameters +nc: 1 # number of classes +kpt_shape: [17, 3] # number of keypoints, number of dims (2 for x,y or 3 for x,y,visible) +scales: # model compound scaling constants, i.e. 
'model=yolov8n-pose.yaml' will call yolov8-pose.yaml with scale 'n' + # [depth, width, max_channels] + n: [0.33, 0.25, 1024] + s: [0.33, 0.50, 1024] + m: [0.67, 0.75, 768] + l: [1.00, 1.00, 512] + x: [1.00, 1.25, 512] + +# YOLOv8.0n backbone +backbone: + # [from, repeats, module, args] + - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2 + - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4 + - [-1, 3, C2f, [128, True]] + - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8 + - [-1, 6, C2f, [256, True]] + - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16 + - [-1, 6, C2f, [512, True]] + - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32 + - [-1, 3, C2f, [1024, True]] + - [-1, 1, SPPF, [1024, 5]] # 9 + +# YOLOv8.0n head +head: + - [-1, 1, nn.Upsample, [None, 2, 'nearest']] + - [[-1, 6], 1, Concat, [1]] # cat backbone P4 + - [-1, 3, C2f, [512]] # 12 + + - [-1, 1, nn.Upsample, [None, 2, 'nearest']] + - [[-1, 4], 1, Concat, [1]] # cat backbone P3 + - [-1, 3, C2f, [256]] # 15 (P3/8-small) + + - [-1, 1, Conv, [256, 3, 2]] + - [[-1, 12], 1, Concat, [1]] # cat head P4 + - [-1, 3, C2f, [512]] # 18 (P4/16-medium) + + - [-1, 1, Conv, [512, 3, 2]] + - [[-1, 9], 1, Concat, [1]] # cat head P5 + - [-1, 3, C2f, [1024]] # 21 (P5/32-large) + + - [[15, 18, 21], 1, Pose, [nc, kpt_shape]] # Pose(P3, P4, P5) diff --git a/ultralytics/models/v8/yolov8-rtdetr.yaml b/ultralytics/models/v8/yolov8-rtdetr.yaml new file mode 100644 index 0000000..a058106 --- /dev/null +++ b/ultralytics/models/v8/yolov8-rtdetr.yaml @@ -0,0 +1,46 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +# YOLOv8 object detection model with P3-P5 outputs. For Usage examples see https://docs.ultralytics.com/tasks/detect + +# Parameters +nc: 80 # number of classes +scales: # model compound scaling constants, i.e. 'model=yolov8n.yaml' will call yolov8.yaml with scale 'n' + # [depth, width, max_channels] + n: [0.33, 0.25, 1024] # YOLOv8n summary: 225 layers, 3157200 parameters, 3157184 gradients, 8.9 GFLOPs + s: [0.33, 0.50, 1024] # YOLOv8s summary: 225 layers, 11166560 parameters, 11166544 gradients, 28.8 GFLOPs + m: [0.67, 0.75, 768] # YOLOv8m summary: 295 layers, 25902640 parameters, 25902624 gradients, 79.3 GFLOPs + l: [1.00, 1.00, 512] # YOLOv8l summary: 365 layers, 43691520 parameters, 43691504 gradients, 165.7 GFLOPs + x: [1.00, 1.25, 512] # YOLOv8x summary: 365 layers, 68229648 parameters, 68229632 gradients, 258.5 GFLOPs + +# YOLOv8.0n backbone +backbone: + # [from, repeats, module, args] + - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2 + - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4 + - [-1, 3, C2f, [128, True]] + - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8 + - [-1, 6, C2f, [256, True]] + - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16 + - [-1, 6, C2f, [512, True]] + - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32 + - [-1, 3, C2f, [1024, True]] + - [-1, 1, SPPF, [1024, 5]] # 9 + +# YOLOv8.0n head +head: + - [-1, 1, nn.Upsample, [None, 2, 'nearest']] + - [[-1, 6], 1, Concat, [1]] # cat backbone P4 + - [-1, 3, C2f, [512]] # 12 + + - [-1, 1, nn.Upsample, [None, 2, 'nearest']] + - [[-1, 4], 1, Concat, [1]] # cat backbone P3 + - [-1, 3, C2f, [256]] # 15 (P3/8-small) + + - [-1, 1, Conv, [256, 3, 2]] + - [[-1, 12], 1, Concat, [1]] # cat head P4 + - [-1, 3, C2f, [512]] # 18 (P4/16-medium) + + - [-1, 1, Conv, [512, 3, 2]] + - [[-1, 9], 1, Concat, [1]] # cat head P5 + - [-1, 3, C2f, [1024]] # 21 (P5/32-large) + + - [[15, 18, 21], 1, RTDETRDecoder, [nc]] # Detect(P3, P4, P5) diff --git a/ultralytics/models/v8/yolov8-seg.yaml b/ultralytics/models/v8/yolov8-seg.yaml new file mode 100644 index 0000000..fbb08fc --- /dev/null +++ 
b/ultralytics/models/v8/yolov8-seg.yaml @@ -0,0 +1,46 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +# YOLOv8-seg instance segmentation model. For Usage examples see https://docs.ultralytics.com/tasks/segment + +# Parameters +nc: 80 # number of classes +scales: # model compound scaling constants, i.e. 'model=yolov8n-seg.yaml' will call yolov8-seg.yaml with scale 'n' + # [depth, width, max_channels] + n: [0.33, 0.25, 1024] + s: [0.33, 0.50, 1024] + m: [0.67, 0.75, 768] + l: [1.00, 1.00, 512] + x: [1.00, 1.25, 512] + +# YOLOv8.0n backbone +backbone: + # [from, repeats, module, args] + - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2 + - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4 + - [-1, 3, C2f, [128, True]] + - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8 + - [-1, 6, C2f, [256, True]] + - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16 + - [-1, 6, C2f, [512, True]] + - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32 + - [-1, 3, C2f, [1024, True]] + - [-1, 1, SPPF, [1024, 5]] # 9 + +# YOLOv8.0n head +head: + - [-1, 1, nn.Upsample, [None, 2, 'nearest']] + - [[-1, 6], 1, Concat, [1]] # cat backbone P4 + - [-1, 3, C2f, [512]] # 12 + + - [-1, 1, nn.Upsample, [None, 2, 'nearest']] + - [[-1, 4], 1, Concat, [1]] # cat backbone P3 + - [-1, 3, C2f, [256]] # 15 (P3/8-small) + + - [-1, 1, Conv, [256, 3, 2]] + - [[-1, 12], 1, Concat, [1]] # cat head P4 + - [-1, 3, C2f, [512]] # 18 (P4/16-medium) + + - [-1, 1, Conv, [512, 3, 2]] + - [[-1, 9], 1, Concat, [1]] # cat head P5 + - [-1, 3, C2f, [1024]] # 21 (P5/32-large) + + - [[15, 18, 21], 1, Segment, [nc, 32, 256]] # Segment(P3, P4, P5) diff --git a/ultralytics/models/v8/yolov8-tiny-pose.yaml b/ultralytics/models/v8/yolov8-tiny-pose.yaml new file mode 100644 index 0000000..6fb78c2 --- /dev/null +++ b/ultralytics/models/v8/yolov8-tiny-pose.yaml @@ -0,0 +1,110 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license +# YOLOv8-pose keypoints/pose estimation model. 
For Usage examples see https://docs.ultralytics.com/tasks/pose + +# Parameters +nc: 1 # number of classes +kpt_shape: [5, 3] # number of keypoints, number of dims (2 for x,y or 3 for x,y,visible) +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple +activation: nn.ReLU() +# yolov7-tiny backbone +backbone: + # [from, number, module, args] c2, k=1, s=1, p=None, g=1, act=True + [[-1, 1, Conv, [32, 3, 2, None, 1]], # 0-P1/2 + + [-1, 1, Conv, [64, 3, 2, None, 1]], # 1-P2/4 + + [-1, 1, Conv, [32, 1, 1, None, 1]], + [-2, 1, Conv, [32, 1, 1, None, 1]], + [-1, 1, Conv, [32, 3, 1, None, 1]], + [-1, 1, Conv, [32, 3, 1, None, 1]], + [[-1, -2, -3, -4], 1, Concat, [1]], + [-1, 1, Conv, [64, 1, 1, None, 1]], # 7 + + [-1, 1, MP, []], # 8-P3/8 + [-1, 1, Conv, [64, 1, 1, None, 1]], + [-2, 1, Conv, [64, 1, 1, None, 1]], + [-1, 1, Conv, [64, 3, 1, None, 1]], + [-1, 1, Conv, [64, 3, 1, None, 1]], + [[-1, -2, -3, -4], 1, Concat, [1]], + [-1, 1, Conv, [128, 1, 1, None, 1]], # 14 + + [-1, 1, MP, []], # 15-P4/16 + [-1, 1, Conv, [128, 1, 1, None, 1]], + [-2, 1, Conv, [128, 1, 1, None, 1]], + [-1, 1, Conv, [128, 3, 1, None, 1]], + [-1, 1, Conv, [128, 3, 1, None, 1]], + [[-1, -2, -3, -4], 1, Concat, [1]], + [-1, 1, Conv, [256, 1, 1, None, 1]], # 21 + + [-1, 1, MP, []], # 22-P5/32 + [-1, 1, Conv, [256, 1, 1, None, 1]], + [-2, 1, Conv, [256, 1, 1, None, 1]], + [-1, 1, Conv, [256, 3, 1, None, 1]], + [-1, 1, Conv, [256, 3, 1, None, 1]], + [[-1, -2, -3, -4], 1, Concat, [1]], + [-1, 1, Conv, [512, 1, 1, None, 1]], # 28 + ] + +# yolov7-tiny head +head: + [[-1, 1, Conv, [256, 1, 1, None, 1]], + [-2, 1, Conv, [256, 1, 1, None, 1]], + [-1, 1, SPF, [5]], + [-2, 1, SPF, [9]], + [-3, 1, SPF, [13]], + [[-1, -2, -3, -4], 1, Concat, [1]], + [-1, 1, Conv, [256, 1, 1, None, 1]], + [[-1, -7], 1, Concat, [1]], + [-1, 1, Conv, [256, 1, 1, None, 1]], # 37 + + [-1, 1, Conv, [128, 1, 1, None, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [21, 1, Conv, [128, 1, 1, None, 1]], # route backbone P4 + [[-1, -2], 1, Concat, [1]], + + [-1, 1, Conv, [64, 1, 1, None, 1]], + [-2, 1, Conv, [64, 1, 1, None, 1]], + [-1, 1, Conv, [64, 3, 1, None, 1]], + [-1, 1, Conv, [64, 3, 1, None, 1]], + [[-1, -2, -3, -4], 1, Concat, [1]], + [-1, 1, Conv, [128, 1, 1, None, 1]], # 47 + + [-1, 1, Conv, [64, 1, 1, None, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [14, 1, Conv, [64, 1, 1, None, 1]], # route backbone P3 + [[-1, -2], 1, Concat, [1]], + + [-1, 1, Conv, [32, 1, 1, None, 1]], + [-2, 1, Conv, [32, 1, 1, None, 1]], + [-1, 1, Conv, [32, 3, 1, None, 1]], + [-1, 1, Conv, [32, 3, 1, None, 1]], + [[-1, -2, -3, -4], 1, Concat, [1]], + [-1, 1, Conv, [64, 1, 1, None, 1]], # 57 + + [-1, 1, Conv, [128, 3, 2, None, 1]], + [[-1, 47], 1, Concat, [1]], + + [-1, 1, Conv, [64, 1, 1, None, 1]], + [-2, 1, Conv, [64, 1, 1, None, 1]], + [-1, 1, Conv, [64, 3, 1, None, 1]], + [-1, 1, Conv, [64, 3, 1, None, 1]], + [[-1, -2, -3, -4], 1, Concat, [1]], + [-1, 1, Conv, [128, 1, 1, None, 1]], # 65 + + [-1, 1, Conv, [256, 3, 2, None, 1]], + [[-1, 37], 1, Concat, [1]], + + [-1, 1, Conv, [128, 1, 1, None, 1]], + [-2, 1, Conv, [128, 1, 1, None, 1]], + [-1, 1, Conv, [128, 3, 1, None, 1]], + [-1, 1, Conv, [128, 3, 1, None, 1]], + [[-1, -2, -3, -4], 1, Concat, [1]], + [-1, 1, Conv, [256, 1, 1, None, 1]], # 73 + + [57, 1, Conv, [128, 3, 1, None, 1]], + [65, 1, Conv, [256, 3, 1, None, 1]], + [73, 1, Conv, [512, 3, 1, None, 1]], + + [[74,75,76], 1, Pose, [nc, kpt_shape]], # Detect(P3, P4, P5) + ] diff --git a/ultralytics/models/v8/yolov8-tiny.yaml 
b/ultralytics/models/v8/yolov8-tiny.yaml new file mode 100644 index 0000000..3259f85 --- /dev/null +++ b/ultralytics/models/v8/yolov8-tiny.yaml @@ -0,0 +1,109 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license +# YOLOv8-pose keypoints/pose estimation model. For Usage examples see https://docs.ultralytics.com/tasks/pose + +# Parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple + +# yolov7-tiny backbone +backbone: + # [from, number, module, args] c2, k=1, s=1, p=None, g=1, act=True + [[-1, 1, Conv, [32, 3, 2, None, 1]], # 0-P1/2 + + [-1, 1, Conv, [64, 3, 2, None, 1]], # 1-P2/4 + + [-1, 1, Conv, [32, 1, 1, None, 1]], + [-2, 1, Conv, [32, 1, 1, None, 1]], + [-1, 1, Conv, [32, 3, 1, None, 1]], + [-1, 1, Conv, [32, 3, 1, None, 1]], + [[-1, -2, -3, -4], 1, Concat, [1]], + [-1, 1, Conv, [64, 1, 1, None, 1]], # 7 + + [-1, 1, MP, []], # 8-P3/8 + [-1, 1, Conv, [64, 1, 1, None, 1]], + [-2, 1, Conv, [64, 1, 1, None, 1]], + [-1, 1, Conv, [64, 3, 1, None, 1]], + [-1, 1, Conv, [64, 3, 1, None, 1]], + [[-1, -2, -3, -4], 1, Concat, [1]], + [-1, 1, Conv, [128, 1, 1, None, 1]], # 14 + + [-1, 1, MP, []], # 15-P4/16 + [-1, 1, Conv, [128, 1, 1, None, 1]], + [-2, 1, Conv, [128, 1, 1, None, 1]], + [-1, 1, Conv, [128, 3, 1, None, 1]], + [-1, 1, Conv, [128, 3, 1, None, 1]], + [[-1, -2, -3, -4], 1, Concat, [1]], + [-1, 1, Conv, [256, 1, 1, None, 1]], # 21 + + [-1, 1, MP, []], # 22-P5/32 + [-1, 1, Conv, [256, 1, 1, None, 1]], + [-2, 1, Conv, [256, 1, 1, None, 1]], + [-1, 1, Conv, [256, 3, 1, None, 1]], + [-1, 1, Conv, [256, 3, 1, None, 1]], + [[-1, -2, -3, -4], 1, Concat, [1]], + [-1, 1, Conv, [512, 1, 1, None, 1]], # 28 + ] + +# yolov7-tiny head +head: + [[-1, 1, Conv, [256, 1, 1, None, 1]], + [-2, 1, Conv, [256, 1, 1, None, 1]], + [-1, 1, SPF, [5]], + [-2, 1, SPF, [9]], + [-3, 1, SPF, [13]], + [[-1, -2, -3, -4], 1, Concat, [1]], + [-1, 1, Conv, [256, 1, 1, None, 1]], + [[-1, -7], 1, Concat, [1]], + [-1, 1, Conv, [256, 1, 1, None, 1]], # 37 + + [-1, 1, Conv, [128, 1, 1, None, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [21, 1, Conv, [128, 1, 1, None, 1]], # route backbone P4 + [[-1, -2], 1, Concat, [1]], + + [-1, 1, Conv, [64, 1, 1, None, 1]], + [-2, 1, Conv, [64, 1, 1, None, 1]], + [-1, 1, Conv, [64, 3, 1, None, 1]], + [-1, 1, Conv, [64, 3, 1, None, 1]], + [[-1, -2, -3, -4], 1, Concat, [1]], + [-1, 1, Conv, [128, 1, 1, None, 1]], # 47 + + [-1, 1, Conv, [64, 1, 1, None, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [14, 1, Conv, [64, 1, 1, None, 1]], # route backbone P3 + [[-1, -2], 1, Concat, [1]], + + [-1, 1, Conv, [32, 1, 1, None, 1]], + [-2, 1, Conv, [32, 1, 1, None, 1]], + [-1, 1, Conv, [32, 3, 1, None, 1]], + [-1, 1, Conv, [32, 3, 1, None, 1]], + [[-1, -2, -3, -4], 1, Concat, [1]], + [-1, 1, Conv, [64, 1, 1, None, 1]], # 57 + + [-1, 1, Conv, [128, 3, 2, None, 1]], + [[-1, 47], 1, Concat, [1]], + + [-1, 1, Conv, [64, 1, 1, None, 1]], + [-2, 1, Conv, [64, 1, 1, None, 1]], + [-1, 1, Conv, [64, 3, 1, None, 1]], + [-1, 1, Conv, [64, 3, 1, None, 1]], + [[-1, -2, -3, -4], 1, Concat, [1]], + [-1, 1, Conv, [128, 1, 1, None, 1]], # 65 + + [-1, 1, Conv, [256, 3, 2, None, 1]], + [[-1, 37], 1, Concat, [1]], + + [-1, 1, Conv, [128, 1, 1, None, 1]], + [-2, 1, Conv, [128, 1, 1, None, 1]], + [-1, 1, Conv, [128, 3, 1, None, 1]], + [-1, 1, Conv, [128, 3, 1, None, 1]], + [[-1, -2, -3, -4], 1, Concat, [1]], + [-1, 1, Conv, [256, 1, 1, None, 1]], # 73 + + [57, 1, Conv, [128, 3, 1, None, 1]], + [65, 1, Conv, [256, 3, 1, None, 1]], + [73, 1, Conv, [512, 
3, 1, None, 1]], + + [[74,75,76], 1, Detect, [nc]], # Detect(P3, P4, P5) + ] diff --git a/ultralytics/models/v8/yolov8.yaml b/ultralytics/models/v8/yolov8.yaml new file mode 100644 index 0000000..2255450 --- /dev/null +++ b/ultralytics/models/v8/yolov8.yaml @@ -0,0 +1,46 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +# YOLOv8 object detection model with P3-P5 outputs. For Usage examples see https://docs.ultralytics.com/tasks/detect + +# Parameters +nc: 80 # number of classes +scales: # model compound scaling constants, i.e. 'model=yolov8n.yaml' will call yolov8.yaml with scale 'n' + # [depth, width, max_channels] + n: [0.33, 0.25, 1024] # YOLOv8n summary: 225 layers, 3157200 parameters, 3157184 gradients, 8.9 GFLOPs + s: [0.33, 0.50, 1024] # YOLOv8s summary: 225 layers, 11166560 parameters, 11166544 gradients, 28.8 GFLOPs + m: [0.67, 0.75, 768] # YOLOv8m summary: 295 layers, 25902640 parameters, 25902624 gradients, 79.3 GFLOPs + l: [1.00, 1.00, 512] # YOLOv8l summary: 365 layers, 43691520 parameters, 43691504 gradients, 165.7 GFLOPs + x: [1.00, 1.25, 512] # YOLOv8x summary: 365 layers, 68229648 parameters, 68229632 gradients, 258.5 GFLOPs + +# YOLOv8.0n backbone +backbone: + # [from, repeats, module, args] + - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2 + - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4 + - [-1, 3, C2f, [128, True]] + - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8 + - [-1, 6, C2f, [256, True]] + - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16 + - [-1, 6, C2f, [512, True]] + - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32 + - [-1, 3, C2f, [1024, True]] + - [-1, 1, SPPF, [1024, 5]] # 9 + +# YOLOv8.0n head +head: + - [-1, 1, nn.Upsample, [None, 2, 'nearest']] + - [[-1, 6], 1, Concat, [1]] # cat backbone P4 + - [-1, 3, C2f, [512]] # 12 + + - [-1, 1, nn.Upsample, [None, 2, 'nearest']] + - [[-1, 4], 1, Concat, [1]] # cat backbone P3 + - [-1, 3, C2f, [256]] # 15 (P3/8-small) + + - [-1, 1, Conv, [256, 3, 2]] + - [[-1, 12], 1, Concat, [1]] # cat head P4 + - [-1, 3, C2f, [512]] # 18 (P4/16-medium) + + - [-1, 1, Conv, [512, 3, 2]] + - [[-1, 9], 1, Concat, [1]] # cat head P5 + - [-1, 3, C2f, [1024]] # 21 (P5/32-large) + + - [[15, 18, 21], 1, Detect, [nc]] # Detect(P3, P4, P5) diff --git a/ultralytics/nn/__init__.py b/ultralytics/nn/__init__.py new file mode 100644 index 0000000..9889b7e --- /dev/null +++ b/ultralytics/nn/__init__.py @@ -0,0 +1,9 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +from .tasks import (BaseModel, ClassificationModel, DetectionModel, SegmentationModel, attempt_load_one_weight, + attempt_load_weights, guess_model_scale, guess_model_task, parse_model, torch_safe_load, + yaml_model_load) + +__all__ = ('attempt_load_one_weight', 'attempt_load_weights', 'parse_model', 'yaml_model_load', 'guess_model_task', + 'guess_model_scale', 'torch_safe_load', 'DetectionModel', 'SegmentationModel', 'ClassificationModel', + 'BaseModel') diff --git a/ultralytics/nn/autobackend.py b/ultralytics/nn/autobackend.py new file mode 100644 index 0000000..04e8dca --- /dev/null +++ b/ultralytics/nn/autobackend.py @@ -0,0 +1,481 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +import ast +import contextlib +import json +import os +import platform +import zipfile +from collections import OrderedDict, namedtuple +from pathlib import Path +from urllib.parse import urlparse + +import cv2 +import numpy as np +import torch +import torch.nn as nn +from PIL import Image + +from ultralytics.yolo.utils import ARM64, LINUX, LOGGER, ROOT, yaml_load +from ultralytics.yolo.utils.checks import check_requirements, check_suffix, check_version, check_yaml 
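+# (attempt_download_asset below fetches weights that are not local files; xywh2xyxy converts the CoreML xywh outputs to xyxy boxes in forward())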
+from ultralytics.yolo.utils.downloads import attempt_download_asset, is_url +from ultralytics.yolo.utils.ops import xywh2xyxy + + +def check_class_names(names): + """Check class names. Map imagenet class codes to human-readable names if required. Convert lists to dicts.""" + if isinstance(names, list): # names is a list + names = dict(enumerate(names)) # convert to dict + if isinstance(names, dict): + # Convert 1) string keys to int, i.e. '0' to 0, and non-string values to strings, i.e. True to 'True' + names = {int(k): str(v) for k, v in names.items()} + n = len(names) + if max(names.keys()) >= n: + raise KeyError(f'{n}-class dataset requires class indices 0-{n - 1}, but you have invalid class indices ' + f'{min(names.keys())}-{max(names.keys())} defined in your dataset YAML.') + if isinstance(names[0], str) and names[0].startswith('n0'): # imagenet class codes, i.e. 'n01440764' + map = yaml_load(ROOT / 'datasets/ImageNet.yaml')['map'] # human-readable names + names = {k: map[v] for k, v in names.items()} + return names + + +class AutoBackend(nn.Module): + + def __init__(self, + weights='yolov8n.pt', + device=torch.device('cpu'), + dnn=False, + data=None, + fp16=False, + fuse=True, + verbose=True): + """ + MultiBackend class for python inference on various platforms using Ultralytics YOLO. + + Args: + weights (str): The path to the weights file. Default: 'yolov8n.pt' + device (torch.device): The device to run the model on. + dnn (bool): Use OpenCV DNN module for inference if True, defaults to False. + data (str | Path | optional): Additional data.yaml file for class names. + fp16 (bool): If True, use half precision. Default: False + fuse (bool): Whether to fuse the model or not. Default: True + verbose (bool): Whether to run in verbose mode or not. 
Default: True + + Supported formats and their naming conventions: + | Format | Suffix | + |-----------------------|------------------| + | PyTorch | *.pt | + | TorchScript | *.torchscript | + | ONNX Runtime | *.onnx | + | ONNX OpenCV DNN | *.onnx dnn=True | + | OpenVINO | *.xml | + | CoreML | *.mlmodel | + | TensorRT | *.engine | + | TensorFlow SavedModel | *_saved_model | + | TensorFlow GraphDef | *.pb | + | TensorFlow Lite | *.tflite | + | TensorFlow Edge TPU | *_edgetpu.tflite | + | PaddlePaddle | *_paddle_model | + | ncnn | *_ncnn_model | + """ + super().__init__() + w = str(weights[0] if isinstance(weights, list) else weights) + nn_module = isinstance(weights, torch.nn.Module) + pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle, ncnn, triton = \ + self._model_type(w) + fp16 &= pt or jit or onnx or engine or nn_module or triton # FP16 + nhwc = coreml or saved_model or pb or tflite or edgetpu # BHWC formats (vs torch BCWH) + stride = 32 # default stride + model, metadata = None, None + cuda = torch.cuda.is_available() and device.type != 'cpu' # use CUDA + if not (pt or triton or nn_module): + w = attempt_download_asset(w) # download if not local + + # NOTE: special case: in-memory pytorch model + if nn_module: + model = weights.to(device) + model = model.fuse(verbose=verbose) if fuse else model + if hasattr(model, 'kpt_shape'): + kpt_shape = model.kpt_shape # pose-only + stride = max(int(model.stride.max()), 32) # model stride + names = model.module.names if hasattr(model, 'module') else model.names # get class names + model.half() if fp16 else model.float() + self.model = model # explicitly assign for to(), cpu(), cuda(), half() + pt = True + elif pt: # PyTorch + from ultralytics.nn.tasks import attempt_load_weights + model = attempt_load_weights(weights if isinstance(weights, list) else w, + device=device, + inplace=True, + fuse=fuse) + if hasattr(model, 'kpt_shape'): + kpt_shape = model.kpt_shape # pose-only + stride = max(int(model.stride.max()), 32) # model stride + names = model.module.names if hasattr(model, 'module') else model.names # get class names + model.half() if fp16 else model.float() + self.model = model # explicitly assign for to(), cpu(), cuda(), half() + elif jit: # TorchScript + LOGGER.info(f'Loading {w} for TorchScript inference...') + extra_files = {'config.txt': ''} # model metadata + model = torch.jit.load(w, _extra_files=extra_files, map_location=device) + model.half() if fp16 else model.float() + if extra_files['config.txt']: # load metadata dict + metadata = json.loads(extra_files['config.txt'], object_hook=lambda x: dict(x.items())) + elif dnn: # ONNX OpenCV DNN + LOGGER.info(f'Loading {w} for ONNX OpenCV DNN inference...') + check_requirements('opencv-python>=4.5.4') + net = cv2.dnn.readNetFromONNX(w) + elif onnx: # ONNX Runtime + LOGGER.info(f'Loading {w} for ONNX Runtime inference...') + check_requirements(('onnx', 'onnxruntime-gpu' if cuda else 'onnxruntime')) + import onnxruntime + providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider'] + session = onnxruntime.InferenceSession(w, providers=providers) + output_names = [x.name for x in session.get_outputs()] + metadata = session.get_modelmeta().custom_metadata_map # metadata + elif xml: # OpenVINO + LOGGER.info(f'Loading {w} for OpenVINO inference...') + check_requirements('openvino>=2023.0') # requires openvino-dev: https://pypi.org/project/openvino-dev/ + from openvino.runtime import Core, Layout, get_batch # noqa + core = 
Core() + w = Path(w) + if not w.is_file(): # if not *.xml + w = next(w.glob('*.xml')) # get *.xml file from *_openvino_model dir + ov_model = core.read_model(model=str(w), weights=w.with_suffix('.bin')) + if ov_model.get_parameters()[0].get_layout().empty: + ov_model.get_parameters()[0].set_layout(Layout('NCHW')) + batch_dim = get_batch(ov_model) + if batch_dim.is_static: + batch_size = batch_dim.get_length() + ov_compiled_model = core.compile_model(ov_model, device_name='AUTO') # AUTO selects best available device + metadata = w.parent / 'metadata.yaml' + elif engine: # TensorRT + LOGGER.info(f'Loading {w} for TensorRT inference...') + try: + import tensorrt as trt # noqa https://developer.nvidia.com/nvidia-tensorrt-download + except ImportError: + if LINUX: + check_requirements('nvidia-tensorrt', cmds='-U --index-url https://pypi.ngc.nvidia.com') + import tensorrt as trt # noqa + check_version(trt.__version__, '7.0.0', hard=True) # require tensorrt>=7.0.0 + if device.type == 'cpu': + device = torch.device('cuda:0') + Binding = namedtuple('Binding', ('name', 'dtype', 'shape', 'data', 'ptr')) + logger = trt.Logger(trt.Logger.INFO) + # Read file + with open(w, 'rb') as f, trt.Runtime(logger) as runtime: + meta_len = int.from_bytes(f.read(4), byteorder='little') # read metadata length + metadata = json.loads(f.read(meta_len).decode('utf-8')) # read metadata + model = runtime.deserialize_cuda_engine(f.read()) # read engine + context = model.create_execution_context() + bindings = OrderedDict() + output_names = [] + fp16 = False # default updated below + dynamic = False + for i in range(model.num_bindings): + name = model.get_binding_name(i) + dtype = trt.nptype(model.get_binding_dtype(i)) + if model.binding_is_input(i): + if -1 in tuple(model.get_binding_shape(i)): # dynamic + dynamic = True + context.set_binding_shape(i, tuple(model.get_profile_shape(0, i)[2])) + if dtype == np.float16: + fp16 = True + else: # output + output_names.append(name) + shape = tuple(context.get_binding_shape(i)) + im = torch.from_numpy(np.empty(shape, dtype=dtype)).to(device) + bindings[name] = Binding(name, dtype, shape, im, int(im.data_ptr())) + binding_addrs = OrderedDict((n, d.ptr) for n, d in bindings.items()) + batch_size = bindings['images'].shape[0] # if dynamic, this is instead max batch size + elif coreml: # CoreML + LOGGER.info(f'Loading {w} for CoreML inference...') + import coremltools as ct + model = ct.models.MLModel(w) + metadata = dict(model.user_defined_metadata) + elif saved_model: # TF SavedModel + LOGGER.info(f'Loading {w} for TensorFlow SavedModel inference...') + import tensorflow as tf + keras = False # assume TF1 saved_model + model = tf.keras.models.load_model(w) if keras else tf.saved_model.load(w) + metadata = Path(w) / 'metadata.yaml' + elif pb: # GraphDef https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt + LOGGER.info(f'Loading {w} for TensorFlow GraphDef inference...') + import tensorflow as tf + + from ultralytics.yolo.engine.exporter import gd_outputs + + def wrap_frozen_graph(gd, inputs, outputs): + """Wrap frozen graphs for deployment.""" + x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=''), []) # wrapped + ge = x.graph.as_graph_element + return x.prune(tf.nest.map_structure(ge, inputs), tf.nest.map_structure(ge, outputs)) + + gd = tf.Graph().as_graph_def() # TF GraphDef + with open(w, 'rb') as f: + gd.ParseFromString(f.read()) + frozen_func = wrap_frozen_graph(gd, inputs='x:0', outputs=gd_outputs(gd)) + elif tflite or edgetpu: # 
https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python + try: # https://coral.ai/docs/edgetpu/tflite-python/#update-existing-tf-lite-code-for-the-edge-tpu + from tflite_runtime.interpreter import Interpreter, load_delegate + except ImportError: + import tensorflow as tf + Interpreter, load_delegate = tf.lite.Interpreter, tf.lite.experimental.load_delegate + if edgetpu: # TF Edge TPU https://coral.ai/software/#edgetpu-runtime + LOGGER.info(f'Loading {w} for TensorFlow Lite Edge TPU inference...') + delegate = { + 'Linux': 'libedgetpu.so.1', + 'Darwin': 'libedgetpu.1.dylib', + 'Windows': 'edgetpu.dll'}[platform.system()] + interpreter = Interpreter(model_path=w, experimental_delegates=[load_delegate(delegate)]) + else: # TFLite + LOGGER.info(f'Loading {w} for TensorFlow Lite inference...') + interpreter = Interpreter(model_path=w) # load TFLite model + interpreter.allocate_tensors() # allocate + input_details = interpreter.get_input_details() # inputs + output_details = interpreter.get_output_details() # outputs + # Load metadata + with contextlib.suppress(zipfile.BadZipFile): + with zipfile.ZipFile(w, 'r') as model: + meta_file = model.namelist()[0] + metadata = ast.literal_eval(model.read(meta_file).decode('utf-8')) + elif tfjs: # TF.js + raise NotImplementedError('YOLOv8 TF.js inference is not currently supported.') + elif paddle: # PaddlePaddle + LOGGER.info(f'Loading {w} for PaddlePaddle inference...') + check_requirements('paddlepaddle-gpu' if cuda else 'paddlepaddle') + import paddle.inference as pdi # noqa + w = Path(w) + if not w.is_file(): # if not *.pdmodel + w = next(w.rglob('*.pdmodel')) # get *.pdmodel file from *_paddle_model dir + config = pdi.Config(str(w), str(w.with_suffix('.pdiparams'))) + if cuda: + config.enable_use_gpu(memory_pool_init_size_mb=2048, device_id=0) + predictor = pdi.create_predictor(config) + input_handle = predictor.get_input_handle(predictor.get_input_names()[0]) + output_names = predictor.get_output_names() + metadata = w.parents[1] / 'metadata.yaml' + elif ncnn: # ncnn + LOGGER.info(f'Loading {w} for ncnn inference...') + check_requirements('git+https://github.com/Tencent/ncnn.git' if ARM64 else 'ncnn') # requires NCNN + import ncnn as pyncnn + net = pyncnn.Net() + net.opt.num_threads = os.cpu_count() + net.opt.use_vulkan_compute = cuda + w = Path(w) + if not w.is_file(): # if not *.param + w = next(w.glob('*.param')) # get *.param file from *_ncnn_model dir + net.load_param(str(w)) + net.load_model(str(w.with_suffix('.bin'))) + metadata = w.parent / 'metadata.yaml' + elif triton: # NVIDIA Triton Inference Server + LOGGER.info('Triton Inference Server not supported...') + ''' + TODO: + check_requirements('tritonclient[all]') + from utils.triton import TritonRemoteModel + model = TritonRemoteModel(url=w) + nhwc = model.runtime.startswith("tensorflow") + ''' + else: + from ultralytics.yolo.engine.exporter import export_formats + raise TypeError(f"model='{w}' is not a supported model format. " + 'See https://docs.ultralytics.com/modes/predict for help.' 
+ f'\n\n{export_formats()}') + + # Load external metadata YAML + if isinstance(metadata, (str, Path)) and Path(metadata).exists(): + metadata = yaml_load(metadata) + if metadata: + for k, v in metadata.items(): + if k in ('stride', 'batch'): + metadata[k] = int(v) + elif k in ('imgsz', 'names', 'kpt_shape') and isinstance(v, str): + metadata[k] = eval(v) + stride = metadata['stride'] + task = metadata['task'] + batch = metadata['batch'] + imgsz = metadata['imgsz'] + names = metadata['names'] + kpt_shape = metadata.get('kpt_shape') + elif not (pt or triton or nn_module): + LOGGER.warning(f"WARNING ⚠️ Metadata not found for 'model={weights}'") + + # Check names + if 'names' not in locals(): # names missing + names = self._apply_default_class_names(data) + names = check_class_names(names) + + self.__dict__.update(locals()) # assign all variables to self + + def forward(self, im, augment=False, visualize=False): + """ + Runs inference on the YOLOv8 MultiBackend model. + + Args: + im (torch.Tensor): The image tensor to perform inference on. + augment (bool): whether to perform data augmentation during inference, defaults to False + visualize (bool): whether to visualize the output predictions, defaults to False + + Returns: + (tuple): Tuple containing the raw output tensor, and processed output for visualization (if visualize=True) + """ + b, ch, h, w = im.shape # batch, channel, height, width + if self.fp16 and im.dtype != torch.float16: + im = im.half() # to FP16 + if self.nhwc: + im = im.permute(0, 2, 3, 1) # torch BCHW to numpy BHWC shape(1,320,192,3) + + if self.pt or self.nn_module: # PyTorch + y = self.model(im, augment=augment, visualize=visualize) if augment or visualize else self.model(im) + elif self.jit: # TorchScript + y = self.model(im) + elif self.dnn: # ONNX OpenCV DNN + im = im.cpu().numpy() # torch to numpy + self.net.setInput(im) + y = self.net.forward() + elif self.onnx: # ONNX Runtime + im = im.cpu().numpy() # torch to numpy + y = self.session.run(self.output_names, {self.session.get_inputs()[0].name: im}) + elif self.xml: # OpenVINO + im = im.cpu().numpy() # FP32 + y = list(self.ov_compiled_model(im).values()) + elif self.engine: # TensorRT + if self.dynamic and im.shape != self.bindings['images'].shape: + i = self.model.get_binding_index('images') + self.context.set_binding_shape(i, im.shape) # reshape if dynamic + self.bindings['images'] = self.bindings['images']._replace(shape=im.shape) + for name in self.output_names: + i = self.model.get_binding_index(name) + self.bindings[name].data.resize_(tuple(self.context.get_binding_shape(i))) + s = self.bindings['images'].shape + assert im.shape == s, f"input size {im.shape} {'>' if self.dynamic else 'not equal to'} max model size {s}" + self.binding_addrs['images'] = int(im.data_ptr()) + self.context.execute_v2(list(self.binding_addrs.values())) + y = [self.bindings[x].data for x in sorted(self.output_names)] + elif self.coreml: # CoreML + im = im[0].cpu().numpy() + im_pil = Image.fromarray((im * 255).astype('uint8')) + # im = im.resize((192, 320), Image.BILINEAR) + y = self.model.predict({'image': im_pil}) # coordinates are xywh normalized + if 'confidence' in y: + box = xywh2xyxy(y['coordinates'] * [[w, h, w, h]]) # xyxy pixels + conf, cls = y['confidence'].max(1), y['confidence'].argmax(1).astype(np.float) + y = np.concatenate((box, conf.reshape(-1, 1), cls.reshape(-1, 1)), 1) + elif len(y) == 1: # classification model + y = list(y.values()) + elif len(y) == 2: # segmentation model + y = list(reversed(y.values())) # 
reversed for segmentation models (pred, proto) + elif self.paddle: # PaddlePaddle + im = im.cpu().numpy().astype(np.float32) + self.input_handle.copy_from_cpu(im) + self.predictor.run() + y = [self.predictor.get_output_handle(x).copy_to_cpu() for x in self.output_names] + elif self.ncnn: # ncnn + mat_in = self.pyncnn.Mat(im[0].cpu().numpy()) + ex = self.net.create_extractor() + input_names, output_names = self.net.input_names(), self.net.output_names() + ex.input(input_names[0], mat_in) + y = [] + for output_name in output_names: + mat_out = self.pyncnn.Mat() + ex.extract(output_name, mat_out) + y.append(np.array(mat_out)[None]) + elif self.triton: # NVIDIA Triton Inference Server + y = self.model(im) + else: # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU) + im = im.cpu().numpy() + if self.saved_model: # SavedModel + y = self.model(im, training=False) if self.keras else self.model(im) + if not isinstance(y, list): + y = [y] + elif self.pb: # GraphDef + y = self.frozen_func(x=self.tf.constant(im)) + if len(y) == 2 and len(self.names) == 999: # segments and names not defined + ip, ib = (0, 1) if len(y[0].shape) == 4 else (1, 0) # index of protos, boxes + nc = y[ib].shape[1] - y[ip].shape[3] - 4 # y = (1, 160, 160, 32), (1, 116, 8400) + self.names = {i: f'class{i}' for i in range(nc)} + else: # Lite or Edge TPU + input = self.input_details[0] + int8 = input['dtype'] == np.int8 # is TFLite quantized int8 model + if int8: + scale, zero_point = input['quantization'] + im = (im / scale + zero_point).astype(np.int8) # de-scale + self.interpreter.set_tensor(input['index'], im) + self.interpreter.invoke() + y = [] + for output in self.output_details: + x = self.interpreter.get_tensor(output['index']) + if int8: + scale, zero_point = output['quantization'] + x = (x.astype(np.float32) - zero_point) * scale # re-scale + y.append(x) + # TF segment fixes: export is reversed vs ONNX export and protos are transposed + if len(y) == 2: # segment with (det, proto) output order reversed + if len(y[1].shape) != 4: + y = list(reversed(y)) # should be y = (1, 116, 8400), (1, 160, 160, 32) + y[1] = np.transpose(y[1], (0, 3, 1, 2)) # should be y = (1, 116, 8400), (1, 32, 160, 160) + y = [x if isinstance(x, np.ndarray) else x.numpy() for x in y] + # y[0][..., :4] *= [w, h, w, h] # xywh normalized to pixels + + # for x in y: + # print(type(x), len(x)) if isinstance(x, (list, tuple)) else print(type(x), x.shape) # debug shapes + if isinstance(y, (list, tuple)): + return self.from_numpy(y[0]) if len(y) == 1 else [self.from_numpy(x) for x in y] + else: + return self.from_numpy(y) + + def from_numpy(self, x): + """ + Convert a numpy array to a tensor. + + Args: + x (np.ndarray): The array to be converted. + + Returns: + (torch.Tensor): The converted tensor + """ + return torch.tensor(x).to(self.device) if isinstance(x, np.ndarray) else x + + def warmup(self, imgsz=(1, 3, 640, 640)): + """ + Warm up the model by running one forward pass with a dummy input. 
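+        For example, `AutoBackend('yolov8n.pt', device=torch.device('cuda:0')).warmup(imgsz=(1, 3, 640, 640))` runs the dummy pass ahead of timed inference; the pass is skipped for CPU devices unless Triton is used (see the device check below).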
+ + Args: + imgsz (tuple): The shape of the dummy input tensor in the format (batch_size, channels, height, width) + + Returns: + (None): This method runs the forward pass and don't return any value + """ + warmup_types = self.pt, self.jit, self.onnx, self.engine, self.saved_model, self.pb, self.triton, self.nn_module + if any(warmup_types) and (self.device.type != 'cpu' or self.triton): + im = torch.empty(*imgsz, dtype=torch.half if self.fp16 else torch.float, device=self.device) # input + for _ in range(2 if self.jit else 1): # + self.forward(im) # warmup + + @staticmethod + def _apply_default_class_names(data): + """Applies default class names to an input YAML file or returns numerical class names.""" + with contextlib.suppress(Exception): + return yaml_load(check_yaml(data))['names'] + return {i: f'class{i}' for i in range(999)} # return default if above errors + + @staticmethod + def _model_type(p='path/to/model.pt'): + """ + This function takes a path to a model file and returns the model type + + Args: + p: path to the model file. Defaults to path/to/model.pt + """ + # Return model type from model path, i.e. path='path/to/model.onnx' -> type=onnx + # types = [pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle] + from ultralytics.yolo.engine.exporter import export_formats + sf = list(export_formats().Suffix) # export suffixes + if not is_url(p, check=False) and not isinstance(p, str): + check_suffix(p, sf) # checks + url = urlparse(p) # if url may be Triton inference server + types = [s in Path(p).name for s in sf] + types[8] &= not types[9] # tflite &= not edgetpu + triton = not any(types) and all([any(s in url.scheme for s in ['http', 'grpc']), url.netloc]) + return types + [triton] diff --git a/ultralytics/nn/autoshape.py b/ultralytics/nn/autoshape.py new file mode 100644 index 0000000..d557f78 --- /dev/null +++ b/ultralytics/nn/autoshape.py @@ -0,0 +1,244 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +""" +Common modules +""" + +from copy import copy +from pathlib import Path + +import cv2 +import numpy as np +import requests +import torch +import torch.nn as nn +from PIL import Image, ImageOps +from torch.cuda import amp + +from ultralytics.nn.autobackend import AutoBackend +from ultralytics.yolo.data.augment import LetterBox +from ultralytics.yolo.utils import LOGGER, colorstr +from ultralytics.yolo.utils.files import increment_path +from ultralytics.yolo.utils.ops import Profile, make_divisible, non_max_suppression, scale_boxes, xyxy2xywh +from ultralytics.yolo.utils.plotting import Annotator, colors, save_one_box +from ultralytics.yolo.utils.torch_utils import copy_attr, smart_inference_mode + + +class AutoShape(nn.Module): + """YOLOv8 input-robust model wrapper for passing cv2/np/PIL/torch inputs. Includes preprocessing, inference and NMS.""" + conf = 0.25 # NMS confidence threshold + iou = 0.45 # NMS IoU threshold + agnostic = False # NMS class-agnostic + multi_label = False # NMS multiple labels per box + classes = None # (optional list) filter by class, i.e. = [0, 15, 16] for COCO persons, cats and dogs + max_det = 1000 # maximum number of detections per image + amp = False # Automatic Mixed Precision (AMP) inference + + def __init__(self, model, verbose=True): + """Initializes object and copies attributes from model object.""" + super().__init__() + if verbose: + LOGGER.info('Adding AutoShape... 
') + copy_attr(self, model, include=('yaml', 'nc', 'hyp', 'names', 'stride', 'abc'), exclude=()) # copy attributes + self.dmb = isinstance(model, AutoBackend) # DetectMultiBackend() instance + self.pt = not self.dmb or model.pt # PyTorch model + self.model = model.eval() + if self.pt: + m = self.model.model.model[-1] if self.dmb else self.model.model[-1] # Detect() + m.inplace = False # Detect.inplace=False for safe multithread inference + m.export = True # do not output loss values + + def _apply(self, fn): + """Apply to(), cpu(), cuda(), half() to model tensors that are not parameters or registered buffers.""" + self = super()._apply(fn) + if self.pt: + m = self.model.model.model[-1] if self.dmb else self.model.model[-1] # Detect() + m.stride = fn(m.stride) + m.grid = list(map(fn, m.grid)) + if isinstance(m.anchor_grid, list): + m.anchor_grid = list(map(fn, m.anchor_grid)) + return self + + @smart_inference_mode() + def forward(self, ims, size=640, augment=False, profile=False): + """Inference from various sources. For size(height=640, width=1280), RGB images example inputs are:.""" + # file: ims = 'data/images/zidane.jpg' # str or PosixPath + # URI: = 'https://ultralytics.com/images/zidane.jpg' + # OpenCV: = cv2.imread('image.jpg')[:,:,::-1] # HWC BGR to RGB x(640,1280,3) + # PIL: = Image.open('image.jpg') or ImageGrab.grab() # HWC x(640,1280,3) + # numpy: = np.zeros((640,1280,3)) # HWC + # torch: = torch.zeros(16,3,320,640) # BCHW (scaled to size=640, 0-1 values) + # multiple: = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...] # list of images + + dt = (Profile(), Profile(), Profile()) + with dt[0]: + if isinstance(size, int): # expand + size = (size, size) + p = next(self.model.parameters()) if self.pt else torch.empty(1, device=self.model.device) # param + autocast = self.amp and (p.device.type != 'cpu') # Automatic Mixed Precision (AMP) inference + if isinstance(ims, torch.Tensor): # torch + with amp.autocast(autocast): + return self.model(ims.to(p.device).type_as(p), augment=augment) # inference + + # Preprocess + n, ims = (len(ims), list(ims)) if isinstance(ims, (list, tuple)) else (1, [ims]) # number, list of images + shape0, shape1, files = [], [], [] # image and inference shapes, filenames + for i, im in enumerate(ims): + f = f'image{i}' # filename + if isinstance(im, (str, Path)): # filename or uri + im, f = Image.open(requests.get(im, stream=True).raw if str(im).startswith('http') else im), im + im = np.asarray(ImageOps.exif_transpose(im)) + elif isinstance(im, Image.Image): # PIL Image + im, f = np.asarray(ImageOps.exif_transpose(im)), getattr(im, 'filename', f) or f + files.append(Path(f).with_suffix('.jpg').name) + if im.shape[0] < 5: # image in CHW + im = im.transpose((1, 2, 0)) # reverse dataloader .transpose(2, 0, 1) + im = im[..., :3] if im.ndim == 3 else cv2.cvtColor(im, cv2.COLOR_GRAY2BGR) # enforce 3ch input + s = im.shape[:2] # HWC + shape0.append(s) # image shape + g = max(size) / max(s) # gain + shape1.append([y * g for y in s]) + ims[i] = im if im.data.contiguous else np.ascontiguousarray(im) # update + shape1 = [make_divisible(x, self.stride) for x in np.array(shape1).max(0)] if self.pt else size # inf shape + x = [LetterBox(shape1, auto=False)(image=im)['img'] for im in ims] # pad + x = np.ascontiguousarray(np.array(x).transpose((0, 3, 1, 2))) # stack and BHWC to BCHW + x = torch.from_numpy(x).to(p.device).type_as(p) / 255 # uint8 to fp16/32 + + with amp.autocast(autocast): + # Inference + with dt[1]: + y = self.model(x, augment=augment) # forward + + 
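+            # NMS below uses the conf/iou/classes/agnostic/multi_label/max_det attributes set on this AutoShape instance; scale_boxes then maps boxes from the padded inference shape back to each original image shape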
# Postprocess + with dt[2]: + y = non_max_suppression(y if self.dmb else y[0], + self.conf, + self.iou, + self.classes, + self.agnostic, + self.multi_label, + max_det=self.max_det) # NMS + for i in range(n): + scale_boxes(shape1, y[i][:, :4], shape0[i]) + + return Detections(ims, y, files, dt, self.names, x.shape) + + +class Detections: + """ YOLOv8 detections class for inference results""" + + def __init__(self, ims, pred, files, times=(0, 0, 0), names=None, shape=None): + """Initialize object attributes for YOLO detection results.""" + super().__init__() + d = pred[0].device # device + gn = [torch.tensor([*(im.shape[i] for i in [1, 0, 1, 0]), 1, 1], device=d) for im in ims] # normalizations + self.ims = ims # list of images as numpy arrays + self.pred = pred # list of tensors pred[0] = (xyxy, conf, cls) + self.names = names # class names + self.files = files # image filenames + self.times = times # profiling times + self.xyxy = pred # xyxy pixels + self.xywh = [xyxy2xywh(x) for x in pred] # xywh pixels + self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)] # xyxy normalized + self.xywhn = [x / g for x, g in zip(self.xywh, gn)] # xywh normalized + self.n = len(self.pred) # number of images (batch size) + self.t = tuple(x.t / self.n * 1E3 for x in times) # timestamps (ms) + self.s = tuple(shape) # inference BCHW shape + + def _run(self, pprint=False, show=False, save=False, crop=False, render=False, labels=True, save_dir=Path('')): + """Return performance metrics and optionally cropped/save images or results.""" + s, crops = '', [] + for i, (im, pred) in enumerate(zip(self.ims, self.pred)): + s += f'\nimage {i + 1}/{len(self.pred)}: {im.shape[0]}x{im.shape[1]} ' # string + if pred.shape[0]: + for c in pred[:, -1].unique(): + n = (pred[:, -1] == c).sum() # detections per class + s += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, " # add to string + s = s.rstrip(', ') + if show or save or render or crop: + annotator = Annotator(im, example=str(self.names)) + for *box, conf, cls in reversed(pred): # xyxy, confidence, class + label = f'{self.names[int(cls)]} {conf:.2f}' + if crop: + file = save_dir / 'crops' / self.names[int(cls)] / self.files[i] if save else None + crops.append({ + 'box': box, + 'conf': conf, + 'cls': cls, + 'label': label, + 'im': save_one_box(box, im, file=file, save=save)}) + else: # all others + annotator.box_label(box, label if labels else '', color=colors(cls)) + im = annotator.im + else: + s += '(no detections)' + + im = Image.fromarray(im.astype(np.uint8)) if isinstance(im, np.ndarray) else im # from np + if show: + im.show(self.files[i]) # show + if save: + f = self.files[i] + im.save(save_dir / f) # save + if i == self.n - 1: + LOGGER.info(f"Saved {self.n} image{'s' * (self.n > 1)} to {colorstr('bold', save_dir)}") + if render: + self.ims[i] = np.asarray(im) + if pprint: + s = s.lstrip('\n') + return f'{s}\nSpeed: %.1fms preprocess, %.1fms inference, %.1fms NMS per image at shape {self.s}' % self.t + if crop: + if save: + LOGGER.info(f'Saved results to {save_dir}\n') + return crops + + def show(self, labels=True): + """Displays YOLO results with detected bounding boxes.""" + self._run(show=True, labels=labels) # show results + + def save(self, labels=True, save_dir='runs/detect/exp', exist_ok=False): + """Save detection results with optional labels to specified directory.""" + save_dir = increment_path(save_dir, exist_ok, mkdir=True) # increment save_dir + self._run(save=True, labels=labels, save_dir=save_dir) # save results + + def crop(self, save=True, 
save_dir='runs/detect/exp', exist_ok=False): + """Crops images into detections and saves them if 'save' is True.""" + save_dir = increment_path(save_dir, exist_ok, mkdir=True) if save else None + return self._run(crop=True, save=save, save_dir=save_dir) # crop results + + def render(self, labels=True): + """Renders detected objects and returns images.""" + self._run(render=True, labels=labels) # render results + return self.ims + + def pandas(self): + """Return detections as pandas DataFrames, i.e. print(results.pandas().xyxy[0]).""" + import pandas + new = copy(self) # return copy + ca = 'xmin', 'ymin', 'xmax', 'ymax', 'confidence', 'class', 'name' # xyxy columns + cb = 'xcenter', 'ycenter', 'width', 'height', 'confidence', 'class', 'name' # xywh columns + for k, c in zip(['xyxy', 'xyxyn', 'xywh', 'xywhn'], [ca, ca, cb, cb]): + a = [[x[:5] + [int(x[5]), self.names[int(x[5])]] for x in x.tolist()] for x in getattr(self, k)] # update + setattr(new, k, [pandas.DataFrame(x, columns=c) for x in a]) + return new + + def tolist(self): + """Return a list of Detections objects, i.e. 'for result in results.tolist():'.""" + r = range(self.n) # iterable + x = [Detections([self.ims[i]], [self.pred[i]], [self.files[i]], self.times, self.names, self.s) for i in r] + # for d in x: + # for k in ['ims', 'pred', 'xyxy', 'xyxyn', 'xywh', 'xywhn']: + # setattr(d, k, getattr(d, k)[0]) # pop out of list + return x + + def print(self): + """Print the results of the `self._run()` function.""" + LOGGER.info(self.__str__()) + + def __len__(self): # override len(results) + return self.n + + def __str__(self): # override print(results) + return self._run(pprint=True) # print results + + def __repr__(self): + """Returns a printable representation of the object.""" + return f'YOLOv8 {self.__class__} instance\n' + self.__str__() diff --git a/ultralytics/nn/modules/__init__.py b/ultralytics/nn/modules/__init__.py new file mode 100644 index 0000000..8efbf9d --- /dev/null +++ b/ultralytics/nn/modules/__init__.py @@ -0,0 +1,29 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +""" +Ultralytics modules. 
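In this repo the package additionally re-exports the custom blocks used by the yolov8-lite and yolov8-tiny configs above (MP, SP, SPF, StemBlock, Shuffle_Block, DWConvblock, ADD).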
Visualize with: + +from ultralytics.nn.modules import * +import torch +import os + +x = torch.ones(1, 128, 40, 40) +m = Conv(128, 128) +f = f'{m._get_name()}.onnx' +torch.onnx.export(m, x, f) +os.system(f'onnxsim {f} {f} && open {f}') +""" + +from .block import (C1, C2, C3, C3TR, DFL, SPP, SPPF, Bottleneck, BottleneckCSP, C2f, C3Ghost, C3x, GhostBottleneck, + HGBlock, HGStem, Proto, RepC3, MP, SP, SPF, StemBlock, Shuffle_Block, DWConvblock, ADD) +from .conv import (CBAM, ChannelAttention, Concat, Conv, Conv2, ConvTranspose, DWConv, DWConvTranspose2d, Focus, + GhostConv, LightConv, RepConv, SpatialAttention) +from .head import Classify, Detect, Pose, RTDETRDecoder, Segment +from .transformer import (AIFI, MLP, DeformableTransformerDecoder, DeformableTransformerDecoderLayer, LayerNorm2d, + MLPBlock, MSDeformAttn, TransformerBlock, TransformerEncoderLayer, TransformerLayer) + +__all__ = ('Conv', 'Conv2', 'LightConv', 'RepConv', 'DWConv', 'DWConvTranspose2d', 'ConvTranspose', 'Focus', + 'GhostConv', 'ChannelAttention', 'SpatialAttention', 'CBAM', 'Concat', 'TransformerLayer', + 'TransformerBlock', 'MLPBlock', 'LayerNorm2d', 'DFL', 'HGBlock', 'HGStem', 'SPP', 'SPPF', 'C1', 'C2', 'C3', + 'C2f', 'C3x', 'C3TR', 'C3Ghost', 'GhostBottleneck', 'Bottleneck', 'BottleneckCSP', 'Proto', 'Detect', + 'Segment', 'Pose', 'Classify', 'TransformerEncoderLayer', 'RepC3', 'RTDETRDecoder', 'AIFI', + 'DeformableTransformerDecoder', 'DeformableTransformerDecoderLayer', 'MSDeformAttn', 'MLP', 'MP', 'SP', 'SPF', 'StemBlock', 'Shuffle_Block', 'DWConvblock', 'ADD') diff --git a/ultralytics/nn/modules/block.py b/ultralytics/nn/modules/block.py new file mode 100644 index 0000000..8cc9ee9 --- /dev/null +++ b/ultralytics/nn/modules/block.py @@ -0,0 +1,450 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +""" +Block modules +""" + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from .conv import Conv, DWConv, GhostConv, LightConv, RepConv +from .transformer import TransformerBlock + +__all__ = ('DFL', 'HGBlock', 'HGStem', 'SPP', 'SPPF', 'C1', 'C2', 'C3', 'C2f', 'C3x', 'C3TR', 'C3Ghost', + 'GhostBottleneck', 'Bottleneck', 'BottleneckCSP', 'Proto', 'RepC3', 'MP', 'SP', 'SPF') + +class MP(nn.Module): + def __init__(self, k=2): + super(MP, self).__init__() + self.m = nn.MaxPool2d(kernel_size=k, stride=k) + + def forward(self, x): + return self.m(x) + +class SP(nn.Module): + def __init__(self, k=3, s=1): + super(SP, self).__init__() + self.m = nn.MaxPool2d(kernel_size=k, stride=s, padding=k // 2) + + def forward(self, x): + return self.m(x) + +class SPF(nn.Module): + def __init__(self, k=3, s=1): + super(SPF, self).__init__() + self.n = (k - 1) // 2 + self.m = nn.Sequential(*[nn.MaxPool2d(kernel_size=3, stride=s, padding=1) for _ in range(self.n)]) + + def forward(self, x): + return self.m(x) + +# yolov7-lite +class StemBlock(nn.Module): + def __init__(self, c1, c2, k=3, s=2, p=None, g=1, act=True): + super(StemBlock, self).__init__() + self.stem_1 = Conv(c1, c2, k, s, p, g, act) + self.stem_2a = Conv(c2, c2 // 2, 1, 1, 0) + self.stem_2b = Conv(c2 // 2, c2, 3, 2, 1) + self.stem_2p = nn.MaxPool2d(kernel_size=2,stride=2,ceil_mode=True) + self.stem_3 = Conv(c2 * 2, c2, 1, 1, 0) + + def forward(self, x): + stem_1_out = self.stem_1(x) + stem_2a_out = self.stem_2a(stem_1_out) + stem_2b_out = self.stem_2b(stem_2a_out) + stem_2p_out = self.stem_2p(stem_1_out) + out = self.stem_3(torch.cat((stem_2b_out,stem_2p_out),1)) + return out + +class conv_bn_relu_maxpool(nn.Module): + def __init__(self, c1, c2): # ch_in, ch_out 
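+        # A 3x3 stride-2 Conv + BN + SiLU followed by a 3x3 stride-2 MaxPool, i.e. an overall 4x spatial downsample from c1 to c2 channels.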
+ super(conv_bn_relu_maxpool, self).__init__() + self.conv = nn.Sequential( + nn.Conv2d(c1, c2, kernel_size=3, stride=2, padding=1, bias=False), + nn.BatchNorm2d(c2), + nn.SiLU(inplace=True), + ) + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False) + + def forward(self, x): + return self.maxpool(self.conv(x)) + +class DWConvblock(nn.Module): + "Depthwise conv + Pointwise conv" + def __init__(self, in_channels, out_channels, k, s): + super(DWConvblock, self).__init__() + self.p = k // 2 + self.conv1 = nn.Conv2d(in_channels, in_channels, kernel_size=k, stride=s, padding=self.p, groups=in_channels, bias=False) + self.bn1 = nn.BatchNorm2d(in_channels) + self.act1 = nn.SiLU(inplace=True) + self.conv2 = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0, bias=False) + self.bn2 = nn.BatchNorm2d(out_channels) + self.act2 = nn.SiLU(inplace=True) + + def forward(self, x): + x = self.conv1(x) + x = self.bn1(x) + x = self.act1(x) + x = self.conv2(x) + x = self.bn2(x) + x = self.act2(x) + return x + +class ADD(nn.Module): + # Stortcut a list of tensors along dimension + def __init__(self, alpha=0.5): + super(ADD, self).__init__() + self.a = alpha + + def forward(self, x): + x1, x2 = x[0], x[1] + return torch.add(x1, x2, alpha=self.a) + +def channel_shuffle(x, groups): + batchsize, num_channels, height, width = x.data.size() + channels_per_group = num_channels // groups + + # reshape + x = x.view(batchsize, groups, channels_per_group, height, width) + x = torch.transpose(x, 1, 2).contiguous() + # flatten + x = x.view(batchsize, -1, height, width) + return x + +class Shuffle_Block(nn.Module): + def __init__(self, inp, oup, stride): + super(Shuffle_Block, self).__init__() + + if not (1 <= stride <= 3): + raise ValueError('illegal stride value') + self.stride = stride + + branch_features = oup // 2 + assert (self.stride != 1) or (inp == branch_features << 1) + + if self.stride > 1: + self.branch1 = nn.Sequential( + self.depthwise_conv(inp, inp, kernel_size=3, stride=self.stride, padding=1), + nn.BatchNorm2d(inp), + nn.Conv2d(inp, branch_features, kernel_size=1, stride=1, padding=0, bias=False), + nn.BatchNorm2d(branch_features), + nn.SiLU(inplace=True), + ) + + self.branch2 = nn.Sequential( + nn.Conv2d(inp if (self.stride > 1) else branch_features, + branch_features, kernel_size=1, stride=1, padding=0, bias=False), + nn.BatchNorm2d(branch_features), + nn.SiLU(inplace=True), + self.depthwise_conv(branch_features, branch_features, kernel_size=3, stride=self.stride, padding=1), + nn.BatchNorm2d(branch_features), + nn.Conv2d(branch_features, branch_features, kernel_size=1, stride=1, padding=0, bias=False), + nn.BatchNorm2d(branch_features), + nn.SiLU(inplace=True), + ) + + @staticmethod + def depthwise_conv(i, o, kernel_size, stride=1, padding=0, bias=False): + return nn.Conv2d(i, o, kernel_size, stride, padding, bias=bias, groups=i) + + def forward(self, x): + if self.stride == 1: + x1, x2 = x.chunk(2, dim=1) + out = torch.cat((x1, self.branch2(x2)), dim=1) + else: + out = torch.cat((self.branch1(x), self.branch2(x)), dim=1) + + out = channel_shuffle(out, 2) + + return out + +# end of yolov7-lite + +class DFL(nn.Module): + """ + Integral module of Distribution Focal Loss (DFL). 
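+    Decodes each box side from a discrete distribution over c1 bins to its expectation: a frozen 1x1 conv whose
+    weights are 0..c1-1 computes sum_i i * softmax(x)_i per side, so DFL(16)(torch.randn(2, 64, 100)) -> (2, 4, 100).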
+ Proposed in Generalized Focal Loss https://ieeexplore.ieee.org/document/9792391 + """ + + def __init__(self, c1=16): + """Initialize a convolutional layer with a given number of input channels.""" + super().__init__() + self.conv = nn.Conv2d(c1, 1, 1, bias=False).requires_grad_(False) + x = torch.arange(c1, dtype=torch.float) + self.conv.weight.data[:] = nn.Parameter(x.view(1, c1, 1, 1)) + self.c1 = c1 + + def forward(self, x): + """Applies a transformer layer on input tensor 'x' and returns a tensor.""" + b, c, a = x.shape # batch, channels, anchors + return self.conv(x.view(b, 4, self.c1, a).transpose(2, 1).softmax(1)).view(b, 4, a) + # return self.conv(x.view(b, self.c1, 4, a).softmax(1)).view(b, 4, a) + + +class Proto(nn.Module): + """YOLOv8 mask Proto module for segmentation models.""" + + def __init__(self, c1, c_=256, c2=32): # ch_in, number of protos, number of masks + super().__init__() + self.cv1 = Conv(c1, c_, k=3) + self.upsample = nn.ConvTranspose2d(c_, c_, 2, 2, 0, bias=True) # nn.Upsample(scale_factor=2, mode='nearest') + self.cv2 = Conv(c_, c_, k=3) + self.cv3 = Conv(c_, c2) + + def forward(self, x): + """Performs a forward pass through layers using an upsampled input image.""" + return self.cv3(self.cv2(self.upsample(self.cv1(x)))) + + +class HGStem(nn.Module): + """StemBlock of PPHGNetV2 with 5 convolutions and one maxpool2d. + https://github.com/PaddlePaddle/PaddleDetection/blob/develop/ppdet/modeling/backbones/hgnet_v2.py + """ + + def __init__(self, c1, cm, c2): + super().__init__() + self.stem1 = Conv(c1, cm, 3, 2, act=nn.ReLU()) + self.stem2a = Conv(cm, cm // 2, 2, 1, 0, act=nn.ReLU()) + self.stem2b = Conv(cm // 2, cm, 2, 1, 0, act=nn.ReLU()) + self.stem3 = Conv(cm * 2, cm, 3, 2, act=nn.ReLU()) + self.stem4 = Conv(cm, c2, 1, 1, act=nn.ReLU()) + self.pool = nn.MaxPool2d(kernel_size=2, stride=1, padding=0, ceil_mode=True) + + def forward(self, x): + """Forward pass of a PPHGNetV2 backbone layer.""" + x = self.stem1(x) + x = F.pad(x, [0, 1, 0, 1]) + x2 = self.stem2a(x) + x2 = F.pad(x2, [0, 1, 0, 1]) + x2 = self.stem2b(x2) + x1 = self.pool(x) + x = torch.cat([x1, x2], dim=1) + x = self.stem3(x) + x = self.stem4(x) + return x + + +class HGBlock(nn.Module): + """HG_Block of PPHGNetV2 with 2 convolutions and LightConv. 
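+    Stacks n LightConv (or Conv) layers, concatenates the input with every intermediate output, then applies a 1x1
+    squeeze conv (to c2 // 2) and a 1x1 excitation conv (to c2); an identity shortcut is added when shortcut=True and c1 == c2.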
+ https://github.com/PaddlePaddle/PaddleDetection/blob/develop/ppdet/modeling/backbones/hgnet_v2.py + """ + + def __init__(self, c1, cm, c2, k=3, n=6, lightconv=False, shortcut=False, act=nn.ReLU()): + super().__init__() + block = LightConv if lightconv else Conv + self.m = nn.ModuleList(block(c1 if i == 0 else cm, cm, k=k, act=act) for i in range(n)) + self.sc = Conv(c1 + n * cm, c2 // 2, 1, 1, act=act) # squeeze conv + self.ec = Conv(c2 // 2, c2, 1, 1, act=act) # excitation conv + self.add = shortcut and c1 == c2 + + def forward(self, x): + """Forward pass of a PPHGNetV2 backbone layer.""" + y = [x] + y.extend(m(y[-1]) for m in self.m) + y = self.ec(self.sc(torch.cat(y, 1))) + return y + x if self.add else y + + +class SPP(nn.Module): + """Spatial Pyramid Pooling (SPP) layer https://arxiv.org/abs/1406.4729.""" + + def __init__(self, c1, c2, k=(5, 9, 13)): + """Initialize the SPP layer with input/output channels and pooling kernel sizes.""" + super().__init__() + c_ = c1 // 2 # hidden channels + self.cv1 = Conv(c1, c_, 1, 1) + self.cv2 = Conv(c_ * (len(k) + 1), c2, 1, 1) + self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k]) + + def forward(self, x): + """Forward pass of the SPP layer, performing spatial pyramid pooling.""" + x = self.cv1(x) + return self.cv2(torch.cat([x] + [m(x) for m in self.m], 1)) + + +class SPPF(nn.Module): + """Spatial Pyramid Pooling - Fast (SPPF) layer for YOLOv5 by Glenn Jocher.""" + + def __init__(self, c1, c2, k=5): # equivalent to SPP(k=(5, 9, 13)) + super().__init__() + c_ = c1 // 2 # hidden channels + self.cv1 = Conv(c1, c_, 1, 1) + self.cv2 = Conv(c_ * 4, c2, 1, 1) + self.m = nn.MaxPool2d(kernel_size=k, stride=1, padding=k // 2) + + def forward(self, x): + """Forward pass through Ghost Convolution block.""" + x = self.cv1(x) + y1 = self.m(x) + y2 = self.m(y1) + return self.cv2(torch.cat((x, y1, y2, self.m(y2)), 1)) + + +class C1(nn.Module): + """CSP Bottleneck with 1 convolution.""" + + def __init__(self, c1, c2, n=1): # ch_in, ch_out, number + super().__init__() + self.cv1 = Conv(c1, c2, 1, 1) + self.m = nn.Sequential(*(Conv(c2, c2, 3) for _ in range(n))) + + def forward(self, x): + """Applies cross-convolutions to input in the C3 module.""" + y = self.cv1(x) + return self.m(y) + y + + +class C2(nn.Module): + """CSP Bottleneck with 2 convolutions.""" + + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion + super().__init__() + self.c = int(c2 * e) # hidden channels + self.cv1 = Conv(c1, 2 * self.c, 1, 1) + self.cv2 = Conv(2 * self.c, c2, 1) # optional act=FReLU(c2) + # self.attention = ChannelAttention(2 * self.c) # or SpatialAttention() + self.m = nn.Sequential(*(Bottleneck(self.c, self.c, shortcut, g, k=((3, 3), (3, 3)), e=1.0) for _ in range(n))) + + def forward(self, x): + """Forward pass through the CSP bottleneck with 2 convolutions.""" + a, b = self.cv1(x).chunk(2, 1) + return self.cv2(torch.cat((self.m(a), b), 1)) + + +class C2f(nn.Module): + """CSP Bottleneck with 2 convolutions.""" + + def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion + super().__init__() + self.c = int(c2 * e) # hidden channels + self.cv1 = Conv(c1, 2 * self.c, 1, 1) + self.cv2 = Conv((2 + n) * self.c, c2, 1) # optional act=FReLU(c2) + self.m = nn.ModuleList(Bottleneck(self.c, self.c, shortcut, g, k=((3, 3), (3, 3)), e=1.0) for _ in range(n)) + + def forward(self, x): + """Forward pass through C2f layer.""" + y = 
list(self.cv1(x).chunk(2, 1)) + y.extend(m(y[-1]) for m in self.m) + return self.cv2(torch.cat(y, 1)) + + def forward_split(self, x): + """Forward pass using split() instead of chunk().""" + y = list(self.cv1(x).split((self.c, self.c), 1)) + y.extend(m(y[-1]) for m in self.m) + return self.cv2(torch.cat(y, 1)) + + +class C3(nn.Module): + """CSP Bottleneck with 3 convolutions.""" + + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion + super().__init__() + c_ = int(c2 * e) # hidden channels + self.cv1 = Conv(c1, c_, 1, 1) + self.cv2 = Conv(c1, c_, 1, 1) + self.cv3 = Conv(2 * c_, c2, 1) # optional act=FReLU(c2) + self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, k=((1, 1), (3, 3)), e=1.0) for _ in range(n))) + + def forward(self, x): + """Forward pass through the CSP bottleneck with 2 convolutions.""" + return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), 1)) + + +class C3x(C3): + """C3 module with cross-convolutions.""" + + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): + """Initialize C3TR instance and set default parameters.""" + super().__init__(c1, c2, n, shortcut, g, e) + self.c_ = int(c2 * e) + self.m = nn.Sequential(*(Bottleneck(self.c_, self.c_, shortcut, g, k=((1, 3), (3, 1)), e=1) for _ in range(n))) + + +class RepC3(nn.Module): + """Rep C3.""" + + def __init__(self, c1, c2, n=3, e=1.0): + super().__init__() + c_ = int(c2 * e) # hidden channels + self.cv1 = Conv(c1, c2, 1, 1) + self.cv2 = Conv(c1, c2, 1, 1) + self.m = nn.Sequential(*[RepConv(c_, c_) for _ in range(n)]) + self.cv3 = Conv(c_, c2, 1, 1) if c_ != c2 else nn.Identity() + + def forward(self, x): + """Forward pass of RT-DETR neck layer.""" + return self.cv3(self.m(self.cv1(x)) + self.cv2(x)) + + +class C3TR(C3): + """C3 module with TransformerBlock().""" + + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): + """Initialize C3Ghost module with GhostBottleneck().""" + super().__init__(c1, c2, n, shortcut, g, e) + c_ = int(c2 * e) + self.m = TransformerBlock(c_, c_, 4, n) + + +class C3Ghost(C3): + """C3 module with GhostBottleneck().""" + + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): + """Initialize 'SPP' module with various pooling sizes for spatial pyramid pooling.""" + super().__init__(c1, c2, n, shortcut, g, e) + c_ = int(c2 * e) # hidden channels + self.m = nn.Sequential(*(GhostBottleneck(c_, c_) for _ in range(n))) + + +class GhostBottleneck(nn.Module): + """Ghost Bottleneck https://github.com/huawei-noah/ghostnet.""" + + def __init__(self, c1, c2, k=3, s=1): # ch_in, ch_out, kernel, stride + super().__init__() + c_ = c2 // 2 + self.conv = nn.Sequential( + GhostConv(c1, c_, 1, 1), # pw + DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity(), # dw + GhostConv(c_, c2, 1, 1, act=False)) # pw-linear + self.shortcut = nn.Sequential(DWConv(c1, c1, k, s, act=False), Conv(c1, c2, 1, 1, + act=False)) if s == 2 else nn.Identity() + + def forward(self, x): + """Applies skip connection and concatenation to input tensor.""" + return self.conv(x) + self.shortcut(x) + + +class Bottleneck(nn.Module): + """Standard bottleneck.""" + + def __init__(self, c1, c2, shortcut=True, g=1, k=(3, 3), e=0.5): # ch_in, ch_out, shortcut, groups, kernels, expand + super().__init__() + c_ = int(c2 * e) # hidden channels + self.cv1 = Conv(c1, c_, k[0], 1) + self.cv2 = Conv(c_, c2, k[1], 1, g=g) + self.add = shortcut and c1 == c2 + + def forward(self, x): + """'forward()' applies the YOLOv5 FPN to input data.""" + return x + 
self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x)) + + +class BottleneckCSP(nn.Module): + """CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks.""" + + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion + super().__init__() + c_ = int(c2 * e) # hidden channels + self.cv1 = Conv(c1, c_, 1, 1) + self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False) + self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False) + self.cv4 = Conv(2 * c_, c2, 1, 1) + self.bn = nn.BatchNorm2d(2 * c_) # applied to cat(cv2, cv3) + self.act = nn.SiLU() + self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n))) + + def forward(self, x): + """Applies a CSP bottleneck with 3 convolutions.""" + y1 = self.cv3(self.m(self.cv1(x))) + y2 = self.cv2(x) + return self.cv4(self.act(self.bn(torch.cat((y1, y2), 1)))) diff --git a/ultralytics/nn/modules/conv.py b/ultralytics/nn/modules/conv.py new file mode 100644 index 0000000..38ee3f5 --- /dev/null +++ b/ultralytics/nn/modules/conv.py @@ -0,0 +1,297 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +""" +Convolution modules +""" + +import math + +import numpy as np +import torch +import torch.nn as nn + +__all__ = ('Conv', 'LightConv', 'DWConv', 'DWConvTranspose2d', 'ConvTranspose', 'Focus', 'GhostConv', + 'ChannelAttention', 'SpatialAttention', 'CBAM', 'Concat', 'RepConv') + + +def autopad(k, p=None, d=1): # kernel, padding, dilation + """Pad to 'same' shape outputs.""" + if d > 1: + k = d * (k - 1) + 1 if isinstance(k, int) else [d * (x - 1) + 1 for x in k] # actual kernel-size + if p is None: + p = k // 2 if isinstance(k, int) else [x // 2 for x in k] # auto-pad + return p + + +class Conv(nn.Module): + """Standard convolution with args(ch_in, ch_out, kernel, stride, padding, groups, dilation, activation).""" + default_act = nn.SiLU() # default activation + + def __init__(self, c1, c2, k=1, s=1, p=None, g=1, d=1, act=True): + """Initialize Conv layer with given arguments including activation.""" + super().__init__() + self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p, d), groups=g, dilation=d, bias=False) + self.bn = nn.BatchNorm2d(c2) + self.act = self.default_act if act is True else act if isinstance(act, nn.Module) else nn.Identity() + + def forward(self, x): + """Apply convolution, batch normalization and activation to input tensor.""" + return self.act(self.bn(self.conv(x))) + + def forward_fuse(self, x): + """Perform transposed convolution of 2D data.""" + return self.act(self.conv(x)) + + +class Conv2(Conv): + """Simplified RepConv module with Conv fusing.""" + + def __init__(self, c1, c2, k=3, s=1, p=None, g=1, d=1, act=True): + """Initialize Conv layer with given arguments including activation.""" + super().__init__(c1, c2, k, s, p, g=g, d=d, act=act) + self.cv2 = nn.Conv2d(c1, c2, 1, s, autopad(1, p, d), groups=g, dilation=d, bias=False) # add 1x1 conv + + def forward(self, x): + """Apply convolution, batch normalization and activation to input tensor.""" + return self.act(self.bn(self.conv(x) + self.cv2(x))) + + def fuse_convs(self): + """Fuse parallel convolutions.""" + w = torch.zeros_like(self.conv.weight.data) + i = [x // 2 for x in w.shape[2:]] + w[:, :, i[0]:i[0] + 1, i[1]:i[1] + 1] = self.cv2.weight.data.clone() + self.conv.weight.data += w + self.__delattr__('cv2') + + +class LightConv(nn.Module): + """Light convolution with args(ch_in, ch_out, kernel). 
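+    A 1x1 pointwise Conv without activation followed by a k x k depth-wise conv with activation, approximating a
+    dense k x k convolution at a fraction of the parameters and FLOPs.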
+ https://github.com/PaddlePaddle/PaddleDetection/blob/develop/ppdet/modeling/backbones/hgnet_v2.py + """ + + def __init__(self, c1, c2, k=1, act=nn.ReLU()): + """Initialize Conv layer with given arguments including activation.""" + super().__init__() + self.conv1 = Conv(c1, c2, 1, act=False) + self.conv2 = DWConv(c2, c2, k, act=act) + + def forward(self, x): + """Apply 2 convolutions to input tensor.""" + return self.conv2(self.conv1(x)) + + +class DWConv(Conv): + """Depth-wise convolution.""" + + def __init__(self, c1, c2, k=1, s=1, d=1, act=True): # ch_in, ch_out, kernel, stride, dilation, activation + super().__init__(c1, c2, k, s, g=math.gcd(c1, c2), d=d, act=act) + + +class DWConvTranspose2d(nn.ConvTranspose2d): + """Depth-wise transpose convolution.""" + + def __init__(self, c1, c2, k=1, s=1, p1=0, p2=0): # ch_in, ch_out, kernel, stride, padding, padding_out + super().__init__(c1, c2, k, s, p1, p2, groups=math.gcd(c1, c2)) + + +class ConvTranspose(nn.Module): + """Convolution transpose 2d layer.""" + default_act = nn.SiLU() # default activation + + def __init__(self, c1, c2, k=2, s=2, p=0, bn=True, act=True): + """Initialize ConvTranspose2d layer with batch normalization and activation function.""" + super().__init__() + self.conv_transpose = nn.ConvTranspose2d(c1, c2, k, s, p, bias=not bn) + self.bn = nn.BatchNorm2d(c2) if bn else nn.Identity() + self.act = self.default_act if act is True else act if isinstance(act, nn.Module) else nn.Identity() + + def forward(self, x): + """Applies transposed convolutions, batch normalization and activation to input.""" + return self.act(self.bn(self.conv_transpose(x))) + + def forward_fuse(self, x): + """Applies activation and convolution transpose operation to input.""" + return self.act(self.conv_transpose(x)) + + +class Focus(nn.Module): + """Focus wh information into c-space.""" + + def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups + super().__init__() + self.conv = Conv(c1 * 4, c2, k, s, p, g, act=act) + # self.contract = Contract(gain=2) + + def forward(self, x): # x(b,c,w,h) -> y(b,4c,w/2,h/2) + return self.conv(torch.cat((x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]), 1)) + # return self.conv(self.contract(x)) + + +class GhostConv(nn.Module): + """Ghost Convolution https://github.com/huawei-noah/ghostnet.""" + + def __init__(self, c1, c2, k=1, s=1, g=1, act=True): # ch_in, ch_out, kernel, stride, groups + super().__init__() + c_ = c2 // 2 # hidden channels + self.cv1 = Conv(c1, c_, k, s, None, g, act=act) + self.cv2 = Conv(c_, c_, 5, 1, None, c_, act=act) + + def forward(self, x): + """Forward propagation through a Ghost Bottleneck layer with skip connection.""" + y = self.cv1(x) + return torch.cat((y, self.cv2(y)), 1) + + +class RepConv(nn.Module): + """RepConv is a basic rep-style block, including training and deploy status + This code is based on https://github.com/DingXiaoH/RepVGG/blob/main/repvgg.py + """ + default_act = nn.SiLU() # default activation + + def __init__(self, c1, c2, k=3, s=1, p=1, g=1, d=1, act=True, bn=False, deploy=False): + super().__init__() + assert k == 3 and p == 1 + self.g = g + self.c1 = c1 + self.c2 = c2 + self.act = self.default_act if act is True else act if isinstance(act, nn.Module) else nn.Identity() + + self.bn = nn.BatchNorm2d(num_features=c1) if bn and c2 == c1 and s == 1 else None + self.conv1 = Conv(c1, c2, k, s, p=p, g=g, act=False) + self.conv2 = Conv(c1, c2, 1, s, p=(p - k // 2), g=g, act=False) + + def 
forward_fuse(self, x): + """Forward process""" + return self.act(self.conv(x)) + + def forward(self, x): + """Forward process""" + id_out = 0 if self.bn is None else self.bn(x) + return self.act(self.conv1(x) + self.conv2(x) + id_out) + + def get_equivalent_kernel_bias(self): + kernel3x3, bias3x3 = self._fuse_bn_tensor(self.conv1) + kernel1x1, bias1x1 = self._fuse_bn_tensor(self.conv2) + kernelid, biasid = self._fuse_bn_tensor(self.bn) + return kernel3x3 + self._pad_1x1_to_3x3_tensor(kernel1x1) + kernelid, bias3x3 + bias1x1 + biasid + + def _avg_to_3x3_tensor(self, avgp): + channels = self.c1 + groups = self.g + kernel_size = avgp.kernel_size + input_dim = channels // groups + k = torch.zeros((channels, input_dim, kernel_size, kernel_size)) + k[np.arange(channels), np.tile(np.arange(input_dim), groups), :, :] = 1.0 / kernel_size ** 2 + return k + + def _pad_1x1_to_3x3_tensor(self, kernel1x1): + if kernel1x1 is None: + return 0 + else: + return torch.nn.functional.pad(kernel1x1, [1, 1, 1, 1]) + + def _fuse_bn_tensor(self, branch): + if branch is None: + return 0, 0 + if isinstance(branch, Conv): + kernel = branch.conv.weight + running_mean = branch.bn.running_mean + running_var = branch.bn.running_var + gamma = branch.bn.weight + beta = branch.bn.bias + eps = branch.bn.eps + elif isinstance(branch, nn.BatchNorm2d): + if not hasattr(self, 'id_tensor'): + input_dim = self.c1 // self.g + kernel_value = np.zeros((self.c1, input_dim, 3, 3), dtype=np.float32) + for i in range(self.c1): + kernel_value[i, i % input_dim, 1, 1] = 1 + self.id_tensor = torch.from_numpy(kernel_value).to(branch.weight.device) + kernel = self.id_tensor + running_mean = branch.running_mean + running_var = branch.running_var + gamma = branch.weight + beta = branch.bias + eps = branch.eps + std = (running_var + eps).sqrt() + t = (gamma / std).reshape(-1, 1, 1, 1) + return kernel * t, beta - running_mean * gamma / std + + def fuse_convs(self): + if hasattr(self, 'conv'): + return + kernel, bias = self.get_equivalent_kernel_bias() + self.conv = nn.Conv2d(in_channels=self.conv1.conv.in_channels, + out_channels=self.conv1.conv.out_channels, + kernel_size=self.conv1.conv.kernel_size, + stride=self.conv1.conv.stride, + padding=self.conv1.conv.padding, + dilation=self.conv1.conv.dilation, + groups=self.conv1.conv.groups, + bias=True).requires_grad_(False) + self.conv.weight.data = kernel + self.conv.bias.data = bias + for para in self.parameters(): + para.detach_() + self.__delattr__('conv1') + self.__delattr__('conv2') + if hasattr(self, 'nm'): + self.__delattr__('nm') + if hasattr(self, 'bn'): + self.__delattr__('bn') + if hasattr(self, 'id_tensor'): + self.__delattr__('id_tensor') + + +class ChannelAttention(nn.Module): + """Channel-attention module https://github.com/open-mmlab/mmdetection/tree/v3.0.0rc1/configs/rtmdet.""" + + def __init__(self, channels: int) -> None: + super().__init__() + self.pool = nn.AdaptiveAvgPool2d(1) + self.fc = nn.Conv2d(channels, channels, 1, 1, 0, bias=True) + self.act = nn.Sigmoid() + + def forward(self, x: torch.Tensor) -> torch.Tensor: + return x * self.act(self.fc(self.pool(x))) + + +class SpatialAttention(nn.Module): + """Spatial-attention module.""" + + def __init__(self, kernel_size=7): + """Initialize Spatial-attention module with kernel size argument.""" + super().__init__() + assert kernel_size in (3, 7), 'kernel size must be 3 or 7' + padding = 3 if kernel_size == 7 else 1 + self.cv1 = nn.Conv2d(2, 1, kernel_size, padding=padding, bias=False) + self.act = nn.Sigmoid() + + def 
forward(self, x): + """Apply channel and spatial attention on input for feature recalibration.""" + return x * self.act(self.cv1(torch.cat([torch.mean(x, 1, keepdim=True), torch.max(x, 1, keepdim=True)[0]], 1))) + + +class CBAM(nn.Module): + """Convolutional Block Attention Module.""" + + def __init__(self, c1, kernel_size=7): # ch_in, kernels + super().__init__() + self.channel_attention = ChannelAttention(c1) + self.spatial_attention = SpatialAttention(kernel_size) + + def forward(self, x): + """Applies the forward pass through C1 module.""" + return self.spatial_attention(self.channel_attention(x)) + + +class Concat(nn.Module): + """Concatenate a list of tensors along dimension.""" + + def __init__(self, dimension=1): + """Concatenates a list of tensors along a specified dimension.""" + super().__init__() + self.d = dimension + + def forward(self, x): + """Forward pass for the YOLOv8 mask Proto module.""" + return torch.cat(x, self.d) diff --git a/ultralytics/nn/modules/head.py b/ultralytics/nn/modules/head.py new file mode 100644 index 0000000..325546d --- /dev/null +++ b/ultralytics/nn/modules/head.py @@ -0,0 +1,362 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +""" +Model head modules +""" + +import math + +import torch +import torch.nn as nn +from torch.nn.init import constant_, xavier_uniform_ + +from ultralytics.yolo.utils.tal import dist2bbox, make_anchors + +from .block import DFL, Proto +from .conv import Conv +from .transformer import MLP, DeformableTransformerDecoder, DeformableTransformerDecoderLayer +from .utils import bias_init_with_prob, linear_init_ + +__all__ = 'Detect', 'Segment', 'Pose', 'Classify', 'RTDETRDecoder' + + +class Detect(nn.Module): + """YOLOv8 Detect head for detection models.""" + dynamic = False # force grid reconstruction + export = False # export mode + shape = None + anchors = torch.empty(0) # init + strides = torch.empty(0) # init + + def __init__(self, nc=80, ch=()): # detection layer + super().__init__() + self.nc = nc # number of classes + self.nl = len(ch) # number of detection layers + self.reg_max = 16 # DFL channels (ch[0] // 16 to scale 4/8/12/16/20 for n/s/m/l/x) + self.no = nc + self.reg_max * 4 # number of outputs per anchor + self.stride = torch.zeros(self.nl) # strides computed during build + c2, c3 = max((16, ch[0] // 4, self.reg_max * 4)), max(ch[0], min(self.nc, 100)) # channels + self.cv2 = nn.ModuleList( + nn.Sequential(Conv(x, c2, 3), Conv(c2, c2, 3), nn.Conv2d(c2, 4 * self.reg_max, 1)) for x in ch) + self.cv3 = nn.ModuleList(nn.Sequential(Conv(x, c3, 3), Conv(c3, c3, 3), nn.Conv2d(c3, self.nc, 1)) for x in ch) + self.dfl = DFL(self.reg_max) if self.reg_max > 1 else nn.Identity() + + def forward(self, x): + """Concatenates and returns predicted bounding boxes and class probabilities.""" + shape = x[0].shape # BCHW + for i in range(self.nl): + x[i] = torch.cat((self.cv2[i](x[i]), self.cv3[i](x[i])), 1) + if self.training: + return x + if self.export: + return x + elif self.dynamic or self.shape != shape: + self.anchors, self.strides = (x.transpose(0, 1) for x in make_anchors(x, self.stride, 0.5)) + self.shape = shape + + x_cat = torch.cat([xi.view(shape[0], self.no, -1) for xi in x], 2) + if self.export and self.format in ('saved_model', 'pb', 'tflite', 'edgetpu', 'tfjs'): # avoid TF FlexSplitV ops + box = x_cat[:, :self.reg_max * 4] + cls = x_cat[:, self.reg_max * 4:] + else: + box, cls = x_cat.split((self.reg_max * 4, self.nc), 1) + dbox = dist2bbox(self.dfl(box), self.anchors.unsqueeze(0), xywh=True, dim=1) * self.strides + y = 
torch.cat((dbox, cls.sigmoid()), 1) + return y if self.export else (y, x) + + def bias_init(self): + """Initialize Detect() biases, WARNING: requires stride availability.""" + m = self # self.model[-1] # Detect() module + # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1 + # ncf = math.log(0.6 / (m.nc - 0.999999)) if cf is None else torch.log(cf / cf.sum()) # nominal class frequency + for a, b, s in zip(m.cv2, m.cv3, m.stride): # from + a[-1].bias.data[:] = 1.0 # box + b[-1].bias.data[:m.nc] = math.log(5 / m.nc / (640 / s) ** 2) # cls (.01 objects, 80 classes, 640 img) + + +class Segment(Detect): + """YOLOv8 Segment head for segmentation models.""" + + def __init__(self, nc=80, nm=32, npr=256, ch=()): + """Initialize the YOLO model attributes such as the number of masks, prototypes, and the convolution layers.""" + super().__init__(nc, ch) + self.nm = nm # number of masks + self.npr = npr # number of protos + self.proto = Proto(ch[0], self.npr, self.nm) # protos + self.detect = Detect.forward + + c4 = max(ch[0] // 4, self.nm) + self.cv4 = nn.ModuleList(nn.Sequential(Conv(x, c4, 3), Conv(c4, c4, 3), nn.Conv2d(c4, self.nm, 1)) for x in ch) + + def forward(self, x): + """Return model outputs and mask coefficients if training, otherwise return outputs and mask coefficients.""" + p = self.proto(x[0]) # mask protos + bs = p.shape[0] # batch size + + mc = torch.cat([self.cv4[i](x[i]).view(bs, self.nm, -1) for i in range(self.nl)], 2) # mask coefficients + x = self.detect(self, x) + if self.training: + return x, mc, p + return (torch.cat([x, mc], 1), p) if self.export else (torch.cat([x[0], mc], 1), (x[1], mc, p)) + + +class Pose(Detect): + """YOLOv8 Pose head for keypoints models.""" + + def __init__(self, nc=80, kpt_shape=(17, 3), ch=()): + """Initialize YOLO network with default parameters and Convolutional Layers.""" + super().__init__(nc, ch) + self.kpt_shape = kpt_shape # number of keypoints, number of dims (2 for x,y or 3 for x,y,visible) + self.nk = kpt_shape[0] * kpt_shape[1] # number of keypoints total + self.detect = Detect.forward + + c4 = max(ch[0] // 4, self.nk) + self.cv4 = nn.ModuleList(nn.Sequential(Conv(x, c4, 3), Conv(c4, c4, 3), nn.Conv2d(c4, self.nk, 1)) for x in ch) + + def forward(self, x): + """Perform forward pass through YOLO model and return predictions.""" + bs = x[0].shape[0] # batch size + if self.export: + temp_x = [xi.clone() for xi in x] + x = self.detect(self, x) + result = list() + for i in range(self.nl): + result.append(torch.cat([x[i],self.cv4[i](temp_x[i])], 1)) + return result + kpt = torch.cat([self.cv4[i](x[i]).view(bs, self.nk, -1) for i in range(self.nl)], -1) # (bs, 17*3, h*w) + x = self.detect(self, x) + if self.training: + return x, kpt + pred_kpt = self.kpts_decode(bs, kpt) + return torch.cat([x, pred_kpt], 1) if self.export else (torch.cat([x[0], pred_kpt], 1), (x[1], kpt)) + + def kpts_decode(self, bs, kpts): + """Decodes keypoints.""" + ndim = self.kpt_shape[1] + if self.export: # required for TFLite export to avoid 'PLACEHOLDER_FOR_GREATER_OP_CODES' bug + y = kpts.view(bs, *self.kpt_shape, -1) + a = (y[:, :, :2] * 2.0 + (self.anchors - 0.5)) * self.strides + if ndim == 3: + a = torch.cat((a, y[:, :, 2:3].sigmoid()), 2) + return a.view(bs, self.nk, -1) + else: + y = kpts.clone() + if ndim == 3: + y[:, 2::3].sigmoid_() # inplace sigmoid + y[:, 0::ndim] = (y[:, 0::ndim] * 2.0 + (self.anchors[0] - 0.5)) * self.strides + y[:, 1::ndim] = (y[:, 1::ndim] * 2.0 + (self.anchors[1] - 0.5)) * self.strides + 
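+            # x/y are decoded like box centers: (2 * offset + anchor - 0.5) per grid cell, scaled by the per-level stride; the visibility channel (ndim == 3) was squashed by the in-place sigmoid above.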
return y + + +class Classify(nn.Module): + """YOLOv8 classification head, i.e. x(b,c1,20,20) to x(b,c2).""" + + def __init__(self, c1, c2, k=1, s=1, p=None, g=1): # ch_in, ch_out, kernel, stride, padding, groups + super().__init__() + c_ = 1280 # efficientnet_b0 size + self.conv = Conv(c1, c_, k, s, p, g) + self.pool = nn.AdaptiveAvgPool2d(1) # to x(b,c_,1,1) + self.drop = nn.Dropout(p=0.0, inplace=True) + self.linear = nn.Linear(c_, c2) # to x(b,c2) + + def forward(self, x): + """Performs a forward pass of the YOLO model on input image data.""" + if isinstance(x, list): + x = torch.cat(x, 1) + x = self.linear(self.drop(self.pool(self.conv(x)).flatten(1))) + return x if self.training else x.softmax(1) + + +class RTDETRDecoder(nn.Module): + export = False # export mode + + def __init__( + self, + nc=80, + ch=(512, 1024, 2048), + hd=256, # hidden dim + nq=300, # num queries + ndp=4, # num decoder points + nh=8, # num head + ndl=6, # num decoder layers + d_ffn=1024, # dim of feedforward + dropout=0., + act=nn.ReLU(), + eval_idx=-1, + # training args + nd=100, # num denoising + label_noise_ratio=0.5, + box_noise_scale=1.0, + learnt_init_query=False): + super().__init__() + self.hidden_dim = hd + self.nhead = nh + self.nl = len(ch) # num level + self.nc = nc + self.num_queries = nq + self.num_decoder_layers = ndl + + # backbone feature projection + self.input_proj = nn.ModuleList(nn.Sequential(nn.Conv2d(x, hd, 1, bias=False), nn.BatchNorm2d(hd)) for x in ch) + # NOTE: simplified version but it's not consistent with .pt weights. + # self.input_proj = nn.ModuleList(Conv(x, hd, act=False) for x in ch) + + # Transformer module + decoder_layer = DeformableTransformerDecoderLayer(hd, nh, d_ffn, dropout, act, self.nl, ndp) + self.decoder = DeformableTransformerDecoder(hd, decoder_layer, ndl, eval_idx) + + # denoising part + self.denoising_class_embed = nn.Embedding(nc, hd) + self.num_denoising = nd + self.label_noise_ratio = label_noise_ratio + self.box_noise_scale = box_noise_scale + + # decoder embedding + self.learnt_init_query = learnt_init_query + if learnt_init_query: + self.tgt_embed = nn.Embedding(nq, hd) + self.query_pos_head = MLP(4, 2 * hd, hd, num_layers=2) + + # encoder head + self.enc_output = nn.Sequential(nn.Linear(hd, hd), nn.LayerNorm(hd)) + self.enc_score_head = nn.Linear(hd, nc) + self.enc_bbox_head = MLP(hd, hd, 4, num_layers=3) + + # decoder head + self.dec_score_head = nn.ModuleList([nn.Linear(hd, nc) for _ in range(ndl)]) + self.dec_bbox_head = nn.ModuleList([MLP(hd, hd, 4, num_layers=3) for _ in range(ndl)]) + + self._reset_parameters() + + def forward(self, x, batch=None): + from ultralytics.vit.utils.ops import get_cdn_group + + # input projection and embedding + feats, shapes = self._get_encoder_input(x) + + # prepare denoising training + dn_embed, dn_bbox, attn_mask, dn_meta = \ + get_cdn_group(batch, + self.nc, + self.num_queries, + self.denoising_class_embed.weight, + self.num_denoising, + self.label_noise_ratio, + self.box_noise_scale, + self.training) + + embed, refer_bbox, enc_bboxes, enc_scores = \ + self._get_decoder_input(feats, shapes, dn_embed, dn_bbox) + + # decoder + dec_bboxes, dec_scores = self.decoder(embed, + refer_bbox, + feats, + shapes, + self.dec_bbox_head, + self.dec_score_head, + self.query_pos_head, + attn_mask=attn_mask) + x = dec_bboxes, dec_scores, enc_bboxes, enc_scores, dn_meta + if self.training: + return x + # (bs, 300, 4+nc) + y = torch.cat((dec_bboxes.squeeze(0), dec_scores.squeeze(0).sigmoid()), -1) + return y if self.export else (y, x) + + 
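+    # _generate_anchors builds one (cx, cy, w, h) prior per feature-map cell in normalized [0, 1] coordinates with
+    # w = h = grid_size * 2**level, then maps them through the logit (inverse sigmoid) so the bbox heads can refine
+    # them additively; anchors within eps of the border are masked to inf.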
def _generate_anchors(self, shapes, grid_size=0.05, dtype=torch.float32, device='cpu', eps=1e-2): + anchors = [] + for i, (h, w) in enumerate(shapes): + grid_y, grid_x = torch.meshgrid(torch.arange(end=h, dtype=dtype, device=device), + torch.arange(end=w, dtype=dtype, device=device), + indexing='ij') + grid_xy = torch.stack([grid_x, grid_y], -1) # (h, w, 2) + + valid_WH = torch.tensor([h, w], dtype=dtype, device=device) + grid_xy = (grid_xy.unsqueeze(0) + 0.5) / valid_WH # (1, h, w, 2) + wh = torch.ones_like(grid_xy, dtype=dtype, device=device) * grid_size * (2.0 ** i) + anchors.append(torch.cat([grid_xy, wh], -1).view(-1, h * w, 4)) # (1, h*w, 4) + + anchors = torch.cat(anchors, 1) # (1, h*w*nl, 4) + valid_mask = ((anchors > eps) * (anchors < 1 - eps)).all(-1, keepdim=True) # 1, h*w*nl, 1 + anchors = torch.log(anchors / (1 - anchors)) + anchors = anchors.masked_fill(~valid_mask, float('inf')) + return anchors, valid_mask + + def _get_encoder_input(self, x): + # get projection features + x = [self.input_proj[i](feat) for i, feat in enumerate(x)] + # get encoder inputs + feats = [] + shapes = [] + for feat in x: + h, w = feat.shape[2:] + # [b, c, h, w] -> [b, h*w, c] + feats.append(feat.flatten(2).permute(0, 2, 1)) + # [nl, 2] + shapes.append([h, w]) + + # [b, h*w, c] + feats = torch.cat(feats, 1) + return feats, shapes + + def _get_decoder_input(self, feats, shapes, dn_embed=None, dn_bbox=None): + bs = len(feats) + # prepare input for decoder + anchors, valid_mask = self._generate_anchors(shapes, dtype=feats.dtype, device=feats.device) + features = self.enc_output(valid_mask * feats) # bs, h*w, 256 + + enc_outputs_scores = self.enc_score_head(features) # (bs, h*w, nc) + # dynamic anchors + static content + enc_outputs_bboxes = self.enc_bbox_head(features) + anchors # (bs, h*w, 4) + + # query selection + # (bs, num_queries) + topk_ind = torch.topk(enc_outputs_scores.max(-1).values, self.num_queries, dim=1).indices.view(-1) + # (bs, num_queries) + batch_ind = torch.arange(end=bs, dtype=topk_ind.dtype).unsqueeze(-1).repeat(1, self.num_queries).view(-1) + + # Unsigmoided + refer_bbox = enc_outputs_bboxes[batch_ind, topk_ind].view(bs, self.num_queries, -1) + # refer_bbox = torch.gather(enc_outputs_bboxes, 1, topk_ind.reshape(bs, self.num_queries).unsqueeze(-1).repeat(1, 1, 4)) + + enc_bboxes = refer_bbox.sigmoid() + if dn_bbox is not None: + refer_bbox = torch.cat([dn_bbox, refer_bbox], 1) + if self.training: + refer_bbox = refer_bbox.detach() + enc_scores = enc_outputs_scores[batch_ind, topk_ind].view(bs, self.num_queries, -1) + + if self.learnt_init_query: + embeddings = self.tgt_embed.weight.unsqueeze(0).repeat(bs, 1, 1) + else: + embeddings = features[batch_ind, topk_ind].view(bs, self.num_queries, -1) + if self.training: + embeddings = embeddings.detach() + if dn_embed is not None: + embeddings = torch.cat([dn_embed, embeddings], 1) + + return embeddings, refer_bbox, enc_bboxes, enc_scores + + # TODO + def _reset_parameters(self): + # class and bbox head init + bias_cls = bias_init_with_prob(0.01) / 80 * self.nc + # NOTE: the weight initialization in `linear_init_` would cause NaN when training with custom datasets. + # linear_init_(self.enc_score_head) + constant_(self.enc_score_head.bias, bias_cls) + constant_(self.enc_bbox_head.layers[-1].weight, 0.) + constant_(self.enc_bbox_head.layers[-1].bias, 0.) + for cls_, reg_ in zip(self.dec_score_head, self.dec_bbox_head): + # linear_init_(cls_) + constant_(cls_.bias, bias_cls) + constant_(reg_.layers[-1].weight, 0.) 
+ constant_(reg_.layers[-1].bias, 0.) + + linear_init_(self.enc_output[0]) + xavier_uniform_(self.enc_output[0].weight) + if self.learnt_init_query: + xavier_uniform_(self.tgt_embed.weight) + xavier_uniform_(self.query_pos_head.layers[0].weight) + xavier_uniform_(self.query_pos_head.layers[1].weight) + for layer in self.input_proj: + xavier_uniform_(layer[0].weight) diff --git a/ultralytics/nn/modules/transformer.py b/ultralytics/nn/modules/transformer.py new file mode 100644 index 0000000..b3304cc --- /dev/null +++ b/ultralytics/nn/modules/transformer.py @@ -0,0 +1,378 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +""" +Transformer modules +""" + +import math + +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.nn.init import constant_, xavier_uniform_ + +from .conv import Conv +from .utils import _get_clones, inverse_sigmoid, multi_scale_deformable_attn_pytorch + +__all__ = ('TransformerEncoderLayer', 'TransformerLayer', 'TransformerBlock', 'MLPBlock', 'LayerNorm2d', 'AIFI', + 'DeformableTransformerDecoder', 'DeformableTransformerDecoderLayer', 'MSDeformAttn', 'MLP') + + +class TransformerEncoderLayer(nn.Module): + """Transformer Encoder.""" + + def __init__(self, c1, cm=2048, num_heads=8, dropout=0.0, act=nn.GELU(), normalize_before=False): + super().__init__() + self.ma = nn.MultiheadAttention(c1, num_heads, dropout=dropout, batch_first=True) + # Implementation of Feedforward model + self.fc1 = nn.Linear(c1, cm) + self.fc2 = nn.Linear(cm, c1) + + self.norm1 = nn.LayerNorm(c1) + self.norm2 = nn.LayerNorm(c1) + self.dropout = nn.Dropout(dropout) + self.dropout1 = nn.Dropout(dropout) + self.dropout2 = nn.Dropout(dropout) + + self.act = act + self.normalize_before = normalize_before + + def with_pos_embed(self, tensor, pos=None): + """Add position embeddings if given.""" + return tensor if pos is None else tensor + pos + + def forward_post(self, src, src_mask=None, src_key_padding_mask=None, pos=None): + q = k = self.with_pos_embed(src, pos) + src2 = self.ma(q, k, value=src, attn_mask=src_mask, key_padding_mask=src_key_padding_mask)[0] + src = src + self.dropout1(src2) + src = self.norm1(src) + src2 = self.fc2(self.dropout(self.act(self.fc1(src)))) + src = src + self.dropout2(src2) + src = self.norm2(src) + return src + + def forward_pre(self, src, src_mask=None, src_key_padding_mask=None, pos=None): + src2 = self.norm1(src) + q = k = self.with_pos_embed(src2, pos) + src2 = self.ma(q, k, value=src2, attn_mask=src_mask, key_padding_mask=src_key_padding_mask)[0] + src = src + self.dropout1(src2) + src2 = self.norm2(src) + src2 = self.fc2(self.dropout(self.act(self.fc1(src2)))) + src = src + self.dropout2(src2) + return src + + def forward(self, src, src_mask=None, src_key_padding_mask=None, pos=None): + """Forward propagates the input through the encoder module.""" + if self.normalize_before: + return self.forward_pre(src, src_mask, src_key_padding_mask, pos) + return self.forward_post(src, src_mask, src_key_padding_mask, pos) + + +class AIFI(TransformerEncoderLayer): + + def __init__(self, c1, cm=2048, num_heads=8, dropout=0, act=nn.GELU(), normalize_before=False): + super().__init__(c1, cm, num_heads, dropout, act, normalize_before) + + def forward(self, x): + c, h, w = x.shape[1:] + pos_embed = self.build_2d_sincos_position_embedding(w, h, c) + # flatten [B, C, H, W] to [B, HxW, C] + x = super().forward(x.flatten(2).permute(0, 2, 1), pos=pos_embed.to(device=x.device, dtype=x.dtype)) + return x.permute(0, 2, 1).view([-1, c, h, w]).contiguous() + + @staticmethod 
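+    # Standard 2D sine-cosine positional encoding: each spatial axis contributes embed_dim // 4 sine and
+    # embed_dim // 4 cosine channels at geometrically spaced frequencies, e.g. a 20x20 map with embed_dim=256
+    # gives a (1, 400, 256) embedding.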
+ def build_2d_sincos_position_embedding(w, h, embed_dim=256, temperature=10000.): + grid_w = torch.arange(int(w), dtype=torch.float32) + grid_h = torch.arange(int(h), dtype=torch.float32) + grid_w, grid_h = torch.meshgrid(grid_w, grid_h, indexing='ij') + assert embed_dim % 4 == 0, \ + 'Embed dimension must be divisible by 4 for 2D sin-cos position embedding' + pos_dim = embed_dim // 4 + omega = torch.arange(pos_dim, dtype=torch.float32) / pos_dim + omega = 1. / (temperature ** omega) + + out_w = grid_w.flatten()[..., None] @ omega[None] + out_h = grid_h.flatten()[..., None] @ omega[None] + + return torch.concat([torch.sin(out_w), torch.cos(out_w), + torch.sin(out_h), torch.cos(out_h)], axis=1)[None, :, :] + + +class TransformerLayer(nn.Module): + """Transformer layer https://arxiv.org/abs/2010.11929 (LayerNorm layers removed for better performance).""" + + def __init__(self, c, num_heads): + """Initializes a self-attention mechanism using linear transformations and multi-head attention.""" + super().__init__() + self.q = nn.Linear(c, c, bias=False) + self.k = nn.Linear(c, c, bias=False) + self.v = nn.Linear(c, c, bias=False) + self.ma = nn.MultiheadAttention(embed_dim=c, num_heads=num_heads) + self.fc1 = nn.Linear(c, c, bias=False) + self.fc2 = nn.Linear(c, c, bias=False) + + def forward(self, x): + """Apply a transformer block to the input x and return the output.""" + x = self.ma(self.q(x), self.k(x), self.v(x))[0] + x + x = self.fc2(self.fc1(x)) + x + return x + + +class TransformerBlock(nn.Module): + """Vision Transformer https://arxiv.org/abs/2010.11929.""" + + def __init__(self, c1, c2, num_heads, num_layers): + """Initialize a Transformer module with position embedding and specified number of heads and layers.""" + super().__init__() + self.conv = None + if c1 != c2: + self.conv = Conv(c1, c2) + self.linear = nn.Linear(c2, c2) # learnable position embedding + self.tr = nn.Sequential(*(TransformerLayer(c2, num_heads) for _ in range(num_layers))) + self.c2 = c2 + + def forward(self, x): + """Forward propagates the input through the bottleneck module.""" + if self.conv is not None: + x = self.conv(x) + b, _, w, h = x.shape + p = x.flatten(2).permute(2, 0, 1) + return self.tr(p + self.linear(p)).permute(1, 2, 0).reshape(b, self.c2, w, h) + + +class MLPBlock(nn.Module): + + def __init__(self, embedding_dim, mlp_dim, act=nn.GELU): + super().__init__() + self.lin1 = nn.Linear(embedding_dim, mlp_dim) + self.lin2 = nn.Linear(mlp_dim, embedding_dim) + self.act = act() + + def forward(self, x: torch.Tensor) -> torch.Tensor: + return self.lin2(self.act(self.lin1(x))) + + +class MLP(nn.Module): + """ Very simple multi-layer perceptron (also called FFN)""" + + def __init__(self, input_dim, hidden_dim, output_dim, num_layers): + super().__init__() + self.num_layers = num_layers + h = [hidden_dim] * (num_layers - 1) + self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])) + + def forward(self, x): + for i, layer in enumerate(self.layers): + x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x) + return x + + +# From https://github.com/facebookresearch/detectron2/blob/main/detectron2/layers/batch_norm.py # noqa +# Itself from https://github.com/facebookresearch/ConvNeXt/blob/d1fa8f6fef0a165b27399986cc2bdacc92777e40/models/convnext.py#L119 # noqa +class LayerNorm2d(nn.Module): + + def __init__(self, num_channels, eps=1e-6): + super().__init__() + self.weight = nn.Parameter(torch.ones(num_channels)) + self.bias = nn.Parameter(torch.zeros(num_channels)) 
+ self.eps = eps + + def forward(self, x): + u = x.mean(1, keepdim=True) + s = (x - u).pow(2).mean(1, keepdim=True) + x = (x - u) / torch.sqrt(s + self.eps) + x = self.weight[:, None, None] * x + self.bias[:, None, None] + return x + + +class MSDeformAttn(nn.Module): + """ + Original Multi-Scale Deformable Attention Module. + https://github.com/fundamentalvision/Deformable-DETR/blob/main/models/ops/modules/ms_deform_attn.py + """ + + def __init__(self, d_model=256, n_levels=4, n_heads=8, n_points=4): + super().__init__() + if d_model % n_heads != 0: + raise ValueError(f'd_model must be divisible by n_heads, but got {d_model} and {n_heads}') + _d_per_head = d_model // n_heads + # you'd better set _d_per_head to a power of 2 which is more efficient in our CUDA implementation + assert _d_per_head * n_heads == d_model, '`d_model` must be divisible by `n_heads`' + + self.im2col_step = 64 + + self.d_model = d_model + self.n_levels = n_levels + self.n_heads = n_heads + self.n_points = n_points + + self.sampling_offsets = nn.Linear(d_model, n_heads * n_levels * n_points * 2) + self.attention_weights = nn.Linear(d_model, n_heads * n_levels * n_points) + self.value_proj = nn.Linear(d_model, d_model) + self.output_proj = nn.Linear(d_model, d_model) + + self._reset_parameters() + + def _reset_parameters(self): + constant_(self.sampling_offsets.weight.data, 0.) + thetas = torch.arange(self.n_heads, dtype=torch.float32) * (2.0 * math.pi / self.n_heads) + grid_init = torch.stack([thetas.cos(), thetas.sin()], -1) + grid_init = (grid_init / grid_init.abs().max(-1, keepdim=True)[0]).view(self.n_heads, 1, 1, 2).repeat( + 1, self.n_levels, self.n_points, 1) + for i in range(self.n_points): + grid_init[:, :, i, :] *= i + 1 + with torch.no_grad(): + self.sampling_offsets.bias = nn.Parameter(grid_init.view(-1)) + constant_(self.attention_weights.weight.data, 0.) + constant_(self.attention_weights.bias.data, 0.) + xavier_uniform_(self.value_proj.weight.data) + constant_(self.value_proj.bias.data, 0.) + xavier_uniform_(self.output_proj.weight.data) + constant_(self.output_proj.bias.data, 0.) 
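+        # With the offset weights zeroed, the bias above starts the n_points samples on a per-head ray (one direction per head, radius growing with the point index), so training begins by sampling a small neighborhood around each reference point.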
+ + def forward(self, query, refer_bbox, value, value_shapes, value_mask=None): + """ + https://github.com/PaddlePaddle/PaddleDetection/blob/develop/ppdet/modeling/transformers/deformable_transformer.py + Args: + query (torch.Tensor): [bs, query_length, C] + refer_bbox (torch.Tensor): [bs, query_length, n_levels, 2], range in [0, 1], top-left (0,0), + bottom-right (1, 1), including padding area + value (torch.Tensor): [bs, value_length, C] + value_shapes (List): [n_levels, 2], [(H_0, W_0), (H_1, W_1), ..., (H_{L-1}, W_{L-1})] + value_mask (Tensor): [bs, value_length], True for non-padding elements, False for padding elements + + Returns: + output (Tensor): [bs, Length_{query}, C] + """ + bs, len_q = query.shape[:2] + len_v = value.shape[1] + assert sum(s[0] * s[1] for s in value_shapes) == len_v + + value = self.value_proj(value) + if value_mask is not None: + value = value.masked_fill(value_mask[..., None], float(0)) + value = value.view(bs, len_v, self.n_heads, self.d_model // self.n_heads) + sampling_offsets = self.sampling_offsets(query).view(bs, len_q, self.n_heads, self.n_levels, self.n_points, 2) + attention_weights = self.attention_weights(query).view(bs, len_q, self.n_heads, self.n_levels * self.n_points) + attention_weights = F.softmax(attention_weights, -1).view(bs, len_q, self.n_heads, self.n_levels, self.n_points) + # N, Len_q, n_heads, n_levels, n_points, 2 + num_points = refer_bbox.shape[-1] + if num_points == 2: + offset_normalizer = torch.as_tensor(value_shapes, dtype=query.dtype, device=query.device).flip(-1) + add = sampling_offsets / offset_normalizer[None, None, None, :, None, :] + sampling_locations = refer_bbox[:, :, None, :, None, :] + add + elif num_points == 4: + add = sampling_offsets / self.n_points * refer_bbox[:, :, None, :, None, 2:] * 0.5 + sampling_locations = refer_bbox[:, :, None, :, None, :2] + add + else: + raise ValueError(f'Last dim of reference_points must be 2 or 4, but got {num_points}.') + output = multi_scale_deformable_attn_pytorch(value, value_shapes, sampling_locations, attention_weights) + output = self.output_proj(output) + return output + + +class DeformableTransformerDecoderLayer(nn.Module): + """ + https://github.com/PaddlePaddle/PaddleDetection/blob/develop/ppdet/modeling/transformers/deformable_transformer.py + https://github.com/fundamentalvision/Deformable-DETR/blob/main/models/deformable_transformer.py + """ + + def __init__(self, d_model=256, n_heads=8, d_ffn=1024, dropout=0., act=nn.ReLU(), n_levels=4, n_points=4): + super().__init__() + + # self attention + self.self_attn = nn.MultiheadAttention(d_model, n_heads, dropout=dropout) + self.dropout1 = nn.Dropout(dropout) + self.norm1 = nn.LayerNorm(d_model) + + # cross attention + self.cross_attn = MSDeformAttn(d_model, n_levels, n_heads, n_points) + self.dropout2 = nn.Dropout(dropout) + self.norm2 = nn.LayerNorm(d_model) + + # ffn + self.linear1 = nn.Linear(d_model, d_ffn) + self.act = act + self.dropout3 = nn.Dropout(dropout) + self.linear2 = nn.Linear(d_ffn, d_model) + self.dropout4 = nn.Dropout(dropout) + self.norm3 = nn.LayerNorm(d_model) + + @staticmethod + def with_pos_embed(tensor, pos): + return tensor if pos is None else tensor + pos + + def forward_ffn(self, tgt): + tgt2 = self.linear2(self.dropout3(self.act(self.linear1(tgt)))) + tgt = tgt + self.dropout4(tgt2) + tgt = self.norm3(tgt) + return tgt + + def forward(self, embed, refer_bbox, feats, shapes, padding_mask=None, attn_mask=None, query_pos=None): + # self attention + q = k = self.with_pos_embed(embed, query_pos) + 
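+        # nn.MultiheadAttention defaults to (sequence, batch, embed) inputs, hence the transposes around the call below.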
tgt = self.self_attn(q.transpose(0, 1), k.transpose(0, 1), embed.transpose(0, 1), + attn_mask=attn_mask)[0].transpose(0, 1) + embed = embed + self.dropout1(tgt) + embed = self.norm1(embed) + + # cross attention + tgt = self.cross_attn(self.with_pos_embed(embed, query_pos), refer_bbox.unsqueeze(2), feats, shapes, + padding_mask) + embed = embed + self.dropout2(tgt) + embed = self.norm2(embed) + + # ffn + embed = self.forward_ffn(embed) + + return embed + + +class DeformableTransformerDecoder(nn.Module): + """ + https://github.com/PaddlePaddle/PaddleDetection/blob/develop/ppdet/modeling/transformers/deformable_transformer.py + """ + + def __init__(self, hidden_dim, decoder_layer, num_layers, eval_idx=-1): + super().__init__() + self.layers = _get_clones(decoder_layer, num_layers) + self.num_layers = num_layers + self.hidden_dim = hidden_dim + self.eval_idx = eval_idx if eval_idx >= 0 else num_layers + eval_idx + + def forward( + self, + embed, # decoder embeddings + refer_bbox, # anchor + feats, # image features + shapes, # feature shapes + bbox_head, + score_head, + pos_mlp, + attn_mask=None, + padding_mask=None): + output = embed + dec_bboxes = [] + dec_cls = [] + last_refined_bbox = None + refer_bbox = refer_bbox.sigmoid() + for i, layer in enumerate(self.layers): + output = layer(output, refer_bbox, feats, shapes, padding_mask, attn_mask, pos_mlp(refer_bbox)) + + # refine bboxes, (bs, num_queries+num_denoising, 4) + refined_bbox = torch.sigmoid(bbox_head[i](output) + inverse_sigmoid(refer_bbox)) + + if self.training: + dec_cls.append(score_head[i](output)) + if i == 0: + dec_bboxes.append(refined_bbox) + else: + dec_bboxes.append(torch.sigmoid(bbox_head[i](output) + inverse_sigmoid(last_refined_bbox))) + elif i == self.eval_idx: + dec_cls.append(score_head[i](output)) + dec_bboxes.append(refined_bbox) + break + + last_refined_bbox = refined_bbox + refer_bbox = refined_bbox.detach() if self.training else refined_bbox + + return torch.stack(dec_bboxes), torch.stack(dec_cls) diff --git a/ultralytics/nn/modules/utils.py b/ultralytics/nn/modules/utils.py new file mode 100644 index 0000000..f8636dc --- /dev/null +++ b/ultralytics/nn/modules/utils.py @@ -0,0 +1,78 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +""" +Module utils +""" + +import copy +import math + +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.nn.init import uniform_ + +__all__ = 'multi_scale_deformable_attn_pytorch', 'inverse_sigmoid' + + +def _get_clones(module, n): + return nn.ModuleList([copy.deepcopy(module) for _ in range(n)]) + + +def bias_init_with_prob(prior_prob=0.01): + """initialize conv/fc bias value according to a given probability value.""" + return float(-np.log((1 - prior_prob) / prior_prob)) # return bias_init + + +def linear_init_(module): + bound = 1 / math.sqrt(module.weight.shape[0]) + uniform_(module.weight, -bound, bound) + if hasattr(module, 'bias') and module.bias is not None: + uniform_(module.bias, -bound, bound) + + +def inverse_sigmoid(x, eps=1e-5): + x = x.clamp(min=0, max=1) + x1 = x.clamp(min=eps) + x2 = (1 - x).clamp(min=eps) + return torch.log(x1 / x2) + + +def multi_scale_deformable_attn_pytorch(value: torch.Tensor, value_spatial_shapes: torch.Tensor, + sampling_locations: torch.Tensor, + attention_weights: torch.Tensor) -> torch.Tensor: + """ + Multi-scale deformable attention. 
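+    Shapes: value (bs, sum(H*W), num_heads, embed_dims); sampling_locations (bs, num_queries, num_heads, num_levels,
+    num_points, 2) in [0, 1]; attention_weights (bs, num_queries, num_heads, num_levels, num_points). Each level is
+    sampled with bilinear grid_sample at the given locations, weighted, and the result is returned with shape
+    (bs, num_queries, num_heads * embed_dims).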
+ https://github.com/IDEA-Research/detrex/blob/main/detrex/layers/multi_scale_deform_attn.py + """ + + bs, _, num_heads, embed_dims = value.shape + _, num_queries, num_heads, num_levels, num_points, _ = sampling_locations.shape + value_list = value.split([H_ * W_ for H_, W_ in value_spatial_shapes], dim=1) + sampling_grids = 2 * sampling_locations - 1 + sampling_value_list = [] + for level, (H_, W_) in enumerate(value_spatial_shapes): + # bs, H_*W_, num_heads, embed_dims -> + # bs, H_*W_, num_heads*embed_dims -> + # bs, num_heads*embed_dims, H_*W_ -> + # bs*num_heads, embed_dims, H_, W_ + value_l_ = (value_list[level].flatten(2).transpose(1, 2).reshape(bs * num_heads, embed_dims, H_, W_)) + # bs, num_queries, num_heads, num_points, 2 -> + # bs, num_heads, num_queries, num_points, 2 -> + # bs*num_heads, num_queries, num_points, 2 + sampling_grid_l_ = sampling_grids[:, :, :, level].transpose(1, 2).flatten(0, 1) + # bs*num_heads, embed_dims, num_queries, num_points + sampling_value_l_ = F.grid_sample(value_l_, + sampling_grid_l_, + mode='bilinear', + padding_mode='zeros', + align_corners=False) + sampling_value_list.append(sampling_value_l_) + # (bs, num_queries, num_heads, num_levels, num_points) -> + # (bs, num_heads, num_queries, num_levels, num_points) -> + # (bs, num_heads, 1, num_queries, num_levels*num_points) + attention_weights = attention_weights.transpose(1, 2).reshape(bs * num_heads, 1, num_queries, + num_levels * num_points) + output = ((torch.stack(sampling_value_list, dim=-2).flatten(-2) * attention_weights).sum(-1).view( + bs, num_heads * embed_dims, num_queries)) + return output.transpose(1, 2).contiguous() diff --git a/ultralytics/nn/tasks.py b/ultralytics/nn/tasks.py new file mode 100644 index 0000000..f47e191 --- /dev/null +++ b/ultralytics/nn/tasks.py @@ -0,0 +1,782 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +import contextlib +from copy import deepcopy +from pathlib import Path + +import torch +import torch.nn as nn + +from ultralytics.nn.modules import (AIFI, C1, C2, C3, C3TR, SPP, SPPF, Bottleneck, BottleneckCSP, C2f, C3Ghost, C3x, + Classify, Concat, Conv, Conv2, ConvTranspose, Detect, DWConv, DWConvTranspose2d, + Focus, GhostBottleneck, GhostConv, HGBlock, HGStem, Pose, RepC3, RepConv, + RTDETRDecoder, Segment, MP, SP, SPF, StemBlock, Shuffle_Block, DWConvblock, ADD) +from ultralytics.yolo.utils import DEFAULT_CFG_DICT, DEFAULT_CFG_KEYS, LOGGER, colorstr, emojis, yaml_load +from ultralytics.yolo.utils.checks import check_requirements, check_suffix, check_yaml +from ultralytics.yolo.utils.loss import v8ClassificationLoss, v8DetectionLoss, v8PoseLoss, v8SegmentationLoss +from ultralytics.yolo.utils.plotting import feature_visualization +from ultralytics.yolo.utils.torch_utils import (fuse_conv_and_bn, fuse_deconv_and_bn, initialize_weights, + intersect_dicts, make_divisible, model_info, scale_img, time_sync) + +try: + import thop +except ImportError: + thop = None + + +class BaseModel(nn.Module): + """ + The BaseModel class serves as a base class for all the models in the Ultralytics YOLO family. + """ + + def forward(self, x, *args, **kwargs): + """ + Forward pass of the model on a single scale. + Wrapper for `_forward_once` method. + + Args: + x (torch.Tensor | dict): The input image tensor or a dict including image tensor and gt labels. + + Returns: + (torch.Tensor): The output of the network. + """ + if isinstance(x, dict): # for cases of training and validating while training. 
+ return self.loss(x, *args, **kwargs) + return self.predict(x, *args, **kwargs) + + def predict(self, x, profile=False, visualize=False, augment=False): + """ + Perform a forward pass through the network. + + Args: + x (torch.Tensor): The input tensor to the model. + profile (bool): Print the computation time of each layer if True, defaults to False. + visualize (bool): Save the feature maps of the model if True, defaults to False. + augment (bool): Augment image during prediction, defaults to False. + + Returns: + (torch.Tensor): The last output of the model. + """ + if augment: + return self._predict_augment(x) + return self._predict_once(x, profile, visualize) + + def _predict_once(self, x, profile=False, visualize=False): + """ + Perform a forward pass through the network. + + Args: + x (torch.Tensor): The input tensor to the model. + profile (bool): Print the computation time of each layer if True, defaults to False. + visualize (bool): Save the feature maps of the model if True, defaults to False. + + Returns: + (torch.Tensor): The last output of the model. + """ + y, dt = [], [] # outputs + for m in self.model: + if m.f != -1: # if not from previous layer + x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers + if profile: + self._profile_one_layer(m, x, dt) + x = m(x) # run + y.append(x if m.i in self.save else None) # save output + if visualize: + feature_visualization(x, m.type, m.i, save_dir=visualize) + return x + + def _predict_augment(self, x): + """Perform augmentations on input image x and return augmented inference.""" + LOGGER.warning( + f'WARNING ⚠️ {self.__class__.__name__} has not supported augment inference yet! Now using single-scale inference instead.' + ) + return self._predict_once(x) + + def _profile_one_layer(self, m, x, dt): + """ + Profile the computation time and FLOPs of a single layer of the model on a given input. + Appends the results to the provided list. + + Args: + m (nn.Module): The layer to be profiled. + x (torch.Tensor): The input data to the layer. + dt (list): A list to store the computation time of the layer. + + Returns: + None + """ + c = m == self.model[-1] # is final layer, copy input as inplace fix + o = thop.profile(m, inputs=[x.clone() if c else x], verbose=False)[0] / 1E9 * 2 if thop else 0 # FLOPs + t = time_sync() + for _ in range(10): + m(x.clone() if c else x) + dt.append((time_sync() - t) * 100) + if m == self.model[0]: + LOGGER.info(f"{'time (ms)':>10s} {'GFLOPs':>10s} {'params':>10s} module") + LOGGER.info(f'{dt[-1]:10.2f} {o:10.2f} {m.np:10.0f} {m.type}') + if c: + LOGGER.info(f"{sum(dt):10.2f} {'-':>10s} {'-':>10s} Total") + + def fuse(self, verbose=True): + """ + Fuse the `Conv2d()` and `BatchNorm2d()` layers of the model into a single layer, in order to improve the + computation efficiency. + + Returns: + (nn.Module): The fused model is returned. 
+ """ + if not self.is_fused(): + for m in self.model.modules(): + if isinstance(m, (Conv, Conv2, DWConv)) and hasattr(m, 'bn'): + if isinstance(m, Conv2): + m.fuse_convs() + m.conv = fuse_conv_and_bn(m.conv, m.bn) # update conv + delattr(m, 'bn') # remove batchnorm + m.forward = m.forward_fuse # update forward + if isinstance(m, ConvTranspose) and hasattr(m, 'bn'): + m.conv_transpose = fuse_deconv_and_bn(m.conv_transpose, m.bn) + delattr(m, 'bn') # remove batchnorm + m.forward = m.forward_fuse # update forward + if isinstance(m, RepConv): + m.fuse_convs() + m.forward = m.forward_fuse # update forward + self.info(verbose=verbose) + + return self + + def is_fused(self, thresh=10): + """ + Check if the model has less than a certain threshold of BatchNorm layers. + + Args: + thresh (int, optional): The threshold number of BatchNorm layers. Default is 10. + + Returns: + (bool): True if the number of BatchNorm layers in the model is less than the threshold, False otherwise. + """ + bn = tuple(v for k, v in nn.__dict__.items() if 'Norm' in k) # normalization layers, i.e. BatchNorm2d() + return sum(isinstance(v, bn) for v in self.modules()) < thresh # True if < 'thresh' BatchNorm layers in model + + def info(self, detailed=False, verbose=True, imgsz=640): + """ + Prints model information + + Args: + verbose (bool): if True, prints out the model information. Defaults to False + imgsz (int): the size of the image that the model will be trained on. Defaults to 640 + """ + return model_info(self, detailed=detailed, verbose=verbose, imgsz=imgsz) + + def _apply(self, fn): + """ + `_apply()` is a function that applies a function to all the tensors in the model that are not + parameters or registered buffers + + Args: + fn: the function to apply to the model + + Returns: + A model that is a Detect() object. + """ + self = super()._apply(fn) + m = self.model[-1] # Detect() + if isinstance(m, (Detect, Segment)): + m.stride = fn(m.stride) + m.anchors = fn(m.anchors) + m.strides = fn(m.strides) + return self + + def load(self, weights, verbose=True): + """Load the weights into the model. + + Args: + weights (dict | torch.nn.Module): The pre-trained weights to be loaded. + verbose (bool, optional): Whether to log the transfer progress. Defaults to True. + """ + model = weights['model'] if isinstance(weights, dict) else weights # torchvision models are not dicts + csd = model.float().state_dict() # checkpoint state_dict as FP32 + csd = intersect_dicts(csd, self.state_dict()) # intersect + self.load_state_dict(csd, strict=False) # load + if verbose: + LOGGER.info(f'Transferred {len(csd)}/{len(self.model.state_dict())} items from pretrained weights') + + def loss(self, batch, preds=None): + """ + Compute loss + + Args: + batch (dict): Batch to compute loss on + preds (torch.Tensor | List[torch.Tensor]): Predictions. 
+ """ + if not hasattr(self, 'criterion'): + self.criterion = self.init_criterion() + + preds = self.forward(batch['img']) if preds is None else preds + return self.criterion(preds, batch) + + def init_criterion(self): + raise NotImplementedError('compute_loss() needs to be implemented by task heads') + + +class DetectionModel(BaseModel): + """YOLOv8 detection model.""" + + def __init__(self, cfg='yolov8n.yaml', ch=3, nc=None, verbose=True): # model, input channels, number of classes + super().__init__() + self.yaml = cfg if isinstance(cfg, dict) else yaml_model_load(cfg) # cfg dict + + # Define model + ch = self.yaml['ch'] = self.yaml.get('ch', ch) # input channels + if nc and nc != self.yaml['nc']: + LOGGER.info(f"Overriding model.yaml nc={self.yaml['nc']} with nc={nc}") + self.yaml['nc'] = nc # override yaml value + self.model, self.save = parse_model(deepcopy(self.yaml), ch=ch, verbose=verbose) # model, savelist + self.names = {i: f'{i}' for i in range(self.yaml['nc'])} # default names dict + self.inplace = self.yaml.get('inplace', True) + + # Build strides + m = self.model[-1] # Detect() + if isinstance(m, (Detect, Segment, Pose)): + s = 256 # 2x min stride + m.inplace = self.inplace + forward = lambda x: self.forward(x)[0] if isinstance(m, (Segment, Pose)) else self.forward(x) + m.stride = torch.tensor([s / x.shape[-2] for x in forward(torch.zeros(1, ch, s, s))]) # forward + self.stride = m.stride + m.bias_init() # only run once + else: + self.stride = torch.Tensor([32]) # default stride for i.e. RTDETR + + # Init weights, biases + initialize_weights(self) + if verbose: + self.info() + LOGGER.info('') + + def _predict_augment(self, x): + """Perform augmentations on input image x and return augmented inference and train outputs.""" + img_size = x.shape[-2:] # height, width + s = [1, 0.83, 0.67] # scales + f = [None, 3, None] # flips (2-ud, 3-lr) + y = [] # outputs + for si, fi in zip(s, f): + xi = scale_img(x.flip(fi) if fi else x, si, gs=int(self.stride.max())) + yi = super().predict(xi)[0] # forward + # cv2.imwrite(f'img_{si}.jpg', 255 * xi[0].cpu().numpy().transpose((1, 2, 0))[:, :, ::-1]) # save + yi = self._descale_pred(yi, fi, si, img_size) + y.append(yi) + y = self._clip_augmented(y) # clip augmented tails + return torch.cat(y, -1), None # augmented inference, train + + @staticmethod + def _descale_pred(p, flips, scale, img_size, dim=1): + """De-scale predictions following augmented inference (inverse operation).""" + p[:, :4] /= scale # de-scale + x, y, wh, cls = p.split((1, 1, 2, p.shape[dim] - 4), dim) + if flips == 2: + y = img_size[0] - y # de-flip ud + elif flips == 3: + x = img_size[1] - x # de-flip lr + return torch.cat((x, y, wh, cls), dim) + + def _clip_augmented(self, y): + """Clip YOLOv5 augmented inference tails.""" + nl = self.model[-1].nl # number of detection layers (P3-P5) + g = sum(4 ** x for x in range(nl)) # grid points + e = 1 # exclude layer count + i = (y[0].shape[-1] // g) * sum(4 ** x for x in range(e)) # indices + y[0] = y[0][..., :-i] # large + i = (y[-1].shape[-1] // g) * sum(4 ** (nl - 1 - x) for x in range(e)) # indices + y[-1] = y[-1][..., i:] # small + return y + + def init_criterion(self): + return v8DetectionLoss(self) + + +class SegmentationModel(DetectionModel): + """YOLOv8 segmentation model.""" + + def __init__(self, cfg='yolov8n-seg.yaml', ch=3, nc=None, verbose=True): + """Initialize YOLOv8 segmentation model with given config and parameters.""" + super().__init__(cfg=cfg, ch=ch, nc=nc, verbose=verbose) + + def init_criterion(self): + 
return v8SegmentationLoss(self) + + def _predict_augment(self, x): + """Perform augmentations on input image x and return augmented inference.""" + LOGGER.warning( + f'WARNING ⚠️ {self.__class__.__name__} has not supported augment inference yet! Now using single-scale inference instead.' + ) + return self._predict_once(x) + + +class PoseModel(DetectionModel): + """YOLOv8 pose model.""" + + def __init__(self, cfg='yolov8n-pose.yaml', ch=3, nc=None, data_kpt_shape=(None, None), verbose=True): + """Initialize YOLOv8 Pose model.""" + if not isinstance(cfg, dict): + cfg = yaml_model_load(cfg) # load model YAML + if any(data_kpt_shape) and list(data_kpt_shape) != list(cfg['kpt_shape']): + LOGGER.info(f"Overriding model.yaml kpt_shape={cfg['kpt_shape']} with kpt_shape={data_kpt_shape}") + cfg['kpt_shape'] = data_kpt_shape + super().__init__(cfg=cfg, ch=ch, nc=nc, verbose=verbose) + + def init_criterion(self): + return v8PoseLoss(self) + + def _predict_augment(self, x): + """Perform augmentations on input image x and return augmented inference.""" + LOGGER.warning( + f'WARNING ⚠️ {self.__class__.__name__} has not supported augment inference yet! Now using single-scale inference instead.' + ) + return self._predict_once(x) + + +class ClassificationModel(BaseModel): + """YOLOv8 classification model.""" + + def __init__(self, + cfg=None, + model=None, + ch=3, + nc=None, + cutoff=10, + verbose=True): # yaml, model, channels, number of classes, cutoff index, verbose flag + super().__init__() + self._from_detection_model(model, nc, cutoff) if model is not None else self._from_yaml(cfg, ch, nc, verbose) + + def _from_detection_model(self, model, nc=1000, cutoff=10): + """Create a YOLOv5 classification model from a YOLOv5 detection model.""" + from ultralytics.nn.autobackend import AutoBackend + if isinstance(model, AutoBackend): + model = model.model # unwrap DetectMultiBackend + model.model = model.model[:cutoff] # backbone + m = model.model[-1] # last layer + ch = m.conv.in_channels if hasattr(m, 'conv') else m.cv1.conv.in_channels # ch into module + c = Classify(ch, nc) # Classify() + c.i, c.f, c.type = m.i, m.f, 'models.common.Classify' # index, from, type + model.model[-1] = c # replace + self.model = model.model + self.stride = model.stride + self.save = [] + self.nc = nc + + def _from_yaml(self, cfg, ch, nc, verbose): + """Set YOLOv8 model configurations and define the model architecture.""" + self.yaml = cfg if isinstance(cfg, dict) else yaml_model_load(cfg) # cfg dict + + # Define model + ch = self.yaml['ch'] = self.yaml.get('ch', ch) # input channels + if nc and nc != self.yaml['nc']: + LOGGER.info(f"Overriding model.yaml nc={self.yaml['nc']} with nc={nc}") + self.yaml['nc'] = nc # override yaml value + elif not nc and not self.yaml.get('nc', None): + raise ValueError('nc not specified. 
Must specify nc in model.yaml or function arguments.') + self.model, self.save = parse_model(deepcopy(self.yaml), ch=ch, verbose=verbose) # model, savelist + self.stride = torch.Tensor([1]) # no stride constraints + self.names = {i: f'{i}' for i in range(self.yaml['nc'])} # default names dict + self.info() + + @staticmethod + def reshape_outputs(model, nc): + """Update a TorchVision classification model to class count 'n' if required.""" + name, m = list((model.model if hasattr(model, 'model') else model).named_children())[-1] # last module + if isinstance(m, Classify): # YOLO Classify() head + if m.linear.out_features != nc: + m.linear = nn.Linear(m.linear.in_features, nc) + elif isinstance(m, nn.Linear): # ResNet, EfficientNet + if m.out_features != nc: + setattr(model, name, nn.Linear(m.in_features, nc)) + elif isinstance(m, nn.Sequential): + types = [type(x) for x in m] + if nn.Linear in types: + i = types.index(nn.Linear) # nn.Linear index + if m[i].out_features != nc: + m[i] = nn.Linear(m[i].in_features, nc) + elif nn.Conv2d in types: + i = types.index(nn.Conv2d) # nn.Conv2d index + if m[i].out_channels != nc: + m[i] = nn.Conv2d(m[i].in_channels, nc, m[i].kernel_size, m[i].stride, bias=m[i].bias is not None) + + def init_criterion(self): + """Compute the classification loss between predictions and true labels.""" + return v8ClassificationLoss() + + +class RTDETRDetectionModel(DetectionModel): + + def __init__(self, cfg='rtdetr-l.yaml', ch=3, nc=None, verbose=True): + super().__init__(cfg=cfg, ch=ch, nc=nc, verbose=verbose) + + def init_criterion(self): + """Compute the classification loss between predictions and true labels.""" + from ultralytics.vit.utils.loss import RTDETRDetectionLoss + + return RTDETRDetectionLoss(nc=self.nc, use_vfl=True) + + def loss(self, batch, preds=None): + if not hasattr(self, 'criterion'): + self.criterion = self.init_criterion() + + img = batch['img'] + # NOTE: preprocess gt_bbox and gt_labels to list. + bs = len(img) + batch_idx = batch['batch_idx'] + gt_groups = [(batch_idx == i).sum().item() for i in range(bs)] + targets = { + 'cls': batch['cls'].to(img.device, dtype=torch.long).view(-1), + 'bboxes': batch['bboxes'].to(device=img.device), + 'batch_idx': batch_idx.to(img.device, dtype=torch.long).view(-1), + 'gt_groups': gt_groups} + + preds = self.predict(img, batch=targets) if preds is None else preds + dec_bboxes, dec_scores, enc_bboxes, enc_scores, dn_meta = preds if self.training else preds[1] + if dn_meta is None: + dn_bboxes, dn_scores = None, None + else: + dn_bboxes, dec_bboxes = torch.split(dec_bboxes, dn_meta['dn_num_split'], dim=2) + dn_scores, dec_scores = torch.split(dec_scores, dn_meta['dn_num_split'], dim=2) + + dec_bboxes = torch.cat([enc_bboxes.unsqueeze(0), dec_bboxes]) # (7, bs, 300, 4) + dec_scores = torch.cat([enc_scores.unsqueeze(0), dec_scores]) + + loss = self.criterion((dec_bboxes, dec_scores), + targets, + dn_bboxes=dn_bboxes, + dn_scores=dn_scores, + dn_meta=dn_meta) + # NOTE: There are like 12 losses in RTDETR, backward with all losses but only show the main three losses. + return sum(loss.values()), torch.as_tensor([loss[k].detach() for k in ['loss_giou', 'loss_class', 'loss_bbox']], + device=img.device) + + def predict(self, x, profile=False, visualize=False, batch=None, augment=False): + """ + Perform a forward pass through the network. + + Args: + x (torch.Tensor): The input tensor to the model + profile (bool): Print the computation time of each layer if True, defaults to False. 
+ visualize (bool): Save the feature maps of the model if True, defaults to False + batch (dict): A dict including gt boxes and labels from dataloader. + + Returns: + (torch.Tensor): The last output of the model. + """ + y, dt = [], [] # outputs + for m in self.model[:-1]: # except the head part + if m.f != -1: # if not from previous layer + x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers + if profile: + self._profile_one_layer(m, x, dt) + x = m(x) # run + y.append(x if m.i in self.save else None) # save output + if visualize: + feature_visualization(x, m.type, m.i, save_dir=visualize) + head = self.model[-1] + x = head([y[j] for j in head.f], batch) # head inference + return x + + +class Ensemble(nn.ModuleList): + """Ensemble of models.""" + + def __init__(self): + """Initialize an ensemble of models.""" + super().__init__() + + def forward(self, x, augment=False, profile=False, visualize=False): + """Function generates the YOLOv5 network's final layer.""" + y = [module(x, augment, profile, visualize)[0] for module in self] + # y = torch.stack(y).max(0)[0] # max ensemble + # y = torch.stack(y).mean(0) # mean ensemble + y = torch.cat(y, 2) # nms ensemble, y shape(B, HW, C) + return y, None # inference, train output + + +# Functions ------------------------------------------------------------------------------------------------------------ + + +def torch_safe_load(weight): + """ + This function attempts to load a PyTorch model with the torch.load() function. If a ModuleNotFoundError is raised, + it catches the error, logs a warning message, and attempts to install the missing module via the + check_requirements() function. After installation, the function again attempts to load the model using torch.load(). + + Args: + weight (str): The file path of the PyTorch model. + + Returns: + (dict): The loaded PyTorch model. + """ + from ultralytics.yolo.utils.downloads import attempt_download_asset + + check_suffix(file=weight, suffix='.pt') + file = attempt_download_asset(weight) # search online if missing locally + try: + return torch.load(file, map_location='cpu'), file # load + except ModuleNotFoundError as e: # e.name is missing module name + if e.name == 'models': + raise TypeError( + emojis(f'ERROR ❌️ {weight} appears to be an Ultralytics YOLOv5 model originally trained ' + f'with https://github.com/ultralytics/yolov5.\nThis model is NOT forwards compatible with ' + f'YOLOv8 at https://github.com/ultralytics/ultralytics.' + f"\nRecommend fixes are to train a new model using the latest 'ultralytics' package or to " + f"run a command with an official YOLOv8 model, i.e. 'yolo predict model=yolov8n.pt'")) from e + LOGGER.warning(f"WARNING ⚠️ {weight} appears to require '{e.name}', which is not in ultralytics requirements." + f"\nAutoInstall will run now for '{e.name}' but this feature will be removed in the future." + f"\nRecommend fixes are to train a new model using the latest 'ultralytics' package or to " + f"run a command with an official YOLOv8 model, i.e. 
'yolo predict model=yolov8n.pt'") + check_requirements(e.name) # install missing module + + return torch.load(file, map_location='cpu'), file # load + + +def attempt_load_weights(weights, device=None, inplace=True, fuse=False): + """Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a.""" + + ensemble = Ensemble() + for w in weights if isinstance(weights, list) else [weights]: + ckpt, w = torch_safe_load(w) # load ckpt + args = {**DEFAULT_CFG_DICT, **ckpt['train_args']} if 'train_args' in ckpt else None # combined args + model = (ckpt.get('ema') or ckpt['model']).to(device).float() # FP32 model + + # Model compatibility updates + model.args = args # attach args to model + model.pt_path = w # attach *.pt file path to model + model.task = guess_model_task(model) + if not hasattr(model, 'stride'): + model.stride = torch.tensor([32.]) + + # Append + ensemble.append(model.fuse().eval() if fuse and hasattr(model, 'fuse') else model.eval()) # model in eval mode + + # Module compatibility updates + for m in ensemble.modules(): + t = type(m) + if t in (nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU, Detect, Segment): + m.inplace = inplace # torch 1.7.0 compatibility + elif t is nn.Upsample and not hasattr(m, 'recompute_scale_factor'): + m.recompute_scale_factor = None # torch 1.11.0 compatibility + + # Return model + if len(ensemble) == 1: + return ensemble[-1] + + # Return ensemble + LOGGER.info(f'Ensemble created with {weights}\n') + for k in 'names', 'nc', 'yaml': + setattr(ensemble, k, getattr(ensemble[0], k)) + ensemble.stride = ensemble[torch.argmax(torch.tensor([m.stride.max() for m in ensemble])).int()].stride + assert all(ensemble[0].nc == m.nc for m in ensemble), f'Models differ in class counts {[m.nc for m in ensemble]}' + return ensemble + + +def attempt_load_one_weight(weight, device=None, inplace=True, fuse=False): + """Loads a single model weights.""" + ckpt, weight = torch_safe_load(weight) # load ckpt + args = {**DEFAULT_CFG_DICT, **(ckpt.get('train_args', {}))} # combine model and default args, preferring model args + model = (ckpt.get('ema') or ckpt['model']).to(device).float() # FP32 model + + # Model compatibility updates + model.args = {k: v for k, v in args.items() if k in DEFAULT_CFG_KEYS} # attach args to model + model.pt_path = weight # attach *.pt file path to model + model.task = guess_model_task(model) + if not hasattr(model, 'stride'): + model.stride = torch.tensor([32.]) + + model = model.fuse().eval() if fuse and hasattr(model, 'fuse') else model.eval() # model in eval mode + + # Module compatibility updates + for m in model.modules(): + t = type(m) + if t in (nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU, Detect, Segment): + m.inplace = inplace # torch 1.7.0 compatibility + elif t is nn.Upsample and not hasattr(m, 'recompute_scale_factor'): + m.recompute_scale_factor = None # torch 1.11.0 compatibility + + # Return model and ckpt + return model, ckpt + + +def parse_model(d, ch, verbose=True): # model_dict, input_channels(3) + """Parse a YOLO model.yaml dictionary into a PyTorch model.""" + import ast + + # Args + max_channels = float('inf') + nc, act, scales = (d.get(x) for x in ('nc', 'activation', 'scales')) + depth, width, kpt_shape = (d.get(x, 1.0) for x in ('depth_multiple', 'width_multiple', 'kpt_shape')) + if scales: + scale = d.get('scale') + if not scale: + scale = tuple(scales.keys())[0] + LOGGER.warning(f"WARNING ⚠️ no model scale passed. 
Assuming scale='{scale}'.") + depth, width, max_channels = scales[scale] + + if act: + Conv.default_act = eval(act) # redefine default activation, i.e. Conv.default_act = nn.SiLU() + if verbose: + LOGGER.info(f"{colorstr('activation:')} {act}") # print + + if verbose: + LOGGER.info(f"\n{'':>3}{'from':>20}{'n':>3}{'params':>10} {'module':<45}{'arguments':<30}") + ch = [ch] + layers, save, c2 = [], [], ch[-1] # layers, savelist, ch out + for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']): # from, number, module, args + m = getattr(torch.nn, m[3:]) if 'nn.' in m else globals()[m] # get module + for j, a in enumerate(args): + if isinstance(a, str): + with contextlib.suppress(ValueError): + args[j] = locals()[a] if a in locals() else ast.literal_eval(a) + + n = n_ = max(round(n * depth), 1) if n > 1 else n # depth gain + if m in (Classify, Conv, ConvTranspose, GhostConv, Bottleneck, GhostBottleneck, SPP, SPPF, DWConv, Focus, + BottleneckCSP, C1, C2, C2f, C3, C3TR, C3Ghost, nn.ConvTranspose2d, DWConvTranspose2d, C3x, RepC3, StemBlock, Shuffle_Block, DWConvblock): + c1, c2 = ch[f], args[0] + if c2 != nc: # if c2 not equal to number of classes (i.e. for Classify() output) + c2 = make_divisible(min(c2, max_channels) * width, 8) + + args = [c1, c2, *args[1:]] + if m in (BottleneckCSP, C1, C2, C2f, C3, C3TR, C3Ghost, C3x, RepC3): + args.insert(2, n) # number of repeats + n = 1 + elif m is AIFI: + args = [ch[f], *args] + elif m in (HGStem, HGBlock): + c1, cm, c2 = ch[f], args[0], args[1] + args = [c1, cm, c2, *args[2:]] + if m is HGBlock: + args.insert(4, n) # number of repeats + n = 1 + + elif m is nn.BatchNorm2d: + args = [ch[f]] + elif m is Concat: + c2 = sum(ch[x] for x in f) + elif m is ADD: + c2 = sum([ch[x] for x in f])//2 + elif m in (Detect, Segment, Pose, RTDETRDecoder): + args.append([ch[x] for x in f]) + if m is Segment: + args[2] = make_divisible(min(args[2], max_channels) * width, 8) + else: + c2 = ch[f] + + m_ = nn.Sequential(*(m(*args) for _ in range(n))) if n > 1 else m(*args) # module + t = str(m)[8:-2].replace('__main__.', '') # module type + m.np = sum(x.numel() for x in m_.parameters()) # number params + m_.i, m_.f, m_.type = i, f, t # attach index, 'from' index, type + if verbose: + LOGGER.info(f'{i:>3}{str(f):>20}{n_:>3}{m.np:10.0f} {t:<45}{str(args):<30}') # print + save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist + layers.append(m_) + if i == 0: + ch = [] + ch.append(c2) + return nn.Sequential(*layers), sorted(save) + + +def yaml_model_load(path): + """Load a YOLOv8 model from a YAML file.""" + import re + + path = Path(path) + if path.stem in (f'yolov{d}{x}6' for x in 'nsmlx' for d in (5, 8)): + new_stem = re.sub(r'(\d+)([nslmx])6(.+)?$', r'\1\2-p6\3', path.stem) + LOGGER.warning(f'WARNING ⚠️ Ultralytics YOLO P6 models now use -p6 suffix. Renaming {path.stem} to {new_stem}.') + path = path.with_name(new_stem + path.suffix) + + unified_path = re.sub(r'(\d+)([nslmx])(.+)?$', r'\1\3', str(path)) # i.e. yolov8x.yaml -> yolov8.yaml + yaml_file = check_yaml(unified_path, hard=False) or check_yaml(path) + d = yaml_load(yaml_file) # model dict + d['scale'] = guess_model_scale(path) + d['yaml_file'] = str(path) + return d + + +def guess_model_scale(model_path): + """ + Takes a path to a YOLO model's YAML file as input and extracts the size character of the model's scale. + The function uses regular expression matching to find the pattern of the model scale in the YAML file name, + which is denoted by n, s, m, l, or x. 
The function returns the size character of the model scale as a string. + + Args: + model_path (str | Path): The path to the YOLO model's YAML file. + + Returns: + (str): The size character of the model's scale, which can be n, s, m, l, or x. + """ + with contextlib.suppress(AttributeError): + import re + return re.search(r'yolov\d+([nslmx])', Path(model_path).stem).group(1) # n, s, m, l, or x + return '' + + +def guess_model_task(model): + """ + Guess the task of a PyTorch model from its architecture or configuration. + + Args: + model (nn.Module | dict): PyTorch model or model configuration in YAML format. + + Returns: + (str): Task of the model ('detect', 'segment', 'classify', 'pose'). + + Raises: + SyntaxError: If the task of the model could not be determined. + """ + + def cfg2task(cfg): + """Guess from YAML dictionary.""" + m = cfg['head'][-1][-2].lower() # output module name + if m in ('classify', 'classifier', 'cls', 'fc'): + return 'classify' + if m == 'detect': + return 'detect' + if m == 'segment': + return 'segment' + if m == 'pose': + return 'pose' + + # Guess from model cfg + if isinstance(model, dict): + with contextlib.suppress(Exception): + return cfg2task(model) + + # Guess from PyTorch model + if isinstance(model, nn.Module): # PyTorch model + for x in 'model.args', 'model.model.args', 'model.model.model.args': + with contextlib.suppress(Exception): + return eval(x)['task'] + for x in 'model.yaml', 'model.model.yaml', 'model.model.model.yaml': + with contextlib.suppress(Exception): + return cfg2task(eval(x)) + + for m in model.modules(): + if isinstance(m, Detect): + return 'detect' + elif isinstance(m, Segment): + return 'segment' + elif isinstance(m, Classify): + return 'classify' + elif isinstance(m, Pose): + return 'pose' + + # Guess from model filename + if isinstance(model, (str, Path)): + model = Path(model) + if '-seg' in model.stem or 'segment' in model.parts: + return 'segment' + elif '-cls' in model.stem or 'classify' in model.parts: + return 'classify' + elif '-pose' in model.stem or 'pose' in model.parts: + return 'pose' + elif 'detect' in model.parts: + return 'detect' + + # Unable to determine task from model + LOGGER.warning("WARNING ⚠️ Unable to automatically guess model task, assuming 'task=detect'. " + "Explicitly define task for your model, i.e. 'task=detect', 'segment', 'classify', or 'pose'.") + return 'detect' # assume detect diff --git a/ultralytics/tracker/README.md b/ultralytics/tracker/README.md new file mode 100644 index 0000000..26ec0c3 --- /dev/null +++ b/ultralytics/tracker/README.md @@ -0,0 +1,86 @@ +# Tracker + +## Supported Trackers + +- [x] ByteTracker +- [x] BoT-SORT + +## Usage + +### python interface: + +You can use the Python interface to track objects using the YOLO model. 
+
+```python
+from ultralytics import YOLO
+
+model = YOLO("yolov8n.pt")  # or a segmentation model, i.e. yolov8n-seg.pt
+model.track(
+    source="video/streams",
+    stream=True,
+    tracker="botsort.yaml",  # or 'bytetrack.yaml'
+    show=True,
+)
+```
+
+You can get the IDs of the tracked objects using the following code:
+
+```python
+from ultralytics import YOLO
+
+model = YOLO("yolov8n.pt")
+
+for result in model.track(source="video.mp4"):
+    print(
+        result.boxes.id.cpu().numpy().astype(int)
+    )  # this will print the IDs of the tracked objects in the frame
+```
+
+If you want to use the tracker with a folder of images or when you loop over video frames yourself, use the `persist` parameter to tell the model that consecutive frames are related, so the same objects keep the same IDs. Without `persist=True`, a new tracker is created on every call and the IDs change from frame to frame.
+
+```python
+import cv2
+from ultralytics import YOLO
+
+cap = cv2.VideoCapture("video.mp4")
+model = YOLO("yolov8n.pt")
+while True:
+    ret, frame = cap.read()
+    if not ret:
+        break
+    results = model.track(frame, persist=True)
+    boxes = results[0].boxes.xyxy.cpu().numpy().astype(int)
+    ids = results[0].boxes.id.cpu().numpy().astype(int)
+    for box, id in zip(boxes, ids):
+        cv2.rectangle(frame, (box[0], box[1]), (box[2], box[3]), (0, 255, 0), 2)
+        cv2.putText(
+            frame,
+            f"Id {id}",
+            (box[0], box[1]),
+            cv2.FONT_HERSHEY_SIMPLEX,
+            1,
+            (0, 0, 255),
+            2,
+        )
+    cv2.imshow("frame", frame)
+    if cv2.waitKey(1) & 0xFF == ord("q"):
+        break
+```
+
+## Change tracker parameters
+
+You can change the tracker parameters by editing the tracker config files (e.g. `botsort.yaml` or `bytetrack.yaml`) located in the `ultralytics/tracker/cfg` folder.
+
+## Command Line Interface (CLI)
+
+You can also use the command line interface to track objects using the YOLO model.
+
+```bash
+yolo detect track source=... tracker=...
+yolo segment track source=... tracker=...
+yolo pose track source=... tracker=...
+```
+
+By default, trackers will use the configuration in `ultralytics/tracker/cfg`.
+We also support using a modified tracker config file. Please refer to the tracker config files
+in `ultralytics/tracker/cfg`.
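As a minimal sketch of how a modified config can be passed to the Python API: the file name `my_bytetrack.yaml` below is illustrative, assuming you have copied `ultralytics/tracker/cfg/bytetrack.yaml` and adjusted settings such as `track_high_thresh` or `track_buffer` before use.

```python
from ultralytics import YOLO

# Hypothetical custom config: a copy of ultralytics/tracker/cfg/bytetrack.yaml
# with thresholds (e.g. track_high_thresh, track_buffer) tuned for your video.
model = YOLO("yolov8n.pt")
model.track(
    source="video.mp4",
    tracker="my_bytetrack.yaml",  # path to the modified tracker config
    show=True,
)
```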
diff --git a/ultralytics/tracker/__init__.py b/ultralytics/tracker/__init__.py new file mode 100644 index 0000000..13d3903 --- /dev/null +++ b/ultralytics/tracker/__init__.py @@ -0,0 +1,6 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +from .track import register_tracker +from .trackers import BOTSORT, BYTETracker + +__all__ = 'register_tracker', 'BOTSORT', 'BYTETracker' # allow simpler import diff --git a/ultralytics/tracker/cfg/botsort.yaml b/ultralytics/tracker/cfg/botsort.yaml new file mode 100644 index 0000000..d4947c6 --- /dev/null +++ b/ultralytics/tracker/cfg/botsort.yaml @@ -0,0 +1,18 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +# Default YOLO tracker settings for BoT-SORT tracker https://github.com/NirAharon/BoT-SORT + +tracker_type: botsort # tracker type, ['botsort', 'bytetrack'] +track_high_thresh: 0.5 # threshold for the first association +track_low_thresh: 0.1 # threshold for the second association +new_track_thresh: 0.6 # threshold for init new track if the detection does not match any tracks +track_buffer: 30 # buffer to calculate the time when to remove tracks +match_thresh: 0.8 # threshold for matching tracks +# min_box_area: 10 # threshold for min box areas(for tracker evaluation, not used for now) +# mot20: False # for tracker evaluation(not used for now) + +# BoT-SORT settings +cmc_method: sparseOptFlow # method of global motion compensation +# ReID model related thresh (not supported yet) +proximity_thresh: 0.5 +appearance_thresh: 0.25 +with_reid: False diff --git a/ultralytics/tracker/cfg/bytetrack.yaml b/ultralytics/tracker/cfg/bytetrack.yaml new file mode 100644 index 0000000..5060f92 --- /dev/null +++ b/ultralytics/tracker/cfg/bytetrack.yaml @@ -0,0 +1,11 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +# Default YOLO tracker settings for ByteTrack tracker https://github.com/ifzhang/ByteTrack + +tracker_type: bytetrack # tracker type, ['botsort', 'bytetrack'] +track_high_thresh: 0.5 # threshold for the first association +track_low_thresh: 0.1 # threshold for the second association +new_track_thresh: 0.6 # threshold for init new track if the detection does not match any tracks +track_buffer: 30 # buffer to calculate the time when to remove tracks +match_thresh: 0.8 # threshold for matching tracks +# min_box_area: 10 # threshold for min box areas(for tracker evaluation, not used for now) +# mot20: False # for tracker evaluation(not used for now) diff --git a/ultralytics/tracker/track.py b/ultralytics/tracker/track.py new file mode 100644 index 0000000..d08abfc --- /dev/null +++ b/ultralytics/tracker/track.py @@ -0,0 +1,65 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +from functools import partial + +import torch + +from ultralytics.yolo.utils import IterableSimpleNamespace, yaml_load +from ultralytics.yolo.utils.checks import check_yaml + +from .trackers import BOTSORT, BYTETracker + +TRACKER_MAP = {'bytetrack': BYTETracker, 'botsort': BOTSORT} + + +def on_predict_start(predictor, persist=False): + """ + Initialize trackers for object tracking during prediction. + + Args: + predictor (object): The predictor object to initialize trackers for. + persist (bool, optional): Whether to persist the trackers if they already exist. Defaults to False. + + Raises: + AssertionError: If the tracker_type is not 'bytetrack' or 'botsort'. 
+ """ + if hasattr(predictor, 'trackers') and persist: + return + tracker = check_yaml(predictor.args.tracker) + cfg = IterableSimpleNamespace(**yaml_load(tracker)) + assert cfg.tracker_type in ['bytetrack', 'botsort'], \ + f"Only support 'bytetrack' and 'botsort' for now, but got '{cfg.tracker_type}'" + trackers = [] + for _ in range(predictor.dataset.bs): + tracker = TRACKER_MAP[cfg.tracker_type](args=cfg, frame_rate=30) + trackers.append(tracker) + predictor.trackers = trackers + + +def on_predict_postprocess_end(predictor): + """Postprocess detected boxes and update with object tracking.""" + bs = predictor.dataset.bs + im0s = predictor.batch[1] + for i in range(bs): + det = predictor.results[i].boxes.cpu().numpy() + if len(det) == 0: + continue + tracks = predictor.trackers[i].update(det, im0s[i]) + if len(tracks) == 0: + continue + idx = tracks[:, -1].astype(int) + predictor.results[i] = predictor.results[i][idx] + predictor.results[i].update(boxes=torch.as_tensor(tracks[:, :-1])) + + +def register_tracker(model, persist): + """ + Register tracking callbacks to the model for object tracking during prediction. + + Args: + model (object): The model object to register tracking callbacks for. + persist (bool): Whether to persist the trackers if they already exist. + + """ + model.add_callback('on_predict_start', partial(on_predict_start, persist=persist)) + model.add_callback('on_predict_postprocess_end', on_predict_postprocess_end) diff --git a/ultralytics/tracker/trackers/__init__.py b/ultralytics/tracker/trackers/__init__.py new file mode 100644 index 0000000..a0fd890 --- /dev/null +++ b/ultralytics/tracker/trackers/__init__.py @@ -0,0 +1,6 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +from .bot_sort import BOTSORT +from .byte_tracker import BYTETracker + +__all__ = 'BOTSORT', 'BYTETracker' # allow simpler import diff --git a/ultralytics/tracker/trackers/basetrack.py b/ultralytics/tracker/trackers/basetrack.py new file mode 100644 index 0000000..3c7b0f7 --- /dev/null +++ b/ultralytics/tracker/trackers/basetrack.py @@ -0,0 +1,71 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +from collections import OrderedDict + +import numpy as np + + +class TrackState: + """Enumeration of possible object tracking states.""" + + New = 0 + Tracked = 1 + Lost = 2 + Removed = 3 + + +class BaseTrack: + """Base class for object tracking, handling basic track attributes and operations.""" + + _count = 0 + + track_id = 0 + is_activated = False + state = TrackState.New + + history = OrderedDict() + features = [] + curr_feature = None + score = 0 + start_frame = 0 + frame_id = 0 + time_since_update = 0 + + # Multi-camera + location = (np.inf, np.inf) + + @property + def end_frame(self): + """Return the last frame ID of the track.""" + return self.frame_id + + @staticmethod + def next_id(): + """Increment and return the global track ID counter.""" + BaseTrack._count += 1 + return BaseTrack._count + + def activate(self, *args): + """Activate the track with the provided arguments.""" + raise NotImplementedError + + def predict(self): + """Predict the next state of the track.""" + raise NotImplementedError + + def update(self, *args, **kwargs): + """Update the track with new observations.""" + raise NotImplementedError + + def mark_lost(self): + """Mark the track as lost.""" + self.state = TrackState.Lost + + def mark_removed(self): + """Mark the track as removed.""" + self.state = TrackState.Removed + + @staticmethod + def reset_id(): + """Reset the global track ID counter.""" + BaseTrack._count = 0 diff --git 
a/ultralytics/tracker/trackers/bot_sort.py b/ultralytics/tracker/trackers/bot_sort.py new file mode 100644 index 0000000..10e8868 --- /dev/null +++ b/ultralytics/tracker/trackers/bot_sort.py @@ -0,0 +1,148 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +from collections import deque + +import numpy as np + +from ..utils import matching +from ..utils.gmc import GMC +from ..utils.kalman_filter import KalmanFilterXYWH +from .basetrack import TrackState +from .byte_tracker import BYTETracker, STrack + + +class BOTrack(STrack): + shared_kalman = KalmanFilterXYWH() + + def __init__(self, tlwh, score, cls, feat=None, feat_history=50): + """Initialize YOLOv8 object with temporal parameters, such as feature history, alpha and current features.""" + super().__init__(tlwh, score, cls) + + self.smooth_feat = None + self.curr_feat = None + if feat is not None: + self.update_features(feat) + self.features = deque([], maxlen=feat_history) + self.alpha = 0.9 + + def update_features(self, feat): + """Update features vector and smooth it using exponential moving average.""" + feat /= np.linalg.norm(feat) + self.curr_feat = feat + if self.smooth_feat is None: + self.smooth_feat = feat + else: + self.smooth_feat = self.alpha * self.smooth_feat + (1 - self.alpha) * feat + self.features.append(feat) + self.smooth_feat /= np.linalg.norm(self.smooth_feat) + + def predict(self): + """Predicts the mean and covariance using Kalman filter.""" + mean_state = self.mean.copy() + if self.state != TrackState.Tracked: + mean_state[6] = 0 + mean_state[7] = 0 + + self.mean, self.covariance = self.kalman_filter.predict(mean_state, self.covariance) + + def re_activate(self, new_track, frame_id, new_id=False): + """Reactivates a track with updated features and optionally assigns a new ID.""" + if new_track.curr_feat is not None: + self.update_features(new_track.curr_feat) + super().re_activate(new_track, frame_id, new_id) + + def update(self, new_track, frame_id): + """Update the YOLOv8 instance with new track and frame ID.""" + if new_track.curr_feat is not None: + self.update_features(new_track.curr_feat) + super().update(new_track, frame_id) + + @property + def tlwh(self): + """Get current position in bounding box format `(top left x, top left y, + width, height)`. + """ + if self.mean is None: + return self._tlwh.copy() + ret = self.mean[:4].copy() + ret[:2] -= ret[2:] / 2 + return ret + + @staticmethod + def multi_predict(stracks): + """Predicts the mean and covariance of multiple object tracks using shared Kalman filter.""" + if len(stracks) <= 0: + return + multi_mean = np.asarray([st.mean.copy() for st in stracks]) + multi_covariance = np.asarray([st.covariance for st in stracks]) + for i, st in enumerate(stracks): + if st.state != TrackState.Tracked: + multi_mean[i][6] = 0 + multi_mean[i][7] = 0 + multi_mean, multi_covariance = BOTrack.shared_kalman.multi_predict(multi_mean, multi_covariance) + for i, (mean, cov) in enumerate(zip(multi_mean, multi_covariance)): + stracks[i].mean = mean + stracks[i].covariance = cov + + def convert_coords(self, tlwh): + """Converts Top-Left-Width-Height bounding box coordinates to X-Y-Width-Height format.""" + return self.tlwh_to_xywh(tlwh) + + @staticmethod + def tlwh_to_xywh(tlwh): + """Convert bounding box to format `(center x, center y, width, + height)`. 
+ """ + ret = np.asarray(tlwh).copy() + ret[:2] += ret[2:] / 2 + return ret + + +class BOTSORT(BYTETracker): + + def __init__(self, args, frame_rate=30): + """Initialize YOLOv8 object with ReID module and GMC algorithm.""" + super().__init__(args, frame_rate) + # ReID module + self.proximity_thresh = args.proximity_thresh + self.appearance_thresh = args.appearance_thresh + + if args.with_reid: + # Haven't supported BoT-SORT(reid) yet + self.encoder = None + # self.gmc = GMC(method=args.cmc_method, verbose=[args.name, args.ablation]) + self.gmc = GMC(method=args.cmc_method) + + def get_kalmanfilter(self): + """Returns an instance of KalmanFilterXYWH for object tracking.""" + return KalmanFilterXYWH() + + def init_track(self, dets, scores, cls, img=None): + """Initialize track with detections, scores, and classes.""" + if len(dets) == 0: + return [] + if self.args.with_reid and self.encoder is not None: + features_keep = self.encoder.inference(img, dets) + return [BOTrack(xyxy, s, c, f) for (xyxy, s, c, f) in zip(dets, scores, cls, features_keep)] # detections + else: + return [BOTrack(xyxy, s, c) for (xyxy, s, c) in zip(dets, scores, cls)] # detections + + def get_dists(self, tracks, detections): + """Get distances between tracks and detections using IoU and (optionally) ReID embeddings.""" + dists = matching.iou_distance(tracks, detections) + dists_mask = (dists > self.proximity_thresh) + + # TODO: mot20 + # if not self.args.mot20: + dists = matching.fuse_score(dists, detections) + + if self.args.with_reid and self.encoder is not None: + emb_dists = matching.embedding_distance(tracks, detections) / 2.0 + emb_dists[emb_dists > self.appearance_thresh] = 1.0 + emb_dists[dists_mask] = 1.0 + dists = np.minimum(dists, emb_dists) + return dists + + def multi_predict(self, tracks): + """Predict and track multiple objects with YOLOv8 model.""" + BOTrack.multi_predict(tracks) diff --git a/ultralytics/tracker/trackers/byte_tracker.py b/ultralytics/tracker/trackers/byte_tracker.py new file mode 100644 index 0000000..6034cdc --- /dev/null +++ b/ultralytics/tracker/trackers/byte_tracker.py @@ -0,0 +1,364 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +import numpy as np + +from ..utils import matching +from ..utils.kalman_filter import KalmanFilterXYAH +from .basetrack import BaseTrack, TrackState + + +class STrack(BaseTrack): + shared_kalman = KalmanFilterXYAH() + + def __init__(self, tlwh, score, cls): + """wait activate.""" + self._tlwh = np.asarray(self.tlbr_to_tlwh(tlwh[:-1]), dtype=np.float32) + self.kalman_filter = None + self.mean, self.covariance = None, None + self.is_activated = False + + self.score = score + self.tracklet_len = 0 + self.cls = cls + self.idx = tlwh[-1] + + def predict(self): + """Predicts mean and covariance using Kalman filter.""" + mean_state = self.mean.copy() + if self.state != TrackState.Tracked: + mean_state[7] = 0 + self.mean, self.covariance = self.kalman_filter.predict(mean_state, self.covariance) + + @staticmethod + def multi_predict(stracks): + """Perform multi-object predictive tracking using Kalman filter for given stracks.""" + if len(stracks) <= 0: + return + multi_mean = np.asarray([st.mean.copy() for st in stracks]) + multi_covariance = np.asarray([st.covariance for st in stracks]) + for i, st in enumerate(stracks): + if st.state != TrackState.Tracked: + multi_mean[i][7] = 0 + multi_mean, multi_covariance = STrack.shared_kalman.multi_predict(multi_mean, multi_covariance) + for i, (mean, cov) in enumerate(zip(multi_mean, multi_covariance)): + stracks[i].mean = 
mean + stracks[i].covariance = cov + + @staticmethod + def multi_gmc(stracks, H=np.eye(2, 3)): + """Update state tracks positions and covariances using a homography matrix.""" + if len(stracks) > 0: + multi_mean = np.asarray([st.mean.copy() for st in stracks]) + multi_covariance = np.asarray([st.covariance for st in stracks]) + + R = H[:2, :2] + R8x8 = np.kron(np.eye(4, dtype=float), R) + t = H[:2, 2] + + for i, (mean, cov) in enumerate(zip(multi_mean, multi_covariance)): + mean = R8x8.dot(mean) + mean[:2] += t + cov = R8x8.dot(cov).dot(R8x8.transpose()) + + stracks[i].mean = mean + stracks[i].covariance = cov + + def activate(self, kalman_filter, frame_id): + """Start a new tracklet.""" + self.kalman_filter = kalman_filter + self.track_id = self.next_id() + self.mean, self.covariance = self.kalman_filter.initiate(self.convert_coords(self._tlwh)) + + self.tracklet_len = 0 + self.state = TrackState.Tracked + if frame_id == 1: + self.is_activated = True + self.frame_id = frame_id + self.start_frame = frame_id + + def re_activate(self, new_track, frame_id, new_id=False): + """Reactivates a previously lost track with a new detection.""" + self.mean, self.covariance = self.kalman_filter.update(self.mean, self.covariance, + self.convert_coords(new_track.tlwh)) + self.tracklet_len = 0 + self.state = TrackState.Tracked + self.is_activated = True + self.frame_id = frame_id + if new_id: + self.track_id = self.next_id() + self.score = new_track.score + self.cls = new_track.cls + self.idx = new_track.idx + + def update(self, new_track, frame_id): + """ + Update a matched track + :type new_track: STrack + :type frame_id: int + :return: + """ + self.frame_id = frame_id + self.tracklet_len += 1 + + new_tlwh = new_track.tlwh + self.mean, self.covariance = self.kalman_filter.update(self.mean, self.covariance, + self.convert_coords(new_tlwh)) + self.state = TrackState.Tracked + self.is_activated = True + + self.score = new_track.score + self.cls = new_track.cls + self.idx = new_track.idx + + def convert_coords(self, tlwh): + """Convert a bounding box's top-left-width-height format to its x-y-angle-height equivalent.""" + return self.tlwh_to_xyah(tlwh) + + @property + def tlwh(self): + """Get current position in bounding box format `(top left x, top left y, + width, height)`. + """ + if self.mean is None: + return self._tlwh.copy() + ret = self.mean[:4].copy() + ret[2] *= ret[3] + ret[:2] -= ret[2:] / 2 + return ret + + @property + def tlbr(self): + """Convert bounding box to format `(min x, min y, max x, max y)`, i.e., + `(top left, bottom right)`. + """ + ret = self.tlwh.copy() + ret[2:] += ret[:2] + return ret + + @staticmethod + def tlwh_to_xyah(tlwh): + """Convert bounding box to format `(center x, center y, aspect ratio, + height)`, where the aspect ratio is `width / height`. 
+ """ + ret = np.asarray(tlwh).copy() + ret[:2] += ret[2:] / 2 + ret[2] /= ret[3] + return ret + + @staticmethod + def tlbr_to_tlwh(tlbr): + """Converts top-left bottom-right format to top-left width height format.""" + ret = np.asarray(tlbr).copy() + ret[2:] -= ret[:2] + return ret + + @staticmethod + def tlwh_to_tlbr(tlwh): + """Converts tlwh bounding box format to tlbr format.""" + ret = np.asarray(tlwh).copy() + ret[2:] += ret[:2] + return ret + + def __repr__(self): + """Return a string representation of the BYTETracker object with start and end frames and track ID.""" + return f'OT_{self.track_id}_({self.start_frame}-{self.end_frame})' + + +class BYTETracker: + + def __init__(self, args, frame_rate=30): + """Initialize a YOLOv8 object to track objects with given arguments and frame rate.""" + self.tracked_stracks = [] # type: list[STrack] + self.lost_stracks = [] # type: list[STrack] + self.removed_stracks = [] # type: list[STrack] + + self.frame_id = 0 + self.args = args + self.max_time_lost = int(frame_rate / 30.0 * args.track_buffer) + self.kalman_filter = self.get_kalmanfilter() + self.reset_id() + + def update(self, results, img=None): + """Updates object tracker with new detections and returns tracked object bounding boxes.""" + self.frame_id += 1 + activated_stracks = [] + refind_stracks = [] + lost_stracks = [] + removed_stracks = [] + + scores = results.conf + bboxes = results.xyxy + # Add index + bboxes = np.concatenate([bboxes, np.arange(len(bboxes)).reshape(-1, 1)], axis=-1) + cls = results.cls + + remain_inds = scores > self.args.track_high_thresh + inds_low = scores > self.args.track_low_thresh + inds_high = scores < self.args.track_high_thresh + + inds_second = np.logical_and(inds_low, inds_high) + dets_second = bboxes[inds_second] + dets = bboxes[remain_inds] + scores_keep = scores[remain_inds] + scores_second = scores[inds_second] + cls_keep = cls[remain_inds] + cls_second = cls[inds_second] + + detections = self.init_track(dets, scores_keep, cls_keep, img) + # Add newly detected tracklets to tracked_stracks + unconfirmed = [] + tracked_stracks = [] # type: list[STrack] + for track in self.tracked_stracks: + if not track.is_activated: + unconfirmed.append(track) + else: + tracked_stracks.append(track) + # Step 2: First association, with high score detection boxes + strack_pool = self.joint_stracks(tracked_stracks, self.lost_stracks) + # Predict the current location with KF + self.multi_predict(strack_pool) + if hasattr(self, 'gmc') and img is not None: + warp = self.gmc.apply(img, dets) + STrack.multi_gmc(strack_pool, warp) + STrack.multi_gmc(unconfirmed, warp) + + dists = self.get_dists(strack_pool, detections) + matches, u_track, u_detection = matching.linear_assignment(dists, thresh=self.args.match_thresh) + + for itracked, idet in matches: + track = strack_pool[itracked] + det = detections[idet] + if track.state == TrackState.Tracked: + track.update(det, self.frame_id) + activated_stracks.append(track) + else: + track.re_activate(det, self.frame_id, new_id=False) + refind_stracks.append(track) + # Step 3: Second association, with low score detection boxes + # association the untrack to the low score detections + detections_second = self.init_track(dets_second, scores_second, cls_second, img) + r_tracked_stracks = [strack_pool[i] for i in u_track if strack_pool[i].state == TrackState.Tracked] + # TODO + dists = matching.iou_distance(r_tracked_stracks, detections_second) + matches, u_track, u_detection_second = matching.linear_assignment(dists, thresh=0.5) + for 
itracked, idet in matches: + track = r_tracked_stracks[itracked] + det = detections_second[idet] + if track.state == TrackState.Tracked: + track.update(det, self.frame_id) + activated_stracks.append(track) + else: + track.re_activate(det, self.frame_id, new_id=False) + refind_stracks.append(track) + + for it in u_track: + track = r_tracked_stracks[it] + if track.state != TrackState.Lost: + track.mark_lost() + lost_stracks.append(track) + # Deal with unconfirmed tracks, usually tracks with only one beginning frame + detections = [detections[i] for i in u_detection] + dists = self.get_dists(unconfirmed, detections) + matches, u_unconfirmed, u_detection = matching.linear_assignment(dists, thresh=0.7) + for itracked, idet in matches: + unconfirmed[itracked].update(detections[idet], self.frame_id) + activated_stracks.append(unconfirmed[itracked]) + for it in u_unconfirmed: + track = unconfirmed[it] + track.mark_removed() + removed_stracks.append(track) + # Step 4: Init new stracks + for inew in u_detection: + track = detections[inew] + if track.score < self.args.new_track_thresh: + continue + track.activate(self.kalman_filter, self.frame_id) + activated_stracks.append(track) + # Step 5: Update state + for track in self.lost_stracks: + if self.frame_id - track.end_frame > self.max_time_lost: + track.mark_removed() + removed_stracks.append(track) + + self.tracked_stracks = [t for t in self.tracked_stracks if t.state == TrackState.Tracked] + self.tracked_stracks = self.joint_stracks(self.tracked_stracks, activated_stracks) + self.tracked_stracks = self.joint_stracks(self.tracked_stracks, refind_stracks) + self.lost_stracks = self.sub_stracks(self.lost_stracks, self.tracked_stracks) + self.lost_stracks.extend(lost_stracks) + self.lost_stracks = self.sub_stracks(self.lost_stracks, self.removed_stracks) + self.tracked_stracks, self.lost_stracks = self.remove_duplicate_stracks(self.tracked_stracks, self.lost_stracks) + self.removed_stracks.extend(removed_stracks) + if len(self.removed_stracks) > 1000: + self.removed_stracks = self.removed_stracks[-999:] # clip remove stracks to 1000 maximum + return np.asarray( + [x.tlbr.tolist() + [x.track_id, x.score, x.cls, x.idx] for x in self.tracked_stracks if x.is_activated], + dtype=np.float32) + + def get_kalmanfilter(self): + """Returns a Kalman filter object for tracking bounding boxes.""" + return KalmanFilterXYAH() + + def init_track(self, dets, scores, cls, img=None): + """Initialize object tracking with detections and scores using STrack algorithm.""" + return [STrack(xyxy, s, c) for (xyxy, s, c) in zip(dets, scores, cls)] if len(dets) else [] # detections + + def get_dists(self, tracks, detections): + """Calculates the distance between tracks and detections using IOU and fuses scores.""" + dists = matching.iou_distance(tracks, detections) + # TODO: mot20 + # if not self.args.mot20: + dists = matching.fuse_score(dists, detections) + return dists + + def multi_predict(self, tracks): + """Returns the predicted tracks using the YOLOv8 network.""" + STrack.multi_predict(tracks) + + def reset_id(self): + """Resets the ID counter of STrack.""" + STrack.reset_id() + + @staticmethod + def joint_stracks(tlista, tlistb): + """Combine two lists of stracks into a single one.""" + exists = {} + res = [] + for t in tlista: + exists[t.track_id] = 1 + res.append(t) + for t in tlistb: + tid = t.track_id + if not exists.get(tid, 0): + exists[tid] = 1 + res.append(t) + return res + + @staticmethod + def sub_stracks(tlista, tlistb): + """DEPRECATED CODE in 
https://github.com/ultralytics/ultralytics/pull/1890/ + stracks = {t.track_id: t for t in tlista} + for t in tlistb: + tid = t.track_id + if stracks.get(tid, 0): + del stracks[tid] + return list(stracks.values()) + """ + track_ids_b = {t.track_id for t in tlistb} + return [t for t in tlista if t.track_id not in track_ids_b] + + @staticmethod + def remove_duplicate_stracks(stracksa, stracksb): + """Remove duplicate stracks with non-maximum IOU distance.""" + pdist = matching.iou_distance(stracksa, stracksb) + pairs = np.where(pdist < 0.15) + dupa, dupb = [], [] + for p, q in zip(*pairs): + timep = stracksa[p].frame_id - stracksa[p].start_frame + timeq = stracksb[q].frame_id - stracksb[q].start_frame + if timep > timeq: + dupb.append(q) + else: + dupa.append(p) + resa = [t for i, t in enumerate(stracksa) if i not in dupa] + resb = [t for i, t in enumerate(stracksb) if i not in dupb] + return resa, resb diff --git a/ultralytics/tracker/utils/__init__.py b/ultralytics/tracker/utils/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/ultralytics/tracker/utils/gmc.py b/ultralytics/tracker/utils/gmc.py new file mode 100644 index 0000000..a5c910d --- /dev/null +++ b/ultralytics/tracker/utils/gmc.py @@ -0,0 +1,319 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +import copy + +import cv2 +import numpy as np + +from ultralytics.yolo.utils import LOGGER + + +class GMC: + + def __init__(self, method='sparseOptFlow', downscale=2, verbose=None): + """Initialize a video tracker with specified parameters.""" + super().__init__() + + self.method = method + self.downscale = max(1, int(downscale)) + + if self.method == 'orb': + self.detector = cv2.FastFeatureDetector_create(20) + self.extractor = cv2.ORB_create() + self.matcher = cv2.BFMatcher(cv2.NORM_HAMMING) + + elif self.method == 'sift': + self.detector = cv2.SIFT_create(nOctaveLayers=3, contrastThreshold=0.02, edgeThreshold=20) + self.extractor = cv2.SIFT_create(nOctaveLayers=3, contrastThreshold=0.02, edgeThreshold=20) + self.matcher = cv2.BFMatcher(cv2.NORM_L2) + + elif self.method == 'ecc': + number_of_iterations = 5000 + termination_eps = 1e-6 + self.warp_mode = cv2.MOTION_EUCLIDEAN + self.criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, number_of_iterations, termination_eps) + + elif self.method == 'sparseOptFlow': + self.feature_params = dict(maxCorners=1000, + qualityLevel=0.01, + minDistance=1, + blockSize=3, + useHarrisDetector=False, + k=0.04) + # self.gmc_file = open('GMC_results.txt', 'w') + + elif self.method in ['file', 'files']: + seqName = verbose[0] + ablation = verbose[1] + if ablation: + filePath = r'tracker/GMC_files/MOT17_ablation' + else: + filePath = r'tracker/GMC_files/MOTChallenge' + + if '-FRCNN' in seqName: + seqName = seqName[:-6] + elif '-DPM' in seqName or '-SDP' in seqName: + seqName = seqName[:-4] + self.gmcFile = open(f'{filePath}/GMC-{seqName}.txt') + + if self.gmcFile is None: + raise ValueError(f'Error: Unable to open GMC file in directory:{filePath}') + elif self.method in ['none', 'None']: + self.method = 'none' + else: + raise ValueError(f'Error: Unknown CMC method:{method}') + + self.prevFrame = None + self.prevKeyPoints = None + self.prevDescriptors = None + + self.initializedFirstFrame = False + + def apply(self, raw_frame, detections=None): + """Apply object detection on a raw frame using specified method.""" + if self.method in ['orb', 'sift']: + return self.applyFeatures(raw_frame, detections) + elif self.method == 'ecc': + return self.applyEcc(raw_frame, detections) + elif 
self.method == 'sparseOptFlow': + return self.applySparseOptFlow(raw_frame, detections) + elif self.method == 'file': + return self.applyFile(raw_frame, detections) + elif self.method == 'none': + return np.eye(2, 3) + else: + return np.eye(2, 3) + + def applyEcc(self, raw_frame, detections=None): + """Initialize.""" + height, width, _ = raw_frame.shape + frame = cv2.cvtColor(raw_frame, cv2.COLOR_BGR2GRAY) + H = np.eye(2, 3, dtype=np.float32) + + # Downscale image (TODO: consider using pyramids) + if self.downscale > 1.0: + frame = cv2.GaussianBlur(frame, (3, 3), 1.5) + frame = cv2.resize(frame, (width // self.downscale, height // self.downscale)) + width = width // self.downscale + height = height // self.downscale + + # Handle first frame + if not self.initializedFirstFrame: + # Initialize data + self.prevFrame = frame.copy() + + # Initialization done + self.initializedFirstFrame = True + + return H + + # Run the ECC algorithm. The results are stored in warp_matrix. + # (cc, H) = cv2.findTransformECC(self.prevFrame, frame, H, self.warp_mode, self.criteria) + try: + (cc, H) = cv2.findTransformECC(self.prevFrame, frame, H, self.warp_mode, self.criteria, None, 1) + except Exception as e: + LOGGER.warning(f'WARNING: find transform failed. Set warp as identity {e}') + + return H + + def applyFeatures(self, raw_frame, detections=None): + """Initialize.""" + height, width, _ = raw_frame.shape + frame = cv2.cvtColor(raw_frame, cv2.COLOR_BGR2GRAY) + H = np.eye(2, 3) + + # Downscale image (TODO: consider using pyramids) + if self.downscale > 1.0: + # frame = cv2.GaussianBlur(frame, (3, 3), 1.5) + frame = cv2.resize(frame, (width // self.downscale, height // self.downscale)) + width = width // self.downscale + height = height // self.downscale + + # Find the keypoints + mask = np.zeros_like(frame) + # mask[int(0.05 * height): int(0.95 * height), int(0.05 * width): int(0.95 * width)] = 255 + mask[int(0.02 * height):int(0.98 * height), int(0.02 * width):int(0.98 * width)] = 255 + if detections is not None: + for det in detections: + tlbr = (det[:4] / self.downscale).astype(np.int_) + mask[tlbr[1]:tlbr[3], tlbr[0]:tlbr[2]] = 0 + + keypoints = self.detector.detect(frame, mask) + + # Compute the descriptors + keypoints, descriptors = self.extractor.compute(frame, keypoints) + + # Handle first frame + if not self.initializedFirstFrame: + # Initialize data + self.prevFrame = frame.copy() + self.prevKeyPoints = copy.copy(keypoints) + self.prevDescriptors = copy.copy(descriptors) + + # Initialization done + self.initializedFirstFrame = True + + return H + + # Match descriptors. 
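+        # kNN-match the current descriptors against the previous frame, then filter the pairs:
+        # Lowe's ratio test (0.9) drops ambiguous matches, a spatial gate keeps only
+        # displacements within 25% of the (downscaled) frame size, and a 2.5-sigma bound on
+        # those displacements selects the inliers used to fit the rigid transform below.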
+ knnMatches = self.matcher.knnMatch(self.prevDescriptors, descriptors, 2) + + # Filtered matches based on smallest spatial distance + matches = [] + spatialDistances = [] + + maxSpatialDistance = 0.25 * np.array([width, height]) + + # Handle empty matches case + if len(knnMatches) == 0: + # Store to next iteration + self.prevFrame = frame.copy() + self.prevKeyPoints = copy.copy(keypoints) + self.prevDescriptors = copy.copy(descriptors) + + return H + + for m, n in knnMatches: + if m.distance < 0.9 * n.distance: + prevKeyPointLocation = self.prevKeyPoints[m.queryIdx].pt + currKeyPointLocation = keypoints[m.trainIdx].pt + + spatialDistance = (prevKeyPointLocation[0] - currKeyPointLocation[0], + prevKeyPointLocation[1] - currKeyPointLocation[1]) + + if (np.abs(spatialDistance[0]) < maxSpatialDistance[0]) and \ + (np.abs(spatialDistance[1]) < maxSpatialDistance[1]): + spatialDistances.append(spatialDistance) + matches.append(m) + + meanSpatialDistances = np.mean(spatialDistances, 0) + stdSpatialDistances = np.std(spatialDistances, 0) + + inliers = (spatialDistances - meanSpatialDistances) < 2.5 * stdSpatialDistances + + goodMatches = [] + prevPoints = [] + currPoints = [] + for i in range(len(matches)): + if inliers[i, 0] and inliers[i, 1]: + goodMatches.append(matches[i]) + prevPoints.append(self.prevKeyPoints[matches[i].queryIdx].pt) + currPoints.append(keypoints[matches[i].trainIdx].pt) + + prevPoints = np.array(prevPoints) + currPoints = np.array(currPoints) + + # Draw the keypoint matches on the output image + # if False: + # import matplotlib.pyplot as plt + # matches_img = np.hstack((self.prevFrame, frame)) + # matches_img = cv2.cvtColor(matches_img, cv2.COLOR_GRAY2BGR) + # W = np.size(self.prevFrame, 1) + # for m in goodMatches: + # prev_pt = np.array(self.prevKeyPoints[m.queryIdx].pt, dtype=np.int_) + # curr_pt = np.array(keypoints[m.trainIdx].pt, dtype=np.int_) + # curr_pt[0] += W + # color = np.random.randint(0, 255, 3) + # color = (int(color[0]), int(color[1]), int(color[2])) + # + # matches_img = cv2.line(matches_img, prev_pt, curr_pt, tuple(color), 1, cv2.LINE_AA) + # matches_img = cv2.circle(matches_img, prev_pt, 2, tuple(color), -1) + # matches_img = cv2.circle(matches_img, curr_pt, 2, tuple(color), -1) + # + # plt.figure() + # plt.imshow(matches_img) + # plt.show() + + # Find rigid matrix + if (np.size(prevPoints, 0) > 4) and (np.size(prevPoints, 0) == np.size(prevPoints, 0)): + H, inliers = cv2.estimateAffinePartial2D(prevPoints, currPoints, cv2.RANSAC) + + # Handle downscale + if self.downscale > 1.0: + H[0, 2] *= self.downscale + H[1, 2] *= self.downscale + else: + LOGGER.warning('WARNING: not enough matching points') + + # Store to next iteration + self.prevFrame = frame.copy() + self.prevKeyPoints = copy.copy(keypoints) + self.prevDescriptors = copy.copy(descriptors) + + return H + + def applySparseOptFlow(self, raw_frame, detections=None): + """Initialize.""" + # t0 = time.time() + height, width, _ = raw_frame.shape + frame = cv2.cvtColor(raw_frame, cv2.COLOR_BGR2GRAY) + H = np.eye(2, 3) + + # Downscale image + if self.downscale > 1.0: + # frame = cv2.GaussianBlur(frame, (3, 3), 1.5) + frame = cv2.resize(frame, (width // self.downscale, height // self.downscale)) + + # Find the keypoints + keypoints = cv2.goodFeaturesToTrack(frame, mask=None, **self.feature_params) + + # Handle first frame + if not self.initializedFirstFrame: + # Initialize data + self.prevFrame = frame.copy() + self.prevKeyPoints = copy.copy(keypoints) + + # Initialization done + 
self.initializedFirstFrame = True + + return H + + # Find correspondences + matchedKeypoints, status, err = cv2.calcOpticalFlowPyrLK(self.prevFrame, frame, self.prevKeyPoints, None) + + # Leave good correspondences only + prevPoints = [] + currPoints = [] + + for i in range(len(status)): + if status[i]: + prevPoints.append(self.prevKeyPoints[i]) + currPoints.append(matchedKeypoints[i]) + + prevPoints = np.array(prevPoints) + currPoints = np.array(currPoints) + + # Find rigid matrix + if (np.size(prevPoints, 0) > 4) and (np.size(prevPoints, 0) == np.size(prevPoints, 0)): + H, inliers = cv2.estimateAffinePartial2D(prevPoints, currPoints, cv2.RANSAC) + + # Handle downscale + if self.downscale > 1.0: + H[0, 2] *= self.downscale + H[1, 2] *= self.downscale + else: + LOGGER.warning('WARNING: not enough matching points') + + # Store to next iteration + self.prevFrame = frame.copy() + self.prevKeyPoints = copy.copy(keypoints) + + # gmc_line = str(1000 * (time.time() - t0)) + "\t" + str(H[0, 0]) + "\t" + str(H[0, 1]) + "\t" + str( + # H[0, 2]) + "\t" + str(H[1, 0]) + "\t" + str(H[1, 1]) + "\t" + str(H[1, 2]) + "\n" + # self.gmc_file.write(gmc_line) + + return H + + def applyFile(self, raw_frame, detections=None): + """Return the homography matrix based on the GCPs in the next line of the input GMC file.""" + line = self.gmcFile.readline() + tokens = line.split('\t') + H = np.eye(2, 3, dtype=np.float_) + H[0, 0] = float(tokens[1]) + H[0, 1] = float(tokens[2]) + H[0, 2] = float(tokens[3]) + H[1, 0] = float(tokens[4]) + H[1, 1] = float(tokens[5]) + H[1, 2] = float(tokens[6]) + + return H diff --git a/ultralytics/tracker/utils/kalman_filter.py b/ultralytics/tracker/utils/kalman_filter.py new file mode 100644 index 0000000..a0ee498 --- /dev/null +++ b/ultralytics/tracker/utils/kalman_filter.py @@ -0,0 +1,462 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +import numpy as np +import scipy.linalg + +# Table for the 0.95 quantile of the chi-square distribution with N degrees of freedom (contains values for N=1, ..., 9) +# Taken from MATLAB/Octave's chi2inv function and used as Mahalanobis gating threshold. +chi2inv95 = {1: 3.8415, 2: 5.9915, 3: 7.8147, 4: 9.4877, 5: 11.070, 6: 12.592, 7: 14.067, 8: 15.507, 9: 16.919} + + +class KalmanFilterXYAH: + """ + For bytetrack + A simple Kalman filter for tracking bounding boxes in image space. + + The 8-dimensional state space + + x, y, a, h, vx, vy, va, vh + + contains the bounding box center position (x, y), aspect ratio a, height h, + and their respective velocities. + + Object motion follows a constant velocity model. The bounding box location + (x, y, a, h) is taken as direct observation of the state space (linear + observation model). + + """ + + def __init__(self): + """Initialize Kalman filter model matrices with motion and observation uncertainty weights.""" + ndim, dt = 4, 1. + + # Create Kalman filter model matrices. + self._motion_mat = np.eye(2 * ndim, 2 * ndim) + for i in range(ndim): + self._motion_mat[i, ndim + i] = dt + self._update_mat = np.eye(ndim, 2 * ndim) + + # Motion and observation uncertainty are chosen relative to the current + # state estimate. These weights control the amount of uncertainty in + # the model. This is a bit hacky. + self._std_weight_position = 1. / 20 + self._std_weight_velocity = 1. / 160 + + def initiate(self, measurement): + """Create track from unassociated measurement. + + Parameters + ---------- + measurement : ndarray + Bounding box coordinates (x, y, a, h) with center position (x, y), + aspect ratio a, and height h. 
+ + Returns + ------- + (ndarray, ndarray) + Returns the mean vector (8 dimensional) and covariance matrix (8x8 + dimensional) of the new track. Unobserved velocities are initialized + to 0 mean. + + """ + mean_pos = measurement + mean_vel = np.zeros_like(mean_pos) + mean = np.r_[mean_pos, mean_vel] + + std = [ + 2 * self._std_weight_position * measurement[3], 2 * self._std_weight_position * measurement[3], 1e-2, + 2 * self._std_weight_position * measurement[3], 10 * self._std_weight_velocity * measurement[3], + 10 * self._std_weight_velocity * measurement[3], 1e-5, 10 * self._std_weight_velocity * measurement[3]] + covariance = np.diag(np.square(std)) + return mean, covariance + + def predict(self, mean, covariance): + """Run Kalman filter prediction step. + + Parameters + ---------- + mean : ndarray + The 8 dimensional mean vector of the object state at the previous + time step. + covariance : ndarray + The 8x8 dimensional covariance matrix of the object state at the + previous time step. + + Returns + ------- + (ndarray, ndarray) + Returns the mean vector and covariance matrix of the predicted + state. Unobserved velocities are initialized to 0 mean. + + """ + std_pos = [ + self._std_weight_position * mean[3], self._std_weight_position * mean[3], 1e-2, + self._std_weight_position * mean[3]] + std_vel = [ + self._std_weight_velocity * mean[3], self._std_weight_velocity * mean[3], 1e-5, + self._std_weight_velocity * mean[3]] + motion_cov = np.diag(np.square(np.r_[std_pos, std_vel])) + + # mean = np.dot(self._motion_mat, mean) + mean = np.dot(mean, self._motion_mat.T) + covariance = np.linalg.multi_dot((self._motion_mat, covariance, self._motion_mat.T)) + motion_cov + + return mean, covariance + + def project(self, mean, covariance): + """Project state distribution to measurement space. + + Parameters + ---------- + mean : ndarray + The state's mean vector (8 dimensional array). + covariance : ndarray + The state's covariance matrix (8x8 dimensional). + + Returns + ------- + (ndarray, ndarray) + Returns the projected mean and covariance matrix of the given state + estimate. + + """ + std = [ + self._std_weight_position * mean[3], self._std_weight_position * mean[3], 1e-1, + self._std_weight_position * mean[3]] + innovation_cov = np.diag(np.square(std)) + + mean = np.dot(self._update_mat, mean) + covariance = np.linalg.multi_dot((self._update_mat, covariance, self._update_mat.T)) + return mean, covariance + innovation_cov + + def multi_predict(self, mean, covariance): + """Run Kalman filter prediction step (Vectorized version). + Parameters + ---------- + mean : ndarray + The Nx8 dimensional mean matrix of the object states at the previous + time step. + covariance : ndarray + The Nx8x8 dimensional covariance matrix of the object states at the + previous time step. + Returns + ------- + (ndarray, ndarray) + Returns the mean vector and covariance matrix of the predicted + state. Unobserved velocities are initialized to 0 mean. 
+ """ + std_pos = [ + self._std_weight_position * mean[:, 3], self._std_weight_position * mean[:, 3], + 1e-2 * np.ones_like(mean[:, 3]), self._std_weight_position * mean[:, 3]] + std_vel = [ + self._std_weight_velocity * mean[:, 3], self._std_weight_velocity * mean[:, 3], + 1e-5 * np.ones_like(mean[:, 3]), self._std_weight_velocity * mean[:, 3]] + sqr = np.square(np.r_[std_pos, std_vel]).T + + motion_cov = [np.diag(sqr[i]) for i in range(len(mean))] + motion_cov = np.asarray(motion_cov) + + mean = np.dot(mean, self._motion_mat.T) + left = np.dot(self._motion_mat, covariance).transpose((1, 0, 2)) + covariance = np.dot(left, self._motion_mat.T) + motion_cov + + return mean, covariance + + def update(self, mean, covariance, measurement): + """Run Kalman filter correction step. + + Parameters + ---------- + mean : ndarray + The predicted state's mean vector (8 dimensional). + covariance : ndarray + The state's covariance matrix (8x8 dimensional). + measurement : ndarray + The 4 dimensional measurement vector (x, y, a, h), where (x, y) + is the center position, a the aspect ratio, and h the height of the + bounding box. + + Returns + ------- + (ndarray, ndarray) + Returns the measurement-corrected state distribution. + + """ + projected_mean, projected_cov = self.project(mean, covariance) + + chol_factor, lower = scipy.linalg.cho_factor(projected_cov, lower=True, check_finite=False) + kalman_gain = scipy.linalg.cho_solve((chol_factor, lower), + np.dot(covariance, self._update_mat.T).T, + check_finite=False).T + innovation = measurement - projected_mean + + new_mean = mean + np.dot(innovation, kalman_gain.T) + new_covariance = covariance - np.linalg.multi_dot((kalman_gain, projected_cov, kalman_gain.T)) + return new_mean, new_covariance + + def gating_distance(self, mean, covariance, measurements, only_position=False, metric='maha'): + """Compute gating distance between state distribution and measurements. + A suitable distance threshold can be obtained from `chi2inv95`. If + `only_position` is False, the chi-square distribution has 4 degrees of + freedom, otherwise 2. + Parameters + ---------- + mean : ndarray + Mean vector over the state distribution (8 dimensional). + covariance : ndarray + Covariance of the state distribution (8x8 dimensional). + measurements : ndarray + An Nx4 dimensional matrix of N measurements, each in + format (x, y, a, h) where (x, y) is the bounding box center + position, a the aspect ratio, and h the height. + only_position : Optional[bool] + If True, distance computation is done with respect to the bounding + box center position only. + Returns + ------- + ndarray + Returns an array of length N, where the i-th element contains the + squared Mahalanobis distance between (mean, covariance) and + `measurements[i]`. + """ + mean, covariance = self.project(mean, covariance) + if only_position: + mean, covariance = mean[:2], covariance[:2, :2] + measurements = measurements[:, :2] + + d = measurements - mean + if metric == 'gaussian': + return np.sum(d * d, axis=1) + elif metric == 'maha': + cholesky_factor = np.linalg.cholesky(covariance) + z = scipy.linalg.solve_triangular(cholesky_factor, d.T, lower=True, check_finite=False, overwrite_b=True) + return np.sum(z * z, axis=0) # square maha + else: + raise ValueError('invalid distance metric') + + +class KalmanFilterXYWH: + """ + For BoT-SORT + A simple Kalman filter for tracking bounding boxes in image space. 
+ + The 8-dimensional state space + + x, y, w, h, vx, vy, vw, vh + + contains the bounding box center position (x, y), width w, height h, + and their respective velocities. + + Object motion follows a constant velocity model. The bounding box location + (x, y, w, h) is taken as direct observation of the state space (linear + observation model). + + """ + + def __init__(self): + """Initialize Kalman filter model matrices with motion and observation uncertainties.""" + ndim, dt = 4, 1. + + # Create Kalman filter model matrices. + self._motion_mat = np.eye(2 * ndim, 2 * ndim) + for i in range(ndim): + self._motion_mat[i, ndim + i] = dt + self._update_mat = np.eye(ndim, 2 * ndim) + + # Motion and observation uncertainty are chosen relative to the current + # state estimate. These weights control the amount of uncertainty in + # the model. This is a bit hacky. + self._std_weight_position = 1. / 20 + self._std_weight_velocity = 1. / 160 + + def initiate(self, measurement): + """Create track from unassociated measurement. + + Parameters + ---------- + measurement : ndarray + Bounding box coordinates (x, y, w, h) with center position (x, y), + width w, and height h. + + Returns + ------- + (ndarray, ndarray) + Returns the mean vector (8 dimensional) and covariance matrix (8x8 + dimensional) of the new track. Unobserved velocities are initialized + to 0 mean. + + """ + mean_pos = measurement + mean_vel = np.zeros_like(mean_pos) + mean = np.r_[mean_pos, mean_vel] + + std = [ + 2 * self._std_weight_position * measurement[2], 2 * self._std_weight_position * measurement[3], + 2 * self._std_weight_position * measurement[2], 2 * self._std_weight_position * measurement[3], + 10 * self._std_weight_velocity * measurement[2], 10 * self._std_weight_velocity * measurement[3], + 10 * self._std_weight_velocity * measurement[2], 10 * self._std_weight_velocity * measurement[3]] + covariance = np.diag(np.square(std)) + return mean, covariance + + def predict(self, mean, covariance): + """Run Kalman filter prediction step. + + Parameters + ---------- + mean : ndarray + The 8 dimensional mean vector of the object state at the previous + time step. + covariance : ndarray + The 8x8 dimensional covariance matrix of the object state at the + previous time step. + + Returns + ------- + (ndarray, ndarray) + Returns the mean vector and covariance matrix of the predicted + state. Unobserved velocities are initialized to 0 mean. + + """ + std_pos = [ + self._std_weight_position * mean[2], self._std_weight_position * mean[3], + self._std_weight_position * mean[2], self._std_weight_position * mean[3]] + std_vel = [ + self._std_weight_velocity * mean[2], self._std_weight_velocity * mean[3], + self._std_weight_velocity * mean[2], self._std_weight_velocity * mean[3]] + motion_cov = np.diag(np.square(np.r_[std_pos, std_vel])) + + mean = np.dot(mean, self._motion_mat.T) + covariance = np.linalg.multi_dot((self._motion_mat, covariance, self._motion_mat.T)) + motion_cov + + return mean, covariance + + def project(self, mean, covariance): + """Project state distribution to measurement space. + + Parameters + ---------- + mean : ndarray + The state's mean vector (8 dimensional array). + covariance : ndarray + The state's covariance matrix (8x8 dimensional). + + Returns + ------- + (ndarray, ndarray) + Returns the projected mean and covariance matrix of the given state + estimate. 
+ + """ + std = [ + self._std_weight_position * mean[2], self._std_weight_position * mean[3], + self._std_weight_position * mean[2], self._std_weight_position * mean[3]] + innovation_cov = np.diag(np.square(std)) + + mean = np.dot(self._update_mat, mean) + covariance = np.linalg.multi_dot((self._update_mat, covariance, self._update_mat.T)) + return mean, covariance + innovation_cov + + def multi_predict(self, mean, covariance): + """Run Kalman filter prediction step (Vectorized version). + Parameters + ---------- + mean : ndarray + The Nx8 dimensional mean matrix of the object states at the previous + time step. + covariance : ndarray + The Nx8x8 dimensional covariance matrix of the object states at the + previous time step. + Returns + ------- + (ndarray, ndarray) + Returns the mean vector and covariance matrix of the predicted + state. Unobserved velocities are initialized to 0 mean. + """ + std_pos = [ + self._std_weight_position * mean[:, 2], self._std_weight_position * mean[:, 3], + self._std_weight_position * mean[:, 2], self._std_weight_position * mean[:, 3]] + std_vel = [ + self._std_weight_velocity * mean[:, 2], self._std_weight_velocity * mean[:, 3], + self._std_weight_velocity * mean[:, 2], self._std_weight_velocity * mean[:, 3]] + sqr = np.square(np.r_[std_pos, std_vel]).T + + motion_cov = [np.diag(sqr[i]) for i in range(len(mean))] + motion_cov = np.asarray(motion_cov) + + mean = np.dot(mean, self._motion_mat.T) + left = np.dot(self._motion_mat, covariance).transpose((1, 0, 2)) + covariance = np.dot(left, self._motion_mat.T) + motion_cov + + return mean, covariance + + def update(self, mean, covariance, measurement): + """Run Kalman filter correction step. + + Parameters + ---------- + mean : ndarray + The predicted state's mean vector (8 dimensional). + covariance : ndarray + The state's covariance matrix (8x8 dimensional). + measurement : ndarray + The 4 dimensional measurement vector (x, y, w, h), where (x, y) + is the center position, w the width, and h the height of the + bounding box. + + Returns + ------- + (ndarray, ndarray) + Returns the measurement-corrected state distribution. + + """ + projected_mean, projected_cov = self.project(mean, covariance) + + chol_factor, lower = scipy.linalg.cho_factor(projected_cov, lower=True, check_finite=False) + kalman_gain = scipy.linalg.cho_solve((chol_factor, lower), + np.dot(covariance, self._update_mat.T).T, + check_finite=False).T + innovation = measurement - projected_mean + + new_mean = mean + np.dot(innovation, kalman_gain.T) + new_covariance = covariance - np.linalg.multi_dot((kalman_gain, projected_cov, kalman_gain.T)) + return new_mean, new_covariance + + def gating_distance(self, mean, covariance, measurements, only_position=False, metric='maha'): + """Compute gating distance between state distribution and measurements. + A suitable distance threshold can be obtained from `chi2inv95`. If + `only_position` is False, the chi-square distribution has 4 degrees of + freedom, otherwise 2. + Parameters + ---------- + mean : ndarray + Mean vector over the state distribution (8 dimensional). + covariance : ndarray + Covariance of the state distribution (8x8 dimensional). + measurements : ndarray + An Nx4 dimensional matrix of N measurements, each in + format (x, y, a, h) where (x, y) is the bounding box center + position, a the aspect ratio, and h the height. + only_position : Optional[bool] + If True, distance computation is done with respect to the bounding + box center position only. 
+ Returns + ------- + ndarray + Returns an array of length N, where the i-th element contains the + squared Mahalanobis distance between (mean, covariance) and + `measurements[i]`. + """ + mean, covariance = self.project(mean, covariance) + if only_position: + mean, covariance = mean[:2], covariance[:2, :2] + measurements = measurements[:, :2] + + d = measurements - mean + if metric == 'gaussian': + return np.sum(d * d, axis=1) + elif metric == 'maha': + cholesky_factor = np.linalg.cholesky(covariance) + z = scipy.linalg.solve_triangular(cholesky_factor, d.T, lower=True, check_finite=False, overwrite_b=True) + return np.sum(z * z, axis=0) # square maha + else: + raise ValueError('invalid distance metric') diff --git a/ultralytics/tracker/utils/matching.py b/ultralytics/tracker/utils/matching.py new file mode 100644 index 0000000..0b22b3d --- /dev/null +++ b/ultralytics/tracker/utils/matching.py @@ -0,0 +1,229 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +import numpy as np +import scipy +from scipy.spatial.distance import cdist + +from .kalman_filter import chi2inv95 + +try: + import lap # for linear_assignment + + assert lap.__version__ # verify package is not directory +except (ImportError, AssertionError, AttributeError): + from ultralytics.yolo.utils.checks import check_requirements + + check_requirements('lapx>=0.5.2') # update to lap package from https://github.com/rathaROG/lapx + import lap + + +def merge_matches(m1, m2, shape): + """Merge two sets of matches and return matched and unmatched indices.""" + O, P, Q = shape + m1 = np.asarray(m1) + m2 = np.asarray(m2) + + M1 = scipy.sparse.coo_matrix((np.ones(len(m1)), (m1[:, 0], m1[:, 1])), shape=(O, P)) + M2 = scipy.sparse.coo_matrix((np.ones(len(m2)), (m2[:, 0], m2[:, 1])), shape=(P, Q)) + + mask = M1 * M2 + match = mask.nonzero() + match = list(zip(match[0], match[1])) + unmatched_O = tuple(set(range(O)) - {i for i, j in match}) + unmatched_Q = tuple(set(range(Q)) - {j for i, j in match}) + + return match, unmatched_O, unmatched_Q + + +def _indices_to_matches(cost_matrix, indices, thresh): + """_indices_to_matches: Return matched and unmatched indices given a cost matrix, indices, and a threshold.""" + matched_cost = cost_matrix[tuple(zip(*indices))] + matched_mask = (matched_cost <= thresh) + + matches = indices[matched_mask] + unmatched_a = tuple(set(range(cost_matrix.shape[0])) - set(matches[:, 0])) + unmatched_b = tuple(set(range(cost_matrix.shape[1])) - set(matches[:, 1])) + + return matches, unmatched_a, unmatched_b + + +def linear_assignment(cost_matrix, thresh, use_lap=True): + """Linear assignment implementations with scipy and lap.lapjv.""" + if cost_matrix.size == 0: + return np.empty((0, 2), dtype=int), tuple(range(cost_matrix.shape[0])), tuple(range(cost_matrix.shape[1])) + + if use_lap: + _, x, y = lap.lapjv(cost_matrix, extend_cost=True, cost_limit=thresh) + matches = [[ix, mx] for ix, mx in enumerate(x) if mx >= 0] + unmatched_a = np.where(x < 0)[0] + unmatched_b = np.where(y < 0)[0] + else: + # Scipy linear sum assignment is NOT working correctly, DO NOT USE + y, x = scipy.optimize.linear_sum_assignment(cost_matrix) # row y, col x + matches = np.asarray([[i, x] for i, x in enumerate(x) if cost_matrix[i, x] <= thresh]) + unmatched = np.ones(cost_matrix.shape) + for i, xi in matches: + unmatched[i, xi] = 0.0 + unmatched_a = np.where(unmatched.all(1))[0] + unmatched_b = np.where(unmatched.all(0))[0] + + return matches, unmatched_a, unmatched_b + + +def ious(atlbrs, btlbrs): + """ + Compute cost based on IoU + :type 
atlbrs: list[tlbr] | np.ndarray + :type atlbrs: list[tlbr] | np.ndarray + + :rtype ious np.ndarray + """ + ious = np.zeros((len(atlbrs), len(btlbrs)), dtype=np.float32) + if ious.size == 0: + return ious + + ious = bbox_ious(np.ascontiguousarray(atlbrs, dtype=np.float32), np.ascontiguousarray(btlbrs, dtype=np.float32)) + return ious + + +def iou_distance(atracks, btracks): + """ + Compute cost based on IoU + :type atracks: list[STrack] + :type btracks: list[STrack] + + :rtype cost_matrix np.ndarray + """ + + if (len(atracks) > 0 and isinstance(atracks[0], np.ndarray)) \ + or (len(btracks) > 0 and isinstance(btracks[0], np.ndarray)): + atlbrs = atracks + btlbrs = btracks + else: + atlbrs = [track.tlbr for track in atracks] + btlbrs = [track.tlbr for track in btracks] + _ious = ious(atlbrs, btlbrs) + return 1 - _ious # cost matrix + + +def v_iou_distance(atracks, btracks): + """ + Compute cost based on IoU + :type atracks: list[STrack] + :type btracks: list[STrack] + + :rtype cost_matrix np.ndarray + """ + + if (len(atracks) > 0 and isinstance(atracks[0], np.ndarray)) \ + or (len(btracks) > 0 and isinstance(btracks[0], np.ndarray)): + atlbrs = atracks + btlbrs = btracks + else: + atlbrs = [track.tlwh_to_tlbr(track.pred_bbox) for track in atracks] + btlbrs = [track.tlwh_to_tlbr(track.pred_bbox) for track in btracks] + _ious = ious(atlbrs, btlbrs) + return 1 - _ious # cost matrix + + +def embedding_distance(tracks, detections, metric='cosine'): + """ + :param tracks: list[STrack] + :param detections: list[BaseTrack] + :param metric: + :return: cost_matrix np.ndarray + """ + + cost_matrix = np.zeros((len(tracks), len(detections)), dtype=np.float32) + if cost_matrix.size == 0: + return cost_matrix + det_features = np.asarray([track.curr_feat for track in detections], dtype=np.float32) + # for i, track in enumerate(tracks): + # cost_matrix[i, :] = np.maximum(0.0, cdist(track.smooth_feat.reshape(1,-1), det_features, metric)) + track_features = np.asarray([track.smooth_feat for track in tracks], dtype=np.float32) + cost_matrix = np.maximum(0.0, cdist(track_features, det_features, metric)) # Normalized features + return cost_matrix + + +def gate_cost_matrix(kf, cost_matrix, tracks, detections, only_position=False): + """Apply gating to the cost matrix based on predicted tracks and detected objects.""" + if cost_matrix.size == 0: + return cost_matrix + gating_dim = 2 if only_position else 4 + gating_threshold = chi2inv95[gating_dim] + measurements = np.asarray([det.to_xyah() for det in detections]) + for row, track in enumerate(tracks): + gating_distance = kf.gating_distance(track.mean, track.covariance, measurements, only_position) + cost_matrix[row, gating_distance > gating_threshold] = np.inf + return cost_matrix + + +def fuse_motion(kf, cost_matrix, tracks, detections, only_position=False, lambda_=0.98): + """Fuse motion between tracks and detections with gating and Kalman filtering.""" + if cost_matrix.size == 0: + return cost_matrix + gating_dim = 2 if only_position else 4 + gating_threshold = chi2inv95[gating_dim] + measurements = np.asarray([det.to_xyah() for det in detections]) + for row, track in enumerate(tracks): + gating_distance = kf.gating_distance(track.mean, track.covariance, measurements, only_position, metric='maha') + cost_matrix[row, gating_distance > gating_threshold] = np.inf + cost_matrix[row] = lambda_ * cost_matrix[row] + (1 - lambda_) * gating_distance + return cost_matrix + + +def fuse_iou(cost_matrix, tracks, detections): + """Fuses ReID and IoU similarity matrices to 
yield a cost matrix for object tracking.""" + if cost_matrix.size == 0: + return cost_matrix + reid_sim = 1 - cost_matrix + iou_dist = iou_distance(tracks, detections) + iou_sim = 1 - iou_dist + fuse_sim = reid_sim * (1 + iou_sim) / 2 + # det_scores = np.array([det.score for det in detections]) + # det_scores = np.expand_dims(det_scores, axis=0).repeat(cost_matrix.shape[0], axis=0) + return 1 - fuse_sim # fuse cost + + +def fuse_score(cost_matrix, detections): + """Fuses cost matrix with detection scores to produce a single similarity matrix.""" + if cost_matrix.size == 0: + return cost_matrix + iou_sim = 1 - cost_matrix + det_scores = np.array([det.score for det in detections]) + det_scores = np.expand_dims(det_scores, axis=0).repeat(cost_matrix.shape[0], axis=0) + fuse_sim = iou_sim * det_scores + return 1 - fuse_sim # fuse_cost + + +def bbox_ious(box1, box2, eps=1e-7): + """ + Calculate the Intersection over Union (IoU) between pairs of bounding boxes. + + Args: + box1 (np.array): A numpy array of shape (n, 4) representing 'n' bounding boxes. + Each row is in the format (x1, y1, x2, y2). + box2 (np.array): A numpy array of shape (m, 4) representing 'm' bounding boxes. + Each row is in the format (x1, y1, x2, y2). + eps (float, optional): A small constant to prevent division by zero. Defaults to 1e-7. + + Returns: + (np.array): A numpy array of shape (n, m) representing the IoU scores for each pair + of bounding boxes from box1 and box2. + + Note: + The bounding box coordinates are expected to be in the format (x1, y1, x2, y2). + """ + + # Get the coordinates of bounding boxes + b1_x1, b1_y1, b1_x2, b1_y2 = box1.T + b2_x1, b2_y1, b2_x2, b2_y2 = box2.T + + # Intersection area + inter_area = (np.minimum(b1_x2[:, None], b2_x2) - np.maximum(b1_x1[:, None], b2_x1)).clip(0) * \ + (np.minimum(b1_y2[:, None], b2_y2) - np.maximum(b1_y1[:, None], b2_y1)).clip(0) + + # box2 area + box1_area = (b1_x2 - b1_x1) * (b1_y2 - b1_y1) + box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + return inter_area / (box2_area + box1_area[:, None] - inter_area + eps) diff --git a/ultralytics/vit/__init__.py b/ultralytics/vit/__init__.py new file mode 100644 index 0000000..8e96f91 --- /dev/null +++ b/ultralytics/vit/__init__.py @@ -0,0 +1,6 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +from .rtdetr import RTDETR +from .sam import SAM + +__all__ = 'RTDETR', 'SAM' # allow simpler import diff --git a/ultralytics/vit/rtdetr/__init__.py b/ultralytics/vit/rtdetr/__init__.py new file mode 100644 index 0000000..4d12115 --- /dev/null +++ b/ultralytics/vit/rtdetr/__init__.py @@ -0,0 +1,7 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +from .model import RTDETR +from .predict import RTDETRPredictor +from .val import RTDETRValidator + +__all__ = 'RTDETRPredictor', 'RTDETRValidator', 'RTDETR' diff --git a/ultralytics/vit/rtdetr/model.py b/ultralytics/vit/rtdetr/model.py new file mode 100644 index 0000000..259c7c9 --- /dev/null +++ b/ultralytics/vit/rtdetr/model.py @@ -0,0 +1,173 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +""" +RT-DETR model interface +""" + +from pathlib import Path + +import torch.nn as nn + +from ultralytics.nn.tasks import RTDETRDetectionModel, attempt_load_one_weight, yaml_model_load +from ultralytics.yolo.cfg import get_cfg +from ultralytics.yolo.engine.exporter import Exporter +from ultralytics.yolo.utils import DEFAULT_CFG, DEFAULT_CFG_DICT, LOGGER, RANK, ROOT, is_git_dir +from ultralytics.yolo.utils.checks import check_imgsz +from ultralytics.yolo.utils.torch_utils import model_info, smart_inference_mode + 
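+# The RTDETR class below exposes RT-DETR behind the same high-level API as YOLO
+# (load / predict / train / val / export), delegating to the RTDETRPredictor,
+# RTDETRTrainer and RTDETRValidator classes imported in this module.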
+from .predict import RTDETRPredictor +from .train import RTDETRTrainer +from .val import RTDETRValidator + + +class RTDETR: + + def __init__(self, model='rtdetr-l.pt') -> None: + if model and not model.endswith('.pt') and not model.endswith('.yaml'): + raise NotImplementedError('RT-DETR only supports creating from pt file or yaml file.') + # Load or create new YOLO model + self.predictor = None + self.ckpt = None + suffix = Path(model).suffix + if suffix == '.yaml': + self._new(model) + else: + self._load(model) + + def _new(self, cfg: str, verbose=True): + cfg_dict = yaml_model_load(cfg) + self.cfg = cfg + self.task = 'detect' + self.model = RTDETRDetectionModel(cfg_dict, verbose=verbose) # build model + + # Below added to allow export from YAMLs + self.model.args = DEFAULT_CFG_DICT # attach args to model + self.model.task = self.task + + @smart_inference_mode() + def _load(self, weights: str): + self.model, self.ckpt = attempt_load_one_weight(weights) + self.model.args = DEFAULT_CFG_DICT # attach args to model + self.task = self.model.args['task'] + + @smart_inference_mode() + def load(self, weights='yolov8n.pt'): + """ + Transfers parameters with matching names and shapes from 'weights' to model. + """ + if isinstance(weights, (str, Path)): + weights, self.ckpt = attempt_load_one_weight(weights) + self.model.load(weights) + return self + + @smart_inference_mode() + def predict(self, source=None, stream=False, **kwargs): + """ + Perform prediction using the YOLO model. + + Args: + source (str | int | PIL | np.ndarray): The source of the image to make predictions on. + Accepts all source types accepted by the YOLO model. + stream (bool): Whether to stream the predictions or not. Defaults to False. + **kwargs : Additional keyword arguments passed to the predictor. + Check the 'configuration' section in the documentation for all available options. + + Returns: + (List[ultralytics.yolo.engine.results.Results]): The prediction results. + """ + if source is None: + source = ROOT / 'assets' if is_git_dir() else 'https://ultralytics.com/images/bus.jpg' + LOGGER.warning(f"WARNING ⚠️ 'source' is missing. Using 'source={source}'.") + overrides = dict(conf=0.25, task='detect', mode='predict') + overrides.update(kwargs) # prefer kwargs + if not self.predictor: + self.predictor = RTDETRPredictor(overrides=overrides) + self.predictor.setup_model(model=self.model) + else: # only update args if predictor is already setup + self.predictor.args = get_cfg(self.predictor.args, overrides) + return self.predictor(source, stream=stream) + + def train(self, **kwargs): + """ + Trains the model on a given dataset. + + Args: + **kwargs (Any): Any number of arguments representing the training configuration. + """ + overrides = dict(task='detect', mode='train') + overrides.update(kwargs) + overrides['deterministic'] = False + if not overrides.get('data'): + raise AttributeError("Dataset required but missing, i.e. 
pass 'data=coco128.yaml'") + if overrides.get('resume'): + overrides['resume'] = self.ckpt_path + self.task = overrides.get('task') or self.task + self.trainer = RTDETRTrainer(overrides=overrides) + if not overrides.get('resume'): # manually set model only if not resuming + self.trainer.model = self.trainer.get_model(weights=self.model if self.ckpt else None, cfg=self.model.yaml) + self.model = self.trainer.model + self.trainer.train() + # Update model and cfg after training + if RANK in (-1, 0): + self.model, _ = attempt_load_one_weight(str(self.trainer.best)) + self.overrides = self.model.args + self.metrics = getattr(self.trainer.validator, 'metrics', None) # TODO: no metrics returned by DDP + + def val(self, **kwargs): + """Run validation given dataset.""" + overrides = dict(task='detect', mode='val') + overrides.update(kwargs) # prefer kwargs + args = get_cfg(cfg=DEFAULT_CFG, overrides=overrides) + args.imgsz = check_imgsz(args.imgsz, max_dim=1) + validator = RTDETRValidator(args=args) + validator(model=self.model) + self.metrics = validator.metrics + return validator.metrics + + def info(self, verbose=True): + """Get model info""" + return model_info(self.model, verbose=verbose) + + def _check_is_pytorch_model(self): + """ + Raises TypeError is model is not a PyTorch model + """ + pt_str = isinstance(self.model, (str, Path)) and Path(self.model).suffix == '.pt' + pt_module = isinstance(self.model, nn.Module) + if not (pt_module or pt_str): + raise TypeError(f"model='{self.model}' must be a *.pt PyTorch model, but is a different type. " + f'PyTorch models can be used to train, val, predict and export, i.e. ' + f"'yolo export model=yolov8n.pt', but exported formats like ONNX, TensorRT etc. only " + f"support 'predict' and 'val' modes, i.e. 'yolo predict model=yolov8n.onnx'.") + + def fuse(self): + """Fuse PyTorch Conv2d and BatchNorm2d layers.""" + self._check_is_pytorch_model() + self.model.fuse() + + @smart_inference_mode() + def export(self, **kwargs): + """ + Export model. + + Args: + **kwargs : Any other args accepted by the predictors. To see all args check 'configuration' section in docs + """ + overrides = dict(task='detect') + overrides.update(kwargs) + overrides['mode'] = 'export' + args = get_cfg(cfg=DEFAULT_CFG, overrides=overrides) + args.task = self.task + if args.imgsz == DEFAULT_CFG.imgsz: + args.imgsz = self.model.args['imgsz'] # use trained imgsz unless custom value is passed + if args.batch == DEFAULT_CFG.batch: + args.batch = 1 # default to 1 if not modified + return Exporter(overrides=args)(model=self.model) + + def __call__(self, source=None, stream=False, **kwargs): + """Calls the 'predict' function with given arguments to perform object detection.""" + return self.predict(source, stream, **kwargs) + + def __getattr__(self, attr): + """Raises error if object has no requested attribute.""" + name = self.__class__.__name__ + raise AttributeError(f"'{name}' object has no attribute '{attr}'. 
See valid attributes below.\n{self.__doc__}") diff --git a/ultralytics/vit/rtdetr/predict.py b/ultralytics/vit/rtdetr/predict.py new file mode 100644 index 0000000..77c02c2 --- /dev/null +++ b/ultralytics/vit/rtdetr/predict.py @@ -0,0 +1,44 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +import torch + +from ultralytics.yolo.data.augment import LetterBox +from ultralytics.yolo.engine.predictor import BasePredictor +from ultralytics.yolo.engine.results import Results +from ultralytics.yolo.utils import ops + + +class RTDETRPredictor(BasePredictor): + + def postprocess(self, preds, img, orig_imgs): + """Postprocess predictions and returns a list of Results objects.""" + nd = preds[0].shape[-1] + bboxes, scores = preds[0].split((4, nd - 4), dim=-1) + results = [] + for i, bbox in enumerate(bboxes): # (300, 4) + bbox = ops.xywh2xyxy(bbox) + score, cls = scores[i].max(-1, keepdim=True) # (300, 1) + idx = score.squeeze(-1) > self.args.conf # (300, ) + if self.args.classes is not None: + idx = (cls == torch.tensor(self.args.classes, device=cls.device)).any(1) & idx + pred = torch.cat([bbox, score, cls], dim=-1)[idx] # filter + orig_img = orig_imgs[i] if isinstance(orig_imgs, list) else orig_imgs + oh, ow = orig_img.shape[:2] + if not isinstance(orig_imgs, torch.Tensor): + pred[..., [0, 2]] *= ow + pred[..., [1, 3]] *= oh + path = self.batch[0] + img_path = path[i] if isinstance(path, list) else path + results.append(Results(orig_img=orig_img, path=img_path, names=self.model.names, boxes=pred)) + return results + + def pre_transform(self, im): + """Pre-transform input image before inference. + + Args: + im (List(np.ndarray)): (N, 3, h, w) for tensor, [(h, w, 3) x N] for list. + + Return: A list of transformed imgs. + """ + # The size must be square(640) and scaleFilled. + return [LetterBox(self.imgsz, auto=False, scaleFill=True)(image=x) for x in im] diff --git a/ultralytics/vit/rtdetr/train.py b/ultralytics/vit/rtdetr/train.py new file mode 100644 index 0000000..54eeaf4 --- /dev/null +++ b/ultralytics/vit/rtdetr/train.py @@ -0,0 +1,80 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +from copy import copy + +import torch + +from ultralytics.nn.tasks import RTDETRDetectionModel +from ultralytics.yolo.utils import DEFAULT_CFG, RANK, colorstr +from ultralytics.yolo.v8.detect import DetectionTrainer + +from .val import RTDETRDataset, RTDETRValidator + + +class RTDETRTrainer(DetectionTrainer): + + def get_model(self, cfg=None, weights=None, verbose=True): + """Return a YOLO detection model.""" + model = RTDETRDetectionModel(cfg, nc=self.data['nc'], verbose=verbose and RANK == -1) + if weights: + model.load(weights) + return model + + def build_dataset(self, img_path, mode='val', batch=None): + """Build RTDETR Dataset + + Args: + img_path (str): Path to the folder containing images. + mode (str): `train` mode or `val` mode, users are able to customize different augmentations for each mode. + batch (int, optional): Size of batches, this is for `rect`. Defaults to None. 
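+
+        Returns:
+            (RTDETRDataset): Dataset that stretch-resizes images to a square `imgsz` with rect batching disabled.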
+ """ + return RTDETRDataset( + img_path=img_path, + imgsz=self.args.imgsz, + batch_size=batch, + augment=mode == 'train', # no augmentation + hyp=self.args, + rect=False, # no rect + cache=self.args.cache or None, + prefix=colorstr(f'{mode}: '), + data=self.data) + + def get_validator(self): + """Returns a DetectionValidator for RTDETR model validation.""" + self.loss_names = 'giou_loss', 'cls_loss', 'l1_loss' + return RTDETRValidator(self.test_loader, save_dir=self.save_dir, args=copy(self.args)) + + def preprocess_batch(self, batch): + """Preprocesses a batch of images by scaling and converting to float.""" + batch = super().preprocess_batch(batch) + bs = len(batch['img']) + batch_idx = batch['batch_idx'] + gt_bbox, gt_class = [], [] + for i in range(bs): + gt_bbox.append(batch['bboxes'][batch_idx == i].to(batch_idx.device)) + gt_class.append(batch['cls'][batch_idx == i].to(device=batch_idx.device, dtype=torch.long)) + return batch + + +def train(cfg=DEFAULT_CFG, use_python=False): + """Train and optimize RTDETR model given training data and device.""" + model = 'rtdetr-l.yaml' + data = cfg.data or 'coco128.yaml' # or yolo.ClassificationDataset("mnist") + device = cfg.device if cfg.device is not None else '' + + # NOTE: F.grid_sample which is in rt-detr does not support deterministic=True + # NOTE: amp training causes nan outputs and end with error while doing bipartite graph matching + args = dict(model=model, + data=data, + device=device, + imgsz=640, + exist_ok=True, + batch=4, + deterministic=False, + amp=False) + trainer = RTDETRTrainer(overrides=args) + trainer.train() + + +if __name__ == '__main__': + train() diff --git a/ultralytics/vit/rtdetr/val.py b/ultralytics/vit/rtdetr/val.py new file mode 100644 index 0000000..cfee292 --- /dev/null +++ b/ultralytics/vit/rtdetr/val.py @@ -0,0 +1,151 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +from pathlib import Path + +import cv2 +import numpy as np +import torch + +from ultralytics.yolo.data import YOLODataset +from ultralytics.yolo.data.augment import Compose, Format, v8_transforms +from ultralytics.yolo.utils import colorstr, ops +from ultralytics.yolo.v8.detect import DetectionValidator + +__all__ = 'RTDETRValidator', # tuple or list + + +# TODO: Temporarily, RT-DETR does not need padding. 
+class RTDETRDataset(YOLODataset): + + def __init__(self, *args, data=None, **kwargs): + super().__init__(*args, data=data, use_segments=False, use_keypoints=False, **kwargs) + + # NOTE: add stretch version load_image for rtdetr mosaic + def load_image(self, i): + """Loads 1 image from dataset index 'i', returns (im, resized hw).""" + im, f, fn = self.ims[i], self.im_files[i], self.npy_files[i] + if im is None: # not cached in RAM + if fn.exists(): # load npy + im = np.load(fn) + else: # read image + im = cv2.imread(f) # BGR + if im is None: + raise FileNotFoundError(f'Image Not Found {f}') + h0, w0 = im.shape[:2] # orig hw + im = cv2.resize(im, (self.imgsz, self.imgsz), interpolation=cv2.INTER_LINEAR) + + # Add to buffer if training with augmentations + if self.augment: + self.ims[i], self.im_hw0[i], self.im_hw[i] = im, (h0, w0), im.shape[:2] # im, hw_original, hw_resized + self.buffer.append(i) + if len(self.buffer) >= self.max_buffer_length: + j = self.buffer.pop(0) + self.ims[j], self.im_hw0[j], self.im_hw[j] = None, None, None + + return im, (h0, w0), im.shape[:2] + + return self.ims[i], self.im_hw0[i], self.im_hw[i] + + def build_transforms(self, hyp=None): + """Temporarily, only for evaluation.""" + if self.augment: + hyp.mosaic = hyp.mosaic if self.augment and not self.rect else 0.0 + hyp.mixup = hyp.mixup if self.augment and not self.rect else 0.0 + transforms = v8_transforms(self, self.imgsz, hyp, stretch=True) + else: + # transforms = Compose([LetterBox(new_shape=(self.imgsz, self.imgsz), auto=False, scaleFill=True)]) + transforms = Compose([]) + transforms.append( + Format(bbox_format='xywh', + normalize=True, + return_mask=self.use_segments, + return_keypoint=self.use_keypoints, + batch_idx=True, + mask_ratio=hyp.mask_ratio, + mask_overlap=hyp.overlap_mask)) + return transforms + + +class RTDETRValidator(DetectionValidator): + + def build_dataset(self, img_path, mode='val', batch=None): + """Build YOLO Dataset + + Args: + img_path (str): Path to the folder containing images. + mode (str): `train` mode or `val` mode, users are able to customize different augmentations for each mode. + batch (int, optional): Size of batches, this is for `rect`. Defaults to None. + """ + return RTDETRDataset( + img_path=img_path, + imgsz=self.args.imgsz, + batch_size=batch, + augment=False, # no augmentation + hyp=self.args, + rect=False, # no rect + cache=self.args.cache or None, + prefix=colorstr(f'{mode}: '), + data=self.data) + + def postprocess(self, preds): + """Apply Non-maximum suppression to prediction outputs.""" + bs, _, nd = preds[0].shape + bboxes, scores = preds[0].split((4, nd - 4), dim=-1) + bboxes *= self.args.imgsz + outputs = [torch.zeros((0, 6), device=bboxes.device)] * bs + for i, bbox in enumerate(bboxes): # (300, 4) + bbox = ops.xywh2xyxy(bbox) + score, cls = scores[i].max(-1) # (300, ) + # Do not need threshold for evaluation as only got 300 boxes here. + # idx = score > self.args.conf + pred = torch.cat([bbox, score[..., None], cls[..., None]], dim=-1) # filter + # sort by confidence to correctly get internal metrics. 
+ pred = pred[score.argsort(descending=True)] + outputs[i] = pred # [idx] + + return outputs + + def update_metrics(self, preds, batch): + """Metrics.""" + for si, pred in enumerate(preds): + idx = batch['batch_idx'] == si + cls = batch['cls'][idx] + bbox = batch['bboxes'][idx] + nl, npr = cls.shape[0], pred.shape[0] # number of labels, predictions + shape = batch['ori_shape'][si] + correct_bboxes = torch.zeros(npr, self.niou, dtype=torch.bool, device=self.device) # init + self.seen += 1 + + if npr == 0: + if nl: + self.stats.append((correct_bboxes, *torch.zeros((2, 0), device=self.device), cls.squeeze(-1))) + if self.args.plots: + self.confusion_matrix.process_batch(detections=None, labels=cls.squeeze(-1)) + continue + + # Predictions + if self.args.single_cls: + pred[:, 5] = 0 + predn = pred.clone() + predn[..., [0, 2]] *= shape[1] / self.args.imgsz # native-space pred + predn[..., [1, 3]] *= shape[0] / self.args.imgsz # native-space pred + + # Evaluate + if nl: + tbox = ops.xywh2xyxy(bbox) # target boxes + tbox[..., [0, 2]] *= shape[1] # native-space pred + tbox[..., [1, 3]] *= shape[0] # native-space pred + labelsn = torch.cat((cls, tbox), 1) # native-space labels + # NOTE: To get correct metrics, the inputs of `_process_batch` should always be float32 type. + correct_bboxes = self._process_batch(predn.float(), labelsn) + # TODO: maybe remove these `self.` arguments as they already are member variable + if self.args.plots: + self.confusion_matrix.process_batch(predn, labelsn) + self.stats.append((correct_bboxes, pred[:, 4], pred[:, 5], cls.squeeze(-1))) # (conf, pcls, tcls) + + # Save + if self.args.save_json: + self.pred_to_json(predn, batch['im_file'][si]) + if self.args.save_txt: + file = self.save_dir / 'labels' / f'{Path(batch["im_file"][si]).stem}.txt' + self.save_one_txt(predn, self.args.save_conf, shape, file) diff --git a/ultralytics/vit/sam/__init__.py b/ultralytics/vit/sam/__init__.py new file mode 100644 index 0000000..35f4efa --- /dev/null +++ b/ultralytics/vit/sam/__init__.py @@ -0,0 +1,8 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +from .model import SAM +from .predict import Predictor + +# from .build import build_sam + +__all__ = 'SAM', 'Predictor' # tuple or list diff --git a/ultralytics/vit/sam/amg.py b/ultralytics/vit/sam/amg.py new file mode 100644 index 0000000..29f0bcf --- /dev/null +++ b/ultralytics/vit/sam/amg.py @@ -0,0 +1,311 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +import math +from copy import deepcopy +from itertools import product +from typing import Any, Dict, Generator, ItemsView, List, Tuple + +import numpy as np +import torch + + +class MaskData: + """ + A structure for storing masks and their related data in batched format. + Implements basic filtering and concatenation. + """ + + def __init__(self, **kwargs) -> None: + """Initialize a MaskData object, ensuring all values are supported types.""" + for v in kwargs.values(): + assert isinstance( + v, (list, np.ndarray, torch.Tensor)), 'MaskData only supports list, numpy arrays, and torch tensors.' + self._stats = dict(**kwargs) + + def __setitem__(self, key: str, item: Any) -> None: + """Set an item in the MaskData object, ensuring it is a supported type.""" + assert isinstance( + item, (list, np.ndarray, torch.Tensor)), 'MaskData only supports list, numpy arrays, and torch tensors.' 
+ self._stats[key] = item + + def __delitem__(self, key: str) -> None: + """Delete an item from the MaskData object.""" + del self._stats[key] + + def __getitem__(self, key: str) -> Any: + """Get an item from the MaskData object.""" + return self._stats[key] + + def items(self) -> ItemsView[str, Any]: + """Return an ItemsView of the MaskData object.""" + return self._stats.items() + + def filter(self, keep: torch.Tensor) -> None: + """Filter the MaskData object based on the given boolean tensor.""" + for k, v in self._stats.items(): + if v is None: + self._stats[k] = None + elif isinstance(v, torch.Tensor): + self._stats[k] = v[torch.as_tensor(keep, device=v.device)] + elif isinstance(v, np.ndarray): + self._stats[k] = v[keep.detach().cpu().numpy()] + elif isinstance(v, list) and keep.dtype == torch.bool: + self._stats[k] = [a for i, a in enumerate(v) if keep[i]] + elif isinstance(v, list): + self._stats[k] = [v[i] for i in keep] + else: + raise TypeError(f'MaskData key {k} has an unsupported type {type(v)}.') + + def cat(self, new_stats: 'MaskData') -> None: + """Concatenate a new MaskData object to the current one.""" + for k, v in new_stats.items(): + if k not in self._stats or self._stats[k] is None: + self._stats[k] = deepcopy(v) + elif isinstance(v, torch.Tensor): + self._stats[k] = torch.cat([self._stats[k], v], dim=0) + elif isinstance(v, np.ndarray): + self._stats[k] = np.concatenate([self._stats[k], v], axis=0) + elif isinstance(v, list): + self._stats[k] = self._stats[k] + deepcopy(v) + else: + raise TypeError(f'MaskData key {k} has an unsupported type {type(v)}.') + + def to_numpy(self) -> None: + """Convert all torch tensors in the MaskData object to numpy arrays.""" + for k, v in self._stats.items(): + if isinstance(v, torch.Tensor): + self._stats[k] = v.detach().cpu().numpy() + + +def is_box_near_crop_edge(boxes: torch.Tensor, + crop_box: List[int], + orig_box: List[int], + atol: float = 20.0) -> torch.Tensor: + """Return a boolean tensor indicating if boxes are near the crop edge.""" + crop_box_torch = torch.as_tensor(crop_box, dtype=torch.float, device=boxes.device) + orig_box_torch = torch.as_tensor(orig_box, dtype=torch.float, device=boxes.device) + boxes = uncrop_boxes_xyxy(boxes, crop_box).float() + near_crop_edge = torch.isclose(boxes, crop_box_torch[None, :], atol=atol, rtol=0) + near_image_edge = torch.isclose(boxes, orig_box_torch[None, :], atol=atol, rtol=0) + near_crop_edge = torch.logical_and(near_crop_edge, ~near_image_edge) + return torch.any(near_crop_edge, dim=1) + + +def box_xyxy_to_xywh(box_xyxy: torch.Tensor) -> torch.Tensor: + """Convert bounding boxes from XYXY format to XYWH format.""" + box_xywh = deepcopy(box_xyxy) + box_xywh[2] = box_xywh[2] - box_xywh[0] + box_xywh[3] = box_xywh[3] - box_xywh[1] + return box_xywh + + +def batch_iterator(batch_size: int, *args) -> Generator[List[Any], None, None]: + """Yield batches of data from the input arguments.""" + assert args and all(len(a) == len(args[0]) for a in args), 'Batched iteration must have same-size inputs.' 
+ n_batches = len(args[0]) // batch_size + int(len(args[0]) % batch_size != 0) + for b in range(n_batches): + yield [arg[b * batch_size:(b + 1) * batch_size] for arg in args] + + +def mask_to_rle_pytorch(tensor: torch.Tensor) -> List[Dict[str, Any]]: + """Encode masks as uncompressed RLEs in the format expected by pycocotools.""" + # Put in fortran order and flatten h,w + b, h, w = tensor.shape + tensor = tensor.permute(0, 2, 1).flatten(1) + + # Compute change indices + diff = tensor[:, 1:] ^ tensor[:, :-1] + change_indices = diff.nonzero() + + # Encode run length + out = [] + for i in range(b): + cur_idxs = change_indices[change_indices[:, 0] == i, 1] + cur_idxs = torch.cat([ + torch.tensor([0], dtype=cur_idxs.dtype, device=cur_idxs.device), + cur_idxs + 1, + torch.tensor([h * w], dtype=cur_idxs.dtype, device=cur_idxs.device), ]) + btw_idxs = cur_idxs[1:] - cur_idxs[:-1] + counts = [] if tensor[i, 0] == 0 else [0] + counts.extend(btw_idxs.detach().cpu().tolist()) + out.append({'size': [h, w], 'counts': counts}) + return out + + +def rle_to_mask(rle: Dict[str, Any]) -> np.ndarray: + """Compute a binary mask from an uncompressed RLE.""" + h, w = rle['size'] + mask = np.empty(h * w, dtype=bool) + idx = 0 + parity = False + for count in rle['counts']: + mask[idx:idx + count] = parity + idx += count + parity ^= True + mask = mask.reshape(w, h) + return mask.transpose() # Put in C order + + +def area_from_rle(rle: Dict[str, Any]) -> int: + """Calculate the area of a mask from its uncompressed RLE.""" + return sum(rle['counts'][1::2]) + + +def calculate_stability_score(masks: torch.Tensor, mask_threshold: float, threshold_offset: float) -> torch.Tensor: + """ + Computes the stability score for a batch of masks. The stability + score is the IoU between the binary masks obtained by thresholding + the predicted mask logits at high and low values. + """ + # One mask is always contained inside the other. + # Save memory by preventing unnecessary cast to torch.int64 + intersections = ((masks > (mask_threshold + threshold_offset)).sum(-1, dtype=torch.int16).sum(-1, + dtype=torch.int32)) + unions = ((masks > (mask_threshold - threshold_offset)).sum(-1, dtype=torch.int16).sum(-1, dtype=torch.int32)) + return intersections / unions + + +def build_point_grid(n_per_side: int) -> np.ndarray: + """Generate a 2D grid of evenly spaced points in the range [0,1]x[0,1].""" + offset = 1 / (2 * n_per_side) + points_one_side = np.linspace(offset, 1 - offset, n_per_side) + points_x = np.tile(points_one_side[None, :], (n_per_side, 1)) + points_y = np.tile(points_one_side[:, None], (1, n_per_side)) + return np.stack([points_x, points_y], axis=-1).reshape(-1, 2) + + +def build_all_layer_point_grids(n_per_side: int, n_layers: int, scale_per_layer: int) -> List[np.ndarray]: + """Generate point grids for all crop layers.""" + return [build_point_grid(int(n_per_side / (scale_per_layer ** i))) for i in range(n_layers + 1)] + + +def generate_crop_boxes(im_size: Tuple[int, ...], n_layers: int, + overlap_ratio: float) -> Tuple[List[List[int]], List[int]]: + """Generates a list of crop boxes of different sizes. 
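A small round-trip check for the uncompressed-RLE helpers above, again assuming the module added in this commit is importable. The encoded `counts` follow the pycocotools convention of alternating background/foreground runs, starting with the background run.

```python
import torch

from ultralytics.vit.sam.amg import area_from_rle, mask_to_rle_pytorch, rle_to_mask

masks = torch.zeros((1, 4, 4), dtype=torch.bool)
masks[0, 1:3, 1:3] = True  # a 2x2 square of foreground pixels

rles = mask_to_rle_pytorch(masks)  # one RLE dict per mask: {'size': [4, 4], 'counts': [...]}
decoded = rle_to_mask(rles[0])     # back to an (H, W) numpy bool array

assert decoded.sum() == 4 == area_from_rle(rles[0])
assert (torch.as_tensor(decoded) == masks[0]).all()  # lossless round trip
```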
Each layer has (2**i)**2 boxes for the ith layer.""" + crop_boxes, layer_idxs = [], [] + im_h, im_w = im_size + short_side = min(im_h, im_w) + + # Original image + crop_boxes.append([0, 0, im_w, im_h]) + layer_idxs.append(0) + + def crop_len(orig_len, n_crops, overlap): + """Crops bounding boxes to the size of the input image.""" + return int(math.ceil((overlap * (n_crops - 1) + orig_len) / n_crops)) + + for i_layer in range(n_layers): + n_crops_per_side = 2 ** (i_layer + 1) + overlap = int(overlap_ratio * short_side * (2 / n_crops_per_side)) + + crop_w = crop_len(im_w, n_crops_per_side, overlap) + crop_h = crop_len(im_h, n_crops_per_side, overlap) + + crop_box_x0 = [int((crop_w - overlap) * i) for i in range(n_crops_per_side)] + crop_box_y0 = [int((crop_h - overlap) * i) for i in range(n_crops_per_side)] + + # Crops in XYWH format + for x0, y0 in product(crop_box_x0, crop_box_y0): + box = [x0, y0, min(x0 + crop_w, im_w), min(y0 + crop_h, im_h)] + crop_boxes.append(box) + layer_idxs.append(i_layer + 1) + + return crop_boxes, layer_idxs + + +def uncrop_boxes_xyxy(boxes: torch.Tensor, crop_box: List[int]) -> torch.Tensor: + """Uncrop bounding boxes by adding the crop box offset.""" + x0, y0, _, _ = crop_box + offset = torch.tensor([[x0, y0, x0, y0]], device=boxes.device) + # Check if boxes has a channel dimension + if len(boxes.shape) == 3: + offset = offset.unsqueeze(1) + return boxes + offset + + +def uncrop_points(points: torch.Tensor, crop_box: List[int]) -> torch.Tensor: + """Uncrop points by adding the crop box offset.""" + x0, y0, _, _ = crop_box + offset = torch.tensor([[x0, y0]], device=points.device) + # Check if points has a channel dimension + if len(points.shape) == 3: + offset = offset.unsqueeze(1) + return points + offset + + +def uncrop_masks(masks: torch.Tensor, crop_box: List[int], orig_h: int, orig_w: int) -> torch.Tensor: + """Uncrop masks by padding them to the original image size.""" + x0, y0, x1, y1 = crop_box + if x0 == 0 and y0 == 0 and x1 == orig_w and y1 == orig_h: + return masks + # Coordinate transform masks + pad_x, pad_y = orig_w - (x1 - x0), orig_h - (y1 - y0) + pad = (x0, pad_x - x0, y0, pad_y - y0) + return torch.nn.functional.pad(masks, pad, value=0) + + +def remove_small_regions(mask: np.ndarray, area_thresh: float, mode: str) -> Tuple[np.ndarray, bool]: + """Remove small disconnected regions or holes in a mask, returning the mask and a modification indicator.""" + import cv2 # type: ignore + + assert mode in {'holes', 'islands'} + correct_holes = mode == 'holes' + working_mask = (correct_holes ^ mask).astype(np.uint8) + n_labels, regions, stats, _ = cv2.connectedComponentsWithStats(working_mask, 8) + sizes = stats[:, -1][1:] # Row 0 is background label + small_regions = [i + 1 for i, s in enumerate(sizes) if s < area_thresh] + if not small_regions: + return mask, False + fill_labels = [0] + small_regions + if not correct_holes: + # If every region is below threshold, keep largest + fill_labels = [i for i in range(n_labels) if i not in fill_labels] or [int(np.argmax(sizes)) + 1] + mask = np.isin(regions, fill_labels) + return mask, True + + +def coco_encode_rle(uncompressed_rle: Dict[str, Any]) -> Dict[str, Any]: + """Encode uncompressed RLE (run-length encoding) to COCO RLE format.""" + from pycocotools import mask as mask_utils # type: ignore + + h, w = uncompressed_rle['size'] + rle = mask_utils.frPyObjects(uncompressed_rle, h, w) + rle['counts'] = rle['counts'].decode('utf-8') # Necessary to serialize with json + return rle + + +def 
batched_mask_to_box(masks: torch.Tensor) -> torch.Tensor: + """ + Calculates boxes in XYXY format around masks. Return [0,0,0,0] for + an empty mask. For input shape C1xC2x...xHxW, the output shape is C1xC2x...x4. + """ + # torch.max below raises an error on empty inputs, just skip in this case + if torch.numel(masks) == 0: + return torch.zeros(*masks.shape[:-2], 4, device=masks.device) + + # Normalize shape to CxHxW + shape = masks.shape + h, w = shape[-2:] + masks = masks.flatten(0, -3) if len(shape) > 2 else masks.unsqueeze(0) + # Get top and bottom edges + in_height, _ = torch.max(masks, dim=-1) + in_height_coords = in_height * torch.arange(h, device=in_height.device)[None, :] + bottom_edges, _ = torch.max(in_height_coords, dim=-1) + in_height_coords = in_height_coords + h * (~in_height) + top_edges, _ = torch.min(in_height_coords, dim=-1) + + # Get left and right edges + in_width, _ = torch.max(masks, dim=-2) + in_width_coords = in_width * torch.arange(w, device=in_width.device)[None, :] + right_edges, _ = torch.max(in_width_coords, dim=-1) + in_width_coords = in_width_coords + w * (~in_width) + left_edges, _ = torch.min(in_width_coords, dim=-1) + + # If the mask is empty the right edge will be to the left of the left edge. + # Replace these boxes with [0, 0, 0, 0] + empty_filter = (right_edges < left_edges) | (bottom_edges < top_edges) + out = torch.stack([left_edges, top_edges, right_edges, bottom_edges], dim=-1) + out = out * (~empty_filter).unsqueeze(-1) + + # Return to original shape + return out.reshape(*shape[:-2], 4) if len(shape) > 2 else out[0] diff --git a/ultralytics/vit/sam/build.py b/ultralytics/vit/sam/build.py new file mode 100644 index 0000000..3572c2e --- /dev/null +++ b/ultralytics/vit/sam/build.py @@ -0,0 +1,157 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
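Before the `build.py` contents below, a quick illustration of `batched_mask_to_box`, which closes out `amg.py` above. This is a sketch assuming the package is importable; as documented, an empty mask maps to `[0, 0, 0, 0]`.

```python
import torch

from ultralytics.vit.sam.amg import batched_mask_to_box

masks = torch.zeros((2, 8, 8), dtype=torch.bool)
masks[0, 2:5, 3:7] = True  # occupies rows 2-4, cols 3-6
# masks[1] stays empty and should map to [0, 0, 0, 0]

boxes = batched_mask_to_box(masks)  # XYXY over the last occupied pixel indices, shape (2, 4)
print(boxes)  # tensor([[3, 2, 6, 4], [0, 0, 0, 0]])
```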
+ +from functools import partial + +import torch + +from ...yolo.utils.downloads import attempt_download_asset +from .modules.decoders import MaskDecoder +from .modules.encoders import ImageEncoderViT, PromptEncoder +from .modules.sam import Sam +from .modules.tiny_encoder import TinyViT +from .modules.transformer import TwoWayTransformer + + +def build_sam_vit_h(checkpoint=None): + """Build and return a Segment Anything Model (SAM) h-size model.""" + return _build_sam( + encoder_embed_dim=1280, + encoder_depth=32, + encoder_num_heads=16, + encoder_global_attn_indexes=[7, 15, 23, 31], + checkpoint=checkpoint, + ) + + +def build_sam_vit_l(checkpoint=None): + """Build and return a Segment Anything Model (SAM) l-size model.""" + return _build_sam( + encoder_embed_dim=1024, + encoder_depth=24, + encoder_num_heads=16, + encoder_global_attn_indexes=[5, 11, 17, 23], + checkpoint=checkpoint, + ) + + +def build_sam_vit_b(checkpoint=None): + """Build and return a Segment Anything Model (SAM) b-size model.""" + return _build_sam( + encoder_embed_dim=768, + encoder_depth=12, + encoder_num_heads=12, + encoder_global_attn_indexes=[2, 5, 8, 11], + checkpoint=checkpoint, + ) + + +def build_mobile_sam(checkpoint=None): + """Build and return Mobile Segment Anything Model (Mobile-SAM).""" + return _build_sam( + encoder_embed_dim=[64, 128, 160, 320], + encoder_depth=[2, 2, 6, 2], + encoder_num_heads=[2, 4, 5, 10], + encoder_global_attn_indexes=None, + mobile_sam=True, + checkpoint=checkpoint, + ) + + +def _build_sam(encoder_embed_dim, + encoder_depth, + encoder_num_heads, + encoder_global_attn_indexes, + checkpoint=None, + mobile_sam=False): + """Builds the selected SAM model architecture.""" + prompt_embed_dim = 256 + image_size = 1024 + vit_patch_size = 16 + image_embedding_size = image_size // vit_patch_size + image_encoder = (TinyViT( + img_size=1024, + in_chans=3, + num_classes=1000, + embed_dims=encoder_embed_dim, + depths=encoder_depth, + num_heads=encoder_num_heads, + window_sizes=[7, 7, 14, 7], + mlp_ratio=4.0, + drop_rate=0.0, + drop_path_rate=0.0, + use_checkpoint=False, + mbconv_expand_ratio=4.0, + local_conv_size=3, + layer_lr_decay=0.8, + ) if mobile_sam else ImageEncoderViT( + depth=encoder_depth, + embed_dim=encoder_embed_dim, + img_size=image_size, + mlp_ratio=4, + norm_layer=partial(torch.nn.LayerNorm, eps=1e-6), + num_heads=encoder_num_heads, + patch_size=vit_patch_size, + qkv_bias=True, + use_rel_pos=True, + global_attn_indexes=encoder_global_attn_indexes, + window_size=14, + out_chans=prompt_embed_dim, + )) + sam = Sam( + image_encoder=image_encoder, + prompt_encoder=PromptEncoder( + embed_dim=prompt_embed_dim, + image_embedding_size=(image_embedding_size, image_embedding_size), + input_image_size=(image_size, image_size), + mask_in_chans=16, + ), + mask_decoder=MaskDecoder( + num_multimask_outputs=3, + transformer=TwoWayTransformer( + depth=2, + embedding_dim=prompt_embed_dim, + mlp_dim=2048, + num_heads=8, + ), + transformer_dim=prompt_embed_dim, + iou_head_depth=3, + iou_head_hidden_dim=256, + ), + pixel_mean=[123.675, 116.28, 103.53], + pixel_std=[58.395, 57.12, 57.375], + ) + if checkpoint is not None: + checkpoint = attempt_download_asset(checkpoint) + with open(checkpoint, 'rb') as f: + state_dict = torch.load(f) + sam.load_state_dict(state_dict) + sam.eval() + # sam.load_state_dict(torch.load(checkpoint), strict=True) + # sam.eval() + return sam + + +sam_model_map = { + 'sam_h.pt': build_sam_vit_h, + 'sam_l.pt': build_sam_vit_l, + 'sam_b.pt': build_sam_vit_b, + 'mobile_sam.pt': 
build_mobile_sam, } + + +def build_sam(ckpt='sam_b.pt'): + """Build a SAM model specified by ckpt.""" + model_builder = None + for k in sam_model_map.keys(): + if ckpt.endswith(k): + model_builder = sam_model_map.get(k) + + if not model_builder: + raise FileNotFoundError(f'{ckpt} is not a supported sam model. Available models are: \n {sam_model_map.keys()}') + + return model_builder(ckpt) diff --git a/ultralytics/vit/sam/model.py b/ultralytics/vit/sam/model.py new file mode 100644 index 0000000..925328e --- /dev/null +++ b/ultralytics/vit/sam/model.py @@ -0,0 +1,59 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +""" +SAM model interface +""" + +from ultralytics.yolo.cfg import get_cfg +from ultralytics.yolo.utils.torch_utils import model_info + +from .build import build_sam +from .predict import Predictor + + +class SAM: + + def __init__(self, model='sam_b.pt') -> None: + if model and not model.endswith('.pt') and not model.endswith('.pth'): + # Should raise AssertionError instead? + raise NotImplementedError('Segment anything prediction requires pre-trained checkpoint') + self.model = build_sam(model) + self.task = 'segment' # required + self.predictor = None # reuse predictor + + def predict(self, source, stream=False, bboxes=None, points=None, labels=None, **kwargs): + """Predicts and returns segmentation masks for given image or video source.""" + overrides = dict(conf=0.25, task='segment', mode='predict', imgsz=1024) + overrides.update(kwargs) # prefer kwargs + if not self.predictor: + self.predictor = Predictor(overrides=overrides) + self.predictor.setup_model(model=self.model) + else: # only update args if predictor is already setup + self.predictor.args = get_cfg(self.predictor.args, overrides) + return self.predictor(source, stream=stream, bboxes=bboxes, points=points, labels=labels) + + def train(self, **kwargs): + """Function trains models but raises an error as SAM models do not support training.""" + raise NotImplementedError("SAM models don't support training") + + def val(self, **kwargs): + """Run validation given dataset.""" + raise NotImplementedError("SAM models don't support validation") + + def __call__(self, source=None, stream=False, bboxes=None, points=None, labels=None, **kwargs): + """Calls the 'predict' function with given arguments to perform object detection.""" + return self.predict(source, stream, bboxes, points, labels, **kwargs) + + def __getattr__(self, attr): + """Raises error if object has no requested attribute.""" + name = self.__class__.__name__ + raise AttributeError(f"'{name}' object has no attribute '{attr}'. See valid attributes below.\n{self.__doc__}") + + def info(self, detailed=False, verbose=True): + """ + Logs model info. + + Args: + detailed (bool): Show detailed information about model. + verbose (bool): Controls verbosity. 
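A usage sketch for the `build_sam` helper and the `SAM` interface above. The image path is a placeholder, and the exact formats accepted for `bboxes`, `points` and `labels` are assumptions here, since their conversion is handled by `Predictor` in `predict.py`; the checkpoint is fetched by `attempt_download_asset` if not already present.

```python
from ultralytics.vit.sam import SAM

model = SAM('sam_b.pt')   # resolves to build_sam_vit_b via sam_model_map
model.info(verbose=True)  # log layer/parameter counts

# Prompted prediction on a placeholder image, in source-image pixel coordinates
results = model.predict('image.jpg', bboxes=[100, 200, 400, 600])
results = model('image.jpg', points=[350, 420], labels=[1])  # __call__ forwards to predict()
```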
+ """ + return model_info(self.model, detailed=detailed, verbose=verbose) diff --git a/ultralytics/vit/sam/modules/__init__.py b/ultralytics/vit/sam/modules/__init__.py new file mode 100644 index 0000000..9e68dc1 --- /dev/null +++ b/ultralytics/vit/sam/modules/__init__.py @@ -0,0 +1 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license diff --git a/ultralytics/vit/sam/modules/decoders.py b/ultralytics/vit/sam/modules/decoders.py new file mode 100644 index 0000000..cadc0f0 --- /dev/null +++ b/ultralytics/vit/sam/modules/decoders.py @@ -0,0 +1,159 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +from typing import List, Tuple, Type + +import torch +from torch import nn +from torch.nn import functional as F + +from ultralytics.nn.modules import LayerNorm2d + + +class MaskDecoder(nn.Module): + + def __init__( + self, + *, + transformer_dim: int, + transformer: nn.Module, + num_multimask_outputs: int = 3, + activation: Type[nn.Module] = nn.GELU, + iou_head_depth: int = 3, + iou_head_hidden_dim: int = 256, + ) -> None: + """ + Predicts masks given an image and prompt embeddings, using a transformer architecture. + + Arguments: + transformer_dim (int): the channel dimension of the transformer module + transformer (nn.Module): the transformer used to predict masks + num_multimask_outputs (int): the number of masks to predict when disambiguating masks + activation (nn.Module): the type of activation to use when upscaling masks + iou_head_depth (int): the depth of the MLP used to predict mask quality + iou_head_hidden_dim (int): the hidden dimension of the MLP used to predict mask quality + """ + super().__init__() + self.transformer_dim = transformer_dim + self.transformer = transformer + + self.num_multimask_outputs = num_multimask_outputs + + self.iou_token = nn.Embedding(1, transformer_dim) + self.num_mask_tokens = num_multimask_outputs + 1 + self.mask_tokens = nn.Embedding(self.num_mask_tokens, transformer_dim) + + self.output_upscaling = nn.Sequential( + nn.ConvTranspose2d(transformer_dim, transformer_dim // 4, kernel_size=2, stride=2), + LayerNorm2d(transformer_dim // 4), + activation(), + nn.ConvTranspose2d(transformer_dim // 4, transformer_dim // 8, kernel_size=2, stride=2), + activation(), + ) + self.output_hypernetworks_mlps = nn.ModuleList([ + MLP(transformer_dim, transformer_dim, transformer_dim // 8, 3) for _ in range(self.num_mask_tokens)]) + + self.iou_prediction_head = MLP(transformer_dim, iou_head_hidden_dim, self.num_mask_tokens, iou_head_depth) + + def forward( + self, + image_embeddings: torch.Tensor, + image_pe: torch.Tensor, + sparse_prompt_embeddings: torch.Tensor, + dense_prompt_embeddings: torch.Tensor, + multimask_output: bool, + ) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Predict masks given image and prompt embeddings. + + Arguments: + image_embeddings (torch.Tensor): the embeddings from the image encoder + image_pe (torch.Tensor): positional encoding with the shape of image_embeddings + sparse_prompt_embeddings (torch.Tensor): the embeddings of the points and boxes + dense_prompt_embeddings (torch.Tensor): the embeddings of the mask inputs + multimask_output (bool): Whether to return multiple masks or a single mask. 
+ + Returns: + torch.Tensor: batched predicted masks + torch.Tensor: batched predictions of mask quality + """ + masks, iou_pred = self.predict_masks( + image_embeddings=image_embeddings, + image_pe=image_pe, + sparse_prompt_embeddings=sparse_prompt_embeddings, + dense_prompt_embeddings=dense_prompt_embeddings, + ) + + # Select the correct mask or masks for output + mask_slice = slice(1, None) if multimask_output else slice(0, 1) + masks = masks[:, mask_slice, :, :] + iou_pred = iou_pred[:, mask_slice] + + # Prepare output + return masks, iou_pred + + def predict_masks( + self, + image_embeddings: torch.Tensor, + image_pe: torch.Tensor, + sparse_prompt_embeddings: torch.Tensor, + dense_prompt_embeddings: torch.Tensor, + ) -> Tuple[torch.Tensor, torch.Tensor]: + """Predicts masks. See 'forward' for more details.""" + # Concatenate output tokens + output_tokens = torch.cat([self.iou_token.weight, self.mask_tokens.weight], dim=0) + output_tokens = output_tokens.unsqueeze(0).expand(sparse_prompt_embeddings.size(0), -1, -1) + tokens = torch.cat((output_tokens, sparse_prompt_embeddings), dim=1) + + # Expand per-image data in batch direction to be per-mask + src = torch.repeat_interleave(image_embeddings, tokens.shape[0], dim=0) + src = src + dense_prompt_embeddings + pos_src = torch.repeat_interleave(image_pe, tokens.shape[0], dim=0) + b, c, h, w = src.shape + + # Run the transformer + hs, src = self.transformer(src, pos_src, tokens) + iou_token_out = hs[:, 0, :] + mask_tokens_out = hs[:, 1:(1 + self.num_mask_tokens), :] + + # Upscale mask embeddings and predict masks using the mask tokens + src = src.transpose(1, 2).view(b, c, h, w) + upscaled_embedding = self.output_upscaling(src) + hyper_in_list: List[torch.Tensor] = [ + self.output_hypernetworks_mlps[i](mask_tokens_out[:, i, :]) for i in range(self.num_mask_tokens)] + hyper_in = torch.stack(hyper_in_list, dim=1) + b, c, h, w = upscaled_embedding.shape + masks = (hyper_in @ upscaled_embedding.view(b, c, h * w)).view(b, -1, h, w) + + # Generate mask quality predictions + iou_pred = self.iou_prediction_head(iou_token_out) + + return masks, iou_pred + + +class MLP(nn.Module): + """ + Lightly adapted from + https://github.com/facebookresearch/MaskFormer/blob/main/mask_former/modeling/transformer/transformer_predictor.py + """ + + def __init__( + self, + input_dim: int, + hidden_dim: int, + output_dim: int, + num_layers: int, + sigmoid_output: bool = False, + ) -> None: + super().__init__() + self.num_layers = num_layers + h = [hidden_dim] * (num_layers - 1) + self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])) + self.sigmoid_output = sigmoid_output + + def forward(self, x): + """Executes feedforward within the neural network module and applies activation.""" + for i, layer in enumerate(self.layers): + x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x) + if self.sigmoid_output: + x = torch.sigmoid(x) + return x diff --git a/ultralytics/vit/sam/modules/encoders.py b/ultralytics/vit/sam/modules/encoders.py new file mode 100644 index 0000000..0da032d --- /dev/null +++ b/ultralytics/vit/sam/modules/encoders.py @@ -0,0 +1,583 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +from typing import Any, Optional, Tuple, Type + +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F + +from ultralytics.nn.modules import LayerNorm2d, MLPBlock + + +# This class and its supporting functions below lightly adapted from the ViTDet backbone available at: 
https://github.com/facebookresearch/detectron2/blob/main/detectron2/modeling/backbone/vit.py # noqa +class ImageEncoderViT(nn.Module): + + def __init__( + self, + img_size: int = 1024, + patch_size: int = 16, + in_chans: int = 3, + embed_dim: int = 768, + depth: int = 12, + num_heads: int = 12, + mlp_ratio: float = 4.0, + out_chans: int = 256, + qkv_bias: bool = True, + norm_layer: Type[nn.Module] = nn.LayerNorm, + act_layer: Type[nn.Module] = nn.GELU, + use_abs_pos: bool = True, + use_rel_pos: bool = False, + rel_pos_zero_init: bool = True, + window_size: int = 0, + global_attn_indexes: Tuple[int, ...] = (), + ) -> None: + """ + Args: + img_size (int): Input image size. + patch_size (int): Patch size. + in_chans (int): Number of input image channels. + embed_dim (int): Patch embedding dimension. + depth (int): Depth of ViT. + num_heads (int): Number of attention heads in each ViT block. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. + qkv_bias (bool): If True, add a learnable bias to query, key, value. + norm_layer (nn.Module): Normalization layer. + act_layer (nn.Module): Activation layer. + use_abs_pos (bool): If True, use absolute positional embeddings. + use_rel_pos (bool): If True, add relative positional embeddings to the attention map. + rel_pos_zero_init (bool): If True, zero initialize relative positional parameters. + window_size (int): Window size for window attention blocks. + global_attn_indexes (list): Indexes for blocks using global attention. + """ + super().__init__() + self.img_size = img_size + + self.patch_embed = PatchEmbed( + kernel_size=(patch_size, patch_size), + stride=(patch_size, patch_size), + in_chans=in_chans, + embed_dim=embed_dim, + ) + + self.pos_embed: Optional[nn.Parameter] = None + if use_abs_pos: + # Initialize absolute positional embedding with pretrain image size. + self.pos_embed = nn.Parameter(torch.zeros(1, img_size // patch_size, img_size // patch_size, embed_dim)) + + self.blocks = nn.ModuleList() + for i in range(depth): + block = Block( + dim=embed_dim, + num_heads=num_heads, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + norm_layer=norm_layer, + act_layer=act_layer, + use_rel_pos=use_rel_pos, + rel_pos_zero_init=rel_pos_zero_init, + window_size=window_size if i not in global_attn_indexes else 0, + input_size=(img_size // patch_size, img_size // patch_size), + ) + self.blocks.append(block) + + self.neck = nn.Sequential( + nn.Conv2d( + embed_dim, + out_chans, + kernel_size=1, + bias=False, + ), + LayerNorm2d(out_chans), + nn.Conv2d( + out_chans, + out_chans, + kernel_size=3, + padding=1, + bias=False, + ), + LayerNorm2d(out_chans), + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = self.patch_embed(x) + if self.pos_embed is not None: + x = x + self.pos_embed + + for blk in self.blocks: + x = blk(x) + + x = self.neck(x.permute(0, 3, 1, 2)) + + return x + + +class PromptEncoder(nn.Module): + + def __init__( + self, + embed_dim: int, + image_embedding_size: Tuple[int, int], + input_image_size: Tuple[int, int], + mask_in_chans: int, + activation: Type[nn.Module] = nn.GELU, + ) -> None: + """ + Encodes prompts for input to SAM's mask decoder. + + Arguments: + embed_dim (int): The prompts' embedding dimension + image_embedding_size (tuple(int, int)): The spatial size of the + image embedding, as (H, W). + input_image_size (int): The padded size of the image as input + to the image encoder, as (H, W). + mask_in_chans (int): The number of hidden channels used for + encoding input masks. 
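A shape sketch for the `ImageEncoderViT` forward pass above: with `img_size=1024` and `patch_size=16`, the encoder emits a 64x64 grid of `out_chans`-dimensional features. The scaled-down configuration below is only to make the example run quickly on CPU; it is not one of the SAM presets from `build.py`.

```python
import torch

from ultralytics.vit.sam.modules.encoders import ImageEncoderViT

# Tiny, non-preset config purely for illustration
enc = ImageEncoderViT(img_size=64, patch_size=16, embed_dim=32, depth=2, num_heads=2, out_chans=256)
x = torch.randn(1, 3, 64, 64)
print(enc(x).shape)  # torch.Size([1, 256, 4, 4]) -> (B, out_chans, img_size/patch, img_size/patch)
```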
+ activation (nn.Module): The activation to use when encoding + input masks. + """ + super().__init__() + self.embed_dim = embed_dim + self.input_image_size = input_image_size + self.image_embedding_size = image_embedding_size + self.pe_layer = PositionEmbeddingRandom(embed_dim // 2) + + self.num_point_embeddings: int = 4 # pos/neg point + 2 box corners + point_embeddings = [nn.Embedding(1, embed_dim) for _ in range(self.num_point_embeddings)] + self.point_embeddings = nn.ModuleList(point_embeddings) + self.not_a_point_embed = nn.Embedding(1, embed_dim) + + self.mask_input_size = (4 * image_embedding_size[0], 4 * image_embedding_size[1]) + self.mask_downscaling = nn.Sequential( + nn.Conv2d(1, mask_in_chans // 4, kernel_size=2, stride=2), + LayerNorm2d(mask_in_chans // 4), + activation(), + nn.Conv2d(mask_in_chans // 4, mask_in_chans, kernel_size=2, stride=2), + LayerNorm2d(mask_in_chans), + activation(), + nn.Conv2d(mask_in_chans, embed_dim, kernel_size=1), + ) + self.no_mask_embed = nn.Embedding(1, embed_dim) + + def get_dense_pe(self) -> torch.Tensor: + """ + Returns the positional encoding used to encode point prompts, + applied to a dense set of points the shape of the image encoding. + + Returns: + torch.Tensor: Positional encoding with shape + 1x(embed_dim)x(embedding_h)x(embedding_w) + """ + return self.pe_layer(self.image_embedding_size).unsqueeze(0) + + def _embed_points( + self, + points: torch.Tensor, + labels: torch.Tensor, + pad: bool, + ) -> torch.Tensor: + """Embeds point prompts.""" + points = points + 0.5 # Shift to center of pixel + if pad: + padding_point = torch.zeros((points.shape[0], 1, 2), device=points.device) + padding_label = -torch.ones((labels.shape[0], 1), device=labels.device) + points = torch.cat([points, padding_point], dim=1) + labels = torch.cat([labels, padding_label], dim=1) + point_embedding = self.pe_layer.forward_with_coords(points, self.input_image_size) + point_embedding[labels == -1] = 0.0 + point_embedding[labels == -1] += self.not_a_point_embed.weight + point_embedding[labels == 0] += self.point_embeddings[0].weight + point_embedding[labels == 1] += self.point_embeddings[1].weight + return point_embedding + + def _embed_boxes(self, boxes: torch.Tensor) -> torch.Tensor: + """Embeds box prompts.""" + boxes = boxes + 0.5 # Shift to center of pixel + coords = boxes.reshape(-1, 2, 2) + corner_embedding = self.pe_layer.forward_with_coords(coords, self.input_image_size) + corner_embedding[:, 0, :] += self.point_embeddings[2].weight + corner_embedding[:, 1, :] += self.point_embeddings[3].weight + return corner_embedding + + def _embed_masks(self, masks: torch.Tensor) -> torch.Tensor: + """Embeds mask inputs.""" + return self.mask_downscaling(masks) + + def _get_batch_size( + self, + points: Optional[Tuple[torch.Tensor, torch.Tensor]], + boxes: Optional[torch.Tensor], + masks: Optional[torch.Tensor], + ) -> int: + """ + Gets the batch size of the output given the batch size of the input prompts. + """ + if points is not None: + return points[0].shape[0] + elif boxes is not None: + return boxes.shape[0] + elif masks is not None: + return masks.shape[0] + else: + return 1 + + def _get_device(self) -> torch.device: + return self.point_embeddings[0].weight.device + + def forward( + self, + points: Optional[Tuple[torch.Tensor, torch.Tensor]], + boxes: Optional[torch.Tensor], + masks: Optional[torch.Tensor], + ) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Embeds different types of prompts, returning both sparse and dense + embeddings. 
+ + Arguments: + points (tuple(torch.Tensor, torch.Tensor), None): point coordinates + and labels to embed. + boxes (torch.Tensor, None): boxes to embed + masks (torch.Tensor, None): masks to embed + + Returns: + torch.Tensor: sparse embeddings for the points and boxes, with shape + BxNx(embed_dim), where N is determined by the number of input points + and boxes. + torch.Tensor: dense embeddings for the masks, in the shape + Bx(embed_dim)x(embed_H)x(embed_W) + """ + bs = self._get_batch_size(points, boxes, masks) + sparse_embeddings = torch.empty((bs, 0, self.embed_dim), device=self._get_device()) + if points is not None: + coords, labels = points + point_embeddings = self._embed_points(coords, labels, pad=(boxes is None)) + sparse_embeddings = torch.cat([sparse_embeddings, point_embeddings], dim=1) + if boxes is not None: + box_embeddings = self._embed_boxes(boxes) + sparse_embeddings = torch.cat([sparse_embeddings, box_embeddings], dim=1) + + if masks is not None: + dense_embeddings = self._embed_masks(masks) + else: + dense_embeddings = self.no_mask_embed.weight.reshape(1, -1, 1, + 1).expand(bs, -1, self.image_embedding_size[0], + self.image_embedding_size[1]) + + return sparse_embeddings, dense_embeddings + + +class PositionEmbeddingRandom(nn.Module): + """ + Positional encoding using random spatial frequencies. + """ + + def __init__(self, num_pos_feats: int = 64, scale: Optional[float] = None) -> None: + super().__init__() + if scale is None or scale <= 0.0: + scale = 1.0 + self.register_buffer( + 'positional_encoding_gaussian_matrix', + scale * torch.randn((2, num_pos_feats)), + ) + + def _pe_encoding(self, coords: torch.Tensor) -> torch.Tensor: + """Positionally encode points that are normalized to [0,1].""" + # assuming coords are in [0, 1]^2 square and have d_1 x ... x d_n x 2 shape + coords = 2 * coords - 1 + coords = coords @ self.positional_encoding_gaussian_matrix + coords = 2 * np.pi * coords + # outputs d_1 x ... x d_n x C shape + return torch.cat([torch.sin(coords), torch.cos(coords)], dim=-1) + + def forward(self, size: Tuple[int, int]) -> torch.Tensor: + """Generate positional encoding for a grid of the specified size.""" + h, w = size + device: Any = self.positional_encoding_gaussian_matrix.device + grid = torch.ones((h, w), device=device, dtype=torch.float32) + y_embed = grid.cumsum(dim=0) - 0.5 + x_embed = grid.cumsum(dim=1) - 0.5 + y_embed = y_embed / h + x_embed = x_embed / w + + pe = self._pe_encoding(torch.stack([x_embed, y_embed], dim=-1)) + return pe.permute(2, 0, 1) # C x H x W + + def forward_with_coords(self, coords_input: torch.Tensor, image_size: Tuple[int, int]) -> torch.Tensor: + """Positionally encode points that are not normalized to [0,1].""" + coords = coords_input.clone() + coords[:, :, 0] = coords[:, :, 0] / image_size[1] + coords[:, :, 1] = coords[:, :, 1] / image_size[0] + return self._pe_encoding(coords.to(torch.float)) # B x N x C + + +class Block(nn.Module): + """Transformer blocks with support of window attention and residual propagation blocks""" + + def __init__( + self, + dim: int, + num_heads: int, + mlp_ratio: float = 4.0, + qkv_bias: bool = True, + norm_layer: Type[nn.Module] = nn.LayerNorm, + act_layer: Type[nn.Module] = nn.GELU, + use_rel_pos: bool = False, + rel_pos_zero_init: bool = True, + window_size: int = 0, + input_size: Optional[Tuple[int, int]] = None, + ) -> None: + """ + Args: + dim (int): Number of input channels. + num_heads (int): Number of attention heads in each ViT block. 
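A small sketch of the sparse/dense embeddings produced by `PromptEncoder.forward` above, using deliberately tiny dimensions (not a SAM preset): one foreground point plus one box yields three sparse tokens, and with no mask prompt the dense embedding falls back to the learned no-mask embedding.

```python
import torch

from ultralytics.vit.sam.modules.encoders import PromptEncoder

pe = PromptEncoder(embed_dim=32, image_embedding_size=(4, 4), input_image_size=(64, 64), mask_in_chans=16)

points = torch.tensor([[[10.0, 20.0]]])          # (B=1, N=1, 2) pixel coordinates
labels = torch.ones(1, 1)                        # 1 = foreground point
boxes = torch.tensor([[8.0, 8.0, 48.0, 48.0]])   # (B=1, 4) XYXY

sparse, dense = pe(points=(points, labels), boxes=boxes, masks=None)
print(sparse.shape)  # torch.Size([1, 3, 32]) -> 1 point token + 2 box-corner tokens
print(dense.shape)   # torch.Size([1, 32, 4, 4]) -> no-mask embedding tiled over the grid
```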
+ mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. + qkv_bias (bool): If True, add a learnable bias to query, key, value. + norm_layer (nn.Module): Normalization layer. + act_layer (nn.Module): Activation layer. + use_rel_pos (bool): If True, add relative positional embeddings to the attention map. + rel_pos_zero_init (bool): If True, zero initialize relative positional parameters. + window_size (int): Window size for window attention blocks. If it equals 0, then + use global attention. + input_size (tuple(int, int), None): Input resolution for calculating the relative + positional parameter size. + """ + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = Attention( + dim, + num_heads=num_heads, + qkv_bias=qkv_bias, + use_rel_pos=use_rel_pos, + rel_pos_zero_init=rel_pos_zero_init, + input_size=input_size if window_size == 0 else (window_size, window_size), + ) + + self.norm2 = norm_layer(dim) + self.mlp = MLPBlock(embedding_dim=dim, mlp_dim=int(dim * mlp_ratio), act=act_layer) + + self.window_size = window_size + + def forward(self, x: torch.Tensor) -> torch.Tensor: + shortcut = x + x = self.norm1(x) + # Window partition + if self.window_size > 0: + H, W = x.shape[1], x.shape[2] + x, pad_hw = window_partition(x, self.window_size) + + x = self.attn(x) + # Reverse window partition + if self.window_size > 0: + x = window_unpartition(x, self.window_size, pad_hw, (H, W)) + + x = shortcut + x + x = x + self.mlp(self.norm2(x)) + + return x + + +class Attention(nn.Module): + """Multi-head Attention block with relative position embeddings.""" + + def __init__( + self, + dim: int, + num_heads: int = 8, + qkv_bias: bool = True, + use_rel_pos: bool = False, + rel_pos_zero_init: bool = True, + input_size: Optional[Tuple[int, int]] = None, + ) -> None: + """ + Args: + dim (int): Number of input channels. + num_heads (int): Number of attention heads. + qkv_bias (bool): If True, add a learnable bias to query, key, value. + rel_pos_zero_init (bool): If True, zero initialize relative positional parameters. + input_size (tuple(int, int), None): Input resolution for calculating the relative + positional parameter size. + """ + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = head_dim ** -0.5 + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.proj = nn.Linear(dim, dim) + + self.use_rel_pos = use_rel_pos + if self.use_rel_pos: + assert (input_size is not None), 'Input size must be provided if using relative positional encoding.' + # initialize relative positional embeddings + self.rel_pos_h = nn.Parameter(torch.zeros(2 * input_size[0] - 1, head_dim)) + self.rel_pos_w = nn.Parameter(torch.zeros(2 * input_size[1] - 1, head_dim)) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + B, H, W, _ = x.shape + # qkv with shape (3, B, nHead, H * W, C) + qkv = self.qkv(x).reshape(B, H * W, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) + # q, k, v with shape (B * nHead, H * W, C) + q, k, v = qkv.reshape(3, B * self.num_heads, H * W, -1).unbind(0) + + attn = (q * self.scale) @ k.transpose(-2, -1) + + if self.use_rel_pos: + attn = add_decomposed_rel_pos(attn, q, self.rel_pos_h, self.rel_pos_w, (H, W), (H, W)) + + attn = attn.softmax(dim=-1) + x = (attn @ v).view(B, self.num_heads, H, W, -1).permute(0, 2, 3, 1, 4).reshape(B, H, W, -1) + x = self.proj(x) + + return x + + +def window_partition(x: torch.Tensor, window_size: int) -> Tuple[torch.Tensor, Tuple[int, int]]: + """ + Partition into non-overlapping windows with padding if needed. 
+ Args: + x (tensor): input tokens with [B, H, W, C]. + window_size (int): window size. + + Returns: + windows: windows after partition with [B * num_windows, window_size, window_size, C]. + (Hp, Wp): padded height and width before partition + """ + B, H, W, C = x.shape + + pad_h = (window_size - H % window_size) % window_size + pad_w = (window_size - W % window_size) % window_size + if pad_h > 0 or pad_w > 0: + x = F.pad(x, (0, 0, 0, pad_w, 0, pad_h)) + Hp, Wp = H + pad_h, W + pad_w + + x = x.view(B, Hp // window_size, window_size, Wp // window_size, window_size, C) + windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C) + return windows, (Hp, Wp) + + +def window_unpartition(windows: torch.Tensor, window_size: int, pad_hw: Tuple[int, int], + hw: Tuple[int, int]) -> torch.Tensor: + """ + Window unpartition into original sequences and removing padding. + Args: + windows (tensor): input tokens with [B * num_windows, window_size, window_size, C]. + window_size (int): window size. + pad_hw (Tuple): padded height and width (Hp, Wp). + hw (Tuple): original height and width (H, W) before padding. + + Returns: + x: unpartitioned sequences with [B, H, W, C]. + """ + Hp, Wp = pad_hw + H, W = hw + B = windows.shape[0] // (Hp * Wp // window_size // window_size) + x = windows.view(B, Hp // window_size, Wp // window_size, window_size, window_size, -1) + x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, Hp, Wp, -1) + + if Hp > H or Wp > W: + x = x[:, :H, :W, :].contiguous() + return x + + +def get_rel_pos(q_size: int, k_size: int, rel_pos: torch.Tensor) -> torch.Tensor: + """ + Get relative positional embeddings according to the relative positions of + query and key sizes. + Args: + q_size (int): size of query q. + k_size (int): size of key k. + rel_pos (Tensor): relative position embeddings (L, C). + + Returns: + Extracted positional embeddings according to relative positions. + """ + max_rel_dist = int(2 * max(q_size, k_size) - 1) + # Interpolate rel pos if needed. + if rel_pos.shape[0] != max_rel_dist: + # Interpolate rel pos. + rel_pos_resized = F.interpolate( + rel_pos.reshape(1, rel_pos.shape[0], -1).permute(0, 2, 1), + size=max_rel_dist, + mode='linear', + ) + rel_pos_resized = rel_pos_resized.reshape(-1, max_rel_dist).permute(1, 0) + else: + rel_pos_resized = rel_pos + + # Scale the coords with short length if shapes for q and k are different. + q_coords = torch.arange(q_size)[:, None] * max(k_size / q_size, 1.0) + k_coords = torch.arange(k_size)[None, :] * max(q_size / k_size, 1.0) + relative_coords = (q_coords - k_coords) + (k_size - 1) * max(q_size / k_size, 1.0) + + return rel_pos_resized[relative_coords.long()] + + +def add_decomposed_rel_pos( + attn: torch.Tensor, + q: torch.Tensor, + rel_pos_h: torch.Tensor, + rel_pos_w: torch.Tensor, + q_size: Tuple[int, int], + k_size: Tuple[int, int], +) -> torch.Tensor: + """ + Calculate decomposed Relative Positional Embeddings from :paper:`mvitv2`. + https://github.com/facebookresearch/mvit/blob/19786631e330df9f3622e5402b4a419a263a2c80/mvit/models/attention.py # noqa B950 + Args: + attn (Tensor): attention map. + q (Tensor): query q in the attention layer with shape (B, q_h * q_w, C). + rel_pos_h (Tensor): relative position embeddings (Lh, C) for height axis. + rel_pos_w (Tensor): relative position embeddings (Lw, C) for width axis. + q_size (Tuple): spatial sequence size of query q with (q_h, q_w). + k_size (Tuple): spatial sequence size of key k with (k_h, k_w). 
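A round-trip check for `window_partition` / `window_unpartition` above, showing that the padding added for inputs not divisible by the window size is removed again on the way back.

```python
import torch

from ultralytics.vit.sam.modules.encoders import window_partition, window_unpartition

x = torch.randn(2, 10, 10, 32)                # (B, H, W, C), H and W not multiples of the window
windows, pad_hw = window_partition(x, window_size=7)
print(windows.shape, pad_hw)                  # torch.Size([8, 7, 7, 32]) and padded size (14, 14)

y = window_unpartition(windows, 7, pad_hw, (10, 10))
assert torch.equal(x, y)                      # padding is stripped, original tensor recovered
```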
+ + Returns: + attn (Tensor): attention map with added relative positional embeddings. + """ + q_h, q_w = q_size + k_h, k_w = k_size + Rh = get_rel_pos(q_h, k_h, rel_pos_h) + Rw = get_rel_pos(q_w, k_w, rel_pos_w) + + B, _, dim = q.shape + r_q = q.reshape(B, q_h, q_w, dim) + rel_h = torch.einsum('bhwc,hkc->bhwk', r_q, Rh) + rel_w = torch.einsum('bhwc,wkc->bhwk', r_q, Rw) + + attn = (attn.view(B, q_h, q_w, k_h, k_w) + rel_h[:, :, :, :, None] + rel_w[:, :, :, None, :]).view( + B, q_h * q_w, k_h * k_w) + + return attn + + +class PatchEmbed(nn.Module): + """ + Image to Patch Embedding. + """ + + def __init__( + self, + kernel_size: Tuple[int, int] = (16, 16), + stride: Tuple[int, int] = (16, 16), + padding: Tuple[int, int] = (0, 0), + in_chans: int = 3, + embed_dim: int = 768, + ) -> None: + """ + Args: + kernel_size (Tuple): kernel size of the projection layer. + stride (Tuple): stride of the projection layer. + padding (Tuple): padding size of the projection layer. + in_chans (int): Number of input image channels. + embed_dim (int): Patch embedding dimension. + """ + super().__init__() + + self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=kernel_size, stride=stride, padding=padding) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = self.proj(x) + # B C H W -> B H W C + x = x.permute(0, 2, 3, 1) + return x diff --git a/ultralytics/vit/sam/modules/sam.py b/ultralytics/vit/sam/modules/sam.py new file mode 100644 index 0000000..49f4bfc --- /dev/null +++ b/ultralytics/vit/sam/modules/sam.py @@ -0,0 +1,173 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +from typing import Any, Dict, List, Tuple + +import torch +from torch import nn +from torch.nn import functional as F + +from .decoders import MaskDecoder +from .encoders import ImageEncoderViT, PromptEncoder + + +class Sam(nn.Module): + mask_threshold: float = 0.0 + image_format: str = 'RGB' + + def __init__(self, + image_encoder: ImageEncoderViT, + prompt_encoder: PromptEncoder, + mask_decoder: MaskDecoder, + pixel_mean: List[float] = None, + pixel_std: List[float] = None) -> None: + """ + SAM predicts object masks from an image and input prompts. + + Arguments: + image_encoder (ImageEncoderViT): The backbone used to encode the + image into image embeddings that allow for efficient mask prediction. + prompt_encoder (PromptEncoder): Encodes various types of input prompts. + mask_decoder (MaskDecoder): Predicts masks from the image embeddings + and encoded prompts. + pixel_mean (list(float)): Mean values for normalizing pixels in the input image. + pixel_std (list(float)): Std values for normalizing pixels in the input image. + """ + if pixel_mean is None: + pixel_mean = [123.675, 116.28, 103.53] + if pixel_std is None: + pixel_std = [58.395, 57.12, 57.375] + super().__init__() + self.image_encoder = image_encoder + self.prompt_encoder = prompt_encoder + self.mask_decoder = mask_decoder + self.register_buffer('pixel_mean', torch.Tensor(pixel_mean).view(-1, 1, 1), False) + self.register_buffer('pixel_std', torch.Tensor(pixel_std).view(-1, 1, 1), False) + + @property + def device(self) -> Any: + return self.pixel_mean.device + + @torch.no_grad() + def forward( + self, + batched_input: List[Dict[str, Any]], + multimask_output: bool, + ) -> List[Dict[str, torch.Tensor]]: + """ + Predicts masks end-to-end from provided images and prompts. 
+ If prompts are not known in advance, using SamPredictor is + recommended over calling the model directly. + + Arguments: + batched_input (list(dict)): A list over input images, each a + dictionary with the following keys. A prompt key can be + excluded if it is not present. + 'image': The image as a torch tensor in 3xHxW format, + already transformed for input to the model. + 'original_size': (tuple(int, int)) The original size of + the image before transformation, as (H, W). + 'point_coords': (torch.Tensor) Batched point prompts for + this image, with shape BxNx2. Already transformed to the + input frame of the model. + 'point_labels': (torch.Tensor) Batched labels for point prompts, + with shape BxN. + 'boxes': (torch.Tensor) Batched box inputs, with shape Bx4. + Already transformed to the input frame of the model. + 'mask_inputs': (torch.Tensor) Batched mask inputs to the model, + in the form Bx1xHxW. + multimask_output (bool): Whether the model should predict multiple + disambiguating masks, or return a single mask. + + Returns: + (list(dict)): A list over input images, where each element is + as dictionary with the following keys. + 'masks': (torch.Tensor) Batched binary mask predictions, + with shape BxCxHxW, where B is the number of input prompts, + C is determined by multimask_output, and (H, W) is the + original size of the image. + 'iou_predictions': (torch.Tensor) The model's predictions + of mask quality, in shape BxC. + 'low_res_logits': (torch.Tensor) Low resolution logits with + shape BxCxHxW, where H=W=256. Can be passed as mask input + to subsequent iterations of prediction. + """ + input_images = torch.stack([self.preprocess(x['image']) for x in batched_input], dim=0) + image_embeddings = self.image_encoder(input_images) + + outputs = [] + for image_record, curr_embedding in zip(batched_input, image_embeddings): + if 'point_coords' in image_record: + points = (image_record['point_coords'], image_record['point_labels']) + else: + points = None + sparse_embeddings, dense_embeddings = self.prompt_encoder( + points=points, + boxes=image_record.get('boxes', None), + masks=image_record.get('mask_inputs', None), + ) + low_res_masks, iou_predictions = self.mask_decoder( + image_embeddings=curr_embedding.unsqueeze(0), + image_pe=self.prompt_encoder.get_dense_pe(), + sparse_prompt_embeddings=sparse_embeddings, + dense_prompt_embeddings=dense_embeddings, + multimask_output=multimask_output, + ) + masks = self.postprocess_masks( + low_res_masks, + input_size=image_record['image'].shape[-2:], + original_size=image_record['original_size'], + ) + masks = masks > self.mask_threshold + outputs.append({ + 'masks': masks, + 'iou_predictions': iou_predictions, + 'low_res_logits': low_res_masks, }) + return outputs + + def postprocess_masks( + self, + masks: torch.Tensor, + input_size: Tuple[int, ...], + original_size: Tuple[int, ...], + ) -> torch.Tensor: + """ + Remove padding and upscale masks to the original image size. + + Arguments: + masks (torch.Tensor): Batched masks from the mask_decoder, + in BxCxHxW format. + input_size (tuple(int, int)): The size of the image input to the + model, in (H, W) format. Used to remove padding. + original_size (tuple(int, int)): The original size of the image + before resizing for input to the model, in (H, W) format. + + Returns: + (torch.Tensor): Batched masks in BxCxHxW format, where (H, W) + is given by original_size. 
+ """ + masks = F.interpolate( + masks, + (self.image_encoder.img_size, self.image_encoder.img_size), + mode='bilinear', + align_corners=False, + ) + masks = masks[..., :input_size[0], :input_size[1]] + masks = F.interpolate(masks, original_size, mode='bilinear', align_corners=False) + return masks + + def preprocess(self, x: torch.Tensor) -> torch.Tensor: + """Normalize pixel values and pad to a square input.""" + # Normalize colors + x = (x - self.pixel_mean) / self.pixel_std + + # Pad + h, w = x.shape[-2:] + padh = self.image_encoder.img_size - h + padw = self.image_encoder.img_size - w + return F.pad(x, (0, padw, 0, padh)) diff --git a/ultralytics/vit/sam/modules/tiny_encoder.py b/ultralytics/vit/sam/modules/tiny_encoder.py new file mode 100644 index 0000000..e3f5101 --- /dev/null +++ b/ultralytics/vit/sam/modules/tiny_encoder.py @@ -0,0 +1,653 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +# -------------------------------------------------------- +# TinyViT Model Architecture +# Copyright (c) 2022 Microsoft +# Adapted from LeViT and Swin Transformer +# LeViT: (https://github.com/facebookresearch/levit) +# Swin: (https://github.com/microsoft/swin-transformer) +# Build the TinyViT Model +# -------------------------------------------------------- + +import itertools +from typing import Tuple + +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.checkpoint as checkpoint + +from ultralytics.yolo.utils.instance import to_2tuple + + +class Conv2d_BN(torch.nn.Sequential): + + def __init__(self, a, b, ks=1, stride=1, pad=0, dilation=1, groups=1, bn_weight_init=1): + super().__init__() + self.add_module('c', torch.nn.Conv2d(a, b, ks, stride, pad, dilation, groups, bias=False)) + bn = torch.nn.BatchNorm2d(b) + torch.nn.init.constant_(bn.weight, bn_weight_init) + torch.nn.init.constant_(bn.bias, 0) + self.add_module('bn', bn) + + @torch.no_grad() + def fuse(self): + c, bn = self._modules.values() + w = bn.weight / (bn.running_var + bn.eps) ** 0.5 + w = c.weight * w[:, None, None, None] + b = bn.bias - bn.running_mean * bn.weight / \ + (bn.running_var + bn.eps)**0.5 + m = torch.nn.Conv2d(w.size(1) * self.c.groups, + w.size(0), + w.shape[2:], + stride=self.c.stride, + padding=self.c.padding, + dilation=self.c.dilation, + groups=self.c.groups) + m.weight.data.copy_(w) + m.bias.data.copy_(b) + return m + + +# NOTE: This module and timm package is needed only for training. 
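A sketch of the resize/pad bookkeeping done by `Sam.preprocess` and `Sam.postprocess_masks` above. It builds an untrained Mobile-SAM (no checkpoint download) purely to exercise the two methods; the 640x480 example numbers are illustrative, and the 256x256 low-resolution logits stand in for the mask decoder output.

```python
import torch

from ultralytics.vit.sam.build import build_mobile_sam

sam = build_mobile_sam(checkpoint=None).eval()

# A 480x640 image resized so its long side matches img_size=1024 arrives as 768x1024;
# preprocess normalizes it and pads the short side to a 1024x1024 square.
img = torch.randint(0, 256, (3, 768, 1024), dtype=torch.float32)
print(sam.preprocess(img).shape)  # torch.Size([3, 1024, 1024])

# Low-res mask logits are upscaled to the padded square, cropped to the resized
# input, then interpolated back to the original image size.
low_res = torch.randn(1, 1, 256, 256)
masks = sam.postprocess_masks(low_res, input_size=(768, 1024), original_size=(480, 640))
print(masks.shape)  # torch.Size([1, 1, 480, 640])
```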
+# from ultralytics.yolo.utils.checks import check_requirements +# check_requirements('timm') +# from timm.models.layers import DropPath as TimmDropPath +# from timm.models.layers import trunc_normal_ +# class DropPath(TimmDropPath): +# +# def __init__(self, drop_prob=None): +# super().__init__(drop_prob=drop_prob) +# self.drop_prob = drop_prob +# +# def __repr__(self): +# msg = super().__repr__() +# msg += f'(drop_prob={self.drop_prob})' +# return msg + + +class PatchEmbed(nn.Module): + + def __init__(self, in_chans, embed_dim, resolution, activation): + super().__init__() + img_size: Tuple[int, int] = to_2tuple(resolution) + self.patches_resolution = (img_size[0] // 4, img_size[1] // 4) + self.num_patches = self.patches_resolution[0] * \ + self.patches_resolution[1] + self.in_chans = in_chans + self.embed_dim = embed_dim + n = embed_dim + self.seq = nn.Sequential( + Conv2d_BN(in_chans, n // 2, 3, 2, 1), + activation(), + Conv2d_BN(n // 2, n, 3, 2, 1), + ) + + def forward(self, x): + return self.seq(x) + + +class MBConv(nn.Module): + + def __init__(self, in_chans, out_chans, expand_ratio, activation, drop_path): + super().__init__() + self.in_chans = in_chans + self.hidden_chans = int(in_chans * expand_ratio) + self.out_chans = out_chans + + self.conv1 = Conv2d_BN(in_chans, self.hidden_chans, ks=1) + self.act1 = activation() + + self.conv2 = Conv2d_BN(self.hidden_chans, self.hidden_chans, ks=3, stride=1, pad=1, groups=self.hidden_chans) + self.act2 = activation() + + self.conv3 = Conv2d_BN(self.hidden_chans, out_chans, ks=1, bn_weight_init=0.0) + self.act3 = activation() + + # NOTE: `DropPath` is needed only for training. + # self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + self.drop_path = nn.Identity() + + def forward(self, x): + shortcut = x + + x = self.conv1(x) + x = self.act1(x) + + x = self.conv2(x) + x = self.act2(x) + + x = self.conv3(x) + + x = self.drop_path(x) + + x += shortcut + x = self.act3(x) + + return x + + +class PatchMerging(nn.Module): + + def __init__(self, input_resolution, dim, out_dim, activation): + super().__init__() + + self.input_resolution = input_resolution + self.dim = dim + self.out_dim = out_dim + self.act = activation() + self.conv1 = Conv2d_BN(dim, out_dim, 1, 1, 0) + stride_c = 2 + if (out_dim == 320 or out_dim == 448 or out_dim == 576): + stride_c = 1 + self.conv2 = Conv2d_BN(out_dim, out_dim, 3, stride_c, 1, groups=out_dim) + self.conv3 = Conv2d_BN(out_dim, out_dim, 1, 1, 0) + + def forward(self, x): + if x.ndim == 3: + H, W = self.input_resolution + B = len(x) + # (B, C, H, W) + x = x.view(B, H, W, -1).permute(0, 3, 1, 2) + + x = self.conv1(x) + x = self.act(x) + + x = self.conv2(x) + x = self.act(x) + x = self.conv3(x) + x = x.flatten(2).transpose(1, 2) + return x + + +class ConvLayer(nn.Module): + + def __init__( + self, + dim, + input_resolution, + depth, + activation, + drop_path=0., + downsample=None, + use_checkpoint=False, + out_dim=None, + conv_expand_ratio=4., + ): + + super().__init__() + self.dim = dim + self.input_resolution = input_resolution + self.depth = depth + self.use_checkpoint = use_checkpoint + + # build blocks + self.blocks = nn.ModuleList([ + MBConv( + dim, + dim, + conv_expand_ratio, + activation, + drop_path[i] if isinstance(drop_path, list) else drop_path, + ) for i in range(depth)]) + + # patch merging layer + if downsample is not None: + self.downsample = downsample(input_resolution, dim=dim, out_dim=out_dim, activation=activation) + else: + self.downsample = None + + def forward(self, x): + 
for blk in self.blocks: + if self.use_checkpoint: + x = checkpoint.checkpoint(blk, x) + else: + x = blk(x) + if self.downsample is not None: + x = self.downsample(x) + return x + + +class Mlp(nn.Module): + + def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.norm = nn.LayerNorm(in_features) + self.fc1 = nn.Linear(in_features, hidden_features) + self.fc2 = nn.Linear(hidden_features, out_features) + self.act = act_layer() + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.norm(x) + + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +class Attention(torch.nn.Module): + + def __init__( + self, + dim, + key_dim, + num_heads=8, + attn_ratio=4, + resolution=(14, 14), + ): + super().__init__() + # (h, w) + assert isinstance(resolution, tuple) and len(resolution) == 2 + self.num_heads = num_heads + self.scale = key_dim ** -0.5 + self.key_dim = key_dim + self.nh_kd = nh_kd = key_dim * num_heads + self.d = int(attn_ratio * key_dim) + self.dh = int(attn_ratio * key_dim) * num_heads + self.attn_ratio = attn_ratio + h = self.dh + nh_kd * 2 + + self.norm = nn.LayerNorm(dim) + self.qkv = nn.Linear(dim, h) + self.proj = nn.Linear(self.dh, dim) + + points = list(itertools.product(range(resolution[0]), range(resolution[1]))) + N = len(points) + attention_offsets = {} + idxs = [] + for p1 in points: + for p2 in points: + offset = (abs(p1[0] - p2[0]), abs(p1[1] - p2[1])) + if offset not in attention_offsets: + attention_offsets[offset] = len(attention_offsets) + idxs.append(attention_offsets[offset]) + self.attention_biases = torch.nn.Parameter(torch.zeros(num_heads, len(attention_offsets))) + self.register_buffer('attention_bias_idxs', torch.LongTensor(idxs).view(N, N), persistent=False) + + @torch.no_grad() + def train(self, mode=True): + super().train(mode) + if mode and hasattr(self, 'ab'): + del self.ab + else: + self.ab = self.attention_biases[:, self.attention_bias_idxs] + + def forward(self, x): # x (B,N,C) + B, N, _ = x.shape + + # Normalization + x = self.norm(x) + + qkv = self.qkv(x) + # (B, N, num_heads, d) + q, k, v = qkv.view(B, N, self.num_heads, -1).split([self.key_dim, self.key_dim, self.d], dim=3) + # (B, num_heads, N, d) + q = q.permute(0, 2, 1, 3) + k = k.permute(0, 2, 1, 3) + v = v.permute(0, 2, 1, 3) + self.ab = self.ab.to(self.attention_biases.device) + + attn = ((q @ k.transpose(-2, -1)) * self.scale + + (self.attention_biases[:, self.attention_bias_idxs] if self.training else self.ab)) + attn = attn.softmax(dim=-1) + x = (attn @ v).transpose(1, 2).reshape(B, N, self.dh) + x = self.proj(x) + return x + + +class TinyViTBlock(nn.Module): + r""" TinyViT Block. + + Args: + dim (int): Number of input channels. + input_resolution (tuple[int, int]): Input resolution. + num_heads (int): Number of attention heads. + window_size (int): Window size. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. + drop (float, optional): Dropout rate. Default: 0.0 + drop_path (float, optional): Stochastic depth rate. Default: 0.0 + local_conv_size (int): the kernel size of the convolution between + Attention and MLP. Default: 3 + activation (torch.nn): the activation function. 
Default: nn.GELU + """ + + def __init__( + self, + dim, + input_resolution, + num_heads, + window_size=7, + mlp_ratio=4., + drop=0., + drop_path=0., + local_conv_size=3, + activation=nn.GELU, + ): + super().__init__() + self.dim = dim + self.input_resolution = input_resolution + self.num_heads = num_heads + assert window_size > 0, 'window_size must be greater than 0' + self.window_size = window_size + self.mlp_ratio = mlp_ratio + + # NOTE: `DropPath` is needed only for training. + # self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + self.drop_path = nn.Identity() + + assert dim % num_heads == 0, 'dim must be divisible by num_heads' + head_dim = dim // num_heads + + window_resolution = (window_size, window_size) + self.attn = Attention(dim, head_dim, num_heads, attn_ratio=1, resolution=window_resolution) + + mlp_hidden_dim = int(dim * mlp_ratio) + mlp_activation = activation + self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=mlp_activation, drop=drop) + + pad = local_conv_size // 2 + self.local_conv = Conv2d_BN(dim, dim, ks=local_conv_size, stride=1, pad=pad, groups=dim) + + def forward(self, x): + H, W = self.input_resolution + B, L, C = x.shape + assert L == H * W, 'input feature has wrong size' + res_x = x + if H == self.window_size and W == self.window_size: + x = self.attn(x) + else: + x = x.view(B, H, W, C) + pad_b = (self.window_size - H % self.window_size) % self.window_size + pad_r = (self.window_size - W % self.window_size) % self.window_size + padding = pad_b > 0 or pad_r > 0 + + if padding: + x = F.pad(x, (0, 0, 0, pad_r, 0, pad_b)) + + pH, pW = H + pad_b, W + pad_r + nH = pH // self.window_size + nW = pW // self.window_size + # window partition + x = x.view(B, nH, self.window_size, nW, self.window_size, + C).transpose(2, 3).reshape(B * nH * nW, self.window_size * self.window_size, C) + x = self.attn(x) + # window reverse + x = x.view(B, nH, nW, self.window_size, self.window_size, C).transpose(2, 3).reshape(B, pH, pW, C) + + if padding: + x = x[:, :H, :W].contiguous() + + x = x.view(B, L, C) + + x = res_x + self.drop_path(x) + + x = x.transpose(1, 2).reshape(B, C, H, W) + x = self.local_conv(x) + x = x.view(B, C, L).transpose(1, 2) + + x = x + self.drop_path(self.mlp(x)) + return x + + def extra_repr(self) -> str: + return f'dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, ' \ + f'window_size={self.window_size}, mlp_ratio={self.mlp_ratio}' + + +class BasicLayer(nn.Module): + """ A basic TinyViT layer for one stage. + + Args: + dim (int): Number of input channels. + input_resolution (tuple[int]): Input resolution. + depth (int): Number of blocks. + num_heads (int): Number of attention heads. + window_size (int): Local window size. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. + drop (float, optional): Dropout rate. Default: 0.0 + drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0 + downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None + use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False. + local_conv_size (int): the kernel size of the depthwise convolution between attention and MLP. Default: 3 + activation (torch.nn): the activation function. Default: nn.GELU + out_dim (int | optional): the output dimension of the layer. 
Default: None + """ + + def __init__( + self, + dim, + input_resolution, + depth, + num_heads, + window_size, + mlp_ratio=4., + drop=0., + drop_path=0., + downsample=None, + use_checkpoint=False, + local_conv_size=3, + activation=nn.GELU, + out_dim=None, + ): + + super().__init__() + self.dim = dim + self.input_resolution = input_resolution + self.depth = depth + self.use_checkpoint = use_checkpoint + + # build blocks + self.blocks = nn.ModuleList([ + TinyViTBlock( + dim=dim, + input_resolution=input_resolution, + num_heads=num_heads, + window_size=window_size, + mlp_ratio=mlp_ratio, + drop=drop, + drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path, + local_conv_size=local_conv_size, + activation=activation, + ) for i in range(depth)]) + + # patch merging layer + if downsample is not None: + self.downsample = downsample(input_resolution, dim=dim, out_dim=out_dim, activation=activation) + else: + self.downsample = None + + def forward(self, x): + for blk in self.blocks: + if self.use_checkpoint: + x = checkpoint.checkpoint(blk, x) + else: + x = blk(x) + if self.downsample is not None: + x = self.downsample(x) + return x + + def extra_repr(self) -> str: + return f'dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}' + + +class LayerNorm2d(nn.Module): + + def __init__(self, num_channels: int, eps: float = 1e-6) -> None: + super().__init__() + self.weight = nn.Parameter(torch.ones(num_channels)) + self.bias = nn.Parameter(torch.zeros(num_channels)) + self.eps = eps + + def forward(self, x: torch.Tensor) -> torch.Tensor: + u = x.mean(1, keepdim=True) + s = (x - u).pow(2).mean(1, keepdim=True) + x = (x - u) / torch.sqrt(s + self.eps) + x = self.weight[:, None, None] * x + self.bias[:, None, None] + return x + + +class TinyViT(nn.Module): + + def __init__( + self, + img_size=224, + in_chans=3, + num_classes=1000, + embed_dims=[96, 192, 384, 768], + depths=[2, 2, 6, 2], + num_heads=[3, 6, 12, 24], + window_sizes=[7, 7, 14, 7], + mlp_ratio=4., + drop_rate=0., + drop_path_rate=0.1, + use_checkpoint=False, + mbconv_expand_ratio=4.0, + local_conv_size=3, + layer_lr_decay=1.0, + ): + super().__init__() + self.img_size = img_size + self.num_classes = num_classes + self.depths = depths + self.num_layers = len(depths) + self.mlp_ratio = mlp_ratio + + activation = nn.GELU + + self.patch_embed = PatchEmbed(in_chans=in_chans, + embed_dim=embed_dims[0], + resolution=img_size, + activation=activation) + + patches_resolution = self.patch_embed.patches_resolution + self.patches_resolution = patches_resolution + + # stochastic depth + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule + + # build layers + self.layers = nn.ModuleList() + for i_layer in range(self.num_layers): + kwargs = dict( + dim=embed_dims[i_layer], + input_resolution=(patches_resolution[0] // (2 ** (i_layer - 1 if i_layer == 3 else i_layer)), + patches_resolution[1] // (2 ** (i_layer - 1 if i_layer == 3 else i_layer))), + # input_resolution=(patches_resolution[0] // (2 ** i_layer), + # patches_resolution[1] // (2 ** i_layer)), + depth=depths[i_layer], + drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])], + downsample=PatchMerging if (i_layer < self.num_layers - 1) else None, + use_checkpoint=use_checkpoint, + out_dim=embed_dims[min(i_layer + 1, + len(embed_dims) - 1)], + activation=activation, + ) + if i_layer == 0: + layer = ConvLayer( + conv_expand_ratio=mbconv_expand_ratio, + **kwargs, + ) + else: + layer = 
BasicLayer(num_heads=num_heads[i_layer], + window_size=window_sizes[i_layer], + mlp_ratio=self.mlp_ratio, + drop=drop_rate, + local_conv_size=local_conv_size, + **kwargs) + self.layers.append(layer) + + # Classifier head + self.norm_head = nn.LayerNorm(embed_dims[-1]) + self.head = nn.Linear(embed_dims[-1], num_classes) if num_classes > 0 else torch.nn.Identity() + + # init weights + self.apply(self._init_weights) + self.set_layer_lr_decay(layer_lr_decay) + self.neck = nn.Sequential( + nn.Conv2d( + embed_dims[-1], + 256, + kernel_size=1, + bias=False, + ), + LayerNorm2d(256), + nn.Conv2d( + 256, + 256, + kernel_size=3, + padding=1, + bias=False, + ), + LayerNorm2d(256), + ) + + def set_layer_lr_decay(self, layer_lr_decay): + decay_rate = layer_lr_decay + + # layers -> blocks (depth) + depth = sum(self.depths) + lr_scales = [decay_rate ** (depth - i - 1) for i in range(depth)] + + def _set_lr_scale(m, scale): + for p in m.parameters(): + p.lr_scale = scale + + self.patch_embed.apply(lambda x: _set_lr_scale(x, lr_scales[0])) + i = 0 + for layer in self.layers: + for block in layer.blocks: + block.apply(lambda x: _set_lr_scale(x, lr_scales[i])) + i += 1 + if layer.downsample is not None: + layer.downsample.apply(lambda x: _set_lr_scale(x, lr_scales[i - 1])) + assert i == depth + for m in [self.norm_head, self.head]: + m.apply(lambda x: _set_lr_scale(x, lr_scales[-1])) + + for k, p in self.named_parameters(): + p.param_name = k + + def _check_lr_scale(m): + for p in m.parameters(): + assert hasattr(p, 'lr_scale'), p.param_name + + self.apply(_check_lr_scale) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + # NOTE: This initialization is needed only for training. + # trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + @torch.jit.ignore + def no_weight_decay_keywords(self): + return {'attention_biases'} + + def forward_features(self, x): + # x: (N, C, H, W) + x = self.patch_embed(x) + + x = self.layers[0](x) + start_i = 1 + + for i in range(start_i, len(self.layers)): + layer = self.layers[i] + x = layer(x) + B, _, C = x.size() + x = x.view(B, 64, 64, C) + x = x.permute(0, 3, 1, 2) + x = self.neck(x) + return x + + def forward(self, x): + x = self.forward_features(x) + return x diff --git a/ultralytics/vit/sam/modules/transformer.py b/ultralytics/vit/sam/modules/transformer.py new file mode 100644 index 0000000..d5275bf --- /dev/null +++ b/ultralytics/vit/sam/modules/transformer.py @@ -0,0 +1,235 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +import math +from typing import Tuple, Type + +import torch +from torch import Tensor, nn + +from ultralytics.nn.modules import MLPBlock + + +class TwoWayTransformer(nn.Module): + + def __init__( + self, + depth: int, + embedding_dim: int, + num_heads: int, + mlp_dim: int, + activation: Type[nn.Module] = nn.ReLU, + attention_downsample_rate: int = 2, + ) -> None: + """ + A transformer decoder that attends to an input image using + queries whose positional embedding is supplied. + + Args: + depth (int): number of layers in the transformer + embedding_dim (int): the channel dimension for the input embeddings + num_heads (int): the number of heads for multihead attention. 
Must + divide embedding_dim + mlp_dim (int): the channel dimension internal to the MLP block + activation (nn.Module): the activation to use in the MLP block + """ + super().__init__() + self.depth = depth + self.embedding_dim = embedding_dim + self.num_heads = num_heads + self.mlp_dim = mlp_dim + self.layers = nn.ModuleList() + + for i in range(depth): + self.layers.append( + TwoWayAttentionBlock( + embedding_dim=embedding_dim, + num_heads=num_heads, + mlp_dim=mlp_dim, + activation=activation, + attention_downsample_rate=attention_downsample_rate, + skip_first_layer_pe=(i == 0), + )) + + self.final_attn_token_to_image = Attention(embedding_dim, num_heads, downsample_rate=attention_downsample_rate) + self.norm_final_attn = nn.LayerNorm(embedding_dim) + + def forward( + self, + image_embedding: Tensor, + image_pe: Tensor, + point_embedding: Tensor, + ) -> Tuple[Tensor, Tensor]: + """ + Args: + image_embedding (torch.Tensor): image to attend to. Should be shape + B x embedding_dim x h x w for any h and w. + image_pe (torch.Tensor): the positional encoding to add to the image. Must + have the same shape as image_embedding. + point_embedding (torch.Tensor): the embedding to add to the query points. + Must have shape B x N_points x embedding_dim for any N_points. + + Returns: + torch.Tensor: the processed point_embedding + torch.Tensor: the processed image_embedding + """ + # BxCxHxW -> BxHWxC == B x N_image_tokens x C + bs, c, h, w = image_embedding.shape + image_embedding = image_embedding.flatten(2).permute(0, 2, 1) + image_pe = image_pe.flatten(2).permute(0, 2, 1) + + # Prepare queries + queries = point_embedding + keys = image_embedding + + # Apply transformer blocks and final layernorm + for layer in self.layers: + queries, keys = layer( + queries=queries, + keys=keys, + query_pe=point_embedding, + key_pe=image_pe, + ) + + # Apply the final attention layer from the points to the image + q = queries + point_embedding + k = keys + image_pe + attn_out = self.final_attn_token_to_image(q=q, k=k, v=keys) + queries = queries + attn_out + queries = self.norm_final_attn(queries) + + return queries, keys + + +class TwoWayAttentionBlock(nn.Module): + + def __init__( + self, + embedding_dim: int, + num_heads: int, + mlp_dim: int = 2048, + activation: Type[nn.Module] = nn.ReLU, + attention_downsample_rate: int = 2, + skip_first_layer_pe: bool = False, + ) -> None: + """ + A transformer block with four layers: (1) self-attention of sparse + inputs, (2) cross attention of sparse inputs to dense inputs, (3) mlp + block on sparse inputs, and (4) cross attention of dense inputs to sparse + inputs. 
+ + Arguments: + embedding_dim (int): the channel dimension of the embeddings + num_heads (int): the number of heads in the attention layers + mlp_dim (int): the hidden dimension of the mlp block + activation (nn.Module): the activation of the mlp block + skip_first_layer_pe (bool): skip the PE on the first layer + """ + super().__init__() + self.self_attn = Attention(embedding_dim, num_heads) + self.norm1 = nn.LayerNorm(embedding_dim) + + self.cross_attn_token_to_image = Attention(embedding_dim, num_heads, downsample_rate=attention_downsample_rate) + self.norm2 = nn.LayerNorm(embedding_dim) + + self.mlp = MLPBlock(embedding_dim, mlp_dim, activation) + self.norm3 = nn.LayerNorm(embedding_dim) + + self.norm4 = nn.LayerNorm(embedding_dim) + self.cross_attn_image_to_token = Attention(embedding_dim, num_heads, downsample_rate=attention_downsample_rate) + + self.skip_first_layer_pe = skip_first_layer_pe + + def forward(self, queries: Tensor, keys: Tensor, query_pe: Tensor, key_pe: Tensor) -> Tuple[Tensor, Tensor]: + """Apply self-attention and cross-attention to queries and keys and return the processed embeddings.""" + + # Self attention block + if self.skip_first_layer_pe: + queries = self.self_attn(q=queries, k=queries, v=queries) + else: + q = queries + query_pe + attn_out = self.self_attn(q=q, k=q, v=queries) + queries = queries + attn_out + queries = self.norm1(queries) + + # Cross attention block, tokens attending to image embedding + q = queries + query_pe + k = keys + key_pe + attn_out = self.cross_attn_token_to_image(q=q, k=k, v=keys) + queries = queries + attn_out + queries = self.norm2(queries) + + # MLP block + mlp_out = self.mlp(queries) + queries = queries + mlp_out + queries = self.norm3(queries) + + # Cross attention block, image embedding attending to tokens + q = queries + query_pe + k = keys + key_pe + attn_out = self.cross_attn_image_to_token(q=k, k=q, v=queries) + keys = keys + attn_out + keys = self.norm4(keys) + + return queries, keys + + +class Attention(nn.Module): + """ + An attention layer that allows for downscaling the size of the embedding + after projection to queries, keys, and values. + """ + + def __init__( + self, + embedding_dim: int, + num_heads: int, + downsample_rate: int = 1, + ) -> None: + super().__init__() + self.embedding_dim = embedding_dim + self.internal_dim = embedding_dim // downsample_rate + self.num_heads = num_heads + assert self.internal_dim % num_heads == 0, 'num_heads must divide embedding_dim.' 
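        # NOTE: illustrative arithmetic, assuming the values SAM's two-way transformer
        # typically passes in (embedding_dim=256, num_heads=8, downsample_rate=2):
        #   internal_dim = 256 // 2 = 128  ->  128 // 8 = 16 channels per head,
        # and out_proj maps the 128-dim attention output back to embedding_dim=256.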
+ + self.q_proj = nn.Linear(embedding_dim, self.internal_dim) + self.k_proj = nn.Linear(embedding_dim, self.internal_dim) + self.v_proj = nn.Linear(embedding_dim, self.internal_dim) + self.out_proj = nn.Linear(self.internal_dim, embedding_dim) + + def _separate_heads(self, x: Tensor, num_heads: int) -> Tensor: + """Separate the input tensor into the specified number of attention heads.""" + b, n, c = x.shape + x = x.reshape(b, n, num_heads, c // num_heads) + return x.transpose(1, 2) # B x N_heads x N_tokens x C_per_head + + def _recombine_heads(self, x: Tensor) -> Tensor: + """Recombine the separated attention heads into a single tensor.""" + b, n_heads, n_tokens, c_per_head = x.shape + x = x.transpose(1, 2) + return x.reshape(b, n_tokens, n_heads * c_per_head) # B x N_tokens x C + + def forward(self, q: Tensor, k: Tensor, v: Tensor) -> Tensor: + """Compute the attention output given the input query, key, and value tensors.""" + + # Input projections + q = self.q_proj(q) + k = self.k_proj(k) + v = self.v_proj(v) + + # Separate into heads + q = self._separate_heads(q, self.num_heads) + k = self._separate_heads(k, self.num_heads) + v = self._separate_heads(v, self.num_heads) + + # Attention + _, _, _, c_per_head = q.shape + attn = q @ k.permute(0, 1, 3, 2) # B x N_heads x N_tokens x N_tokens + attn = attn / math.sqrt(c_per_head) + attn = torch.softmax(attn, dim=-1) + + # Get output + out = attn @ v + out = self._recombine_heads(out) + out = self.out_proj(out) + + return out diff --git a/ultralytics/vit/sam/predict.py b/ultralytics/vit/sam/predict.py new file mode 100644 index 0000000..47a9d55 --- /dev/null +++ b/ultralytics/vit/sam/predict.py @@ -0,0 +1,396 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +import numpy as np +import torch +import torch.nn.functional as F +import torchvision + +from ultralytics.yolo.data.augment import LetterBox +from ultralytics.yolo.engine.predictor import BasePredictor +from ultralytics.yolo.engine.results import Results +from ultralytics.yolo.utils import DEFAULT_CFG, ops +from ultralytics.yolo.utils.torch_utils import select_device + +from .amg import (batch_iterator, batched_mask_to_box, build_all_layer_point_grids, calculate_stability_score, + generate_crop_boxes, is_box_near_crop_edge, remove_small_regions, uncrop_boxes_xyxy, uncrop_masks) +from .build import build_sam + + +class Predictor(BasePredictor): + + def __init__(self, cfg=DEFAULT_CFG, overrides={}, _callbacks=None): + overrides.update(dict(task='segment', mode='predict', imgsz=1024)) + super().__init__(cfg, overrides, _callbacks) + # SAM needs retina_masks=True, or the results would be a mess. + self.args.retina_masks = True + # Args for set_image + self.im = None + self.features = None + # Args for segment everything + self.segment_all = False + + def preprocess(self, im): + """Prepares input image before inference. + + Args: + im (torch.Tensor | List(np.ndarray)): BCHW for tensor, [(HWC) x B] for list. + """ + if self.im is not None: + return self.im + not_tensor = not isinstance(im, torch.Tensor) + if not_tensor: + im = np.stack(self.pre_transform(im)) + im = im[..., ::-1].transpose((0, 3, 1, 2)) # BGR to RGB, BHWC to BCHW, (n, 3, h, w) + im = np.ascontiguousarray(im) # contiguous + im = torch.from_numpy(im) + + img = im.to(self.device) + img = img.half() if self.model.fp16 else img.float() # uint8 to fp16/32 + if not_tensor: + img = (img - self.mean) / self.std + return img + + def pre_transform(self, im): + """Pre-transform input image before inference. 
+ + Args: + im (List(np.ndarray)): (N, 3, h, w) for tensor, [(h, w, 3) x N] for list. + + Return: A list of transformed imgs. + """ + assert len(im) == 1, 'SAM model has not supported batch inference yet!' + return [LetterBox(self.args.imgsz, auto=False, center=False)(image=x) for x in im] + + def inference(self, im, bboxes=None, points=None, labels=None, masks=None, multimask_output=False, *args, **kwargs): + """ + Predict masks for the given input prompts, using the currently set image. + + Args: + im (torch.Tensor): The preprocessed image, (N, C, H, W). + bboxes (np.ndarray | List, None): (N, 4), in XYXY format. + points (np.ndarray | List, None): (N, 2), Each point is in (X,Y) in pixels. + labels (np.ndarray | List, None): (N, ), labels for the point prompts. + 1 indicates a foreground point and 0 indicates a background point. + masks (np.ndarray, None): A low resolution mask input to the model, typically + coming from a previous prediction iteration. Has form (N, H, W), where + for SAM, H=W=256. + multimask_output (bool): If true, the model will return three masks. + For ambiguous input prompts (such as a single click), this will often + produce better masks than a single prediction. If only a single + mask is needed, the model's predicted quality score can be used + to select the best mask. For non-ambiguous prompts, such as multiple + input prompts, multimask_output=False can give better results. + + Returns: + (np.ndarray): The output masks in CxHxW format, where C is the + number of masks, and (H, W) is the original image size. + (np.ndarray): An array of length C containing the model's + predictions for the quality of each mask. + (np.ndarray): An array of shape CxHxW, where C is the number + of masks and H=W=256. These low resolution logits can be passed to + a subsequent iteration as mask input. + """ + if all([i is None for i in [bboxes, points, masks]]): + return self.generate(im, *args, **kwargs) + return self.prompt_inference(im, bboxes, points, labels, masks, multimask_output) + + def prompt_inference(self, im, bboxes=None, points=None, labels=None, masks=None, multimask_output=False): + """ + Predict masks for the given input prompts, using the currently set image. + + Args: + im (torch.Tensor): The preprocessed image, (N, C, H, W). + bboxes (np.ndarray | List, None): (N, 4), in XYXY format. + points (np.ndarray | List, None): (N, 2), Each point is in (X,Y) in pixels. + labels (np.ndarray | List, None): (N, ), labels for the point prompts. + 1 indicates a foreground point and 0 indicates a background point. + masks (np.ndarray, None): A low resolution mask input to the model, typically + coming from a previous prediction iteration. Has form (N, H, W), where + for SAM, H=W=256. + multimask_output (bool): If true, the model will return three masks. + For ambiguous input prompts (such as a single click), this will often + produce better masks than a single prediction. If only a single + mask is needed, the model's predicted quality score can be used + to select the best mask. For non-ambiguous prompts, such as multiple + input prompts, multimask_output=False can give better results. + + Returns: + (np.ndarray): The output masks in CxHxW format, where C is the + number of masks, and (H, W) is the original image size. + (np.ndarray): An array of length C containing the model's + predictions for the quality of each mask. + (np.ndarray): An array of shape CxHxW, where C is the number + of masks and H=W=256. 
These low resolution logits can be passed to + a subsequent iteration as mask input. + """ + features = self.model.image_encoder(im) if self.features is None else self.features + + src_shape, dst_shape = self.batch[1][0].shape[:2], im.shape[2:] + r = 1.0 if self.segment_all else min(dst_shape[0] / src_shape[0], dst_shape[1] / src_shape[1]) + # Transform input prompts + if points is not None: + points = torch.as_tensor(points, dtype=torch.float32, device=self.device) + points = points[None] if points.ndim == 1 else points + # Assuming labels are all positive if users don't pass labels. + if labels is None: + labels = np.ones(points.shape[0]) + labels = torch.as_tensor(labels, dtype=torch.int32, device=self.device) + points *= r + # (N, 2) --> (N, 1, 2), (N, ) --> (N, 1) + points, labels = points[:, None, :], labels[:, None] + if bboxes is not None: + bboxes = torch.as_tensor(bboxes, dtype=torch.float32, device=self.device) + bboxes = bboxes[None] if bboxes.ndim == 1 else bboxes + bboxes *= r + if masks is not None: + masks = torch.as_tensor(masks, dtype=torch.float32, device=self.device) + masks = masks[:, None, :, :] + + points = (points, labels) if points is not None else None + # Embed prompts + sparse_embeddings, dense_embeddings = self.model.prompt_encoder( + points=points, + boxes=bboxes, + masks=masks, + ) + + # Predict masks + pred_masks, pred_scores = self.model.mask_decoder( + image_embeddings=features, + image_pe=self.model.prompt_encoder.get_dense_pe(), + sparse_prompt_embeddings=sparse_embeddings, + dense_prompt_embeddings=dense_embeddings, + multimask_output=multimask_output, + ) + + # (N, d, H, W) --> (N*d, H, W), (N, d) --> (N*d, ) + # `d` could be 1 or 3 depends on `multimask_output`. + return pred_masks.flatten(0, 1), pred_scores.flatten(0, 1) + + def generate(self, + im, + crop_n_layers=0, + crop_overlap_ratio=512 / 1500, + crop_downscale_factor=1, + point_grids=None, + points_stride=32, + points_batch_size=64, + conf_thres=0.88, + stability_score_thresh=0.95, + stability_score_offset=0.95, + crop_nms_thresh=0.7): + """Segment the whole image. + + Args: + im (torch.Tensor): The preprocessed image, (N, C, H, W). + crop_n_layers (int): If >0, mask prediction will be run again on + crops of the image. Sets the number of layers to run, where each + layer has 2**i_layer number of image crops. + crop_overlap_ratio (float): Sets the degree to which crops overlap. + In the first crop layer, crops will overlap by this fraction of + the image length. Later layers with more crops scale down this overlap. + crop_downscale_factor (int): The number of points-per-side + sampled in layer n is scaled down by crop_n_points_downscale_factor**n. + point_grids (list(np.ndarray), None): A list over explicit grids + of points used for sampling, normalized to [0,1]. The nth grid in the + list is used in the nth crop layer. Exclusive with points_per_side. + points_stride (int, None): The number of points to be sampled + along one side of the image. The total number of points is + points_per_side**2. If None, 'point_grids' must provide explicit + point sampling. + points_batch_size (int): Sets the number of points run simultaneously + by the model. Higher numbers may be faster but use more GPU memory. + conf_thres (float): A filtering threshold in [0,1], using the + model's predicted mask quality. + stability_score_thresh (float): A filtering threshold in [0,1], using + the stability of the mask under changes to the cutoff used to binarize + the model's mask predictions. 
+ stability_score_offset (float): The amount to shift the cutoff when + calculated the stability score. + crop_nms_thresh (float): The box IoU cutoff used by non-maximal + suppression to filter duplicate masks between different crops. + """ + self.segment_all = True + ih, iw = im.shape[2:] + crop_regions, layer_idxs = generate_crop_boxes((ih, iw), crop_n_layers, crop_overlap_ratio) + if point_grids is None: + point_grids = build_all_layer_point_grids( + points_stride, + crop_n_layers, + crop_downscale_factor, + ) + pred_masks, pred_scores, pred_bboxes, region_areas = [], [], [], [] + for crop_region, layer_idx in zip(crop_regions, layer_idxs): + x1, y1, x2, y2 = crop_region + w, h = x2 - x1, y2 - y1 + area = torch.tensor(w * h, device=im.device) + points_scale = np.array([[w, h]]) # w, h + # Crop image and interpolate to input size + crop_im = F.interpolate(im[..., y1:y2, x1:x2], (ih, iw), mode='bilinear', align_corners=False) + # (num_points, 2) + points_for_image = point_grids[layer_idx] * points_scale + crop_masks, crop_scores, crop_bboxes = [], [], [] + for (points, ) in batch_iterator(points_batch_size, points_for_image): + pred_mask, pred_score = self.prompt_inference(crop_im, points=points, multimask_output=True) + # Interpolate predicted masks to input size + pred_mask = F.interpolate(pred_mask[None], (h, w), mode='bilinear', align_corners=False)[0] + idx = pred_score > conf_thres + pred_mask, pred_score = pred_mask[idx], pred_score[idx] + + stability_score = calculate_stability_score(pred_mask, self.model.mask_threshold, + stability_score_offset) + idx = stability_score > stability_score_thresh + pred_mask, pred_score = pred_mask[idx], pred_score[idx] + # Bool type is much more memory-efficient. + pred_mask = pred_mask > self.model.mask_threshold + # (N, 4) + pred_bbox = batched_mask_to_box(pred_mask).float() + keep_mask = ~is_box_near_crop_edge(pred_bbox, crop_region, [0, 0, iw, ih]) + if not torch.all(keep_mask): + pred_bbox = pred_bbox[keep_mask] + pred_mask = pred_mask[keep_mask] + pred_score = pred_score[keep_mask] + + crop_masks.append(pred_mask) + crop_bboxes.append(pred_bbox) + crop_scores.append(pred_score) + + # Do nms within this crop + crop_masks = torch.cat(crop_masks) + crop_bboxes = torch.cat(crop_bboxes) + crop_scores = torch.cat(crop_scores) + keep = torchvision.ops.nms(crop_bboxes, crop_scores, self.args.iou) # NMS + crop_bboxes = uncrop_boxes_xyxy(crop_bboxes[keep], crop_region) + crop_masks = uncrop_masks(crop_masks[keep], crop_region, ih, iw) + crop_scores = crop_scores[keep] + + pred_masks.append(crop_masks) + pred_bboxes.append(crop_bboxes) + pred_scores.append(crop_scores) + region_areas.append(area.expand(len(crop_masks))) + + pred_masks = torch.cat(pred_masks) + pred_bboxes = torch.cat(pred_bboxes) + pred_scores = torch.cat(pred_scores) + region_areas = torch.cat(region_areas) + + # Remove duplicate masks between crops + if len(crop_regions) > 1: + scores = 1 / region_areas + keep = torchvision.ops.nms(pred_bboxes, scores, crop_nms_thresh) + pred_masks = pred_masks[keep] + pred_bboxes = pred_bboxes[keep] + pred_scores = pred_scores[keep] + + return pred_masks, pred_scores, pred_bboxes + + def setup_model(self, model): + """Set up YOLO model with specified thresholds and device.""" + device = select_device(self.args.device) + if model is None: + model = build_sam(self.args.model) + model.eval() + self.model = model.to(device) + self.device = device + self.mean = torch.tensor([123.675, 116.28, 103.53]).view(-1, 1, 1).to(device) + self.std = 
torch.tensor([58.395, 57.12, 57.375]).view(-1, 1, 1).to(device) + # TODO: Temporary settings for compatibility + self.model.pt = False + self.model.triton = False + self.model.stride = 32 + self.model.fp16 = False + self.done_warmup = True + + def postprocess(self, preds, img, orig_imgs): + """Postprocesses inference output predictions to create detection masks for objects.""" + # (N, 1, H, W), (N, 1) + pred_masks, pred_scores = preds[:2] + pred_bboxes = preds[2] if self.segment_all else None + names = dict(enumerate([str(i) for i in range(len(pred_masks))])) + results = [] + for i, masks in enumerate([pred_masks]): + orig_img = orig_imgs[i] if isinstance(orig_imgs, list) else orig_imgs + if pred_bboxes is not None: + pred_bboxes = ops.scale_boxes(img.shape[2:], pred_bboxes.float(), orig_img.shape, padding=False) + cls = torch.arange(len(pred_masks), dtype=torch.int32, device=pred_masks.device) + pred_bboxes = torch.cat([pred_bboxes, pred_scores[:, None], cls[:, None]], dim=-1) + + masks = ops.scale_masks(masks[None].float(), orig_img.shape[:2], padding=False)[0] + masks = masks > self.model.mask_threshold # to bool + path = self.batch[0] + img_path = path[i] if isinstance(path, list) else path + results.append(Results(orig_img=orig_img, path=img_path, names=names, masks=masks, boxes=pred_bboxes)) + # Reset segment-all mode. + self.segment_all = False + return results + + def setup_source(self, source): + """Sets up source and inference mode.""" + if source is not None: + super().setup_source(source) + + def set_image(self, image): + """Set image in advance. + Args: + + image (str | np.ndarray): image file path or np.ndarray image by cv2. + """ + if self.model is None: + model = build_sam(self.args.model) + self.setup_model(model) + self.setup_source(image) + assert len(self.dataset) == 1, '`set_image` only supports setting one image!' + for batch in self.dataset: + im = self.preprocess(batch[1]) + self.features = self.model.image_encoder(im) + self.im = im + break + + def reset_image(self): + self.im = None + self.features = None + + @staticmethod + def remove_small_regions(masks, min_area=0, nms_thresh=0.7): + """ + Removes small disconnected regions and holes in masks, then reruns + box NMS to remove any new duplicates. Requires open-cv as a dependency. + + Args: + masks (torch.Tensor): Masks, (N, H, W). + min_area (int): Minimum area threshold. + nms_thresh (float): NMS threshold. 
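        Returns:
            (torch.Tensor): The kept masks, with small regions and holes removed and
                NMS duplicates filtered out.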
+ """ + if len(masks) == 0: + return masks + + # Filter small disconnected regions and holes + new_masks = [] + scores = [] + for mask in masks: + mask = mask.cpu().numpy() + mask, changed = remove_small_regions(mask, min_area, mode='holes') + unchanged = not changed + mask, changed = remove_small_regions(mask, min_area, mode='islands') + unchanged = unchanged and not changed + + new_masks.append(torch.as_tensor(mask).unsqueeze(0)) + # Give score=0 to changed masks and score=1 to unchanged masks + # so NMS will prefer ones that didn't need postprocessing + scores.append(float(unchanged)) + + # Recalculate boxes and remove any new duplicates + new_masks = torch.cat(new_masks, dim=0) + boxes = batched_mask_to_box(new_masks) + keep = torchvision.ops.nms( + boxes.float(), + torch.as_tensor(scores), + nms_thresh, + ) + + # Only recalculate masks for masks that have changed + for i in keep: + if scores[i] == 0.0: + masks[i] = new_masks[i] + + return masks[keep] diff --git a/ultralytics/vit/utils/__init__.py b/ultralytics/vit/utils/__init__.py new file mode 100644 index 0000000..9e68dc1 --- /dev/null +++ b/ultralytics/vit/utils/__init__.py @@ -0,0 +1 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license diff --git a/ultralytics/vit/utils/loss.py b/ultralytics/vit/utils/loss.py new file mode 100644 index 0000000..cb2de20 --- /dev/null +++ b/ultralytics/vit/utils/loss.py @@ -0,0 +1,294 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from ultralytics.vit.utils.ops import HungarianMatcher +from ultralytics.yolo.utils.loss import FocalLoss, VarifocalLoss +from ultralytics.yolo.utils.metrics import bbox_iou + + +class DETRLoss(nn.Module): + + def __init__(self, + nc=80, + loss_gain=None, + aux_loss=True, + use_fl=True, + use_vfl=False, + use_uni_match=False, + uni_match_ind=0): + """ + DETR loss function. + + Args: + nc (int): The number of classes. + loss_gain (dict): The coefficient of loss. + aux_loss (bool): If 'aux_loss = True', loss at each decoder layer are to be used. + use_vfl (bool): Use VarifocalLoss or not. + use_uni_match (bool): Whether to use a fixed layer to assign labels for auxiliary branch. + uni_match_ind (int): The fixed indices of a layer. 
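            use_fl (bool): Use FocalLoss for the classification loss or not.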
+ """ + super().__init__() + + if loss_gain is None: + loss_gain = {'class': 1, 'bbox': 5, 'giou': 2, 'no_object': 0.1, 'mask': 1, 'dice': 1} + self.nc = nc + self.matcher = HungarianMatcher(cost_gain={'class': 2, 'bbox': 5, 'giou': 2}) + self.loss_gain = loss_gain + self.aux_loss = aux_loss + self.fl = FocalLoss() if use_fl else None + self.vfl = VarifocalLoss() if use_vfl else None + + self.use_uni_match = use_uni_match + self.uni_match_ind = uni_match_ind + self.device = None + + def _get_loss_class(self, pred_scores, targets, gt_scores, num_gts, postfix=''): + # logits: [b, query, num_classes], gt_class: list[[n, 1]] + name_class = f'loss_class{postfix}' + bs, nq = pred_scores.shape[:2] + # one_hot = F.one_hot(targets, self.nc + 1)[..., :-1] # (bs, num_queries, num_classes) + one_hot = torch.zeros((bs, nq, self.nc + 1), dtype=torch.int64, device=targets.device) + one_hot.scatter_(2, targets.unsqueeze(-1), 1) + one_hot = one_hot[..., :-1] + gt_scores = gt_scores.view(bs, nq, 1) * one_hot + + if self.fl: + if num_gts and self.vfl: + loss_cls = self.vfl(pred_scores, gt_scores, one_hot) + else: + loss_cls = self.fl(pred_scores, one_hot.float()) + loss_cls /= max(num_gts, 1) / nq + else: + loss_cls = nn.BCEWithLogitsLoss(reduction='none')(pred_scores, gt_scores).mean(1).sum() # YOLO CLS loss + + return {name_class: loss_cls.squeeze() * self.loss_gain['class']} + + def _get_loss_bbox(self, pred_bboxes, gt_bboxes, postfix=''): + # boxes: [b, query, 4], gt_bbox: list[[n, 4]] + name_bbox = f'loss_bbox{postfix}' + name_giou = f'loss_giou{postfix}' + + loss = {} + if len(gt_bboxes) == 0: + loss[name_bbox] = torch.tensor(0., device=self.device) + loss[name_giou] = torch.tensor(0., device=self.device) + return loss + + loss[name_bbox] = self.loss_gain['bbox'] * F.l1_loss(pred_bboxes, gt_bboxes, reduction='sum') / len(gt_bboxes) + loss[name_giou] = 1.0 - bbox_iou(pred_bboxes, gt_bboxes, xywh=True, GIoU=True) + loss[name_giou] = loss[name_giou].sum() / len(gt_bboxes) + loss[name_giou] = self.loss_gain['giou'] * loss[name_giou] + loss = {k: v.squeeze() for k, v in loss.items()} + return loss + + def _get_loss_mask(self, masks, gt_mask, match_indices, postfix=''): + # masks: [b, query, h, w], gt_mask: list[[n, H, W]] + name_mask = f'loss_mask{postfix}' + name_dice = f'loss_dice{postfix}' + + loss = {} + if sum(len(a) for a in gt_mask) == 0: + loss[name_mask] = torch.tensor(0., device=self.device) + loss[name_dice] = torch.tensor(0., device=self.device) + return loss + + num_gts = len(gt_mask) + src_masks, target_masks = self._get_assigned_bboxes(masks, gt_mask, match_indices) + src_masks = F.interpolate(src_masks.unsqueeze(0), size=target_masks.shape[-2:], mode='bilinear')[0] + # TODO: torch does not have `sigmoid_focal_loss`, but it's not urgent since we don't use mask branch for now. 
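        # NOTE: one possible substitute, if the mask branch is ever enabled, is
        # torchvision's focal loss (its signature differs from the call below):
        #   from torchvision.ops import sigmoid_focal_loss
        #   loss_mask = sigmoid_focal_loss(src_masks, target_masks, reduction='mean')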
+ loss[name_mask] = self.loss_gain['mask'] * F.sigmoid_focal_loss(src_masks, target_masks, + torch.tensor([num_gts], dtype=torch.float32)) + loss[name_dice] = self.loss_gain['dice'] * self._dice_loss(src_masks, target_masks, num_gts) + return loss + + def _dice_loss(self, inputs, targets, num_gts): + inputs = F.sigmoid(inputs) + inputs = inputs.flatten(1) + targets = targets.flatten(1) + numerator = 2 * (inputs * targets).sum(1) + denominator = inputs.sum(-1) + targets.sum(-1) + loss = 1 - (numerator + 1) / (denominator + 1) + return loss.sum() / num_gts + + def _get_loss_aux(self, + pred_bboxes, + pred_scores, + gt_bboxes, + gt_cls, + gt_groups, + match_indices=None, + postfix='', + masks=None, + gt_mask=None): + """Get auxiliary losses""" + # NOTE: loss class, bbox, giou, mask, dice + loss = torch.zeros(5 if masks is not None else 3, device=pred_bboxes.device) + if match_indices is None and self.use_uni_match: + match_indices = self.matcher(pred_bboxes[self.uni_match_ind], + pred_scores[self.uni_match_ind], + gt_bboxes, + gt_cls, + gt_groups, + masks=masks[self.uni_match_ind] if masks is not None else None, + gt_mask=gt_mask) + for i, (aux_bboxes, aux_scores) in enumerate(zip(pred_bboxes, pred_scores)): + aux_masks = masks[i] if masks is not None else None + loss_ = self._get_loss(aux_bboxes, + aux_scores, + gt_bboxes, + gt_cls, + gt_groups, + masks=aux_masks, + gt_mask=gt_mask, + postfix=postfix, + match_indices=match_indices) + loss[0] += loss_[f'loss_class{postfix}'] + loss[1] += loss_[f'loss_bbox{postfix}'] + loss[2] += loss_[f'loss_giou{postfix}'] + # if masks is not None and gt_mask is not None: + # loss_ = self._get_loss_mask(aux_masks, gt_mask, match_indices, postfix) + # loss[3] += loss_[f'loss_mask{postfix}'] + # loss[4] += loss_[f'loss_dice{postfix}'] + + loss = { + f'loss_class_aux{postfix}': loss[0], + f'loss_bbox_aux{postfix}': loss[1], + f'loss_giou_aux{postfix}': loss[2]} + # if masks is not None and gt_mask is not None: + # loss[f'loss_mask_aux{postfix}'] = loss[3] + # loss[f'loss_dice_aux{postfix}'] = loss[4] + return loss + + def _get_index(self, match_indices): + batch_idx = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(match_indices)]) + src_idx = torch.cat([src for (src, _) in match_indices]) + dst_idx = torch.cat([dst for (_, dst) in match_indices]) + return (batch_idx, src_idx), dst_idx + + def _get_assigned_bboxes(self, pred_bboxes, gt_bboxes, match_indices): + pred_assigned = torch.cat([ + t[I] if len(I) > 0 else torch.zeros(0, t.shape[-1], device=self.device) + for t, (I, _) in zip(pred_bboxes, match_indices)]) + gt_assigned = torch.cat([ + t[J] if len(J) > 0 else torch.zeros(0, t.shape[-1], device=self.device) + for t, (_, J) in zip(gt_bboxes, match_indices)]) + return pred_assigned, gt_assigned + + def _get_loss(self, + pred_bboxes, + pred_scores, + gt_bboxes, + gt_cls, + gt_groups, + masks=None, + gt_mask=None, + postfix='', + match_indices=None): + """Get losses""" + if match_indices is None: + match_indices = self.matcher(pred_bboxes, + pred_scores, + gt_bboxes, + gt_cls, + gt_groups, + masks=masks, + gt_mask=gt_mask) + + idx, gt_idx = self._get_index(match_indices) + pred_bboxes, gt_bboxes = pred_bboxes[idx], gt_bboxes[gt_idx] + + bs, nq = pred_scores.shape[:2] + targets = torch.full((bs, nq), self.nc, device=pred_scores.device, dtype=gt_cls.dtype) + targets[idx] = gt_cls[gt_idx] + + gt_scores = torch.zeros([bs, nq], device=pred_scores.device) + if len(gt_bboxes): + gt_scores[idx] = bbox_iou(pred_bboxes.detach(), gt_bboxes, 
xywh=True).squeeze(-1) + + loss = {} + loss.update(self._get_loss_class(pred_scores, targets, gt_scores, len(gt_bboxes), postfix)) + loss.update(self._get_loss_bbox(pred_bboxes, gt_bboxes, postfix)) + # if masks is not None and gt_mask is not None: + # loss.update(self._get_loss_mask(masks, gt_mask, match_indices, postfix)) + return loss + + def forward(self, pred_bboxes, pred_scores, batch, postfix='', **kwargs): + """ + Args: + pred_bboxes (torch.Tensor): [l, b, query, 4] + pred_scores (torch.Tensor): [l, b, query, num_classes] + batch (dict): A dict includes: + gt_cls (torch.Tensor) with shape [num_gts, ], + gt_bboxes (torch.Tensor): [num_gts, 4], + gt_groups (List(int)): a list of batch size length includes the number of gts of each image. + postfix (str): postfix of loss name. + """ + self.device = pred_bboxes.device + match_indices = kwargs.get('match_indices', None) + gt_cls, gt_bboxes, gt_groups = batch['cls'], batch['bboxes'], batch['gt_groups'] + + total_loss = self._get_loss(pred_bboxes[-1], + pred_scores[-1], + gt_bboxes, + gt_cls, + gt_groups, + postfix=postfix, + match_indices=match_indices) + + if self.aux_loss: + total_loss.update( + self._get_loss_aux(pred_bboxes[:-1], pred_scores[:-1], gt_bboxes, gt_cls, gt_groups, match_indices, + postfix)) + + return total_loss + + +class RTDETRDetectionLoss(DETRLoss): + + def forward(self, preds, batch, dn_bboxes=None, dn_scores=None, dn_meta=None): + pred_bboxes, pred_scores = preds + total_loss = super().forward(pred_bboxes, pred_scores, batch) + + if dn_meta is not None: + dn_pos_idx, dn_num_group = dn_meta['dn_pos_idx'], dn_meta['dn_num_group'] + assert len(batch['gt_groups']) == len(dn_pos_idx) + + # denoising match indices + match_indices = self.get_dn_match_indices(dn_pos_idx, dn_num_group, batch['gt_groups']) + + # compute denoising training loss + dn_loss = super().forward(dn_bboxes, dn_scores, batch, postfix='_dn', match_indices=match_indices) + total_loss.update(dn_loss) + else: + total_loss.update({f'{k}_dn': torch.tensor(0., device=self.device) for k in total_loss.keys()}) + + return total_loss + + @staticmethod + def get_dn_match_indices(dn_pos_idx, dn_num_group, gt_groups): + """Get the match indices for denoising. + + Args: + dn_pos_idx (List[torch.Tensor]): A list includes positive indices of denoising. + dn_num_group (int): The number of groups of denoising. + gt_groups (List(int)): a list of batch size length includes the number of gts of each image. + + Returns: + dn_match_indices (List(tuple)): Matched indices. + + """ + dn_match_indices = [] + idx_groups = torch.as_tensor([0, *gt_groups[:-1]]).cumsum_(0) + for i, num_gt in enumerate(gt_groups): + if num_gt > 0: + gt_idx = torch.arange(end=num_gt, dtype=torch.long) + idx_groups[i] + gt_idx = gt_idx.repeat(dn_num_group) + assert len(dn_pos_idx[i]) == len(gt_idx), 'Expected the same length, ' + f'but got {len(dn_pos_idx[i])} and {len(gt_idx)} respectively.' 
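                # For example, with num_gt=2, idx_groups[i]=0 and dn_num_group=3:
                #   gt_idx = [0, 1, 0, 1, 0, 1]
                # i.e. every denoising group re-matches the same ground-truth boxes.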
+ dn_match_indices.append((dn_pos_idx[i], gt_idx)) + else: + dn_match_indices.append((torch.zeros([0], dtype=torch.long), torch.zeros([0], dtype=torch.long))) + return dn_match_indices diff --git a/ultralytics/vit/utils/ops.py b/ultralytics/vit/utils/ops.py new file mode 100644 index 0000000..4b37931 --- /dev/null +++ b/ultralytics/vit/utils/ops.py @@ -0,0 +1,260 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +import torch +import torch.nn as nn +import torch.nn.functional as F +from scipy.optimize import linear_sum_assignment + +from ultralytics.yolo.utils.metrics import bbox_iou +from ultralytics.yolo.utils.ops import xywh2xyxy, xyxy2xywh + + +class HungarianMatcher(nn.Module): + """ + A module implementing the HungarianMatcher, which is a differentiable module to solve the assignment problem in + an end-to-end fashion. + + HungarianMatcher performs optimal assignment over predicted and ground truth bounding boxes using a cost function + that considers classification scores, bounding box coordinates, and optionally, mask predictions. + + Attributes: + cost_gain (dict): Dictionary of cost coefficients for different components: 'class', 'bbox', 'giou', 'mask', and 'dice'. + use_fl (bool): Indicates whether to use Focal Loss for the classification cost calculation. + with_mask (bool): Indicates whether the model makes mask predictions. + num_sample_points (int): The number of sample points used in mask cost calculation. + alpha (float): The alpha factor in Focal Loss calculation. + gamma (float): The gamma factor in Focal Loss calculation. + + Methods: + forward(pred_bboxes, pred_scores, gt_bboxes, gt_cls, gt_groups, masks=None, gt_mask=None): Computes the assignment + between predictions and ground truths for a batch. + _cost_mask(bs, num_gts, masks=None, gt_mask=None): Computes the mask cost and dice cost if masks are predicted. + """ + + def __init__(self, cost_gain=None, use_fl=True, with_mask=False, num_sample_points=12544, alpha=0.25, gamma=2.0): + super().__init__() + if cost_gain is None: + cost_gain = {'class': 1, 'bbox': 5, 'giou': 2, 'mask': 1, 'dice': 1} + self.cost_gain = cost_gain + self.use_fl = use_fl + self.with_mask = with_mask + self.num_sample_points = num_sample_points + self.alpha = alpha + self.gamma = gamma + + def forward(self, pred_bboxes, pred_scores, gt_bboxes, gt_cls, gt_groups, masks=None, gt_mask=None): + """ + Forward pass for HungarianMatcher. This function computes costs based on prediction and ground truth + (classification cost, L1 cost between boxes and GIoU cost between boxes) and finds the optimal matching + between predictions and ground truth based on these costs. + + Args: + pred_bboxes (Tensor): Predicted bounding boxes with shape [batch_size, num_queries, 4]. + pred_scores (Tensor): Predicted scores with shape [batch_size, num_queries, num_classes]. + gt_cls (torch.Tensor): Ground truth classes with shape [num_gts, ]. + gt_bboxes (torch.Tensor): Ground truth bounding boxes with shape [num_gts, 4]. + gt_groups (List[int]): List of length equal to batch size, containing the number of ground truths for + each image. + masks (Tensor, optional): Predicted masks with shape [batch_size, num_queries, height, width]. + Defaults to None. + gt_mask (List[Tensor], optional): List of ground truth masks, each with shape [num_masks, Height, Width]. + Defaults to None. 
+ + Returns: + (List[Tuple[Tensor, Tensor]]): A list of size batch_size, each element is a tuple (index_i, index_j), where: + - index_i is the tensor of indices of the selected predictions (in order) + - index_j is the tensor of indices of the corresponding selected ground truth targets (in order) + For each batch element, it holds: + len(index_i) = len(index_j) = min(num_queries, num_target_boxes) + """ + + bs, nq, nc = pred_scores.shape + + if sum(gt_groups) == 0: + return [(torch.tensor([], dtype=torch.long), torch.tensor([], dtype=torch.long)) for _ in range(bs)] + + # We flatten to compute the cost matrices in a batch + # [batch_size * num_queries, num_classes] + pred_scores = pred_scores.detach().view(-1, nc) + pred_scores = F.sigmoid(pred_scores) if self.use_fl else F.softmax(pred_scores, dim=-1) + # [batch_size * num_queries, 4] + pred_bboxes = pred_bboxes.detach().view(-1, 4) + + # Compute the classification cost + pred_scores = pred_scores[:, gt_cls] + if self.use_fl: + neg_cost_class = (1 - self.alpha) * (pred_scores ** self.gamma) * (-(1 - pred_scores + 1e-8).log()) + pos_cost_class = self.alpha * ((1 - pred_scores) ** self.gamma) * (-(pred_scores + 1e-8).log()) + cost_class = pos_cost_class - neg_cost_class + else: + cost_class = -pred_scores + + # Compute the L1 cost between boxes + cost_bbox = (pred_bboxes.unsqueeze(1) - gt_bboxes.unsqueeze(0)).abs().sum(-1) # (bs*num_queries, num_gt) + + # Compute the GIoU cost between boxes, (bs*num_queries, num_gt) + cost_giou = 1.0 - bbox_iou(pred_bboxes.unsqueeze(1), gt_bboxes.unsqueeze(0), xywh=True, GIoU=True).squeeze(-1) + + # Final cost matrix + C = self.cost_gain['class'] * cost_class + \ + self.cost_gain['bbox'] * cost_bbox + \ + self.cost_gain['giou'] * cost_giou + # Compute the mask cost and dice cost + if self.with_mask: + C += self._cost_mask(bs, gt_groups, masks, gt_mask) + + C = C.view(bs, nq, -1).cpu() + indices = [linear_sum_assignment(c[i]) for i, c in enumerate(C.split(gt_groups, -1))] + gt_groups = torch.as_tensor([0, *gt_groups[:-1]]).cumsum_(0) + # (idx for queries, idx for gt) + return [(torch.tensor(i, dtype=torch.long), torch.tensor(j, dtype=torch.long) + gt_groups[k]) + for k, (i, j) in enumerate(indices)] + + def _cost_mask(self, bs, num_gts, masks=None, gt_mask=None): + assert masks is not None and gt_mask is not None, 'Make sure the input has `mask` and `gt_mask`' + # all masks share the same set of points for efficient matching + sample_points = torch.rand([bs, 1, self.num_sample_points, 2]) + sample_points = 2.0 * sample_points - 1.0 + + out_mask = F.grid_sample(masks.detach(), sample_points, align_corners=False).squeeze(-2) + out_mask = out_mask.flatten(0, 1) + + tgt_mask = torch.cat(gt_mask).unsqueeze(1) + sample_points = torch.cat([a.repeat(b, 1, 1, 1) for a, b in zip(sample_points, num_gts) if b > 0]) + tgt_mask = F.grid_sample(tgt_mask, sample_points, align_corners=False).squeeze([1, 2]) + + with torch.cuda.amp.autocast(False): + # binary cross entropy cost + pos_cost_mask = F.binary_cross_entropy_with_logits(out_mask, torch.ones_like(out_mask), reduction='none') + neg_cost_mask = F.binary_cross_entropy_with_logits(out_mask, torch.zeros_like(out_mask), reduction='none') + cost_mask = torch.matmul(pos_cost_mask, tgt_mask.T) + torch.matmul(neg_cost_mask, 1 - tgt_mask.T) + cost_mask /= self.num_sample_points + + # dice cost + out_mask = F.sigmoid(out_mask) + numerator = 2 * torch.matmul(out_mask, tgt_mask.T) + denominator = out_mask.sum(-1, keepdim=True) + tgt_mask.sum(-1).unsqueeze(0) + cost_dice = 1 - 
(numerator + 1) / (denominator + 1) + + C = self.cost_gain['mask'] * cost_mask + self.cost_gain['dice'] * cost_dice + return C + + +def get_cdn_group(batch, + num_classes, + num_queries, + class_embed, + num_dn=100, + cls_noise_ratio=0.5, + box_noise_scale=1.0, + training=False): + """ + Get contrastive denoising training group. This function creates a contrastive denoising training group with + positive and negative samples from the ground truths (gt). It applies noise to the class labels and bounding + box coordinates, and returns the modified labels, bounding boxes, attention mask and meta information. + + Args: + batch (dict): A dict that includes 'gt_cls' (torch.Tensor with shape [num_gts, ]), 'gt_bboxes' + (torch.Tensor with shape [num_gts, 4]), 'gt_groups' (List(int)) which is a list of batch size length + indicating the number of gts of each image. + num_classes (int): Number of classes. + num_queries (int): Number of queries. + class_embed (torch.Tensor): Embedding weights to map class labels to embedding space. + num_dn (int, optional): Number of denoising. Defaults to 100. + cls_noise_ratio (float, optional): Noise ratio for class labels. Defaults to 0.5. + box_noise_scale (float, optional): Noise scale for bounding box coordinates. Defaults to 1.0. + training (bool, optional): If it's in training mode. Defaults to False. + + Returns: + (Tuple[Optional[Tensor], Optional[Tensor], Optional[Tensor], Optional[Dict]]): The modified class embeddings, + bounding boxes, attention mask and meta information for denoising. If not in training mode or 'num_dn' + is less than or equal to 0, the function returns None for all elements in the tuple. + """ + + if (not training) or num_dn <= 0: + return None, None, None, None + gt_groups = batch['gt_groups'] + total_num = sum(gt_groups) + max_nums = max(gt_groups) + if max_nums == 0: + return None, None, None, None + + num_group = num_dn // max_nums + num_group = 1 if num_group == 0 else num_group + # pad gt to max_num of a batch + bs = len(gt_groups) + gt_cls = batch['cls'] # (bs*num, ) + gt_bbox = batch['bboxes'] # bs*num, 4 + b_idx = batch['batch_idx'] + + # each group has positive and negative queries. 
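    # For example, with gt_groups=[2, 3] (total_num=5, max_nums=3) and num_dn=100,
    # num_group = 100 // 3 = 33, so the gt labels/boxes below are tiled 2 * 33 times;
    # queries with index >= 33 * 5 = 165 (neg_idx) later receive the extra box noise
    # (when box_noise_scale > 0) that turns them into negative denoising samples.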
+ dn_cls = gt_cls.repeat(2 * num_group) # (2*num_group*bs*num, ) + dn_bbox = gt_bbox.repeat(2 * num_group, 1) # 2*num_group*bs*num, 4 + dn_b_idx = b_idx.repeat(2 * num_group).view(-1) # (2*num_group*bs*num, ) + + # positive and negative mask + # (bs*num*num_group, ), the second total_num*num_group part as negative samples + neg_idx = torch.arange(total_num * num_group, dtype=torch.long, device=gt_bbox.device) + num_group * total_num + + if cls_noise_ratio > 0: + # half of bbox prob + mask = torch.rand(dn_cls.shape) < (cls_noise_ratio * 0.5) + idx = torch.nonzero(mask).squeeze(-1) + # randomly put a new one here + new_label = torch.randint_like(idx, 0, num_classes, dtype=dn_cls.dtype, device=dn_cls.device) + dn_cls[idx] = new_label + + if box_noise_scale > 0: + known_bbox = xywh2xyxy(dn_bbox) + + diff = (dn_bbox[..., 2:] * 0.5).repeat(1, 2) * box_noise_scale # 2*num_group*bs*num, 4 + + rand_sign = torch.randint_like(dn_bbox, 0, 2) * 2.0 - 1.0 + rand_part = torch.rand_like(dn_bbox) + rand_part[neg_idx] += 1.0 + rand_part *= rand_sign + known_bbox += rand_part * diff + known_bbox.clip_(min=0.0, max=1.0) + dn_bbox = xyxy2xywh(known_bbox) + dn_bbox = inverse_sigmoid(dn_bbox) + + # total denoising queries + num_dn = int(max_nums * 2 * num_group) + # class_embed = torch.cat([class_embed, torch.zeros([1, class_embed.shape[-1]], device=class_embed.device)]) + dn_cls_embed = class_embed[dn_cls] # bs*num * 2 * num_group, 256 + padding_cls = torch.zeros(bs, num_dn, dn_cls_embed.shape[-1], device=gt_cls.device) + padding_bbox = torch.zeros(bs, num_dn, 4, device=gt_bbox.device) + + map_indices = torch.cat([torch.tensor(range(num), dtype=torch.long) for num in gt_groups]) + pos_idx = torch.stack([map_indices + max_nums * i for i in range(num_group)], dim=0) + + map_indices = torch.cat([map_indices + max_nums * i for i in range(2 * num_group)]) + padding_cls[(dn_b_idx, map_indices)] = dn_cls_embed + padding_bbox[(dn_b_idx, map_indices)] = dn_bbox + + tgt_size = num_dn + num_queries + attn_mask = torch.zeros([tgt_size, tgt_size], dtype=torch.bool) + # match query cannot see the reconstruct + attn_mask[num_dn:, :num_dn] = True + # reconstruct cannot see each other + for i in range(num_group): + if i == 0: + attn_mask[max_nums * 2 * i:max_nums * 2 * (i + 1), max_nums * 2 * (i + 1):num_dn] = True + if i == num_group - 1: + attn_mask[max_nums * 2 * i:max_nums * 2 * (i + 1), :max_nums * i * 2] = True + else: + attn_mask[max_nums * 2 * i:max_nums * 2 * (i + 1), max_nums * 2 * (i + 1):num_dn] = True + attn_mask[max_nums * 2 * i:max_nums * 2 * (i + 1), :max_nums * 2 * i] = True + dn_meta = { + 'dn_pos_idx': [p.reshape(-1) for p in pos_idx.cpu().split(list(gt_groups), dim=1)], + 'dn_num_group': num_group, + 'dn_num_split': [num_dn, num_queries]} + + return padding_cls.to(class_embed.device), padding_bbox.to(class_embed.device), attn_mask.to( + class_embed.device), dn_meta + + +def inverse_sigmoid(x, eps=1e-6): + """Inverse sigmoid function.""" + x = x.clip(min=0., max=1.) + return torch.log(x / (1 - x + eps) + eps) diff --git a/ultralytics/yolo/__init__.py b/ultralytics/yolo/__init__.py new file mode 100644 index 0000000..d1fa558 --- /dev/null +++ b/ultralytics/yolo/__init__.py @@ -0,0 +1,5 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +from . 
import v8 + +__all__ = 'v8', # tuple or list diff --git a/ultralytics/yolo/cfg/__init__.py b/ultralytics/yolo/cfg/__init__.py new file mode 100644 index 0000000..71a9022 --- /dev/null +++ b/ultralytics/yolo/cfg/__init__.py @@ -0,0 +1,421 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +import contextlib +import re +import shutil +import sys +from difflib import get_close_matches +from pathlib import Path +from types import SimpleNamespace +from typing import Dict, List, Union + +from ultralytics.yolo.utils import (DEFAULT_CFG, DEFAULT_CFG_DICT, DEFAULT_CFG_PATH, LOGGER, ROOT, USER_CONFIG_DIR, + IterableSimpleNamespace, __version__, checks, colorstr, deprecation_warn, + get_settings, yaml_load, yaml_print) + +# Define valid tasks and modes +MODES = 'train', 'val', 'predict', 'export', 'track', 'benchmark' +TASKS = 'detect', 'segment', 'classify', 'pose' +TASK2DATA = {'detect': 'coco8.yaml', 'segment': 'coco8-seg.yaml', 'classify': 'imagenet100', 'pose': 'coco8-pose.yaml'} +TASK2MODEL = { + 'detect': 'yolov8n.pt', + 'segment': 'yolov8n-seg.pt', + 'classify': 'yolov8n-cls.pt', + 'pose': 'yolov8n-pose.pt'} +TASK2METRIC = { + 'detect': 'metrics/mAP50-95(B)', + 'segment': 'metrics/mAP50-95(M)', + 'classify': 'metrics/accuracy_top1', + 'pose': 'metrics/mAP50-95(P)'} + + +CLI_HELP_MSG = \ + f""" + Arguments received: {str(['yolo'] + sys.argv[1:])}. Ultralytics 'yolo' commands use the following syntax: + + yolo TASK MODE ARGS + + Where TASK (optional) is one of {TASKS} + MODE (required) is one of {MODES} + ARGS (optional) are any number of custom 'arg=value' pairs like 'imgsz=320' that override defaults. + See all ARGS at https://docs.ultralytics.com/usage/cfg or with 'yolo cfg' + + 1. Train a detection model for 10 epochs with an initial learning_rate of 0.01 + yolo train data=coco128.yaml model=yolov8n.pt epochs=10 lr0=0.01 + + 2. Predict a YouTube video using a pretrained segmentation model at image size 320: + yolo predict model=yolov8n-seg.pt source='https://youtu.be/Zgi9g1ksQHc' imgsz=320 + + 3. Val a pretrained detection model at batch-size 1 and image size 640: + yolo val model=yolov8n.pt data=coco128.yaml batch=1 imgsz=640 + + 4. Export a YOLOv8n classification model to ONNX format at image size 224 by 128 (no TASK required) + yolo export model=yolov8n-cls.pt format=onnx imgsz=224,128 + + 5. 
Run special commands: + yolo help + yolo checks + yolo version + yolo settings + yolo copy-cfg + yolo cfg + + Docs: https://docs.ultralytics.com + Community: https://community.ultralytics.com + GitHub: https://github.com/ultralytics/ultralytics + """ + +# Define keys for arg type checks +CFG_FLOAT_KEYS = 'warmup_epochs', 'box', 'cls', 'dfl', 'degrees', 'shear' +CFG_FRACTION_KEYS = ('dropout', 'iou', 'lr0', 'lrf', 'momentum', 'weight_decay', 'warmup_momentum', 'warmup_bias_lr', + 'label_smoothing', 'hsv_h', 'hsv_s', 'hsv_v', 'translate', 'scale', 'perspective', 'flipud', + 'fliplr', 'mosaic', 'mixup', 'copy_paste', 'conf', 'iou', 'fraction') # fraction floats 0.0 - 1.0 +CFG_INT_KEYS = ('epochs', 'patience', 'batch', 'workers', 'seed', 'close_mosaic', 'mask_ratio', 'max_det', 'vid_stride', + 'line_width', 'workspace', 'nbs', 'save_period') +CFG_BOOL_KEYS = ('save', 'exist_ok', 'verbose', 'deterministic', 'single_cls', 'rect', 'cos_lr', 'overlap_mask', 'val', + 'save_json', 'save_hybrid', 'half', 'dnn', 'plots', 'show', 'save_txt', 'save_conf', 'save_crop', + 'show_labels', 'show_conf', 'visualize', 'augment', 'agnostic_nms', 'retina_masks', 'boxes', 'keras', + 'optimize', 'int8', 'dynamic', 'simplify', 'nms', 'v5loader', 'profile') + + +def cfg2dict(cfg): + """ + Convert a configuration object to a dictionary, whether it is a file path, a string, or a SimpleNamespace object. + + Args: + cfg (str | Path | SimpleNamespace): Configuration object to be converted to a dictionary. + + Returns: + cfg (dict): Configuration object in dictionary format. + """ + if isinstance(cfg, (str, Path)): + cfg = yaml_load(cfg) # load dict + elif isinstance(cfg, SimpleNamespace): + cfg = vars(cfg) # convert to dict + return cfg + + +def get_cfg(cfg: Union[str, Path, Dict, SimpleNamespace] = DEFAULT_CFG_DICT, overrides: Dict = None): + """ + Load and merge configuration data from a file or dictionary. + + Args: + cfg (str | Path | Dict | SimpleNamespace): Configuration data. + overrides (str | Dict | optional): Overrides in the form of a file name or a dictionary. Default is None. + + Returns: + (SimpleNamespace): Training arguments namespace. + """ + cfg = cfg2dict(cfg) + + # Merge overrides + if overrides: + overrides = cfg2dict(overrides) + check_cfg_mismatch(cfg, overrides) + cfg = {**cfg, **overrides} # merge cfg and overrides dicts (prefer overrides) + + # Special handling for numeric project/name + for k in 'project', 'name': + if k in cfg and isinstance(cfg[k], (int, float)): + cfg[k] = str(cfg[k]) + if cfg.get('name') == 'model': # assign model to 'name' arg + cfg['name'] = cfg.get('model', '').split('.')[0] + LOGGER.warning(f"WARNING ⚠️ 'name=model' automatically updated to 'name={cfg['name']}'.") + + # Type and Value checks + for k, v in cfg.items(): + if v is not None: # None values may be from optional args + if k in CFG_FLOAT_KEYS and not isinstance(v, (int, float)): + raise TypeError(f"'{k}={v}' is of invalid type {type(v).__name__}. " + f"Valid '{k}' types are int (i.e. '{k}=0') or float (i.e. '{k}=0.5')") + elif k in CFG_FRACTION_KEYS: + if not isinstance(v, (int, float)): + raise TypeError(f"'{k}={v}' is of invalid type {type(v).__name__}. " + f"Valid '{k}' types are int (i.e. '{k}=0') or float (i.e. '{k}=0.5')") + if not (0.0 <= v <= 1.0): + raise ValueError(f"'{k}={v}' is an invalid value. " + f"Valid '{k}' values are between 0.0 and 1.0.") + elif k in CFG_INT_KEYS and not isinstance(v, int): + raise TypeError(f"'{k}={v}' is of invalid type {type(v).__name__}. " + f"'{k}' must be an int (i.e. 
'{k}=8')") + elif k in CFG_BOOL_KEYS and not isinstance(v, bool): + raise TypeError(f"'{k}={v}' is of invalid type {type(v).__name__}. " + f"'{k}' must be a bool (i.e. '{k}=True' or '{k}=False')") + + # Return instance + return IterableSimpleNamespace(**cfg) + + +def _handle_deprecation(custom): + """ + Hardcoded function to handle deprecated config keys + """ + + for key in custom.copy().keys(): + if key == 'hide_labels': + deprecation_warn(key, 'show_labels') + custom['show_labels'] = custom.pop('hide_labels') == 'False' + if key == 'hide_conf': + deprecation_warn(key, 'show_conf') + custom['show_conf'] = custom.pop('hide_conf') == 'False' + if key == 'line_thickness': + deprecation_warn(key, 'line_width') + custom['line_width'] = custom.pop('line_thickness') + + return custom + + +def check_cfg_mismatch(base: Dict, custom: Dict, e=None): + """ + This function checks for any mismatched keys between a custom configuration list and a base configuration list. + If any mismatched keys are found, the function prints out similar keys from the base list and exits the program. + + Args: + custom (dict): a dictionary of custom configuration options + base (dict): a dictionary of base configuration options + """ + custom = _handle_deprecation(custom) + base, custom = (set(x.keys()) for x in (base, custom)) + mismatched = [x for x in custom if x not in base] + if mismatched: + string = '' + for x in mismatched: + matches = get_close_matches(x, base) # key list + matches = [f'{k}={DEFAULT_CFG_DICT[k]}' if DEFAULT_CFG_DICT.get(k) is not None else k for k in matches] + match_str = f'Similar arguments are i.e. {matches}.' if matches else '' + string += f"'{colorstr('red', 'bold', x)}' is not a valid YOLO argument. {match_str}\n" + raise SyntaxError(string + CLI_HELP_MSG) from e + + +def merge_equals_args(args: List[str]) -> List[str]: + """ + Merges arguments around isolated '=' args in a list of strings. + The function considers cases where the first argument ends with '=' or the second starts with '=', + as well as when the middle one is an equals sign. + + Args: + args (List[str]): A list of strings where each element is an argument. + + Returns: + List[str]: A list of strings where the arguments around isolated '=' are merged. + """ + new_args = [] + for i, arg in enumerate(args): + if arg == '=' and 0 < i < len(args) - 1: # merge ['arg', '=', 'val'] + new_args[-1] += f'={args[i + 1]}' + del args[i + 1] + elif arg.endswith('=') and i < len(args) - 1 and '=' not in args[i + 1]: # merge ['arg=', 'val'] + new_args.append(f'{arg}{args[i + 1]}') + del args[i + 1] + elif arg.startswith('=') and i > 0: # merge ['arg', '=val'] + new_args[-1] += arg + else: + new_args.append(arg) + return new_args + + +def handle_yolo_hub(args: List[str]) -> None: + """ + Handle Ultralytics HUB command-line interface (CLI) commands. + + This function processes Ultralytics HUB CLI commands such as login and logout. + It should be called when executing a script with arguments related to HUB authentication. + + Args: + args (List[str]): A list of command line arguments + + Example: + python my_script.py hub login your_api_key + """ + from ultralytics import hub + + if args[0] == 'login': + key = args[1] if len(args) > 1 else '' + # Log in to Ultralytics HUB using the provided API key + hub.login(key) + elif args[0] == 'logout': + # Log out from Ultralytics HUB + hub.logout() + + +def handle_yolo_settings(args: List[str]) -> None: + """ + Handle YOLO settings command-line interface (CLI) commands. 
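Because a shell can split a `key=value` pair into separate tokens, the CLI parser first re-joins them with `merge_equals_args` above. A quick usage sketch, assuming the vendored `ultralytics.yolo.cfg` module from this diff is importable:

```python
# Sketch: how stray spaces around '=' are re-joined before CLI parsing.
from ultralytics.yolo.cfg import merge_equals_args

print(merge_equals_args(['train', 'imgsz', '=', '320', 'batch=', '16']))
# -> ['train', 'imgsz=320', 'batch=16']
```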
+ + This function processes YOLO settings CLI commands such as reset. + It should be called when executing a script with arguments related to YOLO settings management. + + Args: + args (List[str]): A list of command line arguments for YOLO settings management. + + Example: + python my_script.py yolo settings reset + """ + path = USER_CONFIG_DIR / 'settings.yaml' # get SETTINGS YAML file path + if any(args) and args[0] == 'reset': + path.unlink() # delete the settings file + get_settings() # create new settings + LOGGER.info('Settings reset successfully') # inform the user that settings have been reset + yaml_print(path) # print the current settings + + +def entrypoint(debug=''): + """ + This function is the ultralytics package entrypoint. It is responsible for parsing the command line arguments passed + to the package. + + This function allows for: + - passing mandatory YOLO args as a list of strings + - specifying the task to be performed, one of 'detect', 'segment', 'classify' or 'pose' + - specifying the mode, one of 'train', 'val', 'predict', 'export', 'track' or 'benchmark' + - running special modes like 'checks' + - passing overrides to the package's configuration + + It uses the package's default cfg and initializes it using the passed overrides. + Then it calls the CLI function with the composed cfg. + """ + args = (debug.split(' ') if debug else sys.argv)[1:] + if not args: # no arguments passed + LOGGER.info(CLI_HELP_MSG) + return + + special = { + 'help': lambda: LOGGER.info(CLI_HELP_MSG), + 'checks': checks.check_yolo, + 'version': lambda: LOGGER.info(__version__), + 'settings': lambda: handle_yolo_settings(args[1:]), + 'cfg': lambda: yaml_print(DEFAULT_CFG_PATH), + 'hub': lambda: handle_yolo_hub(args[1:]), + 'login': lambda: handle_yolo_hub(args), + 'copy-cfg': copy_default_cfg} + full_args_dict = {**DEFAULT_CFG_DICT, **{k: None for k in TASKS}, **{k: None for k in MODES}, **special} + + # Define common misuses of special commands, i.e. -h, -help, --help + special.update({k[0]: v for k, v in special.items()}) # single-letter shortcuts, i.e. 'h' for 'help' + special.update({k[:-1]: v for k, v in special.items() if len(k) > 1 and k.endswith('s')}) # singular, i.e. 'setting' for 'settings' + special = {**special, **{f'-{k}': v for k, v in special.items()}, **{f'--{k}': v for k, v in special.items()}} + + overrides = {} # basic overrides, i.e.
imgsz=320 + for a in merge_equals_args(args): # merge spaces around '=' sign + if a.startswith('--'): + LOGGER.warning(f"WARNING ⚠️ '{a}' does not require leading dashes '--', updating to '{a[2:]}'.") + a = a[2:] + if a.endswith(','): + LOGGER.warning(f"WARNING ⚠️ '{a}' does not require trailing comma ',', updating to '{a[:-1]}'.") + a = a[:-1] + if '=' in a: + try: + re.sub(r' *= *', '=', a) # remove spaces around equals sign + k, v = a.split('=', 1) # split on first '=' sign + assert v, f"missing '{k}' value" + if k == 'cfg': # custom.yaml passed + LOGGER.info(f'Overriding {DEFAULT_CFG_PATH} with {v}') + overrides = {k: val for k, val in yaml_load(checks.check_yaml(v)).items() if k != 'cfg'} + else: + if v.lower() == 'none': + v = None + elif v.lower() == 'true': + v = True + elif v.lower() == 'false': + v = False + else: + with contextlib.suppress(Exception): + v = eval(v) + overrides[k] = v + except (NameError, SyntaxError, ValueError, AssertionError) as e: + check_cfg_mismatch(full_args_dict, {a: ''}, e) + + elif a in TASKS: + overrides['task'] = a + elif a in MODES: + overrides['mode'] = a + elif a.lower() in special: + special[a.lower()]() + return + elif a in DEFAULT_CFG_DICT and isinstance(DEFAULT_CFG_DICT[a], bool): + overrides[a] = True # auto-True for default bool args, i.e. 'yolo show' sets show=True + elif a in DEFAULT_CFG_DICT: + raise SyntaxError(f"'{colorstr('red', 'bold', a)}' is a valid YOLO argument but is missing an '=' sign " + f"to set its value, i.e. try '{a}={DEFAULT_CFG_DICT[a]}'\n{CLI_HELP_MSG}") + else: + check_cfg_mismatch(full_args_dict, {a: ''}) + + # Check keys + check_cfg_mismatch(full_args_dict, overrides) + + # Mode + mode = overrides.get('mode', None) + if mode is None: + mode = DEFAULT_CFG.mode or 'predict' + LOGGER.warning(f"WARNING ⚠️ 'mode' is missing. Valid modes are {MODES}. Using default 'mode={mode}'.") + elif mode not in MODES: + if mode not in ('checks', checks): + raise ValueError(f"Invalid 'mode={mode}'. Valid modes are {MODES}.\n{CLI_HELP_MSG}") + LOGGER.warning("WARNING ⚠️ 'yolo mode=checks' is deprecated. Use 'yolo checks' instead.") + checks.check_yolo() + return + + # Task + task = overrides.pop('task', None) + if task: + if task not in TASKS: + raise ValueError(f"Invalid 'task={task}'. Valid tasks are {TASKS}.\n{CLI_HELP_MSG}") + if 'model' not in overrides: + overrides['model'] = TASK2MODEL[task] + + # Model + model = overrides.pop('model', DEFAULT_CFG.model) + if model is None: + model = 'yolov8n.pt' + LOGGER.warning(f"WARNING ⚠️ 'model' is missing. Using default 'model={model}'.") + overrides['model'] = model + if 'rtdetr' in model.lower(): # guess architecture + from ultralytics import RTDETR + model = RTDETR(model) # no task argument + elif 'sam' in model.lower(): + from ultralytics import SAM + model = SAM(model) + else: + from ultralytics import YOLO + model = YOLO(model, task=task) + if isinstance(overrides.get('pretrained'), str): + model.load(overrides['pretrained']) + + # Task Update + if task != model.task: + if task: + LOGGER.warning(f"WARNING ⚠️ conflicting 'task={task}' passed with 'task={model.task}' model. " + f"Ignoring 'task={task}' and updating to 'task={model.task}' to match model.") + task = model.task + + # Mode + if mode in ('predict', 'track') and 'source' not in overrides: + overrides['source'] = DEFAULT_CFG.source or ROOT / 'assets' if (ROOT / 'assets').exists() \ + else 'https://ultralytics.com/images/bus.jpg' + LOGGER.warning(f"WARNING ⚠️ 'source' is missing. 
Using default 'source={overrides['source']}'.") + elif mode in ('train', 'val'): + if 'data' not in overrides: + overrides['data'] = TASK2DATA.get(task or DEFAULT_CFG.task, DEFAULT_CFG.data) + LOGGER.warning(f"WARNING ⚠️ 'data' is missing. Using default 'data={overrides['data']}'.") + elif mode == 'export': + if 'format' not in overrides: + overrides['format'] = DEFAULT_CFG.format or 'torchscript' + LOGGER.warning(f"WARNING ⚠️ 'format' is missing. Using default 'format={overrides['format']}'.") + + # Run command in python + # getattr(model, mode)(**vars(get_cfg(overrides=overrides))) # default args using default.yaml + getattr(model, mode)(**overrides) # default args from model + + +# Special modes -------------------------------------------------------------------------------------------------------- +def copy_default_cfg(): + """Copy and create a new default configuration file with '_copy' appended to its name.""" + new_file = Path.cwd() / DEFAULT_CFG_PATH.name.replace('.yaml', '_copy.yaml') + shutil.copy2(DEFAULT_CFG_PATH, new_file) + LOGGER.info(f'{DEFAULT_CFG_PATH} copied to {new_file}\n' + f"Example YOLO command with this new custom cfg:\n yolo cfg='{new_file}' imgsz=320 batch=8") + + +if __name__ == '__main__': + # Example Usage: entrypoint(debug='yolo predict model=yolov8n.pt') + entrypoint(debug='') diff --git a/ultralytics/yolo/cfg/default.yaml b/ultralytics/yolo/cfg/default.yaml new file mode 100644 index 0000000..11f508f --- /dev/null +++ b/ultralytics/yolo/cfg/default.yaml @@ -0,0 +1,117 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +# Default training settings and hyperparameters for medium-augmentation COCO training + +task: detect # (str) YOLO task, i.e. detect, segment, classify, pose +mode: train # (str) YOLO mode, i.e. train, val, predict, export, track, benchmark + +# Train settings ------------------------------------------------------------------------------------------------------- +model: # (str, optional) path to model file, i.e. yolov8n.pt, yolov8n.yaml +data: # (str, optional) path to data file, i.e. coco128.yaml +epochs: 100 # (int) number of epochs to train for +patience: 50 # (int) epochs to wait for no observable improvement for early stopping of training +batch: 16 # (int) number of images per batch (-1 for AutoBatch) +imgsz: 640 # (int | list) input images size as int for train and val modes, or list[w,h] for predict and export modes +save: True # (bool) save train checkpoints and predict results +save_period: -1 # (int) Save checkpoint every x epochs (disabled if < 1) +cache: False # (bool) True/ram, disk or False. Use cache for data loading +device: # (int | str | list, optional) device to run on, i.e. 
cuda device=0 or device=0,1,2,3 or device=cpu +workers: 8 # (int) number of worker threads for data loading (per RANK if DDP) +project: # (str, optional) project name +name: # (str, optional) experiment name, results saved to 'project/name' directory +exist_ok: False # (bool) whether to overwrite existing experiment +pretrained: True # (bool | str) whether to use a pretrained model (bool) or a model to load weights from (str) +optimizer: auto # (str) optimizer to use, choices=[SGD, Adam, Adamax, AdamW, NAdam, RAdam, RMSProp, auto] +verbose: True # (bool) whether to print verbose output +seed: 0 # (int) random seed for reproducibility +deterministic: True # (bool) whether to enable deterministic mode +single_cls: False # (bool) train multi-class data as single-class +rect: False # (bool) rectangular training if mode='train' or rectangular validation if mode='val' +cos_lr: False # (bool) use cosine learning rate scheduler +close_mosaic: 10 # (int) disable mosaic augmentation for final epochs +resume: False # (bool) resume training from last checkpoint +amp: True # (bool) Automatic Mixed Precision (AMP) training, choices=[True, False], True runs AMP check +fraction: 1.0 # (float) dataset fraction to train on (default is 1.0, all images in train set) +profile: False # (bool) profile ONNX and TensorRT speeds during training for loggers +# Segmentation +overlap_mask: True # (bool) masks should overlap during training (segment train only) +mask_ratio: 4 # (int) mask downsample ratio (segment train only) +# Classification +dropout: 0.0 # (float) use dropout regularization (classify train only) + +# Val/Test settings ---------------------------------------------------------------------------------------------------- +val: True # (bool) validate/test during training +split: val # (str) dataset split to use for validation, i.e. 'val', 'test' or 'train' +save_json: False # (bool) save results to JSON file +save_hybrid: False # (bool) save hybrid version of labels (labels + additional predictions) +conf: # (float, optional) object confidence threshold for detection (default 0.25 predict, 0.001 val) +iou: 0.7 # (float) intersection over union (IoU) threshold for NMS +max_det: 300 # (int) maximum number of detections per image +half: True # (bool) use half precision (FP16) +dnn: False # (bool) use OpenCV DNN for ONNX inference +plots: True # (bool) save plots during train/val + +# Prediction settings -------------------------------------------------------------------------------------------------- +source: # (str, optional) source directory for images or videos +show: False # (bool) show results if possible +save_txt: False # (bool) save results as .txt file +save_conf: False # (bool) save results with confidence scores +save_crop: False # (bool) save cropped images with results +show_labels: True # (bool) show object labels in plots +show_conf: True # (bool) show object confidence scores in plots +vid_stride: 1 # (int) video frame-rate stride +line_width: # (int, optional) line width of the bounding boxes, auto if missing +visualize: False # (bool) visualize model features +augment: False # (bool) apply image augmentation to prediction sources +agnostic_nms: False # (bool) class-agnostic NMS +classes: # (int | list[int], optional) filter results by class, i.e. 
class=0, or class=[0,2,3] +retina_masks: False # (bool) use high-resolution segmentation masks +boxes: True # (bool) show boxes in segmentation predictions + +# Export settings ------------------------------------------------------------------------------------------------------ +format: torchscript # (str) format to export to, choices at https://docs.ultralytics.com/modes/export/#export-formats +keras: False # (bool) use Keras +optimize: False # (bool) TorchScript: optimize for mobile +int8: False # (bool) CoreML/TF INT8 quantization +dynamic: False # (bool) ONNX/TF/TensorRT: dynamic axes +simplify: False # (bool) ONNX: simplify model +opset: # (int, optional) ONNX: opset version +workspace: 4 # (int) TensorRT: workspace size (GB) +nms: False # (bool) CoreML: add NMS + +# Hyperparameters ------------------------------------------------------------------------------------------------------ +lr0: 0.01 # (float) initial learning rate (i.e. SGD=1E-2, Adam=1E-3) +lrf: 0.01 # (float) final learning rate (lr0 * lrf) +momentum: 0.937 # (float) SGD momentum/Adam beta1 +weight_decay: 0.0005 # (float) optimizer weight decay 5e-4 +warmup_epochs: 3.0 # (float) warmup epochs (fractions ok) +warmup_momentum: 0.8 # (float) warmup initial momentum +warmup_bias_lr: 0.1 # (float) warmup initial bias lr +box: 7.5 # (float) box loss gain +cls: 0.5 # (float) cls loss gain (scale with pixels) +dfl: 1.5 # (float) dfl loss gain +pose: 1.0 # (float) pose loss gain +kobj: 1.0 # (float) keypoint obj loss gain +label_smoothing: 0.0 # (float) label smoothing (fraction) +nbs: 64 # (int) nominal batch size +hsv_h: 0.015 # (float) image HSV-Hue augmentation (fraction) +hsv_s: 0.7 # (float) image HSV-Saturation augmentation (fraction) +hsv_v: 0.4 # (float) image HSV-Value augmentation (fraction) +degrees: 0.0 # (float) image rotation (+/- deg) +translate: 0.1 # (float) image translation (+/- fraction) +scale: 0.5 # (float) image scale (+/- gain) +shear: 0.0 # (float) image shear (+/- deg) +perspective: 0.0 # (float) image perspective (+/- fraction), range 0-0.001 +flipud: 0.0 # (float) image flip up-down (probability) +fliplr: 0.5 # (float) image flip left-right (probability) +mosaic: 1.0 # (float) image mosaic (probability) +mixup: 0.0 # (float) image mixup (probability) +copy_paste: 0.0 # (float) segment copy-paste (probability) + +# Custom config.yaml --------------------------------------------------------------------------------------------------- +cfg: # (str, optional) for overriding defaults.yaml + +# Debug, do not modify ------------------------------------------------------------------------------------------------- +v5loader: False # (bool) use legacy YOLOv5 dataloader (deprecated) + +# Tracker settings ------------------------------------------------------------------------------------------------------ +tracker: botsort.yaml # (str) tracker type, choices=[botsort.yaml, bytetrack.yaml] diff --git a/ultralytics/yolo/data/__init__.py b/ultralytics/yolo/data/__init__.py new file mode 100644 index 0000000..f1d9dee --- /dev/null +++ b/ultralytics/yolo/data/__init__.py @@ -0,0 +1,9 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +from .base import BaseDataset +from .build import build_dataloader, build_yolo_dataset, load_inference_source +from .dataset import ClassificationDataset, SemanticDataset, YOLODataset +from .dataset_wrappers import MixAndRectDataset + +__all__ = ('BaseDataset', 'ClassificationDataset', 'MixAndRectDataset', 'SemanticDataset', 'YOLODataset', + 'build_yolo_dataset', 'build_dataloader',
'load_inference_source') diff --git a/ultralytics/yolo/data/annotator.py b/ultralytics/yolo/data/annotator.py new file mode 100644 index 0000000..f69f325 --- /dev/null +++ b/ultralytics/yolo/data/annotator.py @@ -0,0 +1,39 @@ +from pathlib import Path + +from ultralytics import SAM, YOLO + + +def auto_annotate(data, det_model='yolov8x.pt', sam_model='sam_b.pt', device='', output_dir=None): + """ + Automatically annotates images using a YOLO object detection model and a SAM segmentation model. + Args: + data (str): Path to a folder containing images to be annotated. + det_model (str, optional): Pre-trained YOLO detection model. Defaults to 'yolov8x.pt'. + sam_model (str, optional): Pre-trained SAM segmentation model. Defaults to 'sam_b.pt'. + device (str, optional): Device to run the models on. Defaults to an empty string (CPU or GPU, if available). + output_dir (str | None | optional): Directory to save the annotated results. + Defaults to a 'labels' folder in the same directory as 'data'. + """ + det_model = YOLO(det_model) + sam_model = SAM(sam_model) + + if not output_dir: + output_dir = Path(str(data)).parent / 'labels' + Path(output_dir).mkdir(exist_ok=True, parents=True) + + det_results = det_model(data, stream=True, device=device) + + for result in det_results: + boxes = result.boxes.xyxy # Boxes object for bbox outputs + class_ids = result.boxes.cls.int().tolist() # noqa + if len(class_ids): + sam_results = sam_model(result.orig_img, bboxes=boxes, verbose=False, save=False, device=device) + segments = sam_results[0].masks.xyn # noqa + + with open(str(Path(output_dir) / Path(result.path).stem) + '.txt', 'w') as f: + for i in range(len(segments)): + s = segments[i] + if len(s) == 0: + continue + segment = map(str, segments[i].reshape(-1).tolist()) + f.write(f'{class_ids[i]} ' + ' '.join(segment) + '\n') diff --git a/ultralytics/yolo/data/augment.py b/ultralytics/yolo/data/augment.py new file mode 100644 index 0000000..d688159 --- /dev/null +++ b/ultralytics/yolo/data/augment.py @@ -0,0 +1,905 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +import math +import random +from copy import deepcopy + +import cv2 +import numpy as np +import torch +import torchvision.transforms as T + +from ..utils import LOGGER, colorstr +from ..utils.checks import check_version +from ..utils.instance import Instances +from ..utils.metrics import bbox_ioa +from ..utils.ops import segment2box +from .utils import polygons2masks, polygons2masks_overlap + +POSE_FLIPLR_INDEX = [0, 2, 1, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15] + + +# TODO: we might need a BaseTransform to make all these augments be compatible with both classification and semantic +class BaseTransform: + + def __init__(self) -> None: + pass + + def apply_image(self, labels): + """Applies image transformation to labels.""" + pass + + def apply_instances(self, labels): + """Applies transformations to input 'labels' and returns object instances.""" + pass + + def apply_semantic(self, labels): + """Applies semantic segmentation to an image.""" + pass + + def __call__(self, labels): + """Applies label transformations to an image, instances and semantic masks.""" + self.apply_image(labels) + self.apply_instances(labels) + self.apply_semantic(labels) + + +class Compose: + + def __init__(self, transforms): + """Initializes the Compose object with a list of transforms.""" + self.transforms = transforms + + def __call__(self, data): + """Applies a series of transformations to input data.""" + for t in self.transforms: + data = t(data) + return data + 
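For orientation, every transform in this module follows the `Compose` contract shown above: a callable that accepts the labels dict and returns it. A toy sketch, assuming the `ultralytics.yolo.data.augment` module from this diff is importable:

```python
# Minimal illustration of the Compose contract: plain callables chained over a labels dict.
from ultralytics.yolo.data.augment import Compose

def add_flag(labels):
    labels['flag'] = True
    return labels

def count_keys(labels):
    labels['n_keys'] = len(labels)
    return labels

pipeline = Compose([add_flag, count_keys])
print(pipeline({'img': None}))  # {'img': None, 'flag': True, 'n_keys': 2}
```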
+ def append(self, transform): + """Appends a new transform to the existing list of transforms.""" + self.transforms.append(transform) + + def tolist(self): + """Converts list of transforms to a standard Python list.""" + return self.transforms + + def __repr__(self): + """Return string representation of object.""" + format_string = f'{self.__class__.__name__}(' + for t in self.transforms: + format_string += '\n' + format_string += f' {t}' + format_string += '\n)' + return format_string + + +class BaseMixTransform: + """This implementation is from mmyolo.""" + + def __init__(self, dataset, pre_transform=None, p=0.0) -> None: + self.dataset = dataset + self.pre_transform = pre_transform + self.p = p + + def __call__(self, labels): + """Applies pre-processing transforms and mixup/mosaic transforms to labels data.""" + if random.uniform(0, 1) > self.p: + return labels + + # Get index of one or three other images + indexes = self.get_indexes() + if isinstance(indexes, int): + indexes = [indexes] + + # Get images information will be used for Mosaic or MixUp + mix_labels = [self.dataset.get_image_and_label(i) for i in indexes] + + if self.pre_transform is not None: + for i, data in enumerate(mix_labels): + mix_labels[i] = self.pre_transform(data) + labels['mix_labels'] = mix_labels + + # Mosaic or MixUp + labels = self._mix_transform(labels) + labels.pop('mix_labels', None) + return labels + + def _mix_transform(self, labels): + """Applies MixUp or Mosaic augmentation to the label dictionary.""" + raise NotImplementedError + + def get_indexes(self): + """Gets a list of shuffled indexes for mosaic augmentation.""" + raise NotImplementedError + + +class Mosaic(BaseMixTransform): + """ + Mosaic augmentation. + + This class performs mosaic augmentation by combining multiple (4 or 9) images into a single mosaic image. + The augmentation is applied to a dataset with a given probability. + + Attributes: + dataset: The dataset on which the mosaic augmentation is applied. + imgsz (int, optional): Image size (height and width) after mosaic pipeline of a single image. Default to 640. + p (float, optional): Probability of applying the mosaic augmentation. Must be in the range 0-1. Default to 1.0. + n (int, optional): The grid size, either 4 (for 2x2) or 9 (for 3x3). + """ + + def __init__(self, dataset, imgsz=640, p=1.0, n=4): + """Initializes the object with a dataset, image size, probability, and border.""" + assert 0 <= p <= 1.0, f'The probability should be in range [0, 1], but got {p}.' + assert n in (4, 9), 'grid must be equal to 4 or 9.' + super().__init__(dataset=dataset, p=p) + self.dataset = dataset + self.imgsz = imgsz + self.border = (-imgsz // 2, -imgsz // 2) # width, height + self.n = n + + def get_indexes(self, buffer=True): + """Return a list of random indexes from the dataset.""" + if buffer: # select images from buffer + return random.choices(list(self.dataset.buffer), k=self.n - 1) + else: # select any images + return [random.randint(0, len(self.dataset) - 1) for _ in range(self.n - 1)] + + def _mix_transform(self, labels): + """Apply mixup transformation to the input image and labels.""" + assert labels.get('rect_shape', None) is None, 'rect and mosaic are mutually exclusive.' + assert len(labels.get('mix_labels', [])), 'There are no other images for mosaic augment.' 
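To make the geometry concrete, a back-of-envelope standalone sketch (default imgsz=640) of how `_mosaic4` below sizes its canvas and samples the mosaic centre:

```python
# 2x2 mosaic geometry: four tiles are pasted onto a 2*s x 2*s canvas around a random
# centre that the border margin from Mosaic.__init__ keeps away from the canvas edges.
import random

s = 640
border = (-s // 2, -s // 2)
yc, xc = (int(random.uniform(-x, 2 * s + x)) for x in border)  # same sampling as _mosaic4
print(f'canvas {2 * s}x{2 * s}, centre drawn from [{-border[0]}, {2 * s + border[0]}]: ({xc}, {yc})')
```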
+ return self._mosaic4(labels) if self.n == 4 else self._mosaic9(labels) + + def _mosaic4(self, labels): + """Create a 2x2 image mosaic.""" + mosaic_labels = [] + s = self.imgsz + yc, xc = (int(random.uniform(-x, 2 * s + x)) for x in self.border) # mosaic center x, y + for i in range(4): + labels_patch = labels if i == 0 else labels['mix_labels'][i - 1] + # Load image + img = labels_patch['img'] + h, w = labels_patch.pop('resized_shape') + + # Place img in img4 + if i == 0: # top left + img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles + x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image) + x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image) + elif i == 1: # top right + x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc + x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h + elif i == 2: # bottom left + x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h) + x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h) + elif i == 3: # bottom right + x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h) + x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h) + + img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax] + padw = x1a - x1b + padh = y1a - y1b + + labels_patch = self._update_labels(labels_patch, padw, padh) + mosaic_labels.append(labels_patch) + final_labels = self._cat_labels(mosaic_labels) + final_labels['img'] = img4 + return final_labels + + def _mosaic9(self, labels): + """Create a 3x3 image mosaic.""" + mosaic_labels = [] + s = self.imgsz + hp, wp = -1, -1 # height, width previous + for i in range(9): + labels_patch = labels if i == 0 else labels['mix_labels'][i - 1] + # Load image + img = labels_patch['img'] + h, w = labels_patch.pop('resized_shape') + + # Place img in img9 + if i == 0: # center + img9 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles + h0, w0 = h, w + c = s, s, s + w, s + h # xmin, ymin, xmax, ymax (base) coordinates + elif i == 1: # top + c = s, s - h, s + w, s + elif i == 2: # top right + c = s + wp, s - h, s + wp + w, s + elif i == 3: # right + c = s + w0, s, s + w0 + w, s + h + elif i == 4: # bottom right + c = s + w0, s + hp, s + w0 + w, s + hp + h + elif i == 5: # bottom + c = s + w0 - w, s + h0, s + w0, s + h0 + h + elif i == 6: # bottom left + c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h + elif i == 7: # left + c = s - w, s + h0 - h, s, s + h0 + elif i == 8: # top left + c = s - w, s + h0 - hp - h, s, s + h0 - hp + + padw, padh = c[:2] + x1, y1, x2, y2 = (max(x, 0) for x in c) # allocate coords + + # Image + img9[y1:y2, x1:x2] = img[y1 - padh:, x1 - padw:] # img9[ymin:ymax, xmin:xmax] + hp, wp = h, w # height, width previous for next iteration + + # Labels assuming imgsz*2 mosaic size + labels_patch = self._update_labels(labels_patch, padw + self.border[0], padh + self.border[1]) + mosaic_labels.append(labels_patch) + final_labels = self._cat_labels(mosaic_labels) + + final_labels['img'] = img9[-self.border[0]:self.border[0], -self.border[1]:self.border[1]] + return final_labels + + @staticmethod + def _update_labels(labels, padw, padh): + """Update labels.""" + nh, nw = labels['img'].shape[:2] + labels['instances'].convert_bbox(format='xyxy') + labels['instances'].denormalize(nw, nh) + labels['instances'].add_padding(padw, padh) + return labels + + def _cat_labels(self, mosaic_labels): + 
"""Return labels with mosaic border instances clipped.""" + if len(mosaic_labels) == 0: + return {} + cls = [] + instances = [] + imgsz = self.imgsz * 2 # mosaic imgsz + for labels in mosaic_labels: + cls.append(labels['cls']) + instances.append(labels['instances']) + final_labels = { + 'im_file': mosaic_labels[0]['im_file'], + 'ori_shape': mosaic_labels[0]['ori_shape'], + 'resized_shape': (imgsz, imgsz), + 'cls': np.concatenate(cls, 0), + 'instances': Instances.concatenate(instances, axis=0), + 'mosaic_border': self.border} # final_labels + final_labels['instances'].clip(imgsz, imgsz) + good = final_labels['instances'].remove_zero_area_boxes() + final_labels['cls'] = final_labels['cls'][good] + return final_labels + + +class MixUp(BaseMixTransform): + + def __init__(self, dataset, pre_transform=None, p=0.0) -> None: + super().__init__(dataset=dataset, pre_transform=pre_transform, p=p) + + def get_indexes(self): + """Get a random index from the dataset.""" + return random.randint(0, len(self.dataset) - 1) + + def _mix_transform(self, labels): + """Applies MixUp augmentation https://arxiv.org/pdf/1710.09412.pdf.""" + r = np.random.beta(32.0, 32.0) # mixup ratio, alpha=beta=32.0 + labels2 = labels['mix_labels'][0] + labels['img'] = (labels['img'] * r + labels2['img'] * (1 - r)).astype(np.uint8) + labels['instances'] = Instances.concatenate([labels['instances'], labels2['instances']], axis=0) + labels['cls'] = np.concatenate([labels['cls'], labels2['cls']], 0) + return labels + + +class RandomPerspective: + + def __init__(self, + degrees=0.0, + translate=0.1, + scale=0.5, + shear=0.0, + perspective=0.0, + border=(0, 0), + pre_transform=None): + self.degrees = degrees + self.translate = translate + self.scale = scale + self.shear = shear + self.perspective = perspective + # Mosaic border + self.border = border + self.pre_transform = pre_transform + + def affine_transform(self, img, border): + """Center.""" + C = np.eye(3, dtype=np.float32) + + C[0, 2] = -img.shape[1] / 2 # x translation (pixels) + C[1, 2] = -img.shape[0] / 2 # y translation (pixels) + + # Perspective + P = np.eye(3, dtype=np.float32) + P[2, 0] = random.uniform(-self.perspective, self.perspective) # x perspective (about y) + P[2, 1] = random.uniform(-self.perspective, self.perspective) # y perspective (about x) + + # Rotation and Scale + R = np.eye(3, dtype=np.float32) + a = random.uniform(-self.degrees, self.degrees) + # a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations + s = random.uniform(1 - self.scale, 1 + self.scale) + # s = 2 ** random.uniform(-scale, scale) + R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s) + + # Shear + S = np.eye(3, dtype=np.float32) + S[0, 1] = math.tan(random.uniform(-self.shear, self.shear) * math.pi / 180) # x shear (deg) + S[1, 0] = math.tan(random.uniform(-self.shear, self.shear) * math.pi / 180) # y shear (deg) + + # Translation + T = np.eye(3, dtype=np.float32) + T[0, 2] = random.uniform(0.5 - self.translate, 0.5 + self.translate) * self.size[0] # x translation (pixels) + T[1, 2] = random.uniform(0.5 - self.translate, 0.5 + self.translate) * self.size[1] # y translation (pixels) + + # Combined rotation matrix + M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT + # Affine image + if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed + if self.perspective: + img = cv2.warpPerspective(img, M, dsize=self.size, borderValue=(114, 114, 114)) + else: # affine + img = cv2.warpAffine(img, M[:2], 
dsize=self.size, borderValue=(114, 114, 114)) + return img, M, s + + def apply_bboxes(self, bboxes, M): + """ + Apply affine to bboxes only. + + Args: + bboxes (ndarray): list of bboxes, xyxy format, with shape (num_bboxes, 4). + M (ndarray): affine matrix. + + Returns: + new_bboxes (ndarray): bboxes after affine, [num_bboxes, 4]. + """ + n = len(bboxes) + if n == 0: + return bboxes + + xy = np.ones((n * 4, 3), dtype=bboxes.dtype) + xy[:, :2] = bboxes[:, [0, 1, 2, 3, 0, 3, 2, 1]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1 + xy = xy @ M.T # transform + xy = (xy[:, :2] / xy[:, 2:3] if self.perspective else xy[:, :2]).reshape(n, 8) # perspective rescale or affine + + # Create new boxes + x = xy[:, [0, 2, 4, 6]] + y = xy[:, [1, 3, 5, 7]] + return np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1)), dtype=bboxes.dtype).reshape(4, n).T + + def apply_segments(self, segments, M): + """ + Apply affine to segments and generate new bboxes from segments. + + Args: + segments (ndarray): list of segments, [num_samples, 500, 2]. + M (ndarray): affine matrix. + + Returns: + new_segments (ndarray): list of segments after affine, [num_samples, 500, 2]. + new_bboxes (ndarray): bboxes after affine, [N, 4]. + """ + n, num = segments.shape[:2] + if n == 0: + return [], segments + + xy = np.ones((n * num, 3), dtype=segments.dtype) + segments = segments.reshape(-1, 2) + xy[:, :2] = segments + xy = xy @ M.T # transform + xy = xy[:, :2] / xy[:, 2:3] + segments = xy.reshape(n, -1, 2) + bboxes = np.stack([segment2box(xy, self.size[0], self.size[1]) for xy in segments], 0) + return bboxes, segments + + def apply_keypoints(self, keypoints, M): + """ + Apply affine to keypoints. + + Args: + keypoints (ndarray): keypoints, [N, 17, 3]. + M (ndarray): affine matrix. + + Return: + new_keypoints (ndarray): keypoints after affine, [N, 17, 3]. + """ + n, nkpt = keypoints.shape[:2] + if n == 0: + return keypoints + xy = np.ones((n * nkpt, 3), dtype=keypoints.dtype) + visible = keypoints[..., 2].reshape(n * nkpt, 1) + xy[:, :2] = keypoints[..., :2].reshape(n * nkpt, 2) + xy = xy @ M.T # transform + xy = xy[:, :2] / xy[:, 2:3] # perspective rescale or affine + out_mask = (xy[:, 0] < 0) | (xy[:, 1] < 0) | (xy[:, 0] > self.size[0]) | (xy[:, 1] > self.size[1]) + visible[out_mask] = 0 + return np.concatenate([xy, visible], axis=-1).reshape(n, nkpt, 3) + + def __call__(self, labels): + """ + Affine images and targets. + + Args: + labels (dict): a dict of `bboxes`, `segments`, `keypoints`. + """ + if self.pre_transform and 'mosaic_border' not in labels: + labels = self.pre_transform(labels) + labels.pop('ratio_pad', None) # do not need ratio pad + + img = labels['img'] + cls = labels['cls'] + instances = labels.pop('instances') + # Make sure the coord formats are right + instances.convert_bbox(format='xyxy') + instances.denormalize(*img.shape[:2][::-1]) + + border = labels.pop('mosaic_border', self.border) + self.size = img.shape[1] + border[1] * 2, img.shape[0] + border[0] * 2 # w, h + # M is affine matrix + # scale for func:`box_candidates` + img, M, scale = self.affine_transform(img, border) + + bboxes = self.apply_bboxes(instances.bboxes, M) + + segments = instances.segments + keypoints = instances.keypoints + # Update bboxes if there are segments. 
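A standalone NumPy sanity check, not part of the module, of the matrix chain built in `affine_transform` above: with every hyperparameter at 0 and border=(0, 0), the composition collapses to the identity and the image and boxes are left untouched:

```python
import numpy as np

h, w = 480, 640                                     # example image height, width
C = np.eye(3); C[0, 2], C[1, 2] = -w / 2, -h / 2    # move origin to the image centre
P = np.eye(3)                                       # perspective = 0
R = np.eye(3)                                       # rotation = 0 deg, scale gain = 0 -> scale 1
S = np.eye(3)                                       # shear = 0
T = np.eye(3); T[0, 2], T[1, 2] = 0.5 * w, 0.5 * h  # translate = 0 -> re-centre
M = T @ S @ R @ P @ C                               # same right-to-left order as the code
print(np.allclose(M, np.eye(3)))                    # True
```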
+ if len(segments): + bboxes, segments = self.apply_segments(segments, M) + + if keypoints is not None: + keypoints = self.apply_keypoints(keypoints, M) + new_instances = Instances(bboxes, segments, keypoints, bbox_format='xyxy', normalized=False) + # Clip + new_instances.clip(*self.size) + + # Filter instances + instances.scale(scale_w=scale, scale_h=scale, bbox_only=True) + # Make the bboxes have the same scale as new_bboxes + i = self.box_candidates(box1=instances.bboxes.T, + box2=new_instances.bboxes.T, + area_thr=0.01 if len(segments) else 0.10) + labels['instances'] = new_instances[i] + labels['cls'] = cls[i] + labels['img'] = img + labels['resized_shape'] = img.shape[:2] + return labels + + def box_candidates(self, box1, box2, wh_thr=2, ar_thr=100, area_thr=0.1, eps=1e-16): # box1(4,n), box2(4,n) + # Compute box candidates: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio + w1, h1 = box1[2] - box1[0], box1[3] - box1[1] + w2, h2 = box2[2] - box2[0], box2[3] - box2[1] + ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps)) # aspect ratio + return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr) # candidates + + +class RandomHSV: + + def __init__(self, hgain=0.5, sgain=0.5, vgain=0.5) -> None: + self.hgain = hgain + self.sgain = sgain + self.vgain = vgain + + def __call__(self, labels): + """Applies random HSV (hue, saturation, value) augmentation to the image.""" + img = labels['img'] + if self.hgain or self.sgain or self.vgain: + r = np.random.uniform(-1, 1, 3) * [self.hgain, self.sgain, self.vgain] + 1 # random gains + hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV)) + dtype = img.dtype # uint8 + + x = np.arange(0, 256, dtype=r.dtype) + lut_hue = ((x * r[0]) % 180).astype(dtype) + lut_sat = np.clip(x * r[1], 0, 255).astype(dtype) + lut_val = np.clip(x * r[2], 0, 255).astype(dtype) + + im_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))) + cv2.cvtColor(im_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed + return labels + + +class RandomFlip: + + def __init__(self, p=0.5, direction='horizontal', flip_idx=None) -> None: + assert direction in ['horizontal', 'vertical'], f'Supported directions are `horizontal` or `vertical`, got {direction}' + assert 0 <= p <= 1.0 + + self.p = p + self.direction = direction + self.flip_idx = flip_idx + + def __call__(self, labels): + """Applies a random horizontal or vertical flip to the image and its instances with probability p.""" + img = labels['img'] + instances = labels.pop('instances') + instances.convert_bbox(format='xywh') + h, w = img.shape[:2] + h = 1 if instances.normalized else h + w = 1 if instances.normalized else w + + # Flip up-down + if self.direction == 'vertical' and random.random() < self.p: + img = np.flipud(img) + instances.flipud(h) + if self.direction == 'horizontal' and random.random() < self.p: + img = np.fliplr(img) + instances.fliplr(w) + # For keypoints + if self.flip_idx is not None and instances.keypoints is not None: + instances.keypoints = np.ascontiguousarray(instances.keypoints[:, self.flip_idx, :]) + labels['img'] = np.ascontiguousarray(img) + labels['instances'] = instances + return labels + + +class LetterBox: + """Resize image and padding for detection, instance segmentation, pose.""" + + def __init__(self, new_shape=(640, 640), auto=False, scaleFill=False, scaleup=True, center=True, stride=32): + """Initialize LetterBox object with specific parameters.""" + self.new_shape = new_shape + self.auto = auto +
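A worked numeric example of the `box_candidates` filter defined above (standalone NumPy): a box survives the affine augmentation only if it stays more than 2 px in each dimension, keeps more than `area_thr` of its original area and has an aspect ratio under 100:

```python
import numpy as np

eps = 1e-16
box1 = np.array([[0.], [0.], [100.], [50.]])   # before augment, shape (4, n)
box2 = np.array([[0.], [0.], [40.], [4.]])     # after augment: shrank to 40 x 4 px
w1, h1 = box1[2] - box1[0], box1[3] - box1[1]
w2, h2 = box2[2] - box2[0], box2[3] - box2[1]
ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps))
keep = (w2 > 2) & (h2 > 2) & (w2 * h2 / (w1 * h1 + eps) > 0.1) & (ar < 100)
print(keep)  # [False]: only ~3% of the original area remains, below the 10% threshold
```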
self.scaleFill = scaleFill + self.scaleup = scaleup + self.stride = stride + self.center = center # Put the image in the middle or top-left + + def __call__(self, labels=None, image=None): + """Return updated labels and image with added border.""" + if labels is None: + labels = {} + img = labels.get('img') if image is None else image + shape = img.shape[:2] # current shape [height, width] + new_shape = labels.pop('rect_shape', self.new_shape) + if isinstance(new_shape, int): + new_shape = (new_shape, new_shape) + + # Scale ratio (new / old) + r = min(new_shape[0] / shape[0], new_shape[1] / shape[1]) + if not self.scaleup: # only scale down, do not scale up (for better val mAP) + r = min(r, 1.0) + + # Compute padding + ratio = r, r # width, height ratios + new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r)) + dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding + if self.auto: # minimum rectangle + dw, dh = np.mod(dw, self.stride), np.mod(dh, self.stride) # wh padding + elif self.scaleFill: # stretch + dw, dh = 0.0, 0.0 + new_unpad = (new_shape[1], new_shape[0]) + ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios + + if self.center: + dw /= 2 # divide padding into 2 sides + dh /= 2 + if labels.get('ratio_pad'): + labels['ratio_pad'] = (labels['ratio_pad'], (dw, dh)) # for evaluation + + if shape[::-1] != new_unpad: # resize + img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR) + top, bottom = int(round(dh - 0.1)) if self.center else 0, int(round(dh + 0.1)) + left, right = int(round(dw - 0.1)) if self.center else 0, int(round(dw + 0.1)) + img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, + value=(114, 114, 114)) # add border + + if len(labels): + labels = self._update_labels(labels, ratio, dw, dh) + labels['img'] = img + labels['resized_shape'] = new_shape + return labels + else: + return img + + def _update_labels(self, labels, ratio, padw, padh): + """Update labels.""" + labels['instances'].convert_bbox(format='xyxy') + labels['instances'].denormalize(*labels['img'].shape[:2][::-1]) + labels['instances'].scale(*ratio) + labels['instances'].add_padding(padw, padh) + return labels + + +class CopyPaste: + + def __init__(self, p=0.5) -> None: + self.p = p + + def __call__(self, labels): + """Implement Copy-Paste augmentation https://arxiv.org/abs/2012.07177, labels as nx5 np.array(cls, xyxy).""" + im = labels['img'] + cls = labels['cls'] + h, w = im.shape[:2] + instances = labels.pop('instances') + instances.convert_bbox(format='xyxy') + instances.denormalize(w, h) + if self.p and len(instances.segments): + n = len(instances) + _, w, _ = im.shape # height, width, channels + im_new = np.zeros(im.shape, np.uint8) + + # Calculate ioa first then select indexes randomly + ins_flip = deepcopy(instances) + ins_flip.fliplr(w) + + ioa = bbox_ioa(ins_flip.bboxes, instances.bboxes) # intersection over area, (N, M) + indexes = np.nonzero((ioa < 0.30).all(1))[0] # (N, ) + n = len(indexes) + for j in random.sample(list(indexes), k=round(self.p * n)): + cls = np.concatenate((cls, cls[[j]]), axis=0) + instances = Instances.concatenate((instances, ins_flip[[j]]), axis=0) + cv2.drawContours(im_new, instances.segments[[j]].astype(np.int32), -1, (1, 1, 1), cv2.FILLED) + + result = cv2.flip(im, 1) # augment segments (flip left-right) + i = cv2.flip(im_new, 1).astype(bool) + im[i] = result[i] # cv2.imwrite('debug.jpg', im) # debug + + labels['img'] = im + labels['cls'] = cls + labels['instances'] = instances 
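To make the resize-and-pad arithmetic in `LetterBox.__call__` above concrete, a small worked example in plain Python: fitting a 720x1280 frame into a 640x640 canvas with centred padding:

```python
shape = (720, 1280)                                                # current (height, width)
new_shape = (640, 640)
r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])          # 0.5 scale ratio (new / old)
new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))     # (640, 360) resized w, h
dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # 0, 280 total wh padding
dw, dh = dw / 2, dh / 2                                            # centred: split both sides
print(r, new_unpad, (dw, dh))                                      # 0.5 (640, 360) (0.0, 140.0)
```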
+ return labels + + +class Albumentations: + """YOLOv8 Albumentations class (optional, only used if package is installed)""" + + def __init__(self, p=1.0): + """Initialize the transform object for YOLO bbox formatted params.""" + self.p = p + self.transform = None + prefix = colorstr('albumentations: ') + try: + import albumentations as A + + check_version(A.__version__, '1.0.3', hard=True) # version requirement + + T = [ + A.Blur(p=0.01), + A.MedianBlur(p=0.01), + A.ToGray(p=0.01), + A.CLAHE(p=0.01), + A.RandomBrightnessContrast(p=0.0), + A.RandomGamma(p=0.0), + A.ImageCompression(quality_lower=75, p=0.0)] # transforms + self.transform = A.Compose(T, bbox_params=A.BboxParams(format='yolo', label_fields=['class_labels'])) + + LOGGER.info(prefix + ', '.join(f'{x}'.replace('always_apply=False, ', '') for x in T if x.p)) + except ImportError: # package not installed, skip + pass + except Exception as e: + LOGGER.info(f'{prefix}{e}') + + def __call__(self, labels): + """Generates object detections and returns a dictionary with detection results.""" + im = labels['img'] + cls = labels['cls'] + if len(cls): + labels['instances'].convert_bbox('xywh') + labels['instances'].normalize(*im.shape[:2][::-1]) + bboxes = labels['instances'].bboxes + # TODO: add supports of segments and keypoints + if self.transform and random.random() < self.p: + new = self.transform(image=im, bboxes=bboxes, class_labels=cls) # transformed + if len(new['class_labels']) > 0: # skip update if no bbox in new im + labels['img'] = new['image'] + labels['cls'] = np.array(new['class_labels']) + bboxes = np.array(new['bboxes'], dtype=np.float32) + labels['instances'].update(bboxes=bboxes) + return labels + + +# TODO: technically this is not an augmentation, maybe we should put this to another files +class Format: + + def __init__(self, + bbox_format='xywh', + normalize=True, + return_mask=False, + return_keypoint=False, + mask_ratio=4, + mask_overlap=True, + batch_idx=True): + self.bbox_format = bbox_format + self.normalize = normalize + self.return_mask = return_mask # set False when training detection only + self.return_keypoint = return_keypoint + self.mask_ratio = mask_ratio + self.mask_overlap = mask_overlap + self.batch_idx = batch_idx # keep the batch indexes + + def __call__(self, labels): + """Return formatted image, classes, bounding boxes & keypoints to be used by 'collate_fn'.""" + img = labels.pop('img') + h, w = img.shape[:2] + cls = labels.pop('cls') + instances = labels.pop('instances') + instances.convert_bbox(format=self.bbox_format) + instances.denormalize(w, h) + nl = len(instances) + + if self.return_mask: + if nl: + masks, instances, cls = self._format_segments(instances, cls, w, h) + masks = torch.from_numpy(masks) + else: + masks = torch.zeros(1 if self.mask_overlap else nl, img.shape[0] // self.mask_ratio, + img.shape[1] // self.mask_ratio) + labels['masks'] = masks + if self.normalize: + instances.normalize(w, h) + labels['img'] = self._format_img(img) + labels['cls'] = torch.from_numpy(cls) if nl else torch.zeros(nl) + labels['bboxes'] = torch.from_numpy(instances.bboxes) if nl else torch.zeros((nl, 4)) + if self.return_keypoint: + labels['keypoints'] = torch.from_numpy(instances.keypoints) + # Then we can use collate_fn + if self.batch_idx: + labels['batch_idx'] = torch.zeros(nl) + return labels + + def _format_img(self, img): + """Format the image for YOLOv5 from Numpy array to PyTorch tensor.""" + if len(img.shape) < 3: + img = np.expand_dims(img, -1) + img = np.ascontiguousarray(img.transpose(2, 0, 
1)[::-1]) + img = torch.from_numpy(img) + return img + + def _format_segments(self, instances, cls, w, h): + """convert polygon points to bitmap.""" + segments = instances.segments + if self.mask_overlap: + masks, sorted_idx = polygons2masks_overlap((h, w), segments, downsample_ratio=self.mask_ratio) + masks = masks[None] # (640, 640) -> (1, 640, 640) + instances = instances[sorted_idx] + cls = cls[sorted_idx] + else: + masks = polygons2masks((h, w), segments, color=1, downsample_ratio=self.mask_ratio) + + return masks, instances, cls + + +def v8_transforms(dataset, imgsz, hyp, stretch=False): + """Convert images to a size suitable for YOLOv8 training.""" + pre_transform = Compose([ + Mosaic(dataset, imgsz=imgsz, p=hyp.mosaic), + CopyPaste(p=hyp.copy_paste), + RandomPerspective( + degrees=hyp.degrees, + translate=hyp.translate, + scale=hyp.scale, + shear=hyp.shear, + perspective=hyp.perspective, + pre_transform=None if stretch else LetterBox(new_shape=(imgsz, imgsz)), + )]) + flip_idx = dataset.data.get('flip_idx', []) # for keypoints augmentation + if dataset.use_keypoints: + kpt_shape = dataset.data.get('kpt_shape', None) + if len(flip_idx) == 0 and hyp.fliplr > 0.0: + hyp.fliplr = 0.0 + LOGGER.warning("WARNING ⚠️ No 'flip_idx' array defined in data.yaml, setting augmentation 'fliplr=0.0'") + elif flip_idx and (len(flip_idx) != kpt_shape[0]): + raise ValueError(f'data.yaml flip_idx={flip_idx} length must be equal to kpt_shape[0]={kpt_shape[0]}') + + return Compose([ + pre_transform, + MixUp(dataset, pre_transform=pre_transform, p=hyp.mixup), + Albumentations(p=1.0), + RandomHSV(hgain=hyp.hsv_h, sgain=hyp.hsv_s, vgain=hyp.hsv_v), + RandomFlip(direction='vertical', p=hyp.flipud), + RandomFlip(direction='horizontal', p=hyp.fliplr, flip_idx=flip_idx)]) # transforms + + +# Classification augmentations ----------------------------------------------------------------------------------------- +def classify_transforms(size=224, mean=(0.0, 0.0, 0.0), std=(1.0, 1.0, 1.0)): # IMAGENET_MEAN, IMAGENET_STD + # Transforms to apply if albumentations not installed + if not isinstance(size, int): + raise TypeError(f'classify_transforms() size {size} must be integer, not (list, tuple)') + if any(mean) or any(std): + return T.Compose([CenterCrop(size), ToTensor(), T.Normalize(mean, std, inplace=True)]) + else: + return T.Compose([CenterCrop(size), ToTensor()]) + + +def hsv2colorjitter(h, s, v): + """Map HSV (hue, saturation, value) jitter into ColorJitter values (brightness, contrast, saturation, hue)""" + return v, v, s, h + + +def classify_albumentations( + augment=True, + size=224, + scale=(0.08, 1.0), + hflip=0.5, + vflip=0.0, + hsv_h=0.015, # image HSV-Hue augmentation (fraction) + hsv_s=0.7, # image HSV-Saturation augmentation (fraction) + hsv_v=0.4, # image HSV-Value augmentation (fraction) + mean=(0.0, 0.0, 0.0), # IMAGENET_MEAN + std=(1.0, 1.0, 1.0), # IMAGENET_STD + auto_aug=False, +): + """YOLOv8 classification Albumentations (optional, only used if package is installed).""" + prefix = colorstr('albumentations: ') + try: + import albumentations as A + from albumentations.pytorch import ToTensorV2 + + check_version(A.__version__, '1.0.3', hard=True) # version requirement + if augment: # Resize and crop + T = [A.RandomResizedCrop(height=size, width=size, scale=scale)] + if auto_aug: + # TODO: implement AugMix, AutoAug & RandAug in albumentations + LOGGER.info(f'{prefix}auto augmentations are currently not supported') + else: + if hflip > 0: + T += [A.HorizontalFlip(p=hflip)] + if vflip > 0: + T += 
[A.VerticalFlip(p=vflip)] + if any((hsv_h, hsv_s, hsv_v)): + T += [A.ColorJitter(*hsv2colorjitter(hsv_h, hsv_s, hsv_v))] # brightness, contrast, saturation, hue + else: # Use fixed crop for eval set (reproducibility) + T = [A.SmallestMaxSize(max_size=size), A.CenterCrop(height=size, width=size)] + T += [A.Normalize(mean=mean, std=std), ToTensorV2()] # Normalize and convert to Tensor + LOGGER.info(prefix + ', '.join(f'{x}'.replace('always_apply=False, ', '') for x in T if x.p)) + return A.Compose(T) + + except ImportError: # package not installed, skip + pass + except Exception as e: + LOGGER.info(f'{prefix}{e}') + + +class ClassifyLetterBox: + """YOLOv8 LetterBox class for image preprocessing, i.e. T.Compose([LetterBox(size), ToTensor()])""" + + def __init__(self, size=(640, 640), auto=False, stride=32): + """Resizes image and crops it to center with max dimensions 'h' and 'w'.""" + super().__init__() + self.h, self.w = (size, size) if isinstance(size, int) else size + self.auto = auto # pass max size integer, automatically solve for short side using stride + self.stride = stride # used with auto + + def __call__(self, im): # im = np.array HWC + imh, imw = im.shape[:2] + r = min(self.h / imh, self.w / imw) # ratio of new/old + h, w = round(imh * r), round(imw * r) # resized image + hs, ws = (math.ceil(x / self.stride) * self.stride for x in (h, w)) if self.auto else self.h, self.w + top, left = round((hs - h) / 2 - 0.1), round((ws - w) / 2 - 0.1) + im_out = np.full((self.h, self.w, 3), 114, dtype=im.dtype) + im_out[top:top + h, left:left + w] = cv2.resize(im, (w, h), interpolation=cv2.INTER_LINEAR) + return im_out + + +class CenterCrop: + """YOLOv8 CenterCrop class for image preprocessing, i.e. T.Compose([CenterCrop(size), ToTensor()])""" + + def __init__(self, size=640): + """Converts an image from numpy array to PyTorch tensor.""" + super().__init__() + self.h, self.w = (size, size) if isinstance(size, int) else size + + def __call__(self, im): # im = np.array HWC + imh, imw = im.shape[:2] + m = min(imh, imw) # min dimension + top, left = (imh - m) // 2, (imw - m) // 2 + return cv2.resize(im[top:top + m, left:left + m], (self.w, self.h), interpolation=cv2.INTER_LINEAR) + + +class ToTensor: + """YOLOv8 ToTensor class for image preprocessing, i.e. T.Compose([LetterBox(size), ToTensor()]).""" + + def __init__(self, half=False): + """Initialize YOLOv8 ToTensor object with optional half-precision support.""" + super().__init__() + self.half = half + + def __call__(self, im): # im = np.array HWC in BGR order + im = np.ascontiguousarray(im.transpose((2, 0, 1))[::-1]) # HWC to CHW -> BGR to RGB -> contiguous + im = torch.from_numpy(im) # to torch + im = im.half() if self.half else im.float() # uint8 to fp16/32 + im /= 255.0 # 0-255 to 0.0-1.0 + return im diff --git a/ultralytics/yolo/data/base.py b/ultralytics/yolo/data/base.py new file mode 100644 index 0000000..d2e9793 --- /dev/null +++ b/ultralytics/yolo/data/base.py @@ -0,0 +1,286 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +import glob +import math +import os +import random +from copy import deepcopy +from multiprocessing.pool import ThreadPool +from pathlib import Path +from typing import Optional + +import cv2 +import numpy as np +import psutil +from torch.utils.data import Dataset +from tqdm import tqdm + +from ..utils import DEFAULT_CFG, LOCAL_RANK, LOGGER, NUM_THREADS, TQDM_BAR_FORMAT +from .utils import HELP_URL, IMG_FORMATS + + +class BaseDataset(Dataset): + """ + Base dataset class for loading and processing image data. 
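A quick usage sketch of the classification preprocessing pieces defined above, assuming the `ultralytics.yolo.data.augment` import path from this diff: centre-crop a dummy BGR frame and convert it to a normalised CHW tensor:

```python
import numpy as np
from ultralytics.yolo.data.augment import CenterCrop, ToTensor

im = np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8)  # dummy HWC BGR frame
t = ToTensor()(CenterCrop(224)(im))                            # crop -> CHW RGB float in [0, 1]
print(t.shape, t.dtype)                                        # torch.Size([3, 224, 224]) torch.float32
```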
+ + Args: + img_path (str): Path to the folder containing images. + imgsz (int, optional): Image size. Defaults to 640. + cache (bool, optional): Cache images to RAM or disk during training. Defaults to False. + augment (bool, optional): If True, data augmentation is applied. Defaults to True. + hyp (dict, optional): Hyperparameters to apply data augmentation. Defaults to None. + prefix (str, optional): Prefix to print in log messages. Defaults to ''. + rect (bool, optional): If True, rectangular training is used. Defaults to False. + batch_size (int, optional): Size of batches. Defaults to None. + stride (int, optional): Stride. Defaults to 32. + pad (float, optional): Padding. Defaults to 0.0. + single_cls (bool, optional): If True, single class training is used. Defaults to False. + classes (list): List of included classes. Default is None. + fraction (float): Fraction of dataset to utilize. Default is 1.0 (use all data). + + Attributes: + im_files (list): List of image file paths. + labels (list): List of label data dictionaries. + ni (int): Number of images in the dataset. + ims (list): List of loaded images. + npy_files (list): List of numpy file paths. + transforms (callable): Image transformation function. + """ + + def __init__(self, + img_path, + imgsz=640, + cache=False, + augment=True, + hyp=DEFAULT_CFG, + prefix='', + rect=False, + batch_size=16, + stride=32, + pad=0.5, + single_cls=False, + classes=None, + fraction=1.0): + super().__init__() + self.img_path = img_path + self.imgsz = imgsz + self.augment = augment + self.single_cls = single_cls + self.prefix = prefix + self.fraction = fraction + self.im_files = self.get_img_files(self.img_path) + self.labels = self.get_labels() + self.update_labels(include_class=classes) # single_cls and include_class + self.ni = len(self.labels) # number of images + self.rect = rect + self.batch_size = batch_size + self.stride = stride + self.pad = pad + if self.rect: + assert self.batch_size is not None + self.set_rectangle() + + # Buffer thread for mosaic images + self.buffer = [] # buffer size = batch size + self.max_buffer_length = min((self.ni, self.batch_size * 8, 1000)) if self.augment else 0 + + # Cache stuff + if cache == 'ram' and not self.check_cache_ram(): + cache = False + self.ims, self.im_hw0, self.im_hw = [None] * self.ni, [None] * self.ni, [None] * self.ni + self.npy_files = [Path(f).with_suffix('.npy') for f in self.im_files] + if cache: + self.cache_images(cache) + + # Transforms + self.transforms = self.build_transforms(hyp=hyp) + + def get_img_files(self, img_path): + """Read image files.""" + try: + f = [] # image files + for p in img_path if isinstance(img_path, list) else [img_path]: + p = Path(p) # os-agnostic + if p.is_dir(): # dir + f += glob.glob(str(p / '**' / '*.*'), recursive=True) + # F = list(p.rglob('*.*')) # pathlib + elif p.is_file(): # file + with open(p) as t: + t = t.read().strip().splitlines() + parent = str(p.parent) + os.sep + f += [x.replace('./', parent) if x.startswith('./') else x for x in t] # local to global path + # F += [p.parent / x.lstrip(os.sep) for x in t] # local to global path (pathlib) + else: + raise FileNotFoundError(f'{self.prefix}{p} does not exist') + im_files = sorted(x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in IMG_FORMATS) + # self.img_files = sorted([x for x in f if x.suffix[1:].lower() in IMG_FORMATS]) # pathlib + assert im_files, f'{self.prefix}No images found' + except Exception as e: + raise FileNotFoundError(f'{self.prefix}Error loading data from 
{img_path}\n{HELP_URL}') from e + if self.fraction < 1: + im_files = im_files[:round(len(im_files) * self.fraction)] + return im_files + + def update_labels(self, include_class: Optional[list]): + """include_class, filter labels to include only these classes (optional).""" + include_class_array = np.array(include_class).reshape(1, -1) + for i in range(len(self.labels)): + if include_class is not None: + cls = self.labels[i]['cls'] + bboxes = self.labels[i]['bboxes'] + segments = self.labels[i]['segments'] + keypoints = self.labels[i]['keypoints'] + j = (cls == include_class_array).any(1) + self.labels[i]['cls'] = cls[j] + self.labels[i]['bboxes'] = bboxes[j] + if segments: + self.labels[i]['segments'] = [segments[si] for si, idx in enumerate(j) if idx] + if keypoints is not None: + self.labels[i]['keypoints'] = keypoints[j] + if self.single_cls: + self.labels[i]['cls'][:, 0] = 0 + + def load_image(self, i): + """Loads 1 image from dataset index 'i', returns (im, resized hw).""" + im, f, fn = self.ims[i], self.im_files[i], self.npy_files[i] + if im is None: # not cached in RAM + if fn.exists(): # load npy + im = np.load(fn) + else: # read image + im = cv2.imread(f) # BGR + if im is None: + raise FileNotFoundError(f'Image Not Found {f}') + h0, w0 = im.shape[:2] # orig hw + r = self.imgsz / max(h0, w0) # ratio + if r != 1: # if sizes are not equal + interp = cv2.INTER_LINEAR if (self.augment or r > 1) else cv2.INTER_AREA + im = cv2.resize(im, (min(math.ceil(w0 * r), self.imgsz), min(math.ceil(h0 * r), self.imgsz)), + interpolation=interp) + + # Add to buffer if training with augmentations + if self.augment: + self.ims[i], self.im_hw0[i], self.im_hw[i] = im, (h0, w0), im.shape[:2] # im, hw_original, hw_resized + self.buffer.append(i) + if len(self.buffer) >= self.max_buffer_length: + j = self.buffer.pop(0) + self.ims[j], self.im_hw0[j], self.im_hw[j] = None, None, None + + return im, (h0, w0), im.shape[:2] + + return self.ims[i], self.im_hw0[i], self.im_hw[i] + + def cache_images(self, cache): + """Cache images to memory or disk.""" + b, gb = 0, 1 << 30 # bytes of cached images, bytes per gigabytes + fcn = self.cache_images_to_disk if cache == 'disk' else self.load_image + with ThreadPool(NUM_THREADS) as pool: + results = pool.imap(fcn, range(self.ni)) + pbar = tqdm(enumerate(results), total=self.ni, bar_format=TQDM_BAR_FORMAT, disable=LOCAL_RANK > 0) + for i, x in pbar: + if cache == 'disk': + b += self.npy_files[i].stat().st_size + else: # 'ram' + self.ims[i], self.im_hw0[i], self.im_hw[i] = x # im, hw_orig, hw_resized = load_image(self, i) + b += self.ims[i].nbytes + pbar.desc = f'{self.prefix}Caching images ({b / gb:.1f}GB {cache})' + pbar.close() + + def cache_images_to_disk(self, i): + """Saves an image as an *.npy file for faster loading.""" + f = self.npy_files[i] + if not f.exists(): + np.save(f.as_posix(), cv2.imread(self.im_files[i])) + + def check_cache_ram(self, safety_margin=0.5): + """Check image caching requirements vs available memory.""" + b, gb = 0, 1 << 30 # bytes of cached images, bytes per gigabytes + n = min(self.ni, 30) # extrapolate from 30 random images + for _ in range(n): + im = cv2.imread(random.choice(self.im_files)) # sample image + ratio = self.imgsz / max(im.shape[0], im.shape[1]) # max(h, w) # ratio + b += im.nbytes * ratio ** 2 + mem_required = b * self.ni / n * (1 + safety_margin) # GB required to cache dataset into RAM + mem = psutil.virtual_memory() + cache = mem_required < mem.available # to cache or not to cache, that is the question + if not cache: + 
LOGGER.info(f'{self.prefix}{mem_required / gb:.1f}GB RAM required to cache images ' + f'with {int(safety_margin * 100)}% safety margin but only ' + f'{mem.available / gb:.1f}/{mem.total / gb:.1f}GB available, ' + f"{'caching images ✅' if cache else 'not caching images ⚠️'}") + return cache + + def set_rectangle(self): + """Sets the shape of bounding boxes for YOLO detections as rectangles.""" + bi = np.floor(np.arange(self.ni) / self.batch_size).astype(int) # batch index + nb = bi[-1] + 1 # number of batches + + s = np.array([x.pop('shape') for x in self.labels]) # hw + ar = s[:, 0] / s[:, 1] # aspect ratio + irect = ar.argsort() + self.im_files = [self.im_files[i] for i in irect] + self.labels = [self.labels[i] for i in irect] + ar = ar[irect] + + # Set training image shapes + shapes = [[1, 1]] * nb + for i in range(nb): + ari = ar[bi == i] + mini, maxi = ari.min(), ari.max() + if maxi < 1: + shapes[i] = [maxi, 1] + elif mini > 1: + shapes[i] = [1, 1 / mini] + + self.batch_shapes = np.ceil(np.array(shapes) * self.imgsz / self.stride + self.pad).astype(int) * self.stride + self.batch = bi # batch index of image + + def __getitem__(self, index): + """Returns transformed label information for given index.""" + return self.transforms(self.get_image_and_label(index)) + + def get_image_and_label(self, index): + """Get and return label information from the dataset.""" + label = deepcopy(self.labels[index]) # requires deepcopy() https://github.com/ultralytics/ultralytics/pull/1948 + label.pop('shape', None) # shape is for rect, remove it + label['img'], label['ori_shape'], label['resized_shape'] = self.load_image(index) + label['ratio_pad'] = (label['resized_shape'][0] / label['ori_shape'][0], + label['resized_shape'][1] / label['ori_shape'][1]) # for evaluation + if self.rect: + label['rect_shape'] = self.batch_shapes[self.batch[index]] + return self.update_labels_info(label) + + def __len__(self): + """Returns the length of the labels list for the dataset.""" + return len(self.labels) + + def update_labels_info(self, label): + """custom your label format here.""" + return label + + def build_transforms(self, hyp=None): + """Users can custom augmentations here + like: + if self.augment: + # Training transforms + return Compose([]) + else: + # Val transforms + return Compose([]) + """ + raise NotImplementedError + + def get_labels(self): + """Users can custom their own format here. 
+ Make sure your output is a list with each element like below: + dict( + im_file=im_file, + shape=shape, # format: (height, width) + cls=cls, + bboxes=bboxes, # xywh + segments=segments, # xy + keypoints=keypoints, # xy + normalized=True, # or False + bbox_format="xyxy", # or xywh, ltwh + ) + """ + raise NotImplementedError diff --git a/ultralytics/yolo/data/build.py b/ultralytics/yolo/data/build.py new file mode 100644 index 0000000..5499c76 --- /dev/null +++ b/ultralytics/yolo/data/build.py @@ -0,0 +1,170 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +import os +import random +from pathlib import Path + +import numpy as np +import torch +from PIL import Image +from torch.utils.data import dataloader, distributed + +from ultralytics.yolo.data.dataloaders.stream_loaders import (LOADERS, LoadImages, LoadPilAndNumpy, LoadScreenshots, + LoadStreams, LoadTensor, SourceTypes, autocast_list) +from ultralytics.yolo.data.utils import IMG_FORMATS, VID_FORMATS +from ultralytics.yolo.utils.checks import check_file + +from ..utils import RANK, colorstr +from .dataset import YOLODataset +from .utils import PIN_MEMORY + + +class InfiniteDataLoader(dataloader.DataLoader): + """Dataloader that reuses workers. Uses same syntax as vanilla DataLoader.""" + + def __init__(self, *args, **kwargs): + """Dataloader that infinitely recycles workers, inherits from DataLoader.""" + super().__init__(*args, **kwargs) + object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler)) + self.iterator = super().__iter__() + + def __len__(self): + """Returns the length of the batch sampler's sampler.""" + return len(self.batch_sampler.sampler) + + def __iter__(self): + """Creates a sampler that repeats indefinitely.""" + for _ in range(len(self)): + yield next(self.iterator) + + def reset(self): + """Reset iterator. + This is useful when we want to modify settings of dataset while training. + """ + self.iterator = self._get_iterator() + + +class _RepeatSampler: + """ + Sampler that repeats forever. + + Args: + sampler (Dataset.sampler): The sampler to repeat. 
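# --- Editor's note: illustrative sketch, not part of this commit ----------
# A minimal get_labels() override for the BaseDataset contract documented in
# base.py above; the file name, shape and box values here are made up.
import numpy as np


def get_labels_sketch():
    return [
        dict(
            im_file='images/0001.jpg',
            shape=(480, 640),                                           # (height, width)
            cls=np.array([[0]], dtype=np.float32),                      # one object of class 0
            bboxes=np.array([[0.5, 0.5, 0.2, 0.3]], dtype=np.float32),  # normalized xywh
            segments=[],
            keypoints=None,
            normalized=True,
            bbox_format='xywh',
        )]
# ---------------------------------------------------------------------------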
+ """ + + def __init__(self, sampler): + """Initializes an object that repeats a given sampler indefinitely.""" + self.sampler = sampler + + def __iter__(self): + """Iterates over the 'sampler' and yields its contents.""" + while True: + yield from iter(self.sampler) + + +def seed_worker(worker_id): # noqa + """Set dataloader worker seed https://pytorch.org/docs/stable/notes/randomness.html#dataloader.""" + worker_seed = torch.initial_seed() % 2 ** 32 + np.random.seed(worker_seed) + random.seed(worker_seed) + + +def build_yolo_dataset(cfg, img_path, batch, data, mode='train', rect=False, stride=32): + """Build YOLO Dataset""" + return YOLODataset( + img_path=img_path, + imgsz=cfg.imgsz, + batch_size=batch, + augment=mode == 'train', # augmentation + hyp=cfg, # TODO: probably add a get_hyps_from_cfg function + rect=cfg.rect or rect, # rectangular batches + cache=cfg.cache or None, + single_cls=cfg.single_cls or False, + stride=int(stride), + pad=0.0 if mode == 'train' else 0.5, + prefix=colorstr(f'{mode}: '), + use_segments=cfg.task == 'segment', + use_keypoints=cfg.task == 'pose', + classes=cfg.classes, + data=data, + fraction=cfg.fraction if mode == 'train' else 1.0) + + +def build_dataloader(dataset, batch, workers, shuffle=True, rank=-1): + """Return an InfiniteDataLoader or DataLoader for training or validation set.""" + batch = min(batch, len(dataset)) + nd = torch.cuda.device_count() # number of CUDA devices + nw = min([os.cpu_count() // max(nd, 1), batch if batch > 1 else 0, workers]) # number of workers + sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle) + generator = torch.Generator() + generator.manual_seed(6148914691236517205 + RANK) + return InfiniteDataLoader(dataset=dataset, + batch_size=batch, + shuffle=shuffle and sampler is None, + num_workers=nw, + sampler=sampler, + pin_memory=PIN_MEMORY, + collate_fn=getattr(dataset, 'collate_fn', None), + worker_init_fn=seed_worker, + generator=generator) + + +def check_source(source): + """Check source type and return corresponding flag values.""" + webcam, screenshot, from_img, in_memory, tensor = False, False, False, False, False + if isinstance(source, (str, int, Path)): # int for local usb camera + source = str(source) + is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS) + is_url = source.lower().startswith(('https://', 'http://', 'rtsp://', 'rtmp://')) + webcam = source.isnumeric() or source.endswith('.streams') or (is_url and not is_file) + screenshot = source.lower() == 'screen' + if is_url and is_file: + source = check_file(source) # download + elif isinstance(source, tuple(LOADERS)): + in_memory = True + elif isinstance(source, (list, tuple)): + source = autocast_list(source) # convert all list elements to PIL or np arrays + from_img = True + elif isinstance(source, (Image.Image, np.ndarray)): + from_img = True + elif isinstance(source, torch.Tensor): + tensor = True + else: + raise TypeError('Unsupported image type. For supported types see https://docs.ultralytics.com/modes/predict') + + return source, webcam, screenshot, from_img, in_memory, tensor + + +def load_inference_source(source=None, imgsz=640, vid_stride=1): + """ + Loads an inference source for object detection and applies necessary transformations. + + Args: + source (str, Path, Tensor, PIL.Image, np.ndarray): The input source for inference. + imgsz (int, optional): The size of the image for inference. Default is 640. + vid_stride (int, optional): The frame interval for video sources. Default is 1. 
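# --- Editor's note: illustrative sketch, not part of this commit ----------
# The seed_worker + torch.Generator pattern used by build_dataloader above,
# shown on a toy dataset: a fixed generator seed makes the shuffle order
# reproducible, and seed_worker takes effect once num_workers > 0.
import random

import numpy as np
import torch
from torch.utils.data import DataLoader, TensorDataset


def seed_worker(worker_id):  # noqa
    worker_seed = torch.initial_seed() % 2 ** 32
    np.random.seed(worker_seed)
    random.seed(worker_seed)


g = torch.Generator()
g.manual_seed(0)  # fixed seed -> identical shuffle order on every run
ds = TensorDataset(torch.arange(8).float())
dl = DataLoader(ds, batch_size=2, shuffle=True, num_workers=0,
                worker_init_fn=seed_worker, generator=g)
print([b[0].tolist() for b in dl])
# ---------------------------------------------------------------------------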
+ + Returns: + dataset (Dataset): A dataset object for the specified input source. + """ + source, webcam, screenshot, from_img, in_memory, tensor = check_source(source) + source_type = source.source_type if in_memory else SourceTypes(webcam, screenshot, from_img, tensor) + + # Dataloader + if tensor: + dataset = LoadTensor(source) + elif in_memory: + dataset = source + elif webcam: + dataset = LoadStreams(source, imgsz=imgsz, vid_stride=vid_stride) + elif screenshot: + dataset = LoadScreenshots(source, imgsz=imgsz) + elif from_img: + dataset = LoadPilAndNumpy(source, imgsz=imgsz) + else: + dataset = LoadImages(source, imgsz=imgsz, vid_stride=vid_stride) + + # Attach source types to the dataset + setattr(dataset, 'source_type', source_type) + + return dataset diff --git a/ultralytics/yolo/data/converter.py b/ultralytics/yolo/data/converter.py new file mode 100644 index 0000000..c1278dd --- /dev/null +++ b/ultralytics/yolo/data/converter.py @@ -0,0 +1,230 @@ +import json +from collections import defaultdict +from pathlib import Path + +import cv2 +import numpy as np +from tqdm import tqdm + +from ultralytics.yolo.utils.checks import check_requirements +from ultralytics.yolo.utils.files import make_dirs + + +def coco91_to_coco80_class(): + """Converts 91-index COCO class IDs to 80-index COCO class IDs. + + Returns: + (list): A list of 91 class IDs where the index represents the 80-index class ID and the value is the + corresponding 91-index class ID. + + """ + return [ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, None, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, None, 24, 25, None, + None, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, None, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, + 51, 52, 53, 54, 55, 56, 57, 58, 59, None, 60, None, None, 61, None, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, + None, 73, 74, 75, 76, 77, 78, 79, None] + + +def convert_coco(labels_dir='../coco/annotations/', use_segments=False, use_keypoints=False, cls91to80=True): + """Converts COCO dataset annotations to a format suitable for training YOLOv5 models. + + Args: + labels_dir (str, optional): Path to directory containing COCO dataset annotation files. + use_segments (bool, optional): Whether to include segmentation masks in the output. + use_keypoints (bool, optional): Whether to include keypoint annotations in the output. + cls91to80 (bool, optional): Whether to map 91 COCO class IDs to the corresponding 80 COCO class IDs. + + Raises: + FileNotFoundError: If the labels_dir path does not exist. + + Example Usage: + convert_coco(labels_dir='../coco/annotations/', use_segments=True, use_keypoints=True, cls91to80=True) + + Output: + Generates output files in the specified output directory. 
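# --- Editor's note: illustrative sketch, not part of this commit ----------
# The box normalization convert_coco() performs below: a COCO box given as
# [top-left x, top-left y, w, h] in pixels becomes normalized [cx, cy, w, h].
import numpy as np

w, h = 640, 480                                        # image width, height
box = np.array([100, 200, 50, 40], dtype=np.float64)   # COCO ltwh in pixels
box[:2] += box[2:] / 2                                 # top-left corner -> center
box[[0, 2]] /= w                                       # normalize x, width
box[[1, 3]] /= h                                       # normalize y, height
print(box)                                             # [0.1953 0.4583 0.0781 0.0833]
# ---------------------------------------------------------------------------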
+ """ + + save_dir = make_dirs('yolo_labels') # output directory + coco80 = coco91_to_coco80_class() + + # Import json + for json_file in sorted(Path(labels_dir).resolve().glob('*.json')): + fn = Path(save_dir) / 'labels' / json_file.stem.replace('instances_', '') # folder name + fn.mkdir(parents=True, exist_ok=True) + with open(json_file) as f: + data = json.load(f) + + # Create image dict + images = {f'{x["id"]:d}': x for x in data['images']} + # Create image-annotations dict + imgToAnns = defaultdict(list) + for ann in data['annotations']: + imgToAnns[ann['image_id']].append(ann) + + # Write labels file + for img_id, anns in tqdm(imgToAnns.items(), desc=f'Annotations {json_file}'): + img = images[f'{img_id:d}'] + h, w, f = img['height'], img['width'], img['file_name'] + + bboxes = [] + segments = [] + keypoints = [] + for ann in anns: + if ann['iscrowd']: + continue + # The COCO box format is [top left x, top left y, width, height] + box = np.array(ann['bbox'], dtype=np.float64) + box[:2] += box[2:] / 2 # xy top-left corner to center + box[[0, 2]] /= w # normalize x + box[[1, 3]] /= h # normalize y + if box[2] <= 0 or box[3] <= 0: # if w <= 0 and h <= 0 + continue + + cls = coco80[ann['category_id'] - 1] if cls91to80 else ann['category_id'] - 1 # class + box = [cls] + box.tolist() + if box not in bboxes: + bboxes.append(box) + if use_segments and ann.get('segmentation') is not None: + if len(ann['segmentation']) == 0: + segments.append([]) + continue + if isinstance(ann['segmentation'], dict): + ann['segmentation'] = rle2polygon(ann['segmentation']) + if len(ann['segmentation']) > 1: + s = merge_multi_segment(ann['segmentation']) + s = (np.concatenate(s, axis=0) / np.array([w, h])).reshape(-1).tolist() + else: + s = [j for i in ann['segmentation'] for j in i] # all segments concatenated + s = (np.array(s).reshape(-1, 2) / np.array([w, h])).reshape(-1).tolist() + s = [cls] + s + if s not in segments: + segments.append(s) + if use_keypoints and ann.get('keypoints') is not None: + k = (np.array(ann['keypoints']).reshape(-1, 3) / np.array([w, h, 1])).reshape(-1).tolist() + k = box + k + keypoints.append(k) + + # Write + with open((fn / f).with_suffix('.txt'), 'a') as file: + for i in range(len(bboxes)): + if use_keypoints: + line = *(keypoints[i]), # cls, box, keypoints + else: + line = *(segments[i] + if use_segments and len(segments[i]) > 0 else bboxes[i]), # cls, box or segments + file.write(('%g ' * len(line)).rstrip() % line + '\n') + + +def rle2polygon(segmentation): + """ + Convert Run-Length Encoding (RLE) mask to polygon coordinates. + + Args: + segmentation (dict, list): RLE mask representation of the object segmentation. + + Returns: + (list): A list of lists representing the polygon coordinates for each contour. + + Note: + Requires the 'pycocotools' package to be installed. + """ + check_requirements('pycocotools') + from pycocotools import mask + + m = mask.decode(segmentation) + m[m > 0] = 255 + contours, _ = cv2.findContours(m, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_TC89_KCOS) + polygons = [] + for contour in contours: + epsilon = 0.001 * cv2.arcLength(contour, True) + contour_approx = cv2.approxPolyDP(contour, epsilon, True) + polygon = contour_approx.flatten().tolist() + polygons.append(polygon) + return polygons + + +def min_index(arr1, arr2): + """ + Find a pair of indexes with the shortest distance between two arrays of 2D points. + + Args: + arr1 (np.array): A NumPy array of shape (N, 2) representing N 2D points. 
+ arr2 (np.array): A NumPy array of shape (M, 2) representing M 2D points. + + Returns: + (tuple): A tuple containing the indexes of the points with the shortest distance in arr1 and arr2 respectively. + """ + dis = ((arr1[:, None, :] - arr2[None, :, :]) ** 2).sum(-1) + return np.unravel_index(np.argmin(dis, axis=None), dis.shape) + + +def merge_multi_segment(segments): + """ + Merge multiple segments into one list by connecting the coordinates with the minimum distance between each segment. + This function connects these coordinates with a thin line to merge all segments into one. + + Args: + segments (List[List]): Original segmentations in COCO's JSON file. + Each element is a list of coordinates, like [segmentation1, segmentation2,...]. + + Returns: + s (List[np.ndarray]): A list of connected segments represented as NumPy arrays. + """ + s = [] + segments = [np.array(i).reshape(-1, 2) for i in segments] + idx_list = [[] for _ in range(len(segments))] + + # record the indexes with min distance between each segment + for i in range(1, len(segments)): + idx1, idx2 = min_index(segments[i - 1], segments[i]) + idx_list[i - 1].append(idx1) + idx_list[i].append(idx2) + + # use two round to connect all the segments + for k in range(2): + # forward connection + if k == 0: + for i, idx in enumerate(idx_list): + # middle segments have two indexes + # reverse the index of middle segments + if len(idx) == 2 and idx[0] > idx[1]: + idx = idx[::-1] + segments[i] = segments[i][::-1, :] + + segments[i] = np.roll(segments[i], -idx[0], axis=0) + segments[i] = np.concatenate([segments[i], segments[i][:1]]) + # deal with the first segment and the last one + if i in [0, len(idx_list) - 1]: + s.append(segments[i]) + else: + idx = [0, idx[1] - idx[0]] + s.append(segments[i][idx[0]:idx[1] + 1]) + + else: + for i in range(len(idx_list) - 1, -1, -1): + if i not in [0, len(idx_list) - 1]: + idx = idx_list[i] + nidx = abs(idx[1] - idx[0]) + s.append(segments[i][nidx:]) + return s + + +def delete_dsstore(path='../datasets'): + """Delete Apple .DS_Store files in the specified directory and its subdirectories.""" + from pathlib import Path + + files = list(Path(path).rglob('.DS_store')) + print(files) + for f in files: + f.unlink() + + +if __name__ == '__main__': + source = 'COCO' + + if source == 'COCO': + convert_coco( + '../datasets/coco/annotations', # directory with *.json + use_segments=False, + use_keypoints=True, + cls91to80=False) diff --git a/ultralytics/yolo/data/dataloaders/__init__.py b/ultralytics/yolo/data/dataloaders/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/ultralytics/yolo/data/dataloaders/stream_loaders.py b/ultralytics/yolo/data/dataloaders/stream_loaders.py new file mode 100644 index 0000000..d124a43 --- /dev/null +++ b/ultralytics/yolo/data/dataloaders/stream_loaders.py @@ -0,0 +1,404 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +import glob +import math +import os +import time +from dataclasses import dataclass +from pathlib import Path +from threading import Thread +from urllib.parse import urlparse + +import cv2 +import numpy as np +import requests +import torch +from PIL import Image + +from ultralytics.yolo.data.utils import IMG_FORMATS, VID_FORMATS +from ultralytics.yolo.utils import LOGGER, ROOT, is_colab, is_kaggle, ops +from ultralytics.yolo.utils.checks import check_requirements + + +@dataclass +class SourceTypes: + webcam: bool = False + screenshot: bool = False + from_img: bool = False + tensor: bool = False + + +class LoadStreams: + """YOLOv8 streamloader, 
i.e. `yolo predict source='rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP streams`.""" + + def __init__(self, sources='file.streams', imgsz=640, vid_stride=1): + """Initialize instance variables and check for consistent input stream shapes.""" + torch.backends.cudnn.benchmark = True # faster for fixed-size inference + self.mode = 'stream' + self.imgsz = imgsz + self.vid_stride = vid_stride # video frame-rate stride + sources = Path(sources).read_text().rsplit() if os.path.isfile(sources) else [sources] + n = len(sources) + self.sources = [ops.clean_str(x) for x in sources] # clean source names for later + self.imgs, self.fps, self.frames, self.threads, self.shape = [[]] * n, [0] * n, [0] * n, [None] * n, [None] * n + for i, s in enumerate(sources): # index, source + # Start thread to read frames from video stream + st = f'{i + 1}/{n}: {s}... ' + if urlparse(s).hostname in ('www.youtube.com', 'youtube.com', 'youtu.be'): # if source is YouTube video + # YouTube format i.e. 'https://www.youtube.com/watch?v=Zgi9g1ksQHc' or 'https://youtu.be/Zgi9g1ksQHc' + s = get_best_youtube_url(s) + s = eval(s) if s.isnumeric() else s # i.e. s = '0' local webcam + if s == 0 and (is_colab() or is_kaggle()): + raise NotImplementedError("'source=0' webcam not supported in Colab and Kaggle notebooks. " + "Try running 'source=0' in a local environment.") + cap = cv2.VideoCapture(s) + if not cap.isOpened(): + raise ConnectionError(f'{st}Failed to open {s}') + w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) + h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) + fps = cap.get(cv2.CAP_PROP_FPS) # warning: may return 0 or nan + self.frames[i] = max(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), 0) or float('inf') # infinite stream fallback + self.fps[i] = max((fps if math.isfinite(fps) else 0) % 100, 0) or 30 # 30 FPS fallback + + success, im = cap.read() # guarantee first frame + if not success or im is None: + raise ConnectionError(f'{st}Failed to read images from {s}') + self.imgs[i].append(im) + self.shape[i] = im.shape + self.threads[i] = Thread(target=self.update, args=([i, cap, s]), daemon=True) + LOGGER.info(f'{st}Success ✅ ({self.frames[i]} frames of shape {w}x{h} at {self.fps[i]:.2f} FPS)') + self.threads[i].start() + LOGGER.info('') # newline + + # Check for common shapes + self.bs = self.__len__() + + def update(self, i, cap, stream): + """Read stream `i` frames in daemon thread.""" + n, f = 0, self.frames[i] # frame number, frame array + while cap.isOpened() and n < f: + # Only read a new frame if the buffer is empty + if not self.imgs[i]: + n += 1 + cap.grab() # .read() = .grab() followed by .retrieve() + if n % self.vid_stride == 0: + success, im = cap.retrieve() + if success: + self.imgs[i].append(im) # add image to buffer + else: + LOGGER.warning('WARNING ⚠️ Video stream unresponsive, please check your IP camera connection.') + self.imgs[i].append(np.zeros(self.shape[i])) + cap.open(stream) # re-open stream if signal was lost + else: + time.sleep(0.01) # wait until the buffer is empty + + def __iter__(self): + """Iterates through YOLO image feed and re-opens unresponsive streams.""" + self.count = -1 + return self + + def __next__(self): + """Returns source paths, transformed and original images for processing.""" + self.count += 1 + + # Wait until a frame is available in each buffer + while not all(self.imgs): + if not all(x.is_alive() for x in self.threads) or cv2.waitKey(1) == ord('q'): # q to quit + cv2.destroyAllWindows() + raise StopIteration + time.sleep(1 / min(self.fps)) + + # Get and remove the next frame 
from imgs buffer + return self.sources, [x.pop(0) for x in self.imgs], None, '' + + def __len__(self): + """Return the length of the sources object.""" + return len(self.sources) # 1E12 frames = 32 streams at 30 FPS for 30 years + + +class LoadScreenshots: + """YOLOv8 screenshot dataloader, i.e. `yolo predict source=screen`.""" + + def __init__(self, source, imgsz=640): + """source = [screen_number left top width height] (pixels).""" + check_requirements('mss') + import mss # noqa + + source, *params = source.split() + self.screen, left, top, width, height = 0, None, None, None, None # default to full screen 0 + if len(params) == 1: + self.screen = int(params[0]) + elif len(params) == 4: + left, top, width, height = (int(x) for x in params) + elif len(params) == 5: + self.screen, left, top, width, height = (int(x) for x in params) + self.imgsz = imgsz + self.mode = 'stream' + self.frame = 0 + self.sct = mss.mss() + self.bs = 1 + + # Parse monitor shape + monitor = self.sct.monitors[self.screen] + self.top = monitor['top'] if top is None else (monitor['top'] + top) + self.left = monitor['left'] if left is None else (monitor['left'] + left) + self.width = width or monitor['width'] + self.height = height or monitor['height'] + self.monitor = {'left': self.left, 'top': self.top, 'width': self.width, 'height': self.height} + + def __iter__(self): + """Returns an iterator of the object.""" + return self + + def __next__(self): + """mss screen capture: get raw pixels from the screen as np array.""" + im0 = np.array(self.sct.grab(self.monitor))[:, :, :3] # [:, :, :3] BGRA to BGR + s = f'screen {self.screen} (LTWH): {self.left},{self.top},{self.width},{self.height}: ' + + self.frame += 1 + return str(self.screen), im0, None, s # screen, img, original img, im0s, s + + +class LoadImages: + """YOLOv8 image/video dataloader, i.e. `yolo predict source=image.jpg/vid.mp4`.""" + + def __init__(self, path, imgsz=640, vid_stride=1): + """Initialize the Dataloader and raise FileNotFoundError if file not found.""" + if isinstance(path, str) and Path(path).suffix == '.txt': # *.txt file with img/vid/dir on each line + path = Path(path).read_text().rsplit() + files = [] + for p in sorted(path) if isinstance(path, (list, tuple)) else [path]: + p = str(Path(p).absolute()) # do not use .resolve() https://github.com/ultralytics/ultralytics/issues/2912 + if '*' in p: + files.extend(sorted(glob.glob(p, recursive=True))) # glob + elif os.path.isdir(p): + files.extend(sorted(glob.glob(os.path.join(p, '*.*')))) # dir + elif os.path.isfile(p): + files.append(p) # files + else: + raise FileNotFoundError(f'{p} does not exist') + + images = [x for x in files if x.split('.')[-1].lower() in IMG_FORMATS] + videos = [x for x in files if x.split('.')[-1].lower() in VID_FORMATS] + ni, nv = len(images), len(videos) + + self.imgsz = imgsz + self.files = images + videos + self.nf = ni + nv # number of files + self.video_flag = [False] * ni + [True] * nv + self.mode = 'image' + self.vid_stride = vid_stride # video frame-rate stride + self.bs = 1 + if any(videos): + self.orientation = None # rotation degrees + self._new_video(videos[0]) # new video + else: + self.cap = None + if self.nf == 0: + raise FileNotFoundError(f'No images or videos found in {p}. 
' + f'Supported formats are:\nimages: {IMG_FORMATS}\nvideos: {VID_FORMATS}') + + def __iter__(self): + """Returns an iterator object for VideoStream or ImageFolder.""" + self.count = 0 + return self + + def __next__(self): + """Return next image, path and metadata from dataset.""" + if self.count == self.nf: + raise StopIteration + path = self.files[self.count] + + if self.video_flag[self.count]: + # Read video + self.mode = 'video' + for _ in range(self.vid_stride): + self.cap.grab() + success, im0 = self.cap.retrieve() + while not success: + self.count += 1 + self.cap.release() + if self.count == self.nf: # last video + raise StopIteration + path = self.files[self.count] + self._new_video(path) + success, im0 = self.cap.read() + + self.frame += 1 + # im0 = self._cv2_rotate(im0) # for use if cv2 autorotation is False + s = f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: ' + + else: + # Read image + self.count += 1 + im0 = cv2.imread(path) # BGR + if im0 is None: + raise FileNotFoundError(f'Image Not Found {path}') + s = f'image {self.count}/{self.nf} {path}: ' + + return [path], [im0], self.cap, s + + def _new_video(self, path): + """Create a new video capture object.""" + self.frame = 0 + self.cap = cv2.VideoCapture(path) + self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT) / self.vid_stride) + if hasattr(cv2, 'CAP_PROP_ORIENTATION_META'): # cv2<4.6.0 compatibility + self.orientation = int(self.cap.get(cv2.CAP_PROP_ORIENTATION_META)) # rotation degrees + # Disable auto-orientation due to known issues in https://github.com/ultralytics/yolov5/issues/8493 + # self.cap.set(cv2.CAP_PROP_ORIENTATION_AUTO, 0) + + def _cv2_rotate(self, im): + """Rotate a cv2 video manually.""" + if self.orientation == 0: + return cv2.rotate(im, cv2.ROTATE_90_CLOCKWISE) + elif self.orientation == 180: + return cv2.rotate(im, cv2.ROTATE_90_COUNTERCLOCKWISE) + elif self.orientation == 90: + return cv2.rotate(im, cv2.ROTATE_180) + return im + + def __len__(self): + """Returns the number of files in the object.""" + return self.nf # number of files + + +class LoadPilAndNumpy: + + def __init__(self, im0, imgsz=640): + """Initialize PIL and Numpy Dataloader.""" + if not isinstance(im0, list): + im0 = [im0] + self.paths = [getattr(im, 'filename', f'image{i}.jpg') for i, im in enumerate(im0)] + self.im0 = [self._single_check(im) for im in im0] + self.imgsz = imgsz + self.mode = 'image' + # Generate fake paths + self.bs = len(self.im0) + + @staticmethod + def _single_check(im): + """Validate and format an image to numpy array.""" + assert isinstance(im, (Image.Image, np.ndarray)), f'Expected PIL/np.ndarray image type, but got {type(im)}' + if isinstance(im, Image.Image): + if im.mode != 'RGB': + im = im.convert('RGB') + im = np.asarray(im)[:, :, ::-1] + im = np.ascontiguousarray(im) # contiguous + return im + + def __len__(self): + """Returns the length of the 'im0' attribute.""" + return len(self.im0) + + def __next__(self): + """Returns batch paths, images, processed images, None, ''.""" + if self.count == 1: # loop only once as it's batch inference + raise StopIteration + self.count += 1 + return self.paths, self.im0, None, '' + + def __iter__(self): + """Enables iteration for class LoadPilAndNumpy.""" + self.count = 0 + return self + + +class LoadTensor: + + def __init__(self, im0) -> None: + self.im0 = self._single_check(im0) + self.bs = self.im0.shape[0] + self.mode = 'image' + self.paths = [getattr(im, 'filename', f'image{i}.jpg') for i, im in enumerate(im0)] + + @staticmethod + def 
_single_check(im, stride=32): + """Validate and format an image to torch.Tensor.""" + s = f'WARNING ⚠️ torch.Tensor inputs should be BCHW i.e. shape(1, 3, 640, 640) ' \ + f'divisible by stride {stride}. Input shape{tuple(im.shape)} is incompatible.' + if len(im.shape) != 4: + if len(im.shape) == 3: + LOGGER.warning(s) + im = im.unsqueeze(0) + else: + raise ValueError(s) + if im.shape[2] % stride or im.shape[3] % stride: + raise ValueError(s) + if im.max() > 1.0: + LOGGER.warning(f'WARNING ⚠️ torch.Tensor inputs should be normalized 0.0-1.0 but max value is {im.max()}. ' + f'Dividing input by 255.') + im = im.float() / 255.0 + + return im + + def __iter__(self): + """Returns an iterator object.""" + self.count = 0 + return self + + def __next__(self): + """Return next item in the iterator.""" + if self.count == 1: + raise StopIteration + self.count += 1 + return self.paths, self.im0, None, '' + + def __len__(self): + """Returns the batch size.""" + return self.bs + + +def autocast_list(source): + """ + Merges a list of source of different types into a list of numpy arrays or PIL images + """ + files = [] + for im in source: + if isinstance(im, (str, Path)): # filename or uri + files.append(Image.open(requests.get(im, stream=True).raw if str(im).startswith('http') else im)) + elif isinstance(im, (Image.Image, np.ndarray)): # PIL or np Image + files.append(im) + else: + raise TypeError(f'type {type(im).__name__} is not a supported Ultralytics prediction source type. \n' + f'See https://docs.ultralytics.com/modes/predict for supported source types.') + + return files + + +LOADERS = [LoadStreams, LoadPilAndNumpy, LoadImages, LoadScreenshots] + + +def get_best_youtube_url(url, use_pafy=True): + """ + Retrieves the URL of the best quality MP4 video stream from a given YouTube video. + + This function uses the pafy or yt_dlp library to extract the video info from YouTube. It then finds the highest + quality MP4 format that has video codec but no audio codec, and returns the URL of this video stream. + + Args: + url (str): The URL of the YouTube video. + use_pafy (bool): Use the pafy package, default=True, otherwise use yt_dlp package. + + Returns: + (str): The URL of the best quality MP4 video stream, or None if no suitable stream is found. 
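# --- Editor's note: illustrative sketch, not part of this commit ----------
# A tensor that satisfies the LoadTensor._single_check() contract above:
# 4-D BCHW, spatial dims divisible by the stride, values already in 0.0-1.0.
import torch

stride = 32
im = torch.rand(1, 3, 640, 640)  # BCHW, already normalized
assert im.ndim == 4
assert im.shape[2] % stride == 0 and im.shape[3] % stride == 0
assert float(im.max()) <= 1.0
# ---------------------------------------------------------------------------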
+ """ + if use_pafy: + check_requirements(('pafy', 'youtube_dl==2020.12.2')) + import pafy # noqa + return pafy.new(url).getbest(preftype='mp4').url + else: + check_requirements('yt-dlp') + import yt_dlp + with yt_dlp.YoutubeDL({'quiet': True}) as ydl: + info_dict = ydl.extract_info(url, download=False) # extract info + for f in info_dict.get('formats', None): + if f['vcodec'] != 'none' and f['acodec'] == 'none' and f['ext'] == 'mp4': + return f.get('url', None) + + +if __name__ == '__main__': + img = cv2.imread(str(ROOT / 'assets/bus.jpg')) + dataset = LoadPilAndNumpy(im0=img) + for d in dataset: + print(d[0]) diff --git a/ultralytics/yolo/data/dataloaders/v5augmentations.py b/ultralytics/yolo/data/dataloaders/v5augmentations.py new file mode 100644 index 0000000..8e0b3e2 --- /dev/null +++ b/ultralytics/yolo/data/dataloaders/v5augmentations.py @@ -0,0 +1,407 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +""" +Image augmentation functions +""" + +import math +import random + +import cv2 +import numpy as np +import torch +import torchvision.transforms as T +import torchvision.transforms.functional as TF + +from ultralytics.yolo.utils import LOGGER, colorstr +from ultralytics.yolo.utils.checks import check_version +from ultralytics.yolo.utils.metrics import bbox_ioa +from ultralytics.yolo.utils.ops import resample_segments, segment2box, xywhn2xyxy + +IMAGENET_MEAN = 0.485, 0.456, 0.406 # RGB mean +IMAGENET_STD = 0.229, 0.224, 0.225 # RGB standard deviation + + +class Albumentations: + # YOLOv5 Albumentations class (optional, only used if package is installed) + def __init__(self, size=640): + """Instantiate object with image augmentations for YOLOv5.""" + self.transform = None + prefix = colorstr('albumentations: ') + try: + import albumentations as A + check_version(A.__version__, '1.0.3', hard=True) # version requirement + + T = [ + A.RandomResizedCrop(height=size, width=size, scale=(0.8, 1.0), ratio=(0.9, 1.11), p=0.0), + A.Blur(p=0.01), + A.MedianBlur(p=0.01), + A.ToGray(p=0.01), + A.CLAHE(p=0.01), + A.RandomBrightnessContrast(p=0.0), + A.RandomGamma(p=0.0), + A.ImageCompression(quality_lower=75, p=0.0)] # transforms + self.transform = A.Compose(T, bbox_params=A.BboxParams(format='yolo', label_fields=['class_labels'])) + + LOGGER.info(prefix + ', '.join(f'{x}'.replace('always_apply=False, ', '') for x in T if x.p)) + except ImportError: # package not installed, skip + pass + except Exception as e: + LOGGER.info(f'{prefix}{e}') + + def __call__(self, im, labels, p=1.0): + """Transforms input image and labels with probability 'p'.""" + if self.transform and random.random() < p: + new = self.transform(image=im, bboxes=labels[:, 1:], class_labels=labels[:, 0]) # transformed + im, labels = new['image'], np.array([[c, *b] for c, b in zip(new['class_labels'], new['bboxes'])]) + return im, labels + + +def normalize(x, mean=IMAGENET_MEAN, std=IMAGENET_STD, inplace=False): + """Denormalize RGB images x per ImageNet stats in BCHW format, i.e. = (x - mean) / std.""" + return TF.normalize(x, mean, std, inplace=inplace) + + +def denormalize(x, mean=IMAGENET_MEAN, std=IMAGENET_STD): + """Denormalize RGB images x per ImageNet stats in BCHW format, i.e. 
= x * std + mean.""" + for i in range(3): + x[:, i] = x[:, i] * std[i] + mean[i] + return x + + +def augment_hsv(im, hgain=0.5, sgain=0.5, vgain=0.5): + """HSV color-space augmentation.""" + if hgain or sgain or vgain: + r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains + hue, sat, val = cv2.split(cv2.cvtColor(im, cv2.COLOR_BGR2HSV)) + dtype = im.dtype # uint8 + + x = np.arange(0, 256, dtype=r.dtype) + lut_hue = ((x * r[0]) % 180).astype(dtype) + lut_sat = np.clip(x * r[1], 0, 255).astype(dtype) + lut_val = np.clip(x * r[2], 0, 255).astype(dtype) + + im_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))) + cv2.cvtColor(im_hsv, cv2.COLOR_HSV2BGR, dst=im) # no return needed + + +def hist_equalize(im, clahe=True, bgr=False): + """Equalize histogram on BGR image 'im' with im.shape(n,m,3) and range 0-255.""" + yuv = cv2.cvtColor(im, cv2.COLOR_BGR2YUV if bgr else cv2.COLOR_RGB2YUV) + if clahe: + c = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8)) + yuv[:, :, 0] = c.apply(yuv[:, :, 0]) + else: + yuv[:, :, 0] = cv2.equalizeHist(yuv[:, :, 0]) # equalize Y channel histogram + return cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR if bgr else cv2.COLOR_YUV2RGB) # convert YUV image to RGB + + +def replicate(im, labels): + """Replicate labels.""" + h, w = im.shape[:2] + boxes = labels[:, 1:].astype(int) + x1, y1, x2, y2 = boxes.T + s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels) + for i in s.argsort()[:round(s.size * 0.5)]: # smallest indices + x1b, y1b, x2b, y2b = boxes[i] + bh, bw = y2b - y1b, x2b - x1b + yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw)) # offset x, y + x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh] + im[y1a:y2a, x1a:x2a] = im[y1b:y2b, x1b:x2b] # im4[ymin:ymax, xmin:xmax] + labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0) + + return im, labels + + +def letterbox(im, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32): + """Resize and pad image while meeting stride-multiple constraints.""" + shape = im.shape[:2] # current shape [height, width] + if isinstance(new_shape, int): + new_shape = (new_shape, new_shape) + + # Scale ratio (new / old) + r = min(new_shape[0] / shape[0], new_shape[1] / shape[1]) + if not scaleup: # only scale down, do not scale up (for better val mAP) + r = min(r, 1.0) + + # Compute padding + ratio = r, r # width, height ratios + new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r)) + dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding + if auto: # minimum rectangle + dw, dh = np.mod(dw, stride), np.mod(dh, stride) # wh padding + elif scaleFill: # stretch + dw, dh = 0.0, 0.0 + new_unpad = (new_shape[1], new_shape[0]) + ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios + + dw /= 2 # divide padding into 2 sides + dh /= 2 + + if shape[::-1] != new_unpad: # resize + im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR) + top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1)) + left, right = int(round(dw - 0.1)), int(round(dw + 0.1)) + im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border + return im, ratio, (dw, dh) + + +def random_perspective(im, + targets=(), + segments=(), + degrees=10, + translate=.1, + scale=.1, + shear=10, + perspective=0.0, + border=(0, 0)): + # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(0.1, 0.1), scale=(0.9, 1.1), shear=(-10, 10)) + # targets = 
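# --- Editor's note: illustrative sketch, not part of this commit ----------
# Worked example of the letterbox() padding math above: a 720x1280 frame
# resized to new_shape=640 with auto=True, stride=32.
import numpy as np

r = min(640 / 720, 640 / 1280)           # 0.5 -> image resized to 360x640
dw, dh = 640 - 640, 640 - 360            # raw padding: 0, 280
dw, dh = np.mod(dw, 32), np.mod(dh, 32)  # minimum-rectangle padding: 0, 24
print(dw / 2, dh / 2)                    # 0.0 12.0 -> 12 px top and bottom, output 384x640
# ---------------------------------------------------------------------------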
[cls, xyxy] + + height = im.shape[0] + border[0] * 2 # shape(h,w,c) + width = im.shape[1] + border[1] * 2 + + # Center + C = np.eye(3) + C[0, 2] = -im.shape[1] / 2 # x translation (pixels) + C[1, 2] = -im.shape[0] / 2 # y translation (pixels) + + # Perspective + P = np.eye(3) + P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y) + P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x) + + # Rotation and Scale + R = np.eye(3) + a = random.uniform(-degrees, degrees) + # a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations + s = random.uniform(1 - scale, 1 + scale) + # s = 2 ** random.uniform(-scale, scale) + R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s) + + # Shear + S = np.eye(3) + S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg) + S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg) + + # Translation + T = np.eye(3) + T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width # x translation (pixels) + T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height # y translation (pixels) + + # Combined rotation matrix + M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT + if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed + if perspective: + im = cv2.warpPerspective(im, M, dsize=(width, height), borderValue=(114, 114, 114)) + else: # affine + im = cv2.warpAffine(im, M[:2], dsize=(width, height), borderValue=(114, 114, 114)) + + # Visualize + # import matplotlib.pyplot as plt + # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel() + # ax[0].imshow(im[:, :, ::-1]) # base + # ax[1].imshow(im2[:, :, ::-1]) # warped + + # Transform label coordinates + n = len(targets) + if n: + use_segments = any(x.any() for x in segments) + new = np.zeros((n, 4)) + if use_segments: # warp segments + segments = resample_segments(segments) # upsample + for i, segment in enumerate(segments): + xy = np.ones((len(segment), 3)) + xy[:, :2] = segment + xy = xy @ M.T # transform + xy = xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2] # perspective rescale or affine + + # Clip + new[i] = segment2box(xy, width, height) + + else: # warp boxes + xy = np.ones((n * 4, 3)) + xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1 + xy = xy @ M.T # transform + xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]).reshape(n, 8) # perspective rescale or affine + + # Create new boxes + x = xy[:, [0, 2, 4, 6]] + y = xy[:, [1, 3, 5, 7]] + new = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T + + # Clip + new[:, [0, 2]] = new[:, [0, 2]].clip(0, width) + new[:, [1, 3]] = new[:, [1, 3]].clip(0, height) + + # Filter candidates + i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01 if use_segments else 0.10) + targets = targets[i] + targets[:, 1:5] = new[i] + + return im, targets + + +def copy_paste(im, labels, segments, p=0.5): + """Implement Copy-Paste augmentation https://arxiv.org/abs/2012.07177, labels as nx5 np.array(cls, xyxy).""" + n = len(segments) + if p and n: + h, w, c = im.shape # height, width, channels + im_new = np.zeros(im.shape, np.uint8) + + # Calculate ioa first then select indexes randomly + boxes = np.stack([w - labels[:, 3], labels[:, 2], w - labels[:, 1], labels[:, 4]], axis=-1) # (n, 4) + ioa = bbox_ioa(boxes, labels[:, 1:5]) # intersection over area + indexes = np.nonzero((ioa < 0.30).all(1))[0] # 
(N, ) + n = len(indexes) + for j in random.sample(list(indexes), k=round(p * n)): + l, box, s = labels[j], boxes[j], segments[j] + labels = np.concatenate((labels, [[l[0], *box]]), 0) + segments.append(np.concatenate((w - s[:, 0:1], s[:, 1:2]), 1)) + cv2.drawContours(im_new, [segments[j].astype(np.int32)], -1, (1, 1, 1), cv2.FILLED) + + result = cv2.flip(im, 1) # augment segments (flip left-right) + i = cv2.flip(im_new, 1).astype(bool) + im[i] = result[i] # cv2.imwrite('debug.jpg', im) # debug + + return im, labels, segments + + +def cutout(im, labels, p=0.5): + """Applies image cutout augmentation https://arxiv.org/abs/1708.04552.""" + if random.random() < p: + h, w = im.shape[:2] + scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction + for s in scales: + mask_h = random.randint(1, int(h * s)) # create random masks + mask_w = random.randint(1, int(w * s)) + + # Box + xmin = max(0, random.randint(0, w) - mask_w // 2) + ymin = max(0, random.randint(0, h) - mask_h // 2) + xmax = min(w, xmin + mask_w) + ymax = min(h, ymin + mask_h) + + # Apply random color mask + im[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)] + + # Return unobscured labels + if len(labels) and s > 0.03: + box = np.array([[xmin, ymin, xmax, ymax]], dtype=np.float32) + ioa = bbox_ioa(box, xywhn2xyxy(labels[:, 1:5], w, h))[0] # intersection over area + labels = labels[ioa < 0.60] # remove >60% obscured labels + + return labels + + +def mixup(im, labels, im2, labels2): + """Applies MixUp augmentation https://arxiv.org/pdf/1710.09412.pdf.""" + r = np.random.beta(32.0, 32.0) # mixup ratio, alpha=beta=32.0 + im = (im * r + im2 * (1 - r)).astype(np.uint8) + labels = np.concatenate((labels, labels2), 0) + return im, labels + + +def box_candidates(box1, box2, wh_thr=2, ar_thr=100, area_thr=0.1, eps=1e-16): # box1(4,n), box2(4,n) + # Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio + w1, h1 = box1[2] - box1[0], box1[3] - box1[1] + w2, h2 = box2[2] - box2[0], box2[3] - box2[1] + ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps)) # aspect ratio + return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr) # candidates + + +def classify_albumentations( + augment=True, + size=224, + scale=(0.08, 1.0), + ratio=(0.75, 1.0 / 0.75), # 0.75, 1.33 + hflip=0.5, + vflip=0.0, + jitter=0.4, + mean=IMAGENET_MEAN, + std=IMAGENET_STD, + auto_aug=False): + # YOLOv5 classification Albumentations (optional, only used if package is installed) + prefix = colorstr('albumentations: ') + try: + import albumentations as A + from albumentations.pytorch import ToTensorV2 + check_version(A.__version__, '1.0.3', hard=True) # version requirement + if augment: # Resize and crop + T = [A.RandomResizedCrop(height=size, width=size, scale=scale, ratio=ratio)] + if auto_aug: + # TODO: implement AugMix, AutoAug & RandAug in albumentation + LOGGER.info(f'{prefix}auto augmentations are currently not supported') + else: + if hflip > 0: + T += [A.HorizontalFlip(p=hflip)] + if vflip > 0: + T += [A.VerticalFlip(p=vflip)] + if jitter > 0: + jitter = float(jitter) + T += [A.ColorJitter(jitter, jitter, jitter, 0)] # brightness, contrast, satuaration, 0 hue + else: # Use fixed crop for eval set (reproducibility) + T = [A.SmallestMaxSize(max_size=size), A.CenterCrop(height=size, width=size)] + T += [A.Normalize(mean=mean, std=std), ToTensorV2()] # Normalize and convert to Tensor + LOGGER.info(prefix + ', 
'.join(f'{x}'.replace('always_apply=False, ', '') for x in T if x.p)) + return A.Compose(T) + + except ImportError: # package not installed, skip + LOGGER.warning(f'{prefix}⚠️ not found, install with `pip install albumentations` (recommended)') + except Exception as e: + LOGGER.info(f'{prefix}{e}') + + +def classify_transforms(size=224): + """Transforms to apply if albumentations not installed.""" + assert isinstance(size, int), f'ERROR: classify_transforms size {size} must be integer, not (list, tuple)' + # T.Compose([T.ToTensor(), T.Resize(size), T.CenterCrop(size), T.Normalize(IMAGENET_MEAN, IMAGENET_STD)]) + return T.Compose([CenterCrop(size), ToTensor(), T.Normalize(IMAGENET_MEAN, IMAGENET_STD)]) + + +class LetterBox: + # YOLOv5 LetterBox class for image preprocessing, i.e. T.Compose([LetterBox(size), ToTensor()]) + def __init__(self, size=(640, 640), auto=False, stride=32): + """Resizes and crops an image to a specified size for YOLOv5 preprocessing.""" + super().__init__() + self.h, self.w = (size, size) if isinstance(size, int) else size + self.auto = auto # pass max size integer, automatically solve for short side using stride + self.stride = stride # used with auto + + def __call__(self, im): # im = np.array HWC + imh, imw = im.shape[:2] + r = min(self.h / imh, self.w / imw) # ratio of new/old + h, w = round(imh * r), round(imw * r) # resized image + hs, ws = (math.ceil(x / self.stride) * self.stride for x in (h, w)) if self.auto else self.h, self.w + top, left = round((hs - h) / 2 - 0.1), round((ws - w) / 2 - 0.1) + im_out = np.full((self.h, self.w, 3), 114, dtype=im.dtype) + im_out[top:top + h, left:left + w] = cv2.resize(im, (w, h), interpolation=cv2.INTER_LINEAR) + return im_out + + +class CenterCrop: + # YOLOv5 CenterCrop class for image preprocessing, i.e. T.Compose([CenterCrop(size), ToTensor()]) + def __init__(self, size=640): + """Converts input image into tensor for YOLOv5 processing.""" + super().__init__() + self.h, self.w = (size, size) if isinstance(size, int) else size + + def __call__(self, im): # im = np.array HWC + imh, imw = im.shape[:2] + m = min(imh, imw) # min dimension + top, left = (imh - m) // 2, (imw - m) // 2 + return cv2.resize(im[top:top + m, left:left + m], (self.w, self.h), interpolation=cv2.INTER_LINEAR) + + +class ToTensor: + # YOLOv5 ToTensor class for image preprocessing, i.e. 
T.Compose([LetterBox(size), ToTensor()]) + def __init__(self, half=False): + """Initialize ToTensor class for YOLOv5 image preprocessing.""" + super().__init__() + self.half = half + + def __call__(self, im): # im = np.array HWC in BGR order + im = np.ascontiguousarray(im.transpose((2, 0, 1))[::-1]) # HWC to CHW -> BGR to RGB -> contiguous + im = torch.from_numpy(im) # to torch + im = im.half() if self.half else im.float() # uint8 to fp16/32 + im /= 255.0 # 0-255 to 0.0-1.0 + return im diff --git a/ultralytics/yolo/data/dataloaders/v5loader.py b/ultralytics/yolo/data/dataloaders/v5loader.py new file mode 100644 index 0000000..96549dd --- /dev/null +++ b/ultralytics/yolo/data/dataloaders/v5loader.py @@ -0,0 +1,1109 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +""" +Dataloaders and dataset utils +""" + +import contextlib +import glob +import hashlib +import math +import os +import random +import shutil +import time +from itertools import repeat +from multiprocessing.pool import ThreadPool +from pathlib import Path +from threading import Thread +from urllib.parse import urlparse + +import cv2 +import numpy as np +import psutil +import torch +import torchvision +from PIL import ExifTags, Image, ImageOps +from torch.utils.data import DataLoader, Dataset, dataloader, distributed +from tqdm import tqdm + +from ultralytics.yolo.utils import (DATASETS_DIR, LOGGER, NUM_THREADS, TQDM_BAR_FORMAT, is_colab, is_dir_writeable, + is_kaggle) +from ultralytics.yolo.utils.checks import check_requirements +from ultralytics.yolo.utils.ops import clean_str, segments2boxes, xyn2xy, xywh2xyxy, xywhn2xyxy, xyxy2xywhn +from ultralytics.yolo.utils.torch_utils import torch_distributed_zero_first + +from .v5augmentations import (Albumentations, augment_hsv, classify_albumentations, classify_transforms, copy_paste, + letterbox, mixup, random_perspective) + +# Parameters +HELP_URL = 'See https://docs.ultralytics.com/yolov5/tutorials/train_custom_data' +IMG_FORMATS = 'bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp', 'pfm' # include image suffixes +VID_FORMATS = 'asf', 'avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'ts', 'wmv' # include video suffixes +LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html +RANK = int(os.getenv('RANK', -1)) +PIN_MEMORY = str(os.getenv('PIN_MEMORY', True)).lower() == 'true' # global pin_memory for dataloaders + +# Get orientation exif tag +for orientation in ExifTags.TAGS.keys(): + if ExifTags.TAGS[orientation] == 'Orientation': + break + + +def get_hash(paths): + """Returns a single hash value of a list of paths (files or dirs).""" + size = sum(os.path.getsize(p) for p in paths if os.path.exists(p)) # sizes + h = hashlib.sha256(str(size).encode()) # hash sizes + h.update(''.join(paths).encode()) # hash paths + return h.hexdigest() # return hash + + +def exif_size(img): + """Returns exif-corrected PIL size.""" + s = img.size # (width, height) + with contextlib.suppress(Exception): + rotation = dict(img._getexif().items())[orientation] + if rotation in [6, 8]: # rotation 270 or 90 + s = (s[1], s[0]) + return s + + +def exif_transpose(image): + """ + Transpose a PIL image accordingly if it has an EXIF Orientation tag. + Inplace version of https://github.com/python-pillow/Pillow/blob/master/src/PIL/ImageOps.py exif_transpose() + + :param image: The image to transpose. + :return: An image. 
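# --- Editor's note: illustrative sketch, not part of this commit ----------
# What exif_size() above corrects for: EXIF Orientation values 6 and 8 mean the
# photo is stored rotated by 90 degrees, so width and height must be swapped.
size = (4000, 3000)       # (width, height) as PIL reports it
rotation = 6              # EXIF Orientation tag value (rotated 90 degrees)
if rotation in (6, 8):
    size = (size[1], size[0])
print(size)               # (3000, 4000)
# ---------------------------------------------------------------------------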
+ """ + exif = image.getexif() + orientation = exif.get(0x0112, 1) # default 1 + if orientation > 1: + method = { + 2: Image.FLIP_LEFT_RIGHT, + 3: Image.ROTATE_180, + 4: Image.FLIP_TOP_BOTTOM, + 5: Image.TRANSPOSE, + 6: Image.ROTATE_270, + 7: Image.TRANSVERSE, + 8: Image.ROTATE_90}.get(orientation) + if method is not None: + image = image.transpose(method) + del exif[0x0112] + image.info['exif'] = exif.tobytes() + return image + + +def seed_worker(worker_id): + """Set dataloader worker seed https://pytorch.org/docs/stable/notes/randomness.html#dataloader.""" + worker_seed = torch.initial_seed() % 2 ** 32 + np.random.seed(worker_seed) + random.seed(worker_seed) + + +def create_dataloader(path, + imgsz, + batch_size, + stride, + single_cls=False, + hyp=None, + augment=False, + cache=False, + pad=0.0, + rect=False, + rank=-1, + workers=8, + image_weights=False, + close_mosaic=False, + min_items=0, + prefix='', + shuffle=False, + seed=0): + if rect and shuffle: + LOGGER.warning('WARNING ⚠️ --rect is incompatible with DataLoader shuffle, setting shuffle=False') + shuffle = False + with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP + dataset = LoadImagesAndLabels( + path, + imgsz, + batch_size, + augment=augment, # augmentation + hyp=hyp, # hyperparameters + rect=rect, # rectangular batches + cache_images=cache, + single_cls=single_cls, + stride=int(stride), + pad=pad, + image_weights=image_weights, + min_items=min_items, + prefix=prefix) + + batch_size = min(batch_size, len(dataset)) + nd = torch.cuda.device_count() # number of CUDA devices + nw = min([os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers]) # number of workers + sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle) + loader = DataLoader if image_weights or close_mosaic else InfiniteDataLoader # DataLoader allows attribute updates + generator = torch.Generator() + generator.manual_seed(6148914691236517205 + seed + RANK) + return loader(dataset, + batch_size=batch_size, + shuffle=shuffle and sampler is None, + num_workers=nw, + sampler=sampler, + pin_memory=PIN_MEMORY, + collate_fn=LoadImagesAndLabels.collate_fn, + worker_init_fn=seed_worker, + generator=generator), dataset + + +class InfiniteDataLoader(dataloader.DataLoader): + """Dataloader that reuses workers + + Uses same syntax as vanilla DataLoader + """ + + def __init__(self, *args, **kwargs): + """Dataloader that reuses workers for same syntax as vanilla DataLoader.""" + super().__init__(*args, **kwargs) + object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler)) + self.iterator = super().__iter__() + + def __len__(self): + """Returns the length of batch_sampler's sampler.""" + return len(self.batch_sampler.sampler) + + def __iter__(self): + """Creates a sampler that infinitely repeats.""" + for _ in range(len(self)): + yield next(self.iterator) + + +class _RepeatSampler: + """Sampler that repeats forever + + Args: + sampler (Dataset.sampler): The sampler to repeat. + """ + + def __init__(self, sampler): + """Sampler that repeats dataset samples infinitely.""" + self.sampler = sampler + + def __iter__(self): + """Infinite loop iterating over a given sampler.""" + while True: + yield from iter(self.sampler) + + +class LoadScreenshots: + # YOLOv5 screenshot dataloader, i.e. 
`python detect.py --source "screen 0 100 100 512 256"` + def __init__(self, source, img_size=640, stride=32, auto=True, transforms=None): + """source = [screen_number left top width height] (pixels).""" + check_requirements('mss') + import mss + + source, *params = source.split() + self.screen, left, top, width, height = 0, None, None, None, None # default to full screen 0 + if len(params) == 1: + self.screen = int(params[0]) + elif len(params) == 4: + left, top, width, height = (int(x) for x in params) + elif len(params) == 5: + self.screen, left, top, width, height = (int(x) for x in params) + self.img_size = img_size + self.stride = stride + self.transforms = transforms + self.auto = auto + self.mode = 'stream' + self.frame = 0 + self.sct = mss.mss() + + # Parse monitor shape + monitor = self.sct.monitors[self.screen] + self.top = monitor['top'] if top is None else (monitor['top'] + top) + self.left = monitor['left'] if left is None else (monitor['left'] + left) + self.width = width or monitor['width'] + self.height = height or monitor['height'] + self.monitor = {'left': self.left, 'top': self.top, 'width': self.width, 'height': self.height} + + def __iter__(self): + """Iterates over objects with the same structure as the monitor attribute.""" + return self + + def __next__(self): + """mss screen capture: get raw pixels from the screen as np array.""" + im0 = np.array(self.sct.grab(self.monitor))[:, :, :3] # [:, :, :3] BGRA to BGR + s = f'screen {self.screen} (LTWH): {self.left},{self.top},{self.width},{self.height}: ' + + if self.transforms: + im = self.transforms(im0) # transforms + else: + im = letterbox(im0, self.img_size, stride=self.stride, auto=self.auto)[0] # padded resize + im = im.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB + im = np.ascontiguousarray(im) # contiguous + self.frame += 1 + return str(self.screen), im, im0, None, s # screen, img, original img, im0s, s + + +class LoadImages: + # YOLOv5 image/video dataloader, i.e. `python detect.py --source image.jpg/vid.mp4` + def __init__(self, path, img_size=640, stride=32, auto=True, transforms=None, vid_stride=1): + """Initialize instance variables and check for valid input.""" + if isinstance(path, str) and Path(path).suffix == '.txt': # *.txt file with img/vid/dir on each line + path = Path(path).read_text().rsplit() + files = [] + for p in sorted(path) if isinstance(path, (list, tuple)) else [path]: + p = str(Path(p).resolve()) + if '*' in p: + files.extend(sorted(glob.glob(p, recursive=True))) # glob + elif os.path.isdir(p): + files.extend(sorted(glob.glob(os.path.join(p, '*.*')))) # dir + elif os.path.isfile(p): + files.append(p) # files + else: + raise FileNotFoundError(f'{p} does not exist') + + images = [x for x in files if x.split('.')[-1].lower() in IMG_FORMATS] + videos = [x for x in files if x.split('.')[-1].lower() in VID_FORMATS] + ni, nv = len(images), len(videos) + + self.img_size = img_size + self.stride = stride + self.files = images + videos + self.nf = ni + nv # number of files + self.video_flag = [False] * ni + [True] * nv + self.mode = 'image' + self.auto = auto + self.transforms = transforms # optional + self.vid_stride = vid_stride # video frame-rate stride + if any(videos): + self._new_video(videos[0]) # new video + else: + self.cap = None + assert self.nf > 0, f'No images or videos found in {p}. 
' \ + f'Supported formats are:\nimages: {IMG_FORMATS}\nvideos: {VID_FORMATS}' + + def __iter__(self): + """Returns an iterator object for iterating over images or videos found in a directory.""" + self.count = 0 + return self + + def __next__(self): + """Iterator's next item, performs transformation on image and returns path, transformed image, original image, capture and size.""" + if self.count == self.nf: + raise StopIteration + path = self.files[self.count] + + if self.video_flag[self.count]: + # Read video + self.mode = 'video' + for _ in range(self.vid_stride): + self.cap.grab() + ret_val, im0 = self.cap.retrieve() + while not ret_val: + self.count += 1 + self.cap.release() + if self.count == self.nf: # last video + raise StopIteration + path = self.files[self.count] + self._new_video(path) + ret_val, im0 = self.cap.read() + + self.frame += 1 + # im0 = self._cv2_rotate(im0) # for use if cv2 autorotation is False + s = f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: ' + + else: + # Read image + self.count += 1 + im0 = cv2.imread(path) # BGR + assert im0 is not None, f'Image Not Found {path}' + s = f'image {self.count}/{self.nf} {path}: ' + + if self.transforms: + im = self.transforms(im0) # transforms + else: + im = letterbox(im0, self.img_size, stride=self.stride, auto=self.auto)[0] # padded resize + im = im.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB + im = np.ascontiguousarray(im) # contiguous + + return path, im, im0, self.cap, s + + def _new_video(self, path): + """Create a new video capture object.""" + self.frame = 0 + self.cap = cv2.VideoCapture(path) + self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT) / self.vid_stride) + self.orientation = int(self.cap.get(cv2.CAP_PROP_ORIENTATION_META)) # rotation degrees + # self.cap.set(cv2.CAP_PROP_ORIENTATION_AUTO, 0) # disable https://github.com/ultralytics/yolov5/issues/8493 + + def _cv2_rotate(self, im): + """Rotate a cv2 video manually.""" + if self.orientation == 0: + return cv2.rotate(im, cv2.ROTATE_90_CLOCKWISE) + elif self.orientation == 180: + return cv2.rotate(im, cv2.ROTATE_90_COUNTERCLOCKWISE) + elif self.orientation == 90: + return cv2.rotate(im, cv2.ROTATE_180) + return im + + def __len__(self): + """Returns the number of files in the class instance.""" + return self.nf # number of files + + +class LoadStreams: + # YOLOv5 streamloader, i.e. `python detect.py --source 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP streams` + def __init__(self, sources='file.streams', img_size=640, stride=32, auto=True, transforms=None, vid_stride=1): + """Initialize YOLO detector with optional transforms and check input shapes.""" + torch.backends.cudnn.benchmark = True # faster for fixed-size inference + self.mode = 'stream' + self.img_size = img_size + self.stride = stride + self.vid_stride = vid_stride # video frame-rate stride + sources = Path(sources).read_text().rsplit() if os.path.isfile(sources) else [sources] + n = len(sources) + self.sources = [clean_str(x) for x in sources] # clean source names for later + self.imgs, self.fps, self.frames, self.threads = [None] * n, [0] * n, [0] * n, [None] * n + for i, s in enumerate(sources): # index, source + # Start thread to read frames from video stream + st = f'{i + 1}/{n}: {s}... ' + if urlparse(s).hostname in ('www.youtube.com', 'youtube.com', 'youtu.be'): # if source is YouTube video + # YouTube format i.e. 
'https://www.youtube.com/watch?v=Zgi9g1ksQHc' or 'https://youtu.be/Zgi9g1ksQHc' + check_requirements(('pafy', 'youtube_dl==2020.12.2')) + import pafy + s = pafy.new(s).getbest(preftype='mp4').url # YouTube URL + s = eval(s) if s.isnumeric() else s # i.e. s = '0' local webcam + if s == 0: + assert not is_colab(), '--source 0 webcam unsupported on Colab. Rerun command in a local environment.' + assert not is_kaggle(), '--source 0 webcam unsupported on Kaggle. Rerun command in a local environment.' + cap = cv2.VideoCapture(s) + assert cap.isOpened(), f'{st}Failed to open {s}' + w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) + h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) + fps = cap.get(cv2.CAP_PROP_FPS) # warning: may return 0 or nan + self.frames[i] = max(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), 0) or float('inf') # infinite stream fallback + self.fps[i] = max((fps if math.isfinite(fps) else 0) % 100, 0) or 30 # 30 FPS fallback + + _, self.imgs[i] = cap.read() # guarantee first frame + self.threads[i] = Thread(target=self.update, args=([i, cap, s]), daemon=True) + LOGGER.info(f'{st} Success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)') + self.threads[i].start() + LOGGER.info('') # newline + + # Check for common shapes + s = np.stack([letterbox(x, img_size, stride=stride, auto=auto)[0].shape for x in self.imgs]) + self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal + self.auto = auto and self.rect + self.transforms = transforms # optional + if not self.rect: + LOGGER.warning('WARNING ⚠️ Stream shapes differ. For optimal performance supply similarly-shaped streams.') + + def update(self, i, cap, stream): + """Read stream `i` frames in daemon thread.""" + n, f = 0, self.frames[i] # frame number, frame array + while cap.isOpened() and n < f: + n += 1 + cap.grab() # .read() = .grab() followed by .retrieve() + if n % self.vid_stride == 0: + success, im = cap.retrieve() + if success: + self.imgs[i] = im + else: + LOGGER.warning('WARNING ⚠️ Video stream unresponsive, please check your IP camera connection.') + self.imgs[i] = np.zeros_like(self.imgs[i]) + cap.open(stream) # re-open stream if signal was lost + time.sleep(0.0) # wait time + + def __iter__(self): + """Iterator that returns the class instance.""" + self.count = -1 + return self + + def __next__(self): + """Return a tuple containing transformed and resized image data.""" + self.count += 1 + if not all(x.is_alive() for x in self.threads) or cv2.waitKey(1) == ord('q'): # q to quit + cv2.destroyAllWindows() + raise StopIteration + + im0 = self.imgs.copy() + if self.transforms: + im = np.stack([self.transforms(x) for x in im0]) # transforms + else: + im = np.stack([letterbox(x, self.img_size, stride=self.stride, auto=self.auto)[0] for x in im0]) # resize + im = im[..., ::-1].transpose((0, 3, 1, 2)) # BGR to RGB, BHWC to BCHW + im = np.ascontiguousarray(im) # contiguous + + return self.sources, im, im0, None, '' + + def __len__(self): + """Returns the number of sources as the length of the object.""" + return len(self.sources) # 1E12 frames = 32 streams at 30 FPS for 30 years + + +def img2label_paths(img_paths): + """Define label paths as a function of image paths.""" + sa, sb = f'{os.sep}images{os.sep}', f'{os.sep}labels{os.sep}' # /images/, /labels/ substrings + return [sb.join(x.rsplit(sa, 1)).rsplit('.', 1)[0] + '.txt' for x in img_paths] + + +class LoadImagesAndLabels(Dataset): + """YOLOv5 train_loader/val_loader, loads images and labels for training and validation.""" + cache_version = 0.6 # 
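# Quick illustration of the /images/ -> /labels/ convention implemented by img2label_paths();
# the path below is hypothetical and the output shown assumes a POSIX os.sep.
img2label_paths(['datasets/coco128/images/train2017/000000000009.jpg'])
# -> ['datasets/coco128/labels/train2017/000000000009.txt']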
dataset labels *.cache version + rand_interp_methods = [cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_AREA, cv2.INTER_LANCZOS4] + + def __init__(self, + path, + img_size=640, + batch_size=16, + augment=False, + hyp=None, + rect=False, + image_weights=False, + cache_images=False, + single_cls=False, + stride=32, + pad=0.0, + min_items=0, + prefix=''): + self.img_size = img_size + self.augment = augment + self.hyp = hyp + self.image_weights = image_weights + self.rect = False if image_weights else rect + self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training) + self.mosaic_border = [-img_size // 2, -img_size // 2] + self.stride = stride + self.path = path + self.albumentations = Albumentations(size=img_size) if augment else None + + try: + f = [] # image files + for p in path if isinstance(path, list) else [path]: + p = Path(p) # os-agnostic + if p.is_dir(): # dir + f += glob.glob(str(p / '**' / '*.*'), recursive=True) + # f = list(p.rglob('*.*')) # pathlib + elif p.is_file(): # file + with open(p) as t: + t = t.read().strip().splitlines() + parent = str(p.parent) + os.sep + f += [x.replace('./', parent, 1) if x.startswith('./') else x for x in t] # to global path + # f += [p.parent / x.lstrip(os.sep) for x in t] # to global path (pathlib) + else: + raise FileNotFoundError(f'{prefix}{p} does not exist') + self.im_files = sorted(x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in IMG_FORMATS) + # self.img_files = sorted([x for x in f if x.suffix[1:].lower() in IMG_FORMATS]) # pathlib + assert self.im_files, f'{prefix}No images found' + except Exception as e: + raise FileNotFoundError(f'{prefix}Error loading data from {path}: {e}\n{HELP_URL}') from e + + # Check cache + self.label_files = img2label_paths(self.im_files) # labels + cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache') + try: + cache, exists = np.load(cache_path, allow_pickle=True).item(), True # load dict + assert cache['version'] == self.cache_version # matches current version + assert cache['hash'] == get_hash(self.label_files + self.im_files) # identical hash + except (FileNotFoundError, AssertionError, AttributeError): + cache, exists = self.cache_labels(cache_path, prefix), False # run cache ops + + # Display cache + nf, nm, ne, nc, n = cache.pop('results') # found, missing, empty, corrupt, total + if exists and LOCAL_RANK in (-1, 0): + d = f'Scanning {cache_path}... {nf} images, {nm + ne} backgrounds, {nc} corrupt' + tqdm(None, desc=prefix + d, total=n, initial=n, bar_format=TQDM_BAR_FORMAT) # display cache results + if cache['msgs']: + LOGGER.info('\n'.join(cache['msgs'])) # display warnings + assert nf > 0 or not augment, f'{prefix}No labels found in {cache_path}, can not start training. {HELP_URL}' + + # Read cache + [cache.pop(k) for k in ('hash', 'version', 'msgs')] # remove items + labels, shapes, self.segments = zip(*cache.values()) + nl = len(np.concatenate(labels, 0)) # number of labels + assert nl > 0 or not augment, f'{prefix}All labels empty in {cache_path}, can not start training. 
{HELP_URL}' + self.labels = list(labels) + self.shapes = np.array(shapes) + self.im_files = list(cache.keys()) # update + self.label_files = img2label_paths(cache.keys()) # update + + # Filter images + if min_items: + include = np.array([len(x) >= min_items for x in self.labels]).nonzero()[0].astype(int) + LOGGER.info(f'{prefix}{n - len(include)}/{n} images filtered from dataset') + self.im_files = [self.im_files[i] for i in include] + self.label_files = [self.label_files[i] for i in include] + self.labels = [self.labels[i] for i in include] + self.segments = [self.segments[i] for i in include] + self.shapes = self.shapes[include] # wh + + # Create indices + n = len(self.shapes) # number of images + bi = np.floor(np.arange(n) / batch_size).astype(int) # batch index + nb = bi[-1] + 1 # number of batches + self.batch = bi # batch index of image + self.n = n + self.indices = range(n) + + # Update labels + include_class = [] # filter labels to include only these classes (optional) + include_class_array = np.array(include_class).reshape(1, -1) + for i, (label, segment) in enumerate(zip(self.labels, self.segments)): + if include_class: + j = (label[:, 0:1] == include_class_array).any(1) + self.labels[i] = label[j] + if segment: + self.segments[i] = [segment[si] for si, idx in enumerate(j) if idx] + if single_cls: # single-class training, merge all classes into 0 + self.labels[i][:, 0] = 0 + + # Rectangular Training + if self.rect: + # Sort by aspect ratio + s = self.shapes # wh + ar = s[:, 1] / s[:, 0] # aspect ratio + irect = ar.argsort() + self.im_files = [self.im_files[i] for i in irect] + self.label_files = [self.label_files[i] for i in irect] + self.labels = [self.labels[i] for i in irect] + self.segments = [self.segments[i] for i in irect] + self.shapes = s[irect] # wh + ar = ar[irect] + + # Set training image shapes + shapes = [[1, 1]] * nb + for i in range(nb): + ari = ar[bi == i] + mini, maxi = ari.min(), ari.max() + if maxi < 1: + shapes[i] = [maxi, 1] + elif mini > 1: + shapes[i] = [1, 1 / mini] + + self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(int) * stride + + # Cache images into RAM/disk for faster training + if cache_images == 'ram' and not self.check_cache_ram(prefix=prefix): + cache_images = False + self.ims = [None] * n + self.npy_files = [Path(f).with_suffix('.npy') for f in self.im_files] + if cache_images: + b, gb = 0, 1 << 30 # bytes of cached images, bytes per gigabytes + self.im_hw0, self.im_hw = [None] * n, [None] * n + fcn = self.cache_images_to_disk if cache_images == 'disk' else self.load_image + with ThreadPool(NUM_THREADS) as pool: + results = pool.imap(fcn, range(n)) + pbar = tqdm(enumerate(results), total=n, bar_format=TQDM_BAR_FORMAT, disable=LOCAL_RANK > 0) + for i, x in pbar: + if cache_images == 'disk': + b += self.npy_files[i].stat().st_size + else: # 'ram' + self.ims[i], self.im_hw0[i], self.im_hw[i] = x # im, hw_orig, hw_resized = load_image(self, i) + b += self.ims[i].nbytes + pbar.desc = f'{prefix}Caching images ({b / gb:.1f}GB {cache_images})' + pbar.close() + + def check_cache_ram(self, safety_margin=0.1, prefix=''): + """Check image caching requirements vs available memory.""" + b, gb = 0, 1 << 30 # bytes of cached images, bytes per gigabytes + n = min(self.n, 30) # extrapolate from 30 random images + for _ in range(n): + im = cv2.imread(random.choice(self.im_files)) # sample image + ratio = self.img_size / max(im.shape[0], im.shape[1]) # max(h, w) # ratio + b += im.nbytes * ratio ** 2 + mem_required = b * self.n / n # 
GB required to cache dataset into RAM + mem = psutil.virtual_memory() + cache = mem_required * (1 + safety_margin) < mem.available # to cache or not to cache, that is the question + if not cache: + LOGGER.info(f'{prefix}{mem_required / gb:.1f}GB RAM required, ' + f'{mem.available / gb:.1f}/{mem.total / gb:.1f}GB available, ' + f"{'caching images ✅' if cache else 'not caching images ⚠️'}") + return cache + + def cache_labels(self, path=Path('./labels.cache'), prefix=''): + """Cache labels and save as numpy file for next time.""" + # Cache dataset labels, check images and read shapes + if path.exists(): + path.unlink() # remove *.cache file if exists + x = {} # dict + nm, nf, ne, nc, msgs = 0, 0, 0, 0, [] # number missing, found, empty, corrupt, messages + desc = f'{prefix}Scanning {path.parent / path.stem}...' + total = len(self.im_files) + with ThreadPool(NUM_THREADS) as pool: + results = pool.imap(verify_image_label, zip(self.im_files, self.label_files, repeat(prefix))) + pbar = tqdm(results, desc=desc, total=total, bar_format=TQDM_BAR_FORMAT) + for im_file, lb, shape, segments, nm_f, nf_f, ne_f, nc_f, msg in pbar: + nm += nm_f + nf += nf_f + ne += ne_f + nc += nc_f + if im_file: + x[im_file] = [lb, shape, segments] + if msg: + msgs.append(msg) + pbar.desc = f'{desc} {nf} images, {nm + ne} backgrounds, {nc} corrupt' + pbar.close() + + if msgs: + LOGGER.info('\n'.join(msgs)) + if nf == 0: + LOGGER.warning(f'{prefix}WARNING ⚠️ No labels found in {path}. {HELP_URL}') + x['hash'] = get_hash(self.label_files + self.im_files) + x['results'] = nf, nm, ne, nc, len(self.im_files) + x['msgs'] = msgs # warnings + x['version'] = self.cache_version # cache version + if is_dir_writeable(path.parent): + np.save(str(path), x) # save cache for next time + path.with_suffix('.cache.npy').rename(path) # remove .npy suffix + LOGGER.info(f'{prefix}New cache created: {path}') + else: + LOGGER.warning(f'{prefix}WARNING ⚠️ Cache directory {path.parent} is not writeable') # not writeable + return x + + def __len__(self): + """Returns the length of 'im_files' attribute.""" + return len(self.im_files) + + def __getitem__(self, index): + """Get a sample and its corresponding label, filename and shape from the dataset.""" + index = self.indices[index] # linear, shuffled, or image_weights + + hyp = self.hyp + mosaic = self.mosaic and random.random() < hyp['mosaic'] + if mosaic: + # Load mosaic + img, labels = self.load_mosaic(index) + shapes = None + + # MixUp augmentation + if random.random() < hyp['mixup']: + img, labels = mixup(img, labels, *self.load_mosaic(random.randint(0, self.n - 1))) + + else: + # Load image + img, (h0, w0), (h, w) = self.load_image(index) + + # Letterbox + shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape + img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment) + shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling + + labels = self.labels[index].copy() + if labels.size: # normalized xywh to pixel xyxy format + labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1]) + + if self.augment: + img, labels = random_perspective(img, + labels, + degrees=hyp['degrees'], + translate=hyp['translate'], + scale=hyp['scale'], + shear=hyp['shear'], + perspective=hyp['perspective']) + + nl = len(labels) # number of labels + if nl: + labels[:, 1:5] = xyxy2xywhn(labels[:, 1:5], w=img.shape[1], h=img.shape[0], clip=True, eps=1E-3) + + if self.augment: + # Albumentations + img, labels = 
self.albumentations(img, labels) + nl = len(labels) # update after albumentations + + # HSV color-space + augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v']) + + # Flip up-down + if random.random() < hyp['flipud']: + img = np.flipud(img) + if nl: + labels[:, 2] = 1 - labels[:, 2] + + # Flip left-right + if random.random() < hyp['fliplr']: + img = np.fliplr(img) + if nl: + labels[:, 1] = 1 - labels[:, 1] + + # Cutouts + # labels = cutout(img, labels, p=0.5) + # nl = len(labels) # update after cutout + + labels_out = torch.zeros((nl, 6)) + if nl: + labels_out[:, 1:] = torch.from_numpy(labels) + + # Convert + img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB + img = np.ascontiguousarray(img) + + return torch.from_numpy(img), labels_out, self.im_files[index], shapes + + def load_image(self, i): + """Loads 1 image from dataset index 'i', returns (im, original hw, resized hw).""" + im, f, fn = self.ims[i], self.im_files[i], self.npy_files[i], + if im is None: # not cached in RAM + if fn.exists(): # load npy + im = np.load(fn) + else: # read image + im = cv2.imread(f) # BGR + assert im is not None, f'Image Not Found {f}' + h0, w0 = im.shape[:2] # orig hw + r = self.img_size / max(h0, w0) # ratio + if r != 1: # if sizes are not equal + interp = cv2.INTER_LINEAR if (self.augment or r > 1) else cv2.INTER_AREA + im = cv2.resize(im, (math.ceil(w0 * r), math.ceil(h0 * r)), interpolation=interp) + return im, (h0, w0), im.shape[:2] # im, hw_original, hw_resized + return self.ims[i], self.im_hw0[i], self.im_hw[i] # im, hw_original, hw_resized + + def cache_images_to_disk(self, i): + """Saves an image as an *.npy file for faster loading.""" + f = self.npy_files[i] + if not f.exists(): + np.save(f.as_posix(), cv2.imread(self.im_files[i])) + + def load_mosaic(self, index): + """YOLOv5 4-mosaic loader. 
Loads 1 image + 3 random images into a 4-image mosaic.""" + labels4, segments4 = [], [] + s = self.img_size + yc, xc = (int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border) # mosaic center x, y + indices = [index] + random.choices(self.indices, k=3) # 3 additional image indices + random.shuffle(indices) + for i, index in enumerate(indices): + # Load image + img, _, (h, w) = self.load_image(index) + + # Place img in img4 + if i == 0: # top left + img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles + x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image) + x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image) + elif i == 1: # top right + x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc + x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h + elif i == 2: # bottom left + x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h) + x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h) + elif i == 3: # bottom right + x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h) + x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h) + + img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax] + padw = x1a - x1b + padh = y1a - y1b + + # Labels + labels, segments = self.labels[index].copy(), self.segments[index].copy() + if labels.size: + labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh) # normalized xywh to pixel xyxy format + segments = [xyn2xy(x, w, h, padw, padh) for x in segments] + labels4.append(labels) + segments4.extend(segments) + + # Concat/clip labels + labels4 = np.concatenate(labels4, 0) + for x in (labels4[:, 1:], *segments4): + np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective() + # img4, labels4 = replicate(img4, labels4) # replicate + + # Augment + img4, labels4, segments4 = copy_paste(img4, labels4, segments4, p=self.hyp['copy_paste']) + img4, labels4 = random_perspective(img4, + labels4, + segments4, + degrees=self.hyp['degrees'], + translate=self.hyp['translate'], + scale=self.hyp['scale'], + shear=self.hyp['shear'], + perspective=self.hyp['perspective'], + border=self.mosaic_border) # border to remove + + return img4, labels4 + + def load_mosaic9(self, index): + """YOLOv5 9-mosaic loader. 
Loads 1 image + 8 random images into a 9-image mosaic.""" + labels9, segments9 = [], [] + s = self.img_size + indices = [index] + random.choices(self.indices, k=8) # 8 additional image indices + random.shuffle(indices) + hp, wp = -1, -1 # height, width previous + for i, index in enumerate(indices): + # Load image + img, _, (h, w) = self.load_image(index) + + # Place img in img9 + if i == 0: # center + img9 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles + h0, w0 = h, w + c = s, s, s + w, s + h # xmin, ymin, xmax, ymax (base) coordinates + elif i == 1: # top + c = s, s - h, s + w, s + elif i == 2: # top right + c = s + wp, s - h, s + wp + w, s + elif i == 3: # right + c = s + w0, s, s + w0 + w, s + h + elif i == 4: # bottom right + c = s + w0, s + hp, s + w0 + w, s + hp + h + elif i == 5: # bottom + c = s + w0 - w, s + h0, s + w0, s + h0 + h + elif i == 6: # bottom left + c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h + elif i == 7: # left + c = s - w, s + h0 - h, s, s + h0 + elif i == 8: # top left + c = s - w, s + h0 - hp - h, s, s + h0 - hp + + padx, pady = c[:2] + x1, y1, x2, y2 = (max(x, 0) for x in c) # allocate coords + + # Labels + labels, segments = self.labels[index].copy(), self.segments[index].copy() + if labels.size: + labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padx, pady) # normalized xywh to pixel xyxy format + segments = [xyn2xy(x, w, h, padx, pady) for x in segments] + labels9.append(labels) + segments9.extend(segments) + + # Image + img9[y1:y2, x1:x2] = img[y1 - pady:, x1 - padx:] # img9[ymin:ymax, xmin:xmax] + hp, wp = h, w # height, width previous + + # Offset + yc, xc = (int(random.uniform(0, s)) for _ in self.mosaic_border) # mosaic center x, y + img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s] + + # Concat/clip labels + labels9 = np.concatenate(labels9, 0) + labels9[:, [1, 3]] -= xc + labels9[:, [2, 4]] -= yc + c = np.array([xc, yc]) # centers + segments9 = [x - c for x in segments9] + + for x in (labels9[:, 1:], *segments9): + np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective() + # img9, labels9 = replicate(img9, labels9) # replicate + + # Augment + img9, labels9, segments9 = copy_paste(img9, labels9, segments9, p=self.hyp['copy_paste']) + img9, labels9 = random_perspective(img9, + labels9, + segments9, + degrees=self.hyp['degrees'], + translate=self.hyp['translate'], + scale=self.hyp['scale'], + shear=self.hyp['shear'], + perspective=self.hyp['perspective'], + border=self.mosaic_border) # border to remove + + return img9, labels9 + + @staticmethod + def collate_fn(batch): + """YOLOv8 collate function, outputs dict.""" + im, label, path, shapes = zip(*batch) # transposed + for i, lb in enumerate(label): + lb[:, 0] = i # add target image index for build_targets() + batch_idx, cls, bboxes = torch.cat(label, 0).split((1, 1, 4), dim=1) + return { + 'ori_shape': tuple((x[0] if x else None) for x in shapes), + 'ratio_pad': tuple((x[1] if x else None) for x in shapes), + 'im_file': path, + 'img': torch.stack(im, 0), + 'cls': cls, + 'bboxes': bboxes, + 'batch_idx': batch_idx.view(-1)} + + @staticmethod + def collate_fn_old(batch): + """YOLOv5 original collate function.""" + im, label, path, shapes = zip(*batch) # transposed + for i, lb in enumerate(label): + lb[:, 0] = i # add target image index for build_targets() + return torch.stack(im, 0), torch.cat(label, 0), path, shapes + + +# Ancillary functions -------------------------------------------------------------------------------------------------- +def 
flatten_recursive(path=DATASETS_DIR / 'coco128'): + """Flatten a recursive directory by bringing all files to top level.""" + new_path = Path(f'{str(path)}_flat') + if os.path.exists(new_path): + shutil.rmtree(new_path) # delete output folder + os.makedirs(new_path) # make new output folder + for file in tqdm(glob.glob(f'{str(Path(path))}/**/*.*', recursive=True)): + shutil.copyfile(file, new_path / Path(file).name) + + +def extract_boxes(path=DATASETS_DIR / 'coco128'): # from utils.dataloaders import *; extract_boxes() + # Convert detection dataset into classification dataset, with one directory per class + path = Path(path) # images dir + shutil.rmtree(path / 'classification') if (path / 'classification').is_dir() else None # remove existing + files = list(path.rglob('*.*')) + n = len(files) # number of files + for im_file in tqdm(files, total=n): + if im_file.suffix[1:] in IMG_FORMATS: + # Image + im = cv2.imread(str(im_file))[..., ::-1] # BGR to RGB + h, w = im.shape[:2] + + # Labels + lb_file = Path(img2label_paths([str(im_file)])[0]) + if Path(lb_file).exists(): + with open(lb_file) as f: + lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32) # labels + + for j, x in enumerate(lb): + c = int(x[0]) # class + f = (path / 'classifier') / f'{c}' / f'{path.stem}_{im_file.stem}_{j}.jpg' # new filename + if not f.parent.is_dir(): + f.parent.mkdir(parents=True) + + b = x[1:] * [w, h, w, h] # box + # B[2:] = b[2:].max() # rectangle to square + b[2:] = b[2:] * 1.2 + 3 # pad + b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(int) + + b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image + b[[1, 3]] = np.clip(b[[1, 3]], 0, h) + assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}' + + +def autosplit(path=DATASETS_DIR / 'coco128/images', weights=(0.9, 0.1, 0.0), annotated_only=False): + """Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files + Usage: from utils.dataloaders import *; autosplit() + Arguments + path: Path to images directory + weights: Train, val, test weights (list, tuple) + annotated_only: Only use images with an annotated txt file + """ + path = Path(path) # images dir + files = sorted(x for x in path.rglob('*.*') if x.suffix[1:].lower() in IMG_FORMATS) # image files only + n = len(files) # number of files + random.seed(0) # for reproducibility + indices = random.choices([0, 1, 2], weights=weights, k=n) # assign each image to a split + + txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt'] # 3 txt files + for x in txt: + if (path.parent / x).exists(): + (path.parent / x).unlink() # remove existing + + print(f'Autosplitting images from {path}' + ', using *.txt labeled images only' * annotated_only) + for i, img in tqdm(zip(indices, files), total=n): + if not annotated_only or Path(img2label_paths([str(img)])[0]).exists(): # check label + with open(path.parent / txt[i], 'a') as f: + f.write(f'./{img.relative_to(path.parent).as_posix()}' + '\n') # add image to txt file + + +def verify_image_label(args): + """Verify one image-label pair.""" + im_file, lb_file, prefix = args + nm, nf, ne, nc, msg, segments = 0, 0, 0, 0, '', [] # number (missing, found, empty, corrupt), message, segments + try: + # Verify images + im = Image.open(im_file) + im.verify() # PIL verify + shape = exif_size(im) # image size + assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels' + assert im.format.lower() in IMG_FORMATS, f'invalid image format {im.format}' + if 
im.format.lower() in ('jpg', 'jpeg'): + with open(im_file, 'rb') as f: + f.seek(-2, 2) + if f.read() != b'\xff\xd9': # corrupt JPEG + ImageOps.exif_transpose(Image.open(im_file)).save(im_file, 'JPEG', subsampling=0, quality=100) + msg = f'{prefix}WARNING ⚠️ {im_file}: corrupt JPEG restored and saved' + + # Verify labels + if os.path.isfile(lb_file): + nf = 1 # label found + with open(lb_file) as f: + lb = [x.split() for x in f.read().strip().splitlines() if len(x)] + if any(len(x) > 6 for x in lb): # is segment + classes = np.array([x[0] for x in lb], dtype=np.float32) + segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in lb] # (cls, xy1...) + lb = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1) # (cls, xywh) + lb = np.array(lb, dtype=np.float32) + nl = len(lb) + if nl: + assert lb.shape[1] == 5, f'labels require 5 columns, {lb.shape[1]} columns detected' + assert (lb >= 0).all(), f'negative label values {lb[lb < 0]}' + assert (lb[:, 1:] <= 1).all(), f'non-normalized or out of bounds coordinates {lb[:, 1:][lb[:, 1:] > 1]}' + _, i = np.unique(lb, axis=0, return_index=True) + if len(i) < nl: # duplicate row check + lb = lb[i] # remove duplicates + if segments: + segments = [segments[x] for x in i] + msg = f'{prefix}WARNING ⚠️ {im_file}: {nl - len(i)} duplicate labels removed' + else: + ne = 1 # label empty + lb = np.zeros((0, 5), dtype=np.float32) + else: + nm = 1 # label missing + lb = np.zeros((0, 5), dtype=np.float32) + return im_file, lb, shape, segments, nm, nf, ne, nc, msg + except Exception as e: + nc = 1 + msg = f'{prefix}WARNING ⚠️ {im_file}: ignoring corrupt image/label: {e}' + return [None, None, None, None, nm, nf, ne, nc, msg] + + +# Classification dataloaders ------------------------------------------------------------------------------------------- +class ClassificationDataset(torchvision.datasets.ImageFolder): + """ + YOLOv5 Classification Dataset. 
+ Arguments + root: Dataset path + transform: torchvision transforms, used by default + album_transform: Albumentations transforms, used if installed + """ + + def __init__(self, root, augment, imgsz, cache=False): + """Initialize YOLO dataset with root, augmentation, image size, and cache parameters.""" + super().__init__(root=root) + self.torch_transforms = classify_transforms(imgsz) + self.album_transforms = classify_albumentations(augment, imgsz) if augment else None + self.cache_ram = cache is True or cache == 'ram' + self.cache_disk = cache == 'disk' + self.samples = [list(x) + [Path(x[0]).with_suffix('.npy'), None] for x in self.samples] # file, index, npy, im + + def __getitem__(self, i): + """Retrieves data items of 'dataset' via indices & creates InfiniteDataLoader.""" + f, j, fn, im = self.samples[i] # filename, index, filename.with_suffix('.npy'), image + if self.cache_ram and im is None: + im = self.samples[i][3] = cv2.imread(f) + elif self.cache_disk: + if not fn.exists(): # load npy + np.save(fn.as_posix(), cv2.imread(f)) + im = np.load(fn) + else: # read image + im = cv2.imread(f) # BGR + if self.album_transforms: + sample = self.album_transforms(image=cv2.cvtColor(im, cv2.COLOR_BGR2RGB))['image'] + else: + sample = self.torch_transforms(im) + return sample, j + + +def create_classification_dataloader(path, + imgsz=224, + batch_size=16, + augment=True, + cache=False, + rank=-1, + workers=8, + shuffle=True): + """Returns Dataloader object to be used with YOLOv5 Classifier.""" + with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP + dataset = ClassificationDataset(root=path, imgsz=imgsz, augment=augment, cache=cache) + batch_size = min(batch_size, len(dataset)) + nd = torch.cuda.device_count() + nw = min([os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers]) + sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle) + generator = torch.Generator() + generator.manual_seed(6148914691236517205 + RANK) + return InfiniteDataLoader(dataset, + batch_size=batch_size, + shuffle=shuffle and sampler is None, + num_workers=nw, + sampler=sampler, + pin_memory=PIN_MEMORY, + worker_init_fn=seed_worker, + generator=generator) # or DataLoader(persistent_workers=True) diff --git a/ultralytics/yolo/data/dataset.py b/ultralytics/yolo/data/dataset.py new file mode 100644 index 0000000..17e6d47 --- /dev/null +++ b/ultralytics/yolo/data/dataset.py @@ -0,0 +1,274 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +from itertools import repeat +from multiprocessing.pool import ThreadPool +from pathlib import Path + +import cv2 +import numpy as np +import torch +import torchvision +from tqdm import tqdm + +from ..utils import LOCAL_RANK, NUM_THREADS, TQDM_BAR_FORMAT, is_dir_writeable +from .augment import Compose, Format, Instances, LetterBox, classify_albumentations, classify_transforms, v8_transforms +from .base import BaseDataset +from .utils import HELP_URL, LOGGER, get_hash, img2label_paths, verify_image_label + + +class YOLODataset(BaseDataset): + """ + Dataset class for loading object detection and/or segmentation labels in YOLO format. + + Args: + data (dict, optional): A dataset YAML dictionary. Defaults to None. + use_segments (bool, optional): If True, segmentation masks are used as labels. Defaults to False. + use_keypoints (bool, optional): If True, keypoints are used as labels. Defaults to False. 
+ + Returns: + (torch.utils.data.Dataset): A PyTorch dataset object that can be used for training an object detection model. + """ + cache_version = '1.0.2' # dataset labels *.cache version, >= 1.0.0 for YOLOv8 + rand_interp_methods = [cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_AREA, cv2.INTER_LANCZOS4] + + def __init__(self, *args, data=None, use_segments=False, use_keypoints=False, **kwargs): + self.use_segments = use_segments + self.use_keypoints = use_keypoints + self.data = data + assert not (self.use_segments and self.use_keypoints), 'Can not use both segments and keypoints.' + super().__init__(*args, **kwargs) + + def cache_labels(self, path=Path('./labels.cache')): + """Cache dataset labels, check images and read shapes. + Args: + path (Path): path where to save the cache file (default: Path('./labels.cache')). + Returns: + (dict): labels. + """ + x = {'labels': []} + nm, nf, ne, nc, msgs = 0, 0, 0, 0, [] # number missing, found, empty, corrupt, messages + desc = f'{self.prefix}Scanning {path.parent / path.stem}...' + total = len(self.im_files) + nkpt, ndim = self.data.get('kpt_shape', (0, 0)) + if self.use_keypoints and (nkpt <= 0 or ndim not in (2, 3)): + raise ValueError("'kpt_shape' in data.yaml missing or incorrect. Should be a list with [number of " + "keypoints, number of dims (2 for x,y or 3 for x,y,visible)], i.e. 'kpt_shape: [17, 3]'") + with ThreadPool(NUM_THREADS) as pool: + results = pool.imap(func=verify_image_label, + iterable=zip(self.im_files, self.label_files, repeat(self.prefix), + repeat(self.use_keypoints), repeat(len(self.data['names'])), repeat(nkpt), + repeat(ndim))) + pbar = tqdm(results, desc=desc, total=total, bar_format=TQDM_BAR_FORMAT) + for im_file, lb, shape, segments, keypoint, nm_f, nf_f, ne_f, nc_f, msg in pbar: + nm += nm_f + nf += nf_f + ne += ne_f + nc += nc_f + if im_file: + x['labels'].append( + dict( + im_file=im_file, + shape=shape, + cls=lb[:, 0:1], # n, 1 + bboxes=lb[:, 1:], # n, 4 + segments=segments, + keypoints=keypoint, + normalized=True, + bbox_format='xywh')) + if msg: + msgs.append(msg) + pbar.desc = f'{desc} {nf} images, {nm + ne} backgrounds, {nc} corrupt' + pbar.close() + + if msgs: + LOGGER.info('\n'.join(msgs)) + if nf == 0: + LOGGER.warning(f'{self.prefix}WARNING ⚠️ No labels found in {path}. 
{HELP_URL}') + x['hash'] = get_hash(self.label_files + self.im_files) + x['results'] = nf, nm, ne, nc, len(self.im_files) + x['msgs'] = msgs # warnings + x['version'] = self.cache_version # cache version + if is_dir_writeable(path.parent): + if path.exists(): + path.unlink() # remove *.cache file if exists + np.save(str(path), x) # save cache for next time + path.with_suffix('.cache.npy').rename(path) # remove .npy suffix + LOGGER.info(f'{self.prefix}New cache created: {path}') + else: + LOGGER.warning(f'{self.prefix}WARNING ⚠️ Cache directory {path.parent} is not writeable, cache not saved.') + return x + + def get_labels(self): + """Returns dictionary of labels for YOLO training.""" + self.label_files = img2label_paths(self.im_files) + cache_path = Path(self.label_files[0]).parent.with_suffix('.cache') + try: + import gc + gc.disable() # reduce pickle load time https://github.com/ultralytics/ultralytics/pull/1585 + cache, exists = np.load(str(cache_path), allow_pickle=True).item(), True # load dict + gc.enable() + assert cache['version'] == self.cache_version # matches current version + assert cache['hash'] == get_hash(self.label_files + self.im_files) # identical hash + except (FileNotFoundError, AssertionError, AttributeError): + cache, exists = self.cache_labels(cache_path), False # run cache ops + + # Display cache + nf, nm, ne, nc, n = cache.pop('results') # found, missing, empty, corrupt, total + if exists and LOCAL_RANK in (-1, 0): + d = f'Scanning {cache_path}... {nf} images, {nm + ne} backgrounds, {nc} corrupt' + tqdm(None, desc=self.prefix + d, total=n, initial=n, bar_format=TQDM_BAR_FORMAT) # display cache results + if cache['msgs']: + LOGGER.info('\n'.join(cache['msgs'])) # display warnings + if nf == 0: # number of labels found + raise FileNotFoundError(f'{self.prefix}No labels found in {cache_path}, can not start training. {HELP_URL}') + + # Read cache + [cache.pop(k) for k in ('hash', 'version', 'msgs')] # remove items + labels = cache['labels'] + self.im_files = [lb['im_file'] for lb in labels] # update im_files + + # Check if the dataset is all boxes or all segments + lengths = ((len(lb['cls']), len(lb['bboxes']), len(lb['segments'])) for lb in labels) + len_cls, len_boxes, len_segments = (sum(x) for x in zip(*lengths)) + if len_segments and len_boxes != len_segments: + LOGGER.warning( + f'WARNING ⚠️ Box and segment counts should be equal, but got len(segments) = {len_segments}, ' + f'len(boxes) = {len_boxes}. To resolve this only boxes will be used and all segments will be removed. ' + 'To avoid this please supply either a detect or segment dataset, not a detect-segment mixed dataset.') + for lb in labels: + lb['segments'] = [] + if len_cls == 0: + raise ValueError(f'All labels empty in {cache_path}, can not start training without labels. 
{HELP_URL}') + return labels + + # TODO: use hyp config to set all these augmentations + def build_transforms(self, hyp=None): + """Builds and appends transforms to the list.""" + if self.augment: + hyp.mosaic = hyp.mosaic if self.augment and not self.rect else 0.0 + hyp.mixup = hyp.mixup if self.augment and not self.rect else 0.0 + transforms = v8_transforms(self, self.imgsz, hyp) + else: + transforms = Compose([LetterBox(new_shape=(self.imgsz, self.imgsz), scaleup=False)]) + transforms.append( + Format(bbox_format='xywh', + normalize=True, + return_mask=self.use_segments, + return_keypoint=self.use_keypoints, + batch_idx=True, + mask_ratio=hyp.mask_ratio, + mask_overlap=hyp.overlap_mask)) + return transforms + + def close_mosaic(self, hyp): + """Sets mosaic, copy_paste and mixup options to 0.0 and builds transformations.""" + hyp.mosaic = 0.0 # set mosaic ratio=0.0 + hyp.copy_paste = 0.0 # keep the same behavior as previous v8 close-mosaic + hyp.mixup = 0.0 # keep the same behavior as previous v8 close-mosaic + self.transforms = self.build_transforms(hyp) + + def update_labels_info(self, label): + """custom your label format here.""" + # NOTE: cls is not with bboxes now, classification and semantic segmentation need an independent cls label + # we can make it also support classification and semantic segmentation by add or remove some dict keys there. + bboxes = label.pop('bboxes') + segments = label.pop('segments') + keypoints = label.pop('keypoints', None) + bbox_format = label.pop('bbox_format') + normalized = label.pop('normalized') + label['instances'] = Instances(bboxes, segments, keypoints, bbox_format=bbox_format, normalized=normalized) + return label + + @staticmethod + def collate_fn(batch): + """Collates data samples into batches.""" + new_batch = {} + keys = batch[0].keys() + values = list(zip(*[list(b.values()) for b in batch])) + for i, k in enumerate(keys): + value = values[i] + if k == 'img': + value = torch.stack(value, 0) + if k in ['masks', 'keypoints', 'bboxes', 'cls']: + value = torch.cat(value, 0) + new_batch[k] = value + new_batch['batch_idx'] = list(new_batch['batch_idx']) + for i in range(len(new_batch['batch_idx'])): + new_batch['batch_idx'][i] += i # add target image index for build_targets() + new_batch['batch_idx'] = torch.cat(new_batch['batch_idx'], 0) + return new_batch + + +# Classification dataloaders ------------------------------------------------------------------------------------------- +class ClassificationDataset(torchvision.datasets.ImageFolder): + """ + YOLO Classification Dataset. + + Args: + root (str): Dataset path. + + Attributes: + cache_ram (bool): True if images should be cached in RAM, False otherwise. + cache_disk (bool): True if images should be cached on disk, False otherwise. + samples (list): List of samples containing file, index, npy, and im. + torch_transforms (callable): torchvision transforms applied to the dataset. + album_transforms (callable, optional): Albumentations transforms applied to the dataset if augment is True. + """ + + def __init__(self, root, args, augment=False, cache=False): + """ + Initialize YOLO object with root, image size, augmentations, and cache settings. + + Args: + root (str): Dataset path. + args (Namespace): Argument parser containing dataset related settings. + augment (bool, optional): True if dataset should be augmented, False otherwise. Defaults to False. + cache (bool | str | optional): Cache setting, can be True, False, 'ram' or 'disk'. Defaults to False. 
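# Minimal sketch of constructing this dataset; 'datasets/mnist160/train' is a hypothetical
# ImageFolder-style directory and SimpleNamespace stands in for the parsed args (only
# args.imgsz is read when augment=False).
from types import SimpleNamespace
ds = ClassificationDataset('datasets/mnist160/train', args=SimpleNamespace(imgsz=224), augment=False)
sample = ds[0]  # {'img': transformed image tensor, 'cls': class index}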
+ """ + super().__init__(root=root) + if augment and args.fraction < 1.0: # reduce training fraction + self.samples = self.samples[:round(len(self.samples) * args.fraction)] + self.cache_ram = cache is True or cache == 'ram' + self.cache_disk = cache == 'disk' + self.samples = [list(x) + [Path(x[0]).with_suffix('.npy'), None] for x in self.samples] # file, index, npy, im + self.torch_transforms = classify_transforms(args.imgsz) + self.album_transforms = classify_albumentations( + augment=augment, + size=args.imgsz, + scale=(1.0 - args.scale, 1.0), # (0.08, 1.0) + hflip=args.fliplr, + vflip=args.flipud, + hsv_h=args.hsv_h, # HSV-Hue augmentation (fraction) + hsv_s=args.hsv_s, # HSV-Saturation augmentation (fraction) + hsv_v=args.hsv_v, # HSV-Value augmentation (fraction) + mean=(0.0, 0.0, 0.0), # IMAGENET_MEAN + std=(1.0, 1.0, 1.0), # IMAGENET_STD + auto_aug=False) if augment else None + + def __getitem__(self, i): + """Returns subset of data and targets corresponding to given indices.""" + f, j, fn, im = self.samples[i] # filename, index, filename.with_suffix('.npy'), image + if self.cache_ram and im is None: + im = self.samples[i][3] = cv2.imread(f) + elif self.cache_disk: + if not fn.exists(): # load npy + np.save(fn.as_posix(), cv2.imread(f)) + im = np.load(fn) + else: # read image + im = cv2.imread(f) # BGR + if self.album_transforms: + sample = self.album_transforms(image=cv2.cvtColor(im, cv2.COLOR_BGR2RGB))['image'] + else: + sample = self.torch_transforms(im) + return {'img': sample, 'cls': j} + + def __len__(self) -> int: + return len(self.samples) + + +# TODO: support semantic segmentation +class SemanticDataset(BaseDataset): + + def __init__(self): + """Initialize a SemanticDataset object.""" + super().__init__() diff --git a/ultralytics/yolo/data/dataset_wrappers.py b/ultralytics/yolo/data/dataset_wrappers.py new file mode 100644 index 0000000..72a6fb5 --- /dev/null +++ b/ultralytics/yolo/data/dataset_wrappers.py @@ -0,0 +1,53 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +import collections +from copy import deepcopy + +from .augment import LetterBox + + +class MixAndRectDataset: + """ + A dataset class that applies mosaic and mixup transformations as well as rectangular training. + + Attributes: + dataset: The base dataset. + imgsz: The size of the images in the dataset. + """ + + def __init__(self, dataset): + """ + Args: + dataset (BaseDataset): The base dataset to apply transformations to. + """ + self.dataset = dataset + self.imgsz = dataset.imgsz + + def __len__(self): + """Returns the number of items in the dataset.""" + return len(self.dataset) + + def __getitem__(self, index): + """ + Applies mosaic, mixup and rectangular training transformations to an item in the dataset. + + Args: + index (int): Index of the item in the dataset. + + Returns: + (dict): A dictionary containing the transformed item data. 
+ """ + labels = deepcopy(self.dataset[index]) + for transform in self.dataset.transforms.tolist(): + # Mosaic and mixup + if hasattr(transform, 'get_indexes'): + indexes = transform.get_indexes(self.dataset) + if not isinstance(indexes, collections.abc.Sequence): + indexes = [indexes] + labels['mix_labels'] = [deepcopy(self.dataset[index]) for index in indexes] + if self.dataset.rect and isinstance(transform, LetterBox): + transform.new_shape = self.dataset.batch_shapes[self.dataset.batch[index]] + labels = transform(labels) + if 'mix_labels' in labels: + labels.pop('mix_labels') + return labels diff --git a/ultralytics/yolo/data/scripts/download_weights.sh b/ultralytics/yolo/data/scripts/download_weights.sh new file mode 100644 index 0000000..72502a3 --- /dev/null +++ b/ultralytics/yolo/data/scripts/download_weights.sh @@ -0,0 +1,18 @@ +#!/bin/bash +# Ultralytics YOLO 🚀, AGPL-3.0 license +# Download latest models from https://github.com/ultralytics/assets/releases +# Example usage: bash ultralytics/yolo/data/scripts/download_weights.sh +# parent +# └── weights +# ├── yolov8n.pt ← downloads here +# ├── yolov8s.pt +# └── ... + +python - < 9) & (shape[1] > 9), f'image size {shape} <10 pixels' + assert im.format.lower() in IMG_FORMATS, f'invalid image format {im.format}' + if im.format.lower() in ('jpg', 'jpeg'): + with open(im_file, 'rb') as f: + f.seek(-2, 2) + if f.read() != b'\xff\xd9': # corrupt JPEG + ImageOps.exif_transpose(Image.open(im_file)).save(im_file, 'JPEG', subsampling=0, quality=100) + msg = f'{prefix}WARNING ⚠️ {im_file}: corrupt JPEG restored and saved' + + # Verify labels + if os.path.isfile(lb_file): + nf = 1 # label found + with open(lb_file) as f: + lb = [x.split() for x in f.read().strip().splitlines() if len(x)] + if any(len(x) > 6 for x in lb) and (not keypoint): # is segment + classes = np.array([x[0] for x in lb], dtype=np.float32) + segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in lb] # (cls, xy1...) + lb = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1) # (cls, xywh) + lb = np.array(lb, dtype=np.float32) + nl = len(lb) + if nl: + if keypoint: + assert lb.shape[1] == (5 + nkpt * ndim), f'labels require {(5 + nkpt * ndim)} columns each' + assert (lb[:, 5::ndim] <= 1).all(), 'non-normalized or out of bounds coordinate labels' + assert (lb[:, 6::ndim] <= 1).all(), 'non-normalized or out of bounds coordinate labels' + else: + assert lb.shape[1] == 5, f'labels require 5 columns, {lb.shape[1]} columns detected' + assert (lb[:, 1:] <= 1).all(), \ + f'non-normalized or out of bounds coordinates {lb[:, 1:][lb[:, 1:] > 1]}' + assert (lb >= 0).all(), f'negative label values {lb[lb < 0]}' + # All labels + max_cls = int(lb[:, 0].max()) # max label count + assert max_cls <= num_cls, \ + f'Label class {max_cls} exceeds dataset class count {num_cls}. 
' \ + f'Possible class labels are 0-{num_cls - 1}' + _, i = np.unique(lb, axis=0, return_index=True) + if len(i) < nl: # duplicate row check + lb = lb[i] # remove duplicates + if segments: + segments = [segments[x] for x in i] + msg = f'{prefix}WARNING ⚠️ {im_file}: {nl - len(i)} duplicate labels removed' + else: + ne = 1 # label empty + lb = np.zeros((0, (5 + nkpt * ndim)), dtype=np.float32) if keypoint else np.zeros( + (0, 5), dtype=np.float32) + else: + nm = 1 # label missing + lb = np.zeros((0, (5 + nkpt * ndim)), dtype=np.float32) if keypoint else np.zeros((0, 5), dtype=np.float32) + if keypoint: + keypoints = lb[:, 5:].reshape(-1, nkpt, ndim) + if ndim == 2: + kpt_mask = np.ones(keypoints.shape[:2], dtype=np.float32) + kpt_mask = np.where(keypoints[..., 0] < 0, 0.0, kpt_mask) + kpt_mask = np.where(keypoints[..., 1] < 0, 0.0, kpt_mask) + keypoints = np.concatenate([keypoints, kpt_mask[..., None]], axis=-1) # (nl, nkpt, 3) + lb = lb[:, :5] + return im_file, lb, shape, segments, keypoints, nm, nf, ne, nc, msg + except Exception as e: + nc = 1 + msg = f'{prefix}WARNING ⚠️ {im_file}: ignoring corrupt image/label: {e}' + return [None, None, None, None, None, nm, nf, ne, nc, msg] + + +def polygon2mask(imgsz, polygons, color=1, downsample_ratio=1): + """ + Args: + imgsz (tuple): The image size. + polygons (list[np.ndarray]): [N, M], N is the number of polygons, M is the number of points(Be divided by 2). + color (int): color + downsample_ratio (int): downsample ratio + """ + mask = np.zeros(imgsz, dtype=np.uint8) + polygons = np.asarray(polygons) + polygons = polygons.astype(np.int32) + shape = polygons.shape + polygons = polygons.reshape(shape[0], -1, 2) + cv2.fillPoly(mask, polygons, color=color) + nh, nw = (imgsz[0] // downsample_ratio, imgsz[1] // downsample_ratio) + # NOTE: fillPoly firstly then resize is trying the keep the same way + # of loss calculation when mask-ratio=1. + mask = cv2.resize(mask, (nw, nh)) + return mask + + +def polygons2masks(imgsz, polygons, color, downsample_ratio=1): + """ + Args: + imgsz (tuple): The image size. 
+ polygons (list[np.ndarray]): each polygon is [N, M], N is number of polygons, M is number of points (M % 2 = 0) + color (int): color + downsample_ratio (int): downsample ratio + """ + masks = [] + for si in range(len(polygons)): + mask = polygon2mask(imgsz, [polygons[si].reshape(-1)], color, downsample_ratio) + masks.append(mask) + return np.array(masks) + + +def polygons2masks_overlap(imgsz, segments, downsample_ratio=1): + """Return a (640, 640) overlap mask.""" + masks = np.zeros((imgsz[0] // downsample_ratio, imgsz[1] // downsample_ratio), + dtype=np.int32 if len(segments) > 255 else np.uint8) + areas = [] + ms = [] + for si in range(len(segments)): + mask = polygon2mask(imgsz, [segments[si].reshape(-1)], downsample_ratio=downsample_ratio, color=1) + ms.append(mask) + areas.append(mask.sum()) + areas = np.asarray(areas) + index = np.argsort(-areas) + ms = np.array(ms)[index] + for i in range(len(segments)): + mask = ms[i] * (i + 1) + masks = masks + mask + masks = np.clip(masks, a_min=0, a_max=i + 1) + return masks, index + + +def check_det_dataset(dataset, autodownload=True): + """Download, check and/or unzip dataset if not found locally.""" + data = check_file(dataset) + + # Download (optional) + extract_dir = '' + if isinstance(data, (str, Path)) and (zipfile.is_zipfile(data) or is_tarfile(data)): + new_dir = safe_download(data, dir=DATASETS_DIR, unzip=True, delete=False, curl=False) + data = next((DATASETS_DIR / new_dir).rglob('*.yaml')) + extract_dir, autodownload = data.parent, False + + # Read yaml (optional) + if isinstance(data, (str, Path)): + data = yaml_load(data, append_filename=True) # dictionary + + # Checks + for k in 'train', 'val': + if k not in data: + raise SyntaxError( + emojis(f"{dataset} '{k}:' key missing ❌.\n'train' and 'val' are required in all data YAMLs.")) + if 'names' not in data and 'nc' not in data: + raise SyntaxError(emojis(f"{dataset} key missing ❌.\n either 'names' or 'nc' are required in all data YAMLs.")) + if 'names' in data and 'nc' in data and len(data['names']) != data['nc']: + raise SyntaxError(emojis(f"{dataset} 'names' length {len(data['names'])} and 'nc: {data['nc']}' must match.")) + if 'names' not in data: + data['names'] = [f'class_{i}' for i in range(data['nc'])] + else: + data['nc'] = len(data['names']) + + data['names'] = check_class_names(data['names']) + + # Resolve paths + path = Path(extract_dir or data.get('path') or Path(data.get('yaml_file', '')).parent) # dataset root + + if not path.is_absolute(): + path = (DATASETS_DIR / path).resolve() + data['path'] = path # download scripts + for k in 'train', 'val', 'test': + if data.get(k): # prepend path + if isinstance(data[k], str): + x = (path / data[k]).resolve() + if not x.exists() and data[k].startswith('../'): + x = (path / data[k][3:]).resolve() + data[k] = str(x) + else: + data[k] = [str((path / x).resolve()) for x in data[k]] + + # Parse yaml + train, val, test, s = (data.get(x) for x in ('train', 'val', 'test', 'download')) + if val: + val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])] # val path + if not all(x.exists() for x in val): + name = clean_url(dataset) # dataset name with URL auth stripped + m = f"\nDataset '{name}' images not found ⚠️, missing paths %s" % [str(x) for x in val if not x.exists()] + if s and autodownload: + LOGGER.warning(m) + else: + m += f"\nNote dataset download directory is '{DATASETS_DIR}'. 
You can update this in '{SETTINGS_YAML}'" + raise FileNotFoundError(m) + t = time.time() + if s.startswith('http') and s.endswith('.zip'): # URL + safe_download(url=s, dir=DATASETS_DIR, delete=True) + r = None # success + elif s.startswith('bash '): # bash script + LOGGER.info(f'Running {s} ...') + r = os.system(s) + else: # python script + r = exec(s, {'yaml': data}) # return None + dt = f'({round(time.time() - t, 1)}s)' + s = f"success ✅ {dt}, saved to {colorstr('bold', DATASETS_DIR)}" if r in (0, None) else f'failure {dt} ❌' + LOGGER.info(f'Dataset download {s}\n') + check_font('Arial.ttf' if is_ascii(data['names']) else 'Arial.Unicode.ttf') # download fonts + + return data # dictionary + + +def check_cls_dataset(dataset: str, split=''): + """ + Checks a classification dataset such as Imagenet. + + This function accepts a `dataset` name and attempts to retrieve the corresponding dataset information. + If the dataset is not found locally, it attempts to download the dataset from the internet and save it locally. + + Args: + dataset (str): The name of the dataset. + split (str, optional): The split of the dataset. Either 'val', 'test', or ''. Defaults to ''. + + Returns: + (dict): A dictionary containing the following keys: + - 'train' (Path): The directory path containing the training set of the dataset. + - 'val' (Path): The directory path containing the validation set of the dataset. + - 'test' (Path): The directory path containing the test set of the dataset. + - 'nc' (int): The number of classes in the dataset. + - 'names' (dict): A dictionary of class names in the dataset. + + Raises: + FileNotFoundError: If the specified dataset is not found and cannot be downloaded. + """ + + dataset = Path(dataset) + data_dir = (dataset if dataset.is_dir() else (DATASETS_DIR / dataset)).resolve() + if not data_dir.is_dir(): + LOGGER.info(f'\nDataset not found ⚠️, missing path {data_dir}, attempting download...') + t = time.time() + if str(dataset) == 'imagenet': + subprocess.run(f"bash {ROOT / 'yolo/data/scripts/get_imagenet.sh'}", shell=True, check=True) + else: + url = f'https://github.com/ultralytics/yolov5/releases/download/v1.0/{dataset}.zip' + download(url, dir=data_dir.parent) + s = f"Dataset download success ✅ ({time.time() - t:.1f}s), saved to {colorstr('bold', data_dir)}\n" + LOGGER.info(s) + train_set = data_dir / 'train' + val_set = data_dir / 'val' if (data_dir / 'val').exists() else None # data/test or data/val + test_set = data_dir / 'test' if (data_dir / 'test').exists() else None # data/val or data/test + if split == 'val' and not val_set: + LOGGER.info("WARNING ⚠️ Dataset 'split=val' not found, using 'split=test' instead.") + elif split == 'test' and not test_set: + LOGGER.info("WARNING ⚠️ Dataset 'split=test' not found, using 'split=val' instead.") + + nc = len([x for x in (data_dir / 'train').glob('*') if x.is_dir()]) # number of classes + names = [x.name for x in (data_dir / 'train').iterdir() if x.is_dir()] # class names list + names = dict(enumerate(sorted(names))) + return {'train': train_set, 'val': val_set or test_set, 'test': test_set or val_set, 'nc': nc, 'names': names} + + +class HUBDatasetStats(): + """ + A class for generating HUB dataset JSON and `-hub` dataset directory. + + Args: + path (str): Path to data.yaml or data.zip (with data.yaml inside data.zip). Default is 'coco128.yaml'. + task (str): Dataset task. Options are 'detect', 'segment', 'pose', 'classify'. Default is 'detect'. + autodownload (bool): Attempt to download dataset if not found locally. 
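For reference, the smallest data YAML that passes the checks in `check_det_dataset` above needs a `train` key, a `val` key, and either `names` or `nc`. A self-contained sketch with made-up paths and class names:

```python
from pathlib import Path
import yaml
from ultralytics.yolo.data.utils import check_det_dataset

root = Path('datasets/coco8-mini').resolve()            # illustrative location
for split in ('images/train', 'images/val'):
    (root / split).mkdir(parents=True, exist_ok=True)   # split paths must exist (or a 'download' entry is needed)

cfg = {
    'path': str(root),                  # dataset root; a relative path would resolve against DATASETS_DIR
    'train': 'images/train',            # required
    'val': 'images/val',                # required
    'names': {0: 'person', 1: 'car'},   # or 'nc'; if both are given their lengths must match
}
Path('coco8-mini.yaml').write_text(yaml.safe_dump(cfg))

info = check_det_dataset('coco8-mini.yaml', autodownload=False)
print(info['nc'], info['train'])        # 'nc' is filled in from 'names'; split paths come back absolute
```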
Default is False. + + Usage + from ultralytics.yolo.data.utils import HUBDatasetStats + stats = HUBDatasetStats('/Users/glennjocher/Downloads/coco8.zip', task='detect') # detect dataset + stats = HUBDatasetStats('/Users/glennjocher/Downloads/coco8-seg.zip', task='segment') # segment dataset + stats = HUBDatasetStats('/Users/glennjocher/Downloads/coco8-pose.zip', task='pose') # pose dataset + stats.get_json(save=False) + stats.process_images() + """ + + def __init__(self, path='coco128.yaml', task='detect', autodownload=False): + """Initialize class.""" + LOGGER.info(f'Starting HUB dataset checks for {path}....') + zipped, data_dir, yaml_path = self._unzip(Path(path)) + try: + # data = yaml_load(check_yaml(yaml_path)) # data dict + data = check_det_dataset(yaml_path, autodownload) # data dict + if zipped: + data['path'] = data_dir + except Exception as e: + raise Exception('error/HUB/dataset_stats/yaml_load') from e + + self.hub_dir = Path(str(data['path']) + '-hub') + self.im_dir = self.hub_dir / 'images' + self.im_dir.mkdir(parents=True, exist_ok=True) # makes /images + self.stats = {'nc': len(data['names']), 'names': list(data['names'].values())} # statistics dictionary + self.data = data + self.task = task # detect, segment, pose, classify + + @staticmethod + def _find_yaml(dir): + """Return data.yaml file.""" + files = list(dir.glob('*.yaml')) or list(dir.rglob('*.yaml')) # try root level first and then recursive + assert files, f'No *.yaml file found in {dir}' + if len(files) > 1: + files = [f for f in files if f.stem == dir.stem] # prefer *.yaml files that match dir name + assert files, f'Multiple *.yaml files found in {dir}, only 1 *.yaml file allowed' + assert len(files) == 1, f'Multiple *.yaml files found: {files}, only 1 *.yaml file allowed in {dir}' + return files[0] + + def _unzip(self, path): + """Unzip data.zip.""" + if not str(path).endswith('.zip'): # path is data.yaml + return False, None, path + unzip_dir = unzip_file(path, path=path.parent) + assert unzip_dir.is_dir(), f'Error unzipping {path}, {unzip_dir} not found. ' \ + f'path/to/abc.zip MUST unzip to path/to/abc/' + return True, str(unzip_dir), self._find_yaml(unzip_dir) # zipped, data_dir, yaml_path + + def _hub_ops(self, f): + """Saves a compressed image for HUB previews.""" + compress_one_image(f, self.im_dir / Path(f).name) # save to dataset-hub + + def get_json(self, save=False, verbose=False): + """Return dataset JSON for Ultralytics HUB.""" + from ultralytics.yolo.data import YOLODataset # ClassificationDataset + + def _round(labels): + """Update labels to integer class and 4 decimal place floats.""" + if self.task == 'detect': + coordinates = labels['bboxes'] + elif self.task == 'segment': + coordinates = [x.flatten() for x in labels['segments']] + elif self.task == 'pose': + n = labels['keypoints'].shape[0] + coordinates = np.concatenate((labels['bboxes'], labels['keypoints'].reshape(n, -1)), 1) + else: + raise ValueError('Undefined dataset task.') + zipped = zip(labels['cls'], coordinates) + return [[int(c), *(round(float(x), 4) for x in points)] for c, points in zipped] + + for split in 'train', 'val', 'test': + if self.data.get(split) is None: + self.stats[split] = None # i.e. 
no test set + continue + + dataset = YOLODataset(img_path=self.data[split], + data=self.data, + use_segments=self.task == 'segment', + use_keypoints=self.task == 'pose') + x = np.array([ + np.bincount(label['cls'].astype(int).flatten(), minlength=self.data['nc']) + for label in tqdm(dataset.labels, total=len(dataset), desc='Statistics')]) # shape(128x80) + self.stats[split] = { + 'instance_stats': { + 'total': int(x.sum()), + 'per_class': x.sum(0).tolist()}, + 'image_stats': { + 'total': len(dataset), + 'unlabelled': int(np.all(x == 0, 1).sum()), + 'per_class': (x > 0).sum(0).tolist()}, + 'labels': [{ + Path(k).name: _round(v)} for k, v in zip(dataset.im_files, dataset.labels)]} + + # Save, print and return + if save: + stats_path = self.hub_dir / 'stats.json' + LOGGER.info(f'Saving {stats_path.resolve()}...') + with open(stats_path, 'w') as f: + json.dump(self.stats, f) # save stats.json + if verbose: + LOGGER.info(json.dumps(self.stats, indent=2, sort_keys=False)) + return self.stats + + def process_images(self): + """Compress images for Ultralytics HUB.""" + from ultralytics.yolo.data import YOLODataset # ClassificationDataset + + for split in 'train', 'val', 'test': + if self.data.get(split) is None: + continue + dataset = YOLODataset(img_path=self.data[split], data=self.data) + with ThreadPool(NUM_THREADS) as pool: + for _ in tqdm(pool.imap(self._hub_ops, dataset.im_files), total=len(dataset), desc=f'{split} images'): + pass + LOGGER.info(f'Done. All images saved to {self.im_dir}') + return self.im_dir + + +def compress_one_image(f, f_new=None, max_dim=1920, quality=50): + """ + Compresses a single image file to reduced size while preserving its aspect ratio and quality using either the + Python Imaging Library (PIL) or OpenCV library. If the input image is smaller than the maximum dimension, it will + not be resized. + + Args: + f (str): The path to the input image file. + f_new (str, optional): The path to the output image file. If not specified, the input file will be overwritten. + max_dim (int, optional): The maximum dimension (width or height) of the output image. Default is 1920 pixels. + quality (int, optional): The image compression quality as a percentage. Default is 50%. + + Usage: + from pathlib import Path + from ultralytics.yolo.data.utils import compress_one_image + for f in Path('/Users/glennjocher/Downloads/dataset').rglob('*.jpg'): + compress_one_image(f) + """ + try: # use PIL + im = Image.open(f) + r = max_dim / max(im.height, im.width) # ratio + if r < 1.0: # image too large + im = im.resize((int(im.width * r), int(im.height * r))) + im.save(f_new or f, 'JPEG', quality=quality, optimize=True) # save + except Exception as e: # use OpenCV + LOGGER.info(f'WARNING ⚠️ HUB ops PIL failure {f}: {e}') + im = cv2.imread(f) + im_height, im_width = im.shape[:2] + r = max_dim / max(im_height, im_width) # ratio + if r < 1.0: # image too large + im = cv2.resize(im, (int(im_width * r), int(im_height * r)), interpolation=cv2.INTER_AREA) + cv2.imwrite(str(f_new or f), im) + + +def delete_dsstore(path): + """ + Deletes all ".DS_store" files under a specified directory. + + Args: + path (str, optional): The directory path where the ".DS_store" files should be deleted. + + Usage: + from ultralytics.yolo.data.utils import delete_dsstore + delete_dsstore('/Users/glennjocher/Downloads/dataset') + + Note: + ".DS_store" files are created by the Apple operating system and contain metadata about folders and files. 
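Putting the pieces above together, generating HUB statistics for a zipped dataset looks roughly like this (the zip path is illustrative, following the pattern in the class docstring):

```python
from ultralytics.yolo.data.utils import HUBDatasetStats

stats = HUBDatasetStats('coco8.zip', task='detect')   # zip must unzip to a folder containing a data.yaml
summary = stats.get_json(save=True)                   # also writes <dataset>-hub/stats.json
for split in ('train', 'val', 'test'):
    s = summary.get(split)
    if s:                                             # None when the split is absent from data.yaml
        print(split, s['image_stats']['total'], 'images,', s['instance_stats']['total'], 'instances')
stats.process_images()                                # compressed previews land in <dataset>-hub/images
```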
They + are hidden system files and can cause issues when transferring files between different operating systems. + """ + # Delete Apple .DS_store files + files = list(Path(path).rglob('.DS_store')) + LOGGER.info(f'Deleting *.DS_store files: {files}') + for f in files: + f.unlink() + + +def zip_directory(dir, use_zipfile_library=True): + """ + Zips a directory and saves the archive to the specified output path. + + Args: + dir (str): The path to the directory to be zipped. + use_zipfile_library (bool): Whether to use zipfile library or shutil for zipping. + + Usage: + from ultralytics.yolo.data.utils import zip_directory + zip_directory('/Users/glennjocher/Downloads/playground') + + zip -r coco8-pose.zip coco8-pose + """ + delete_dsstore(dir) + if use_zipfile_library: + dir = Path(dir) + with zipfile.ZipFile(dir.with_suffix('.zip'), 'w', zipfile.ZIP_DEFLATED) as zip_file: + for file_path in dir.glob('**/*'): + if file_path.is_file(): + zip_file.write(file_path, file_path.relative_to(dir)) + else: + import shutil + shutil.make_archive(dir, 'zip', dir) diff --git a/ultralytics/yolo/engine/__init__.py b/ultralytics/yolo/engine/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/ultralytics/yolo/engine/exporter.py b/ultralytics/yolo/engine/exporter.py new file mode 100644 index 0000000..61a108e --- /dev/null +++ b/ultralytics/yolo/engine/exporter.py @@ -0,0 +1,926 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +""" +Export a YOLOv8 PyTorch model to other formats. TensorFlow exports authored by https://github.com/zldrobit + +Format | `format=argument` | Model +--- | --- | --- +PyTorch | - | yolov8n.pt +TorchScript | `torchscript` | yolov8n.torchscript +ONNX | `onnx` | yolov8n.onnx +OpenVINO | `openvino` | yolov8n_openvino_model/ +TensorRT | `engine` | yolov8n.engine +CoreML | `coreml` | yolov8n.mlmodel +TensorFlow SavedModel | `saved_model` | yolov8n_saved_model/ +TensorFlow GraphDef | `pb` | yolov8n.pb +TensorFlow Lite | `tflite` | yolov8n.tflite +TensorFlow Edge TPU | `edgetpu` | yolov8n_edgetpu.tflite +TensorFlow.js | `tfjs` | yolov8n_web_model/ +PaddlePaddle | `paddle` | yolov8n_paddle_model/ +NCNN | `ncnn` | yolov8n_ncnn_model/ + +Requirements: + $ pip install ultralytics[export] + +Python: + from ultralytics import YOLO + model = YOLO('yolov8n.pt') + results = model.export(format='onnx') + +CLI: + $ yolo mode=export model=yolov8n.pt format=onnx + +Inference: + $ yolo predict model=yolov8n.pt # PyTorch + yolov8n.torchscript # TorchScript + yolov8n.onnx # ONNX Runtime or OpenCV DNN with --dnn + yolov8n_openvino_model # OpenVINO + yolov8n.engine # TensorRT + yolov8n.mlmodel # CoreML (macOS-only) + yolov8n_saved_model # TensorFlow SavedModel + yolov8n.pb # TensorFlow GraphDef + yolov8n.tflite # TensorFlow Lite + yolov8n_edgetpu.tflite # TensorFlow Edge TPU + yolov8n_paddle_model # PaddlePaddle + +TensorFlow.js: + $ cd .. 
&& git clone https://github.com/zldrobit/tfjs-yolov5-example.git && cd tfjs-yolov5-example + $ npm install + $ ln -s ../../yolov5/yolov8n_web_model public/yolov8n_web_model + $ npm start +""" +import json +import os +import shutil +import subprocess +import time +import warnings +from copy import deepcopy +from datetime import datetime +from pathlib import Path + +import torch + +from ultralytics.nn.autobackend import check_class_names +from ultralytics.nn.modules import C2f, Detect, RTDETRDecoder +from ultralytics.nn.tasks import DetectionModel, SegmentationModel +from ultralytics.yolo.cfg import get_cfg +from ultralytics.yolo.utils import (ARM64, DEFAULT_CFG, LINUX, LOGGER, MACOS, ROOT, __version__, callbacks, colorstr, + get_default_args, yaml_save) +from ultralytics.yolo.utils.checks import check_imgsz, check_requirements, check_version +from ultralytics.yolo.utils.downloads import attempt_download_asset, get_github_assets +from ultralytics.yolo.utils.files import file_size +from ultralytics.yolo.utils.ops import Profile +from ultralytics.yolo.utils.torch_utils import get_latest_opset, select_device, smart_inference_mode + + +def export_formats(): + """YOLOv8 export formats.""" + import pandas + x = [ + ['PyTorch', '-', '.pt', True, True], + ['TorchScript', 'torchscript', '.torchscript', True, True], + ['ONNX', 'onnx', '.onnx', True, True], + ['OpenVINO', 'openvino', '_openvino_model', True, False], + ['TensorRT', 'engine', '.engine', False, True], + ['CoreML', 'coreml', '.mlmodel', True, False], + ['TensorFlow SavedModel', 'saved_model', '_saved_model', True, True], + ['TensorFlow GraphDef', 'pb', '.pb', True, True], + ['TensorFlow Lite', 'tflite', '.tflite', True, False], + ['TensorFlow Edge TPU', 'edgetpu', '_edgetpu.tflite', True, False], + ['TensorFlow.js', 'tfjs', '_web_model', True, False], + ['PaddlePaddle', 'paddle', '_paddle_model', True, True], + ['NCNN', 'ncnn', '_ncnn_model', True, True], ] + return pandas.DataFrame(x, columns=['Format', 'Argument', 'Suffix', 'CPU', 'GPU']) + + +def gd_outputs(gd): + """TensorFlow GraphDef model output node names.""" + name_list, input_list = [], [] + for node in gd.node: # tensorflow.core.framework.node_def_pb2.NodeDef + name_list.append(node.name) + input_list.extend(node.input) + return sorted(f'{x}:0' for x in list(set(name_list) - set(input_list)) if not x.startswith('NoOp')) + + +def try_export(inner_func): + """YOLOv8 export decorator, i..e @try_export.""" + inner_args = get_default_args(inner_func) + + def outer_func(*args, **kwargs): + """Export a model.""" + prefix = inner_args['prefix'] + try: + with Profile() as dt: + f, model = inner_func(*args, **kwargs) + LOGGER.info(f'{prefix} export success ✅ {dt.t:.1f}s, saved as {f} ({file_size(f):.1f} MB)') + return f, model + except Exception as e: + LOGGER.info(f'{prefix} export failure ❌ {dt.t:.1f}s: {e}') + return None, None + + return outer_func + + +class Exporter: + """ + A class for exporting a model. + + Attributes: + args (SimpleNamespace): Configuration for the exporter. + save_dir (Path): Directory to save results. + """ + + def __init__(self, cfg=DEFAULT_CFG, overrides=None, _callbacks=None): + """ + Initializes the Exporter class. + + Args: + cfg (str, optional): Path to a configuration file. Defaults to DEFAULT_CFG. + overrides (dict, optional): Configuration overrides. Defaults to None. + _callbacks (list, optional): List of callback functions. Defaults to None. 
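A quick way to see which `format=` arguments the exporter accepts is to query the table returned by `export_formats()` above; a small sketch using only what that function already provides:

```python
from ultralytics.yolo.engine.exporter import export_formats

fmts = export_formats()                     # pandas DataFrame: Format, Argument, Suffix, CPU, GPU
print(fmts[fmts.GPU].Argument.tolist())     # formats flagged for GPU inference
print(tuple(fmts['Argument'][1:]))          # the tuple Exporter.__call__ validates 'format' against
```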
+ """ + self.args = get_cfg(cfg, overrides) + self.callbacks = _callbacks or callbacks.get_default_callbacks() + callbacks.add_integration_callbacks(self) + + @smart_inference_mode() + def __call__(self, model=None): + """Returns list of exported files/dirs after running callbacks.""" + self.run_callbacks('on_export_start') + t = time.time() + format = self.args.format.lower() # to lowercase + if format in ('tensorrt', 'trt'): # engine aliases + format = 'engine' + fmts = tuple(export_formats()['Argument'][1:]) # available export formats + flags = [x == format for x in fmts] + if sum(flags) != 1: + raise ValueError(f"Invalid export format='{format}'. Valid formats are {fmts}") + jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle, ncnn = flags # export booleans + + # Load PyTorch model + self.device = select_device('cpu' if self.args.device is None else self.args.device) + + # Checks + model.names = check_class_names(model.names) + if self.args.half and onnx and self.device.type == 'cpu': + LOGGER.warning('WARNING ⚠️ half=True only compatible with GPU export, i.e. use device=0') + self.args.half = False + assert not self.args.dynamic, 'half=True not compatible with dynamic=True, i.e. use only one.' + self.imgsz = check_imgsz(self.args.imgsz, stride=model.stride, min_dim=2) # check image size + if self.args.optimize: + assert not ncnn, "optimize=True not compatible with format='ncnn', i.e. use optimize=False" + assert self.device.type == 'cpu', "optimize=True not compatible with cuda devices, i.e. use device='cpu'" + if edgetpu and not LINUX: + raise SystemError('Edge TPU export only supported on Linux. See https://coral.ai/docs/edgetpu/compiler/') + + # Input + im = torch.zeros(self.args.batch, 3, *self.imgsz).to(self.device) + file = Path( + getattr(model, 'pt_path', None) or getattr(model, 'yaml_file', None) or model.yaml.get('yaml_file', '')) + if file.suffix == '.yaml': + file = Path(file.name) + + # Update model + model = deepcopy(model).to(self.device) + for p in model.parameters(): + p.requires_grad = False + model.eval() + model.float() + model = model.fuse() + for k, m in model.named_modules(): + if isinstance(m, (Detect, RTDETRDecoder)): # Segment and Pose use Detect base class + m.dynamic = self.args.dynamic + m.export = True + m.format = self.args.format + elif isinstance(m, C2f) and not any((saved_model, pb, tflite, edgetpu, tfjs)): + # EdgeTPU does not support FlexSplitV while split provides cleaner ONNX graph + m.forward = m.forward_split + + y = None + for _ in range(2): + y = model(im) # dry runs + if self.args.half and (engine or onnx) and self.device.type != 'cpu': + im, model = im.half(), model.half() # to FP16 + + # Filter warnings + warnings.filterwarnings('ignore', category=torch.jit.TracerWarning) # suppress TracerWarning + warnings.filterwarnings('ignore', category=UserWarning) # suppress shape prim::Constant missing ONNX warning + warnings.filterwarnings('ignore', category=DeprecationWarning) # suppress CoreML np.bool deprecation warning + + # Assign + self.im = im + self.model = model + self.file = file + self.output_shape = tuple(y.shape) if isinstance(y, torch.Tensor) else \ + tuple(tuple(x.shape if isinstance(x, torch.Tensor) else []) for x in y) + self.pretty_name = Path(self.model.yaml.get('yaml_file', self.file)).stem.replace('yolo', 'YOLO') + trained_on = f'trained on {Path(self.args.data).name}' if self.args.data else '(untrained)' + description = f'Ultralytics {self.pretty_name} model {trained_on}' + self.metadata = { + 
'description': description, + 'author': 'Ultralytics', + 'license': 'AGPL-3.0 https://ultralytics.com/license', + 'date': datetime.now().isoformat(), + 'version': __version__, + 'stride': int(max(model.stride)), + 'task': model.task, + 'batch': self.args.batch, + 'imgsz': self.imgsz, + 'names': model.names} # model metadata + if model.task == 'pose': + self.metadata['kpt_shape'] = model.model[-1].kpt_shape + + LOGGER.info(f"\n{colorstr('PyTorch:')} starting from {file} with input shape {tuple(im.shape)} BCHW and " + f'output shape(s) {self.output_shape} ({file_size(file):.1f} MB)') + + # Exports + f = [''] * len(fmts) # exported filenames + if jit or ncnn: # TorchScript + f[0], _ = self.export_torchscript() + if engine: # TensorRT required before ONNX + f[1], _ = self.export_engine() + if onnx or xml: # OpenVINO requires ONNX + f[2], _ = self.export_onnx() + if xml: # OpenVINO + f[3], _ = self.export_openvino() + if coreml: # CoreML + f[4], _ = self.export_coreml() + if any((saved_model, pb, tflite, edgetpu, tfjs)): # TensorFlow formats + self.args.int8 |= edgetpu + f[5], s_model = self.export_saved_model() + if pb or tfjs: # pb prerequisite to tfjs + f[6], _ = self.export_pb(s_model) + if tflite: + f[7], _ = self.export_tflite(s_model, nms=False, agnostic_nms=self.args.agnostic_nms) + if edgetpu: + f[8], _ = self.export_edgetpu(tflite_model=Path(f[5]) / f'{self.file.stem}_full_integer_quant.tflite') + if tfjs: + f[9], _ = self.export_tfjs() + if paddle: # PaddlePaddle + f[10], _ = self.export_paddle() + if ncnn: # NCNN + f[11], _ = self.export_ncnn() + + # Finish + f = [str(x) for x in f if x] # filter out '' and None + if any(f): + f = str(Path(f[-1])) + square = self.imgsz[0] == self.imgsz[1] + s = '' if square else f"WARNING ⚠️ non-PyTorch val requires square images, 'imgsz={self.imgsz}' will not " \ + f"work. Use export 'imgsz={max(self.imgsz)}' if val is required." 
+ imgsz = self.imgsz[0] if square else str(self.imgsz)[1:-1].replace(' ', '') + data = f'data={self.args.data}' if model.task == 'segment' and format == 'pb' else '' + LOGGER.info( + f'\nExport complete ({time.time() - t:.1f}s)' + f"\nResults saved to {colorstr('bold', file.parent.resolve())}" + f'\nPredict: yolo predict task={model.task} model={f} imgsz={imgsz} {data}' + f'\nValidate: yolo val task={model.task} model={f} imgsz={imgsz} data={self.args.data} {s}' + f'\nVisualize: https://netron.app') + + self.run_callbacks('on_export_end') + return f # return list of exported files/dirs + + @try_export + def export_torchscript(self, prefix=colorstr('TorchScript:')): + """YOLOv8 TorchScript model export.""" + LOGGER.info(f'\n{prefix} starting export with torch {torch.__version__}...') + f = self.file.with_suffix('.torchscript') + + ts = torch.jit.trace(self.model, self.im, strict=False) + extra_files = {'config.txt': json.dumps(self.metadata)} # torch._C.ExtraFilesMap() + if self.args.optimize: # https://pytorch.org/tutorials/recipes/mobile_interpreter.html + LOGGER.info(f'{prefix} optimizing for mobile...') + from torch.utils.mobile_optimizer import optimize_for_mobile + optimize_for_mobile(ts)._save_for_lite_interpreter(str(f), _extra_files=extra_files) + else: + ts.save(str(f), _extra_files=extra_files) + return f, None + + @try_export + def export_onnx(self, prefix=colorstr('ONNX:')): + """YOLOv8 ONNX export.""" + requirements = ['onnx>=1.12.0'] + if self.args.simplify: + requirements += ['onnxsim>=0.4.17', 'onnxruntime-gpu' if torch.cuda.is_available() else 'onnxruntime'] + check_requirements(requirements) + import onnx # noqa + + opset_version = self.args.opset or get_latest_opset() + LOGGER.info(f'\n{prefix} starting export with onnx {onnx.__version__} opset {opset_version}...') + f = str(self.file.with_suffix('.onnx')) + + output_names = ['output0', 'output1'] if isinstance(self.model, SegmentationModel) else ['output0'] + dynamic = self.args.dynamic + if dynamic: + dynamic = {'images': {0: 'batch', 2: 'height', 3: 'width'}} # shape(1,3,640,640) + if isinstance(self.model, SegmentationModel): + dynamic['output0'] = {0: 'batch', 1: 'anchors'} # shape(1,25200,85) + dynamic['output1'] = {0: 'batch', 2: 'mask_height', 3: 'mask_width'} # shape(1,32,160,160) + elif isinstance(self.model, DetectionModel): + dynamic['output0'] = {0: 'batch', 1: 'anchors'} # shape(1,25200,85) + + torch.onnx.export( + self.model.cpu() if dynamic else self.model, # --dynamic only compatible with cpu + self.im.cpu() if dynamic else self.im, + f, + verbose=False, + opset_version=opset_version, + do_constant_folding=True, # WARNING: DNN inference with torch>=1.12 may require do_constant_folding=False + input_names=['images'], + output_names=output_names, + dynamic_axes=dynamic or None) + + # Checks + model_onnx = onnx.load(f) # load onnx model + # onnx.checker.check_model(model_onnx) # check onnx model + + # Simplify + if self.args.simplify: + try: + import onnxsim + + LOGGER.info(f'{prefix} simplifying with onnxsim {onnxsim.__version__}...') + # subprocess.run(f'onnxsim {f} {f}', shell=True) + model_onnx, check = onnxsim.simplify(model_onnx) + assert check, 'Simplified ONNX model could not be validated' + except Exception as e: + LOGGER.info(f'{prefix} simplifier failure: {e}') + + # Metadata + for k, v in self.metadata.items(): + meta = model_onnx.metadata_props.add() + meta.key, meta.value = k, str(v) + + onnx.save(model_onnx, f) + return f, model_onnx + + @try_export + def export_openvino(self, 
prefix=colorstr('OpenVINO:')): + """YOLOv8 OpenVINO export.""" + check_requirements('openvino-dev>=2023.0') # requires openvino-dev: https://pypi.org/project/openvino-dev/ + import openvino.runtime as ov # noqa + from openvino.tools import mo # noqa + + LOGGER.info(f'\n{prefix} starting export with openvino {ov.__version__}...') + f = str(self.file).replace(self.file.suffix, f'_openvino_model{os.sep}') + f_onnx = self.file.with_suffix('.onnx') + f_ov = str(Path(f) / self.file.with_suffix('.xml').name) + + ov_model = mo.convert_model(f_onnx, + model_name=self.pretty_name, + framework='onnx', + compress_to_fp16=self.args.half) # export + + # Set RT info + ov_model.set_rt_info('YOLOv8', ['model_info', 'model_type']) + ov_model.set_rt_info(True, ['model_info', 'reverse_input_channels']) + ov_model.set_rt_info(114, ['model_info', 'pad_value']) + ov_model.set_rt_info([255.0], ['model_info', 'scale_values']) + ov_model.set_rt_info(self.args.iou, ['model_info', 'iou_threshold']) + ov_model.set_rt_info([v.replace(' ', '_') for k, v in sorted(self.model.names.items())], + ['model_info', 'labels']) + if self.model.task != 'classify': + ov_model.set_rt_info('fit_to_window_letterbox', ['model_info', 'resize_type']) + + ov.serialize(ov_model, f_ov) # save + yaml_save(Path(f) / 'metadata.yaml', self.metadata) # add metadata.yaml + return f, None + + @try_export + def export_paddle(self, prefix=colorstr('PaddlePaddle:')): + """YOLOv8 Paddle export.""" + check_requirements(('paddlepaddle', 'x2paddle')) + import x2paddle # noqa + from x2paddle.convert import pytorch2paddle # noqa + + LOGGER.info(f'\n{prefix} starting export with X2Paddle {x2paddle.__version__}...') + f = str(self.file).replace(self.file.suffix, f'_paddle_model{os.sep}') + + pytorch2paddle(module=self.model, save_dir=f, jit_type='trace', input_examples=[self.im]) # export + yaml_save(Path(f) / 'metadata.yaml', self.metadata) # add metadata.yaml + return f, None + + @try_export + def export_ncnn(self, prefix=colorstr('NCNN:')): + """ + YOLOv8 NCNN export using PNNX https://github.com/pnnx/pnnx. + """ + check_requirements('git+https://github.com/Tencent/ncnn.git' if ARM64 else 'ncnn') # requires NCNN + import ncnn # noqa + + LOGGER.info(f'\n{prefix} starting export with NCNN {ncnn.__version__}...') + f = Path(str(self.file).replace(self.file.suffix, f'_ncnn_model{os.sep}')) + f_ts = str(self.file.with_suffix('.torchscript')) + + if Path('./pnnx').is_file(): + pnnx = './pnnx' + elif (ROOT / 'pnnx').is_file(): + pnnx = ROOT / 'pnnx' + else: + LOGGER.warning( + f'{prefix} WARNING ⚠️ PNNX not found. Attempting to download binary file from ' + 'https://github.com/pnnx/pnnx/.\nNote PNNX Binary file must be placed in current working directory ' + f'or in {ROOT}. 
See PNNX repo for full installation instructions.') + _, assets = get_github_assets(repo='pnnx/pnnx') + asset = [x for x in assets if ('macos' if MACOS else 'ubuntu' if LINUX else 'windows') in x][0] + attempt_download_asset(asset, repo='pnnx/pnnx', release='latest') + unzip_dir = Path(asset).with_suffix('') + pnnx = ROOT / 'pnnx' # new location + (unzip_dir / 'pnnx').rename(pnnx) # move binary to ROOT + shutil.rmtree(unzip_dir) # delete unzip dir + Path(asset).unlink() # delete zip + pnnx.chmod(0o777) # set read, write, and execute permissions for everyone + + cmd = [ + str(pnnx), + f_ts, + f'pnnxparam={f / "model.pnnx.param"}', + f'pnnxbin={f / "model.pnnx.bin"}', + f'pnnxpy={f / "model_pnnx.py"}', + f'pnnxonnx={f / "model.pnnx.onnx"}', + f'ncnnparam={f / "model.ncnn.param"}', + f'ncnnbin={f / "model.ncnn.bin"}', + f'ncnnpy={f / "model_ncnn.py"}', + f'fp16={int(self.args.half)}', + f'device={self.device.type}', + f'inputshape="{[self.args.batch, 3, *self.imgsz]}"', ] + f.mkdir(exist_ok=True) # make ncnn_model directory + LOGGER.info(f"{prefix} running '{' '.join(cmd)}'") + subprocess.run(cmd, check=True) + for f_debug in 'debug.bin', 'debug.param', 'debug2.bin', 'debug2.param': # remove debug files + Path(f_debug).unlink(missing_ok=True) + + yaml_save(f / 'metadata.yaml', self.metadata) # add metadata.yaml + return str(f), None + + @try_export + def export_coreml(self, prefix=colorstr('CoreML:')): + """YOLOv8 CoreML export.""" + check_requirements('coremltools>=6.0') + import coremltools as ct # noqa + + LOGGER.info(f'\n{prefix} starting export with coremltools {ct.__version__}...') + f = self.file.with_suffix('.mlmodel') + + bias = [0.0, 0.0, 0.0] + scale = 1 / 255 + classifier_config = None + if self.model.task == 'classify': + classifier_config = ct.ClassifierConfig(list(self.model.names.values())) if self.args.nms else None + model = self.model + elif self.model.task == 'detect': + model = iOSDetectModel(self.model, self.im) if self.args.nms else self.model + else: + # TODO CoreML Segment and Pose model pipelining + model = self.model + + ts = torch.jit.trace(model.eval(), self.im, strict=False) # TorchScript model + ct_model = ct.convert(ts, + inputs=[ct.ImageType('image', shape=self.im.shape, scale=scale, bias=bias)], + classifier_config=classifier_config) + bits, mode = (8, 'kmeans_lut') if self.args.int8 else (16, 'linear') if self.args.half else (32, None) + if bits < 32: + if 'kmeans' in mode: + check_requirements('scikit-learn') # scikit-learn package required for k-means quantization + ct_model = ct.models.neural_network.quantization_utils.quantize_weights(ct_model, bits, mode) + if self.args.nms and self.model.task == 'detect': + ct_model = self._pipeline_coreml(ct_model) + + m = self.metadata # metadata dict + ct_model.short_description = m.pop('description') + ct_model.author = m.pop('author') + ct_model.license = m.pop('license') + ct_model.version = m.pop('version') + ct_model.user_defined_metadata.update({k: str(v) for k, v in m.items()}) + ct_model.save(str(f)) + return f, ct_model + + @try_export + def export_engine(self, prefix=colorstr('TensorRT:')): + """YOLOv8 TensorRT export https://developer.nvidia.com/tensorrt.""" + assert self.im.device.type != 'cpu', "export running on CPU but must be on GPU, i.e. 
use 'device=0'" + try: + import tensorrt as trt # noqa + except ImportError: + if LINUX: + check_requirements('nvidia-tensorrt', cmds='-U --index-url https://pypi.ngc.nvidia.com') + import tensorrt as trt # noqa + + check_version(trt.__version__, '7.0.0', hard=True) # require tensorrt>=7.0.0 + self.args.simplify = True + f_onnx, _ = self.export_onnx() + + LOGGER.info(f'\n{prefix} starting export with TensorRT {trt.__version__}...') + assert Path(f_onnx).exists(), f'failed to export ONNX file: {f_onnx}' + f = self.file.with_suffix('.engine') # TensorRT engine file + logger = trt.Logger(trt.Logger.INFO) + if self.args.verbose: + logger.min_severity = trt.Logger.Severity.VERBOSE + + builder = trt.Builder(logger) + config = builder.create_builder_config() + config.max_workspace_size = self.args.workspace * 1 << 30 + # config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, workspace << 30) # fix TRT 8.4 deprecation notice + + flag = (1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)) + network = builder.create_network(flag) + parser = trt.OnnxParser(network, logger) + if not parser.parse_from_file(f_onnx): + raise RuntimeError(f'failed to load ONNX file: {f_onnx}') + + inputs = [network.get_input(i) for i in range(network.num_inputs)] + outputs = [network.get_output(i) for i in range(network.num_outputs)] + for inp in inputs: + LOGGER.info(f'{prefix} input "{inp.name}" with shape{inp.shape} {inp.dtype}') + for out in outputs: + LOGGER.info(f'{prefix} output "{out.name}" with shape{out.shape} {out.dtype}') + + if self.args.dynamic: + shape = self.im.shape + if shape[0] <= 1: + LOGGER.warning(f'{prefix} WARNING ⚠️ --dynamic model requires maximum --batch-size argument') + profile = builder.create_optimization_profile() + for inp in inputs: + profile.set_shape(inp.name, (1, *shape[1:]), (max(1, shape[0] // 2), *shape[1:]), shape) + config.add_optimization_profile(profile) + + LOGGER.info( + f'{prefix} building FP{16 if builder.platform_has_fast_fp16 and self.args.half else 32} engine as {f}') + if builder.platform_has_fast_fp16 and self.args.half: + config.set_flag(trt.BuilderFlag.FP16) + + # Write file + with builder.build_engine(network, config) as engine, open(f, 'wb') as t: + # Metadata + meta = json.dumps(self.metadata) + t.write(len(meta).to_bytes(4, byteorder='little', signed=True)) + t.write(meta.encode()) + # Model + t.write(engine.serialize()) + + return f, None + + @try_export + def export_saved_model(self, prefix=colorstr('TensorFlow SavedModel:')): + """YOLOv8 TensorFlow SavedModel export.""" + try: + import tensorflow as tf # noqa + except ImportError: + cuda = torch.cuda.is_available() + check_requirements(f"tensorflow{'-macos' if MACOS else '-aarch64' if ARM64 else '' if cuda else '-cpu'}") + import tensorflow as tf # noqa + check_requirements(('onnx', 'onnx2tf>=1.7.7', 'sng4onnx>=1.0.1', 'onnxsim>=0.4.17', 'onnx_graphsurgeon>=0.3.26', + 'tflite_support', 'onnxruntime-gpu' if torch.cuda.is_available() else 'onnxruntime'), + cmds='--extra-index-url https://pypi.ngc.nvidia.com') + + LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...') + f = Path(str(self.file).replace(self.file.suffix, '_saved_model')) + if f.is_dir(): + import shutil + shutil.rmtree(f) # delete output folder + + # Export to ONNX + self.args.simplify = True + f_onnx, _ = self.export_onnx() + + # Export to TF + int8 = '-oiqt -qt per-tensor' if self.args.int8 else '' + cmd = f'onnx2tf -i {f_onnx} -o {f} -nuo --non_verbose {int8}' + LOGGER.info(f"\n{prefix} running 
'{cmd.strip()}'") + subprocess.run(cmd, shell=True) + yaml_save(f / 'metadata.yaml', self.metadata) # add metadata.yaml + + # Remove/rename TFLite models + if self.args.int8: + for file in f.rglob('*_dynamic_range_quant.tflite'): + file.rename(file.with_name(file.stem.replace('_dynamic_range_quant', '_int8') + file.suffix)) + for file in f.rglob('*_integer_quant_with_int16_act.tflite'): + file.unlink() # delete extra fp16 activation TFLite files + + # Add TFLite metadata + for file in f.rglob('*.tflite'): + f.unlink() if 'quant_with_int16_act.tflite' in str(f) else self._add_tflite_metadata(file) + + # Load saved_model + keras_model = tf.saved_model.load(f, tags=None, options=None) + + return str(f), keras_model + + @try_export + def export_pb(self, keras_model, prefix=colorstr('TensorFlow GraphDef:')): + """YOLOv8 TensorFlow GraphDef *.pb export https://github.com/leimao/Frozen_Graph_TensorFlow.""" + import tensorflow as tf # noqa + from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2 # noqa + + LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...') + f = self.file.with_suffix('.pb') + + m = tf.function(lambda x: keras_model(x)) # full model + m = m.get_concrete_function(tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype)) + frozen_func = convert_variables_to_constants_v2(m) + frozen_func.graph.as_graph_def() + tf.io.write_graph(graph_or_graph_def=frozen_func.graph, logdir=str(f.parent), name=f.name, as_text=False) + return f, None + + @try_export + def export_tflite(self, keras_model, nms, agnostic_nms, prefix=colorstr('TensorFlow Lite:')): + """YOLOv8 TensorFlow Lite export.""" + import tensorflow as tf # noqa + + LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...') + saved_model = Path(str(self.file).replace(self.file.suffix, '_saved_model')) + if self.args.int8: + f = saved_model / f'{self.file.stem}_int8.tflite' # fp32 in/out + elif self.args.half: + f = saved_model / f'{self.file.stem}_float16.tflite' # fp32 in/out + else: + f = saved_model / f'{self.file.stem}_float32.tflite' + return str(f), None + + @try_export + def export_edgetpu(self, tflite_model='', prefix=colorstr('Edge TPU:')): + """YOLOv8 Edge TPU export https://coral.ai/docs/edgetpu/models-intro/.""" + LOGGER.warning(f'{prefix} WARNING ⚠️ Edge TPU known bug https://github.com/ultralytics/ultralytics/issues/1185') + + cmd = 'edgetpu_compiler --version' + help_url = 'https://coral.ai/docs/edgetpu/compiler/' + assert LINUX, f'export only supported on Linux. See {help_url}' + if subprocess.run(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, shell=True).returncode != 0: + LOGGER.info(f'\n{prefix} export requires Edge TPU compiler. 
Attempting install from {help_url}') + sudo = subprocess.run('sudo --version >/dev/null', shell=True).returncode == 0 # sudo installed on system + for c in ( + 'curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -', + 'echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" | sudo tee /etc/apt/sources.list.d/coral-edgetpu.list', + 'sudo apt-get update', 'sudo apt-get install edgetpu-compiler'): + subprocess.run(c if sudo else c.replace('sudo ', ''), shell=True, check=True) + ver = subprocess.run(cmd, shell=True, capture_output=True, check=True).stdout.decode().split()[-1] + + LOGGER.info(f'\n{prefix} starting export with Edge TPU compiler {ver}...') + f = str(tflite_model).replace('.tflite', '_edgetpu.tflite') # Edge TPU model + + cmd = f'edgetpu_compiler -s -d -k 10 --out_dir {Path(f).parent} {tflite_model}' + LOGGER.info(f"{prefix} running '{cmd}'") + subprocess.run(cmd.split(), check=True) + self._add_tflite_metadata(f) + return f, None + + @try_export + def export_tfjs(self, prefix=colorstr('TensorFlow.js:')): + """YOLOv8 TensorFlow.js export.""" + check_requirements('tensorflowjs') + import tensorflow as tf + import tensorflowjs as tfjs # noqa + + LOGGER.info(f'\n{prefix} starting export with tensorflowjs {tfjs.__version__}...') + f = str(self.file).replace(self.file.suffix, '_web_model') # js dir + f_pb = self.file.with_suffix('.pb') # *.pb path + + gd = tf.Graph().as_graph_def() # TF GraphDef + with open(f_pb, 'rb') as file: + gd.ParseFromString(file.read()) + outputs = ','.join(gd_outputs(gd)) + LOGGER.info(f'\n{prefix} output node names: {outputs}') + + cmd = f'tensorflowjs_converter --input_format=tf_frozen_model --output_node_names={outputs} {f_pb} {f}' + subprocess.run(cmd.split(), check=True) + + # f_json = Path(f) / 'model.json' # *.json path + # with open(f_json, 'w') as j: # sort JSON Identity_* in ascending order + # subst = re.sub( + # r'{"outputs": {"Identity.?.?": {"name": "Identity.?.?"}, ' + # r'"Identity.?.?": {"name": "Identity.?.?"}, ' + # r'"Identity.?.?": {"name": "Identity.?.?"}, ' + # r'"Identity.?.?": {"name": "Identity.?.?"}}}', + # r'{"outputs": {"Identity": {"name": "Identity"}, ' + # r'"Identity_1": {"name": "Identity_1"}, ' + # r'"Identity_2": {"name": "Identity_2"}, ' + # r'"Identity_3": {"name": "Identity_3"}}}', + # f_json.read_text(), + # ) + # j.write(subst) + yaml_save(Path(f) / 'metadata.yaml', self.metadata) # add metadata.yaml + return f, None + + def _add_tflite_metadata(self, file): + """Add metadata to *.tflite models per https://www.tensorflow.org/lite/models/convert/metadata.""" + from tflite_support import flatbuffers # noqa + from tflite_support import metadata as _metadata # noqa + from tflite_support import metadata_schema_py_generated as _metadata_fb # noqa + + # Create model info + model_meta = _metadata_fb.ModelMetadataT() + model_meta.name = self.metadata['description'] + model_meta.version = self.metadata['version'] + model_meta.author = self.metadata['author'] + model_meta.license = self.metadata['license'] + + # Label file + tmp_file = Path(file).parent / 'temp_meta.txt' + with open(tmp_file, 'w') as f: + f.write(str(self.metadata)) + + label_file = _metadata_fb.AssociatedFileT() + label_file.name = tmp_file.name + label_file.type = _metadata_fb.AssociatedFileType.TENSOR_AXIS_LABELS + + # Create input info + input_meta = _metadata_fb.TensorMetadataT() + input_meta.name = 'image' + input_meta.description = 'Input image to be detected.' 
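The metadata block assembled by this method can be read back with the same `tflite_support` package; a hedged sketch (the model path is an assumed export location, following the naming used by `export_tflite`):

```python
# Hedged sketch: inspect the metadata attached to an exported TFLite model.
from tflite_support import metadata as _metadata

displayer = _metadata.MetadataDisplayer.with_model_file('yolov8n_saved_model/yolov8n_float32.tflite')
print(displayer.get_metadata_json())                 # name/author/license/version from self.metadata
print(displayer.get_packed_associated_file_list())   # includes the packed temp_meta.txt label file
```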
+ input_meta.content = _metadata_fb.ContentT() + input_meta.content.contentProperties = _metadata_fb.ImagePropertiesT() + input_meta.content.contentProperties.colorSpace = _metadata_fb.ColorSpaceType.RGB + input_meta.content.contentPropertiesType = _metadata_fb.ContentProperties.ImageProperties + + # Create output info + output1 = _metadata_fb.TensorMetadataT() + output1.name = 'output' + output1.description = 'Coordinates of detected objects, class labels, and confidence score' + output1.associatedFiles = [label_file] + if self.model.task == 'segment': + output2 = _metadata_fb.TensorMetadataT() + output2.name = 'output' + output2.description = 'Mask protos' + output2.associatedFiles = [label_file] + + # Create subgraph info + subgraph = _metadata_fb.SubGraphMetadataT() + subgraph.inputTensorMetadata = [input_meta] + subgraph.outputTensorMetadata = [output1, output2] if self.model.task == 'segment' else [output1] + model_meta.subgraphMetadata = [subgraph] + + b = flatbuffers.Builder(0) + b.Finish(model_meta.Pack(b), _metadata.MetadataPopulator.METADATA_FILE_IDENTIFIER) + metadata_buf = b.Output() + + populator = _metadata.MetadataPopulator.with_model_file(str(file)) + populator.load_metadata_buffer(metadata_buf) + populator.load_associated_files([str(tmp_file)]) + populator.populate() + tmp_file.unlink() + + def _pipeline_coreml(self, model, prefix=colorstr('CoreML Pipeline:')): + """YOLOv8 CoreML pipeline.""" + import coremltools as ct # noqa + + LOGGER.info(f'{prefix} starting pipeline with coremltools {ct.__version__}...') + batch_size, ch, h, w = list(self.im.shape) # BCHW + + # Output shapes + spec = model.get_spec() + out0, out1 = iter(spec.description.output) + if MACOS: + from PIL import Image + img = Image.new('RGB', (w, h)) # img(192 width, 320 height) + # img = torch.zeros((*opt.img_size, 3)).numpy() # img size(320,192,3) iDetection + out = model.predict({'image': img}) + out0_shape = out[out0.name].shape + out1_shape = out[out1.name].shape + else: # linux and windows can not run model.predict(), get sizes from pytorch output y + out0_shape = self.output_shape[2], self.output_shape[1] - 4 # (3780, 80) + out1_shape = self.output_shape[2], 4 # (3780, 4) + + # Checks + names = self.metadata['names'] + nx, ny = spec.description.input[0].type.imageType.width, spec.description.input[0].type.imageType.height + na, nc = out0_shape + # na, nc = out0.type.multiArrayType.shape # number anchors, classes + assert len(names) == nc, f'{len(names)} names found for nc={nc}' # check + + # Define output shapes (missing) + out0.type.multiArrayType.shape[:] = out0_shape # (3780, 80) + out1.type.multiArrayType.shape[:] = out1_shape # (3780, 4) + # spec.neuralNetwork.preprocessing[0].featureName = '0' + + # Flexible input shapes + # from coremltools.models.neural_network import flexible_shape_utils + # s = [] # shapes + # s.append(flexible_shape_utils.NeuralNetworkImageSize(320, 192)) + # s.append(flexible_shape_utils.NeuralNetworkImageSize(640, 384)) # (height, width) + # flexible_shape_utils.add_enumerated_image_sizes(spec, feature_name='image', sizes=s) + # r = flexible_shape_utils.NeuralNetworkImageSizeRange() # shape ranges + # r.add_height_range((192, 640)) + # r.add_width_range((192, 640)) + # flexible_shape_utils.update_image_size_range(spec, feature_name='image', size_range=r) + + # Print + # print(spec.description) + + # Model from spec + model = ct.models.MLModel(spec) + + # 3. 
Create NMS protobuf + nms_spec = ct.proto.Model_pb2.Model() + nms_spec.specificationVersion = 5 + for i in range(2): + decoder_output = model._spec.description.output[i].SerializeToString() + nms_spec.description.input.add() + nms_spec.description.input[i].ParseFromString(decoder_output) + nms_spec.description.output.add() + nms_spec.description.output[i].ParseFromString(decoder_output) + + nms_spec.description.output[0].name = 'confidence' + nms_spec.description.output[1].name = 'coordinates' + + output_sizes = [nc, 4] + for i in range(2): + ma_type = nms_spec.description.output[i].type.multiArrayType + ma_type.shapeRange.sizeRanges.add() + ma_type.shapeRange.sizeRanges[0].lowerBound = 0 + ma_type.shapeRange.sizeRanges[0].upperBound = -1 + ma_type.shapeRange.sizeRanges.add() + ma_type.shapeRange.sizeRanges[1].lowerBound = output_sizes[i] + ma_type.shapeRange.sizeRanges[1].upperBound = output_sizes[i] + del ma_type.shape[:] + + nms = nms_spec.nonMaximumSuppression + nms.confidenceInputFeatureName = out0.name # 1x507x80 + nms.coordinatesInputFeatureName = out1.name # 1x507x4 + nms.confidenceOutputFeatureName = 'confidence' + nms.coordinatesOutputFeatureName = 'coordinates' + nms.iouThresholdInputFeatureName = 'iouThreshold' + nms.confidenceThresholdInputFeatureName = 'confidenceThreshold' + nms.iouThreshold = 0.45 + nms.confidenceThreshold = 0.25 + nms.pickTop.perClass = True + nms.stringClassLabels.vector.extend(names.values()) + nms_model = ct.models.MLModel(nms_spec) + + # 4. Pipeline models together + pipeline = ct.models.pipeline.Pipeline(input_features=[('image', ct.models.datatypes.Array(3, ny, nx)), + ('iouThreshold', ct.models.datatypes.Double()), + ('confidenceThreshold', ct.models.datatypes.Double())], + output_features=['confidence', 'coordinates']) + pipeline.add_model(model) + pipeline.add_model(nms_model) + + # Correct datatypes + pipeline.spec.description.input[0].ParseFromString(model._spec.description.input[0].SerializeToString()) + pipeline.spec.description.output[0].ParseFromString(nms_model._spec.description.output[0].SerializeToString()) + pipeline.spec.description.output[1].ParseFromString(nms_model._spec.description.output[1].SerializeToString()) + + # Update metadata + pipeline.spec.specificationVersion = 5 + pipeline.spec.description.metadata.userDefined.update({ + 'IoU threshold': str(nms.iouThreshold), + 'Confidence threshold': str(nms.confidenceThreshold)}) + + # Save the model + model = ct.models.MLModel(pipeline.spec) + model.input_description['image'] = 'Input image' + model.input_description['iouThreshold'] = f'(optional) IOU threshold override (default: {nms.iouThreshold})' + model.input_description['confidenceThreshold'] = \ + f'(optional) Confidence threshold override (default: {nms.confidenceThreshold})' + model.output_description['confidence'] = 'Boxes × Class confidence (see user-defined metadata "classes")' + model.output_description['coordinates'] = 'Boxes × [x, y, width, height] (relative to image size)' + LOGGER.info(f'{prefix} pipeline success') + return model + + def add_callback(self, event: str, callback): + """ + Appends the given callback. 
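Callbacks appended here are fired by `run_callbacks('on_export_start')` and `run_callbacks('on_export_end')` in `__call__` above; a hedged sketch of registering one directly on an `Exporter` (weights and format are illustrative):

```python
from ultralytics import YOLO
from ultralytics.yolo.engine.exporter import Exporter

def on_export_end(exporter):                       # run_callbacks() passes the Exporter instance itself
    print('export finished, files under', exporter.file.parent)

model = YOLO('yolov8n.pt')
exporter = Exporter(overrides={'format': 'onnx'})
exporter.add_callback('on_export_end', on_export_end)
exporter(model=model.model)                        # runs the export and fires both callback events
```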
+ """ + self.callbacks[event].append(callback) + + def run_callbacks(self, event: str): + """Execute all callbacks for a given event.""" + for callback in self.callbacks.get(event, []): + callback(self) + + +class iOSDetectModel(torch.nn.Module): + """Wrap an Ultralytics YOLO model for iOS export.""" + + def __init__(self, model, im): + """Initialize the iOSDetectModel class with a YOLO model and example image.""" + super().__init__() + b, c, h, w = im.shape # batch, channel, height, width + self.model = model + self.nc = len(model.names) # number of classes + if w == h: + self.normalize = 1.0 / w # scalar + else: + self.normalize = torch.tensor([1.0 / w, 1.0 / h, 1.0 / w, 1.0 / h]) # broadcast (slower, smaller) + + def forward(self, x): + """Normalize predictions of object detection model with input size-dependent factors.""" + xywh, cls = self.model(x)[0].transpose(0, 1).split((4, self.nc), 1) + return cls, xywh * self.normalize # confidence (3780, 80), coordinates (3780, 4) + + +def export(cfg=DEFAULT_CFG): + """Export a YOLOv model to a specific format.""" + cfg.model = cfg.model or 'yolov8n.yaml' + cfg.format = cfg.format or 'torchscript' + + from ultralytics import YOLO + model = YOLO(cfg.model) + model.export(**vars(cfg)) + + +if __name__ == '__main__': + """ + CLI: + yolo mode=export model=yolov8n.yaml format=onnx + """ + export() diff --git a/ultralytics/yolo/engine/model.py b/ultralytics/yolo/engine/model.py new file mode 100644 index 0000000..a010f0e --- /dev/null +++ b/ultralytics/yolo/engine/model.py @@ -0,0 +1,436 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +import sys +from pathlib import Path +from typing import Union + +from ultralytics import yolo # noqa +from ultralytics.nn.tasks import (ClassificationModel, DetectionModel, PoseModel, SegmentationModel, + attempt_load_one_weight, guess_model_task, nn, yaml_model_load) +from ultralytics.yolo.cfg import get_cfg +from ultralytics.yolo.engine.exporter import Exporter +from ultralytics.yolo.utils import (DEFAULT_CFG, DEFAULT_CFG_DICT, DEFAULT_CFG_KEYS, LOGGER, RANK, ROOT, callbacks, + is_git_dir, yaml_load) +from ultralytics.yolo.utils.checks import check_file, check_imgsz, check_pip_update_available, check_yaml +from ultralytics.yolo.utils.downloads import GITHUB_ASSET_STEMS +from ultralytics.yolo.utils.torch_utils import smart_inference_mode + +# Map head to model, trainer, validator, and predictor classes +TASK_MAP = { + 'classify': [ + ClassificationModel, yolo.v8.classify.ClassificationTrainer, yolo.v8.classify.ClassificationValidator, + yolo.v8.classify.ClassificationPredictor], + 'detect': [ + DetectionModel, yolo.v8.detect.DetectionTrainer, yolo.v8.detect.DetectionValidator, + yolo.v8.detect.DetectionPredictor], + 'segment': [ + SegmentationModel, yolo.v8.segment.SegmentationTrainer, yolo.v8.segment.SegmentationValidator, + yolo.v8.segment.SegmentationPredictor], + 'pose': [PoseModel, yolo.v8.pose.PoseTrainer, yolo.v8.pose.PoseValidator, yolo.v8.pose.PosePredictor]} + + +class YOLO: + """ + YOLO (You Only Look Once) object detection model. + + Args: + model (str, Path): Path to the model file to load or create. + task (Any, optional): Task type for the YOLO model. Defaults to None. + + Attributes: + predictor (Any): The predictor object. + model (Any): The model object. + trainer (Any): The trainer object. + task (str): The type of model task. + ckpt (Any): The checkpoint object if the model loaded from *.pt file. + cfg (str): The model configuration if loaded from *.yaml file. 
+ ckpt_path (str): The checkpoint file path. + overrides (dict): Overrides for the trainer object. + metrics (Any): The data for metrics. + + Methods: + __call__(source=None, stream=False, **kwargs): + Alias for the predict method. + _new(cfg:str, verbose:bool=True) -> None: + Initializes a new model and infers the task type from the model definitions. + _load(weights:str, task:str='') -> None: + Initializes a new model and infers the task type from the model head. + _check_is_pytorch_model() -> None: + Raises TypeError if the model is not a PyTorch model. + reset() -> None: + Resets the model modules. + info(verbose:bool=False) -> None: + Logs the model info. + fuse() -> None: + Fuses the model for faster inference. + predict(source=None, stream=False, **kwargs) -> List[ultralytics.yolo.engine.results.Results]: + Performs prediction using the YOLO model. + + Returns: + list(ultralytics.yolo.engine.results.Results): The prediction results. + """ + + def __init__(self, model: Union[str, Path] = 'yolov8n.pt', task=None) -> None: + """ + Initializes the YOLO model. + + Args: + model (Union[str, Path], optional): Path or name of the model to load or create. Defaults to 'yolov8n.pt'. + task (Any, optional): Task type for the YOLO model. Defaults to None. + """ + self.callbacks = callbacks.get_default_callbacks() + self.predictor = None # reuse predictor + self.model = None # model object + self.trainer = None # trainer object + self.task = None # task type + self.ckpt = None # if loaded from *.pt + self.cfg = None # if loaded from *.yaml + self.ckpt_path = None + self.overrides = {} # overrides for trainer object + self.metrics = None # validation/training metrics + self.session = None # HUB session + model = str(model).strip() # strip spaces + + # Check if Ultralytics HUB model from https://hub.ultralytics.com + if self.is_hub_model(model): + from ultralytics.hub.session import HUBTrainingSession + self.session = HUBTrainingSession(model) + model = self.session.model_file + + # Load or create new YOLO model + suffix = Path(model).suffix + if not suffix and Path(model).stem in GITHUB_ASSET_STEMS: + model, suffix = Path(model).with_suffix('.pt'), '.pt' # add suffix, i.e. yolov8n -> yolov8n.pt + if suffix == '.yaml': + self._new(model, task) + else: + self._load(model, task) + + def __call__(self, source=None, stream=False, **kwargs): + """Calls the 'predict' function with given arguments to perform object detection.""" + return self.predict(source, stream, **kwargs) + + def __getattr__(self, attr): + """Raises error if object has no requested attribute.""" + name = self.__class__.__name__ + raise AttributeError(f"'{name}' object has no attribute '{attr}'. See valid attributes below.\n{self.__doc__}") + + @staticmethod + def is_hub_model(model): + """Check if the provided model is a HUB model.""" + return any(( + model.startswith('https://hub.ultralytics.com/models/'), # i.e. https://hub.ultralytics.com/models/MODEL_ID + [len(x) for x in model.split('_')] == [42, 20], # APIKEY_MODELID + len(model) == 20 and not Path(model).exists() and all(x not in model for x in './\\'))) # MODELID + + def _new(self, cfg: str, task=None, verbose=True): + """ + Initializes a new model and infers the task type from the model definitions. 
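The constructor above routes on the model suffix: a `.yaml` config goes through `_new()` and builds an untrained model, anything else goes through `_load()`; a short sketch of both paths using the stock YOLOv8n assets:

```python
from ultralytics import YOLO

model_a = YOLO('yolov8n.yaml')   # '.yaml' suffix -> _new(): untrained model, task guessed from the config
model_b = YOLO('yolov8n')        # bare GitHub asset stem -> '.pt' appended, then _load() of the weights
print(model_a.task, model_b.task, model_b.ckpt_path)
```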
+ + Args: + cfg (str): model configuration file + task (str | None): model task + verbose (bool): display model info on load + """ + cfg_dict = yaml_model_load(cfg) + self.cfg = cfg + self.task = task or guess_model_task(cfg_dict) + self.model = TASK_MAP[self.task][0](cfg_dict, verbose=verbose and RANK == -1) # build model + self.overrides['model'] = self.cfg + + # Below added to allow export from yamls + args = {**DEFAULT_CFG_DICT, **self.overrides} # combine model and default args, preferring model args + self.model.args = {k: v for k, v in args.items() if k in DEFAULT_CFG_KEYS} # attach args to model + self.model.task = self.task + + def _load(self, weights: str, task=None): + """ + Initializes a new model and infers the task type from the model head. + + Args: + weights (str): model checkpoint to be loaded + task (str | None): model task + """ + suffix = Path(weights).suffix + if suffix == '.pt': + self.model, self.ckpt = attempt_load_one_weight(weights) + self.task = self.model.args['task'] + self.overrides = self.model.args = self._reset_ckpt_args(self.model.args) + self.ckpt_path = self.model.pt_path + else: + weights = check_file(weights) + self.model, self.ckpt = weights, None + self.task = task or guess_model_task(weights) + self.ckpt_path = weights + self.overrides['model'] = weights + self.overrides['task'] = self.task + + def _check_is_pytorch_model(self): + """ + Raises TypeError is model is not a PyTorch model + """ + pt_str = isinstance(self.model, (str, Path)) and Path(self.model).suffix == '.pt' + pt_module = isinstance(self.model, nn.Module) + if not (pt_module or pt_str): + raise TypeError(f"model='{self.model}' must be a *.pt PyTorch model, but is a different type. " + f'PyTorch models can be used to train, val, predict and export, i.e. ' + f"'yolo export model=yolov8n.pt', but exported formats like ONNX, TensorRT etc. only " + f"support 'predict' and 'val' modes, i.e. 'yolo predict model=yolov8n.onnx'.") + + @smart_inference_mode() + def reset_weights(self): + """ + Resets the model modules parameters to randomly initialized values, losing all training information. + """ + self._check_is_pytorch_model() + for m in self.model.modules(): + if hasattr(m, 'reset_parameters'): + m.reset_parameters() + for p in self.model.parameters(): + p.requires_grad = True + return self + + @smart_inference_mode() + def load(self, weights='yolov8n.pt'): + """ + Transfers parameters with matching names and shapes from 'weights' to model. + """ + self._check_is_pytorch_model() + if isinstance(weights, (str, Path)): + weights, self.ckpt = attempt_load_one_weight(weights) + self.model.load(weights) + return self + + def info(self, detailed=False, verbose=True): + """ + Logs model info. + + Args: + detailed (bool): Show detailed information about model. + verbose (bool): Controls verbosity. + """ + self._check_is_pytorch_model() + return self.model.info(detailed=detailed, verbose=verbose) + + def fuse(self): + """Fuse PyTorch Conv2d and BatchNorm2d layers.""" + self._check_is_pytorch_model() + self.model.fuse() + + @smart_inference_mode() + def predict(self, source=None, stream=False, **kwargs): + """ + Perform prediction using the YOLO model. + + Args: + source (str | int | PIL | np.ndarray): The source of the image to make predictions on. + Accepts all source types accepted by the YOLO model. + stream (bool): Whether to stream the predictions or not. Defaults to False. + **kwargs : Additional keyword arguments passed to the predictor. 
+ Check the 'configuration' section in the documentation for all available options. + + Returns: + (List[ultralytics.yolo.engine.results.Results]): The prediction results. + """ + if source is None: + source = ROOT / 'assets' if is_git_dir() else 'https://ultralytics.com/images/bus.jpg' + LOGGER.warning(f"WARNING ⚠️ 'source' is missing. Using 'source={source}'.") + is_cli = (sys.argv[0].endswith('yolo') or sys.argv[0].endswith('ultralytics')) and any( + x in sys.argv for x in ('predict', 'track', 'mode=predict', 'mode=track')) + overrides = self.overrides.copy() + overrides['conf'] = 0.25 + overrides.update(kwargs) # prefer kwargs + overrides['mode'] = kwargs.get('mode', 'predict') + assert overrides['mode'] in ['track', 'predict'] + if not is_cli: + overrides['save'] = kwargs.get('save', False) # do not save by default if called in Python + if not self.predictor: + self.task = overrides.get('task') or self.task + self.predictor = TASK_MAP[self.task][3](overrides=overrides, _callbacks=self.callbacks) + self.predictor.setup_model(model=self.model, verbose=is_cli) + else: # only update args if predictor is already setup + self.predictor.args = get_cfg(self.predictor.args, overrides) + if 'project' in overrides or 'name' in overrides: + self.predictor.save_dir = self.predictor.get_save_dir() + return self.predictor.predict_cli(source=source) if is_cli else self.predictor(source=source, stream=stream) + + def track(self, source=None, stream=False, persist=False, **kwargs): + """ + Perform object tracking on the input source using the registered trackers. + + Args: + source (str, optional): The input source for object tracking. Can be a file path or a video stream. + stream (bool, optional): Whether the input source is a video stream. Defaults to False. + persist (bool, optional): Whether to persist the trackers if they already exist. Defaults to False. + **kwargs (optional): Additional keyword arguments for the tracking process. + + Returns: + (List[ultralytics.yolo.engine.results.Results]): The tracking results. + + """ + if not hasattr(self.predictor, 'trackers'): + from ultralytics.tracker import register_tracker + register_tracker(self, persist) + # ByteTrack-based method needs low confidence predictions as input + conf = kwargs.get('conf') or 0.1 + kwargs['conf'] = conf + kwargs['mode'] = 'track' + return self.predict(source=source, stream=stream, **kwargs) + + @smart_inference_mode() + def val(self, data=None, **kwargs): + """ + Validate a model on a given dataset. + + Args: + data (str): The dataset to validate on. Accepts all formats accepted by yolo + **kwargs : Any other args accepted by the validators. To see all args check 'configuration' section in docs + """ + overrides = self.overrides.copy() + overrides['rect'] = True # rect batches as default + overrides.update(kwargs) + overrides['mode'] = 'val' + args = get_cfg(cfg=DEFAULT_CFG, overrides=overrides) + args.data = data or args.data + if 'task' in overrides: + self.task = args.task + else: + args.task = self.task + if args.imgsz == DEFAULT_CFG.imgsz and not isinstance(self.model, (str, Path)): + args.imgsz = self.model.args['imgsz'] # use trained imgsz unless custom value is passed + args.imgsz = check_imgsz(args.imgsz, max_dim=1) + + validator = TASK_MAP[self.task][2](args=args, _callbacks=self.callbacks) + validator(model=self.model) + self.metrics = validator.metrics + + return validator.metrics + + @smart_inference_mode() + def benchmark(self, **kwargs): + """ + Benchmark a model on all export formats. 
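For orientation, a hedged end-to-end sketch of the inference entry points defined above (`predict`, `track`, `val`); the argument values are illustrative and the top-level `YOLO` import is assumed as in upstream Ultralytics.

```python
from ultralytics import YOLO  # assumed top-level export

model = YOLO('yolov8n.pt')

# predict: conf defaults to 0.25, and save defaults to False when called from Python
results = model.predict(source='bus.jpg', conf=0.5, save=True)

# track: wraps predict with mode='track' and a low default conf (0.1) for the tracker
tracks = model.track(source='video.mp4', persist=True)

# val: rectangular batches by default; dataset is any format accepted by YOLO
metrics = model.val(data='coco128.yaml')
```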
+ + Args: + **kwargs : Any other args accepted by the validators. To see all args check 'configuration' section in docs + """ + self._check_is_pytorch_model() + from ultralytics.yolo.utils.benchmarks import benchmark + overrides = self.model.args.copy() + overrides.update(kwargs) + overrides['mode'] = 'benchmark' + overrides = {**DEFAULT_CFG_DICT, **overrides} # fill in missing overrides keys with defaults + return benchmark(model=self, imgsz=overrides['imgsz'], half=overrides['half'], device=overrides['device']) + + def export(self, **kwargs): + """ + Export model. + + Args: + **kwargs : Any other args accepted by the predictors. To see all args check 'configuration' section in docs + """ + self._check_is_pytorch_model() + overrides = self.overrides.copy() + overrides.update(kwargs) + overrides['mode'] = 'export' + if overrides.get('imgsz') is None: + overrides['imgsz'] = self.model.args['imgsz'] # use trained imgsz unless custom value is passed + if 'batch' not in kwargs: + overrides['batch'] = 1 # default to 1 if not modified + args = get_cfg(cfg=DEFAULT_CFG, overrides=overrides) + args.task = self.task + return Exporter(overrides=args, _callbacks=self.callbacks)(model=self.model) + + def train(self, **kwargs): + """ + Trains the model on a given dataset. + + Args: + **kwargs (Any): Any number of arguments representing the training configuration. + """ + self._check_is_pytorch_model() + if self.session: # Ultralytics HUB session + if any(kwargs): + LOGGER.warning('WARNING ⚠️ using HUB training arguments, ignoring local training arguments.') + kwargs = self.session.train_args + check_pip_update_available() + overrides = self.overrides.copy() + if kwargs.get('cfg'): + LOGGER.info(f"cfg file passed. Overriding default params with {kwargs['cfg']}.") + overrides = yaml_load(check_yaml(kwargs['cfg'])) + overrides.update(kwargs) + overrides['mode'] = 'train' + if not overrides.get('data'): + raise AttributeError("Dataset required but missing, i.e. pass 'data=coco128.yaml'") + if overrides.get('resume'): + overrides['resume'] = self.ckpt_path + self.task = overrides.get('task') or self.task + self.trainer = TASK_MAP[self.task][1](overrides=overrides, _callbacks=self.callbacks) + if not overrides.get('resume'): # manually set model only if not resuming + self.trainer.model = self.trainer.get_model(weights=self.model if self.ckpt else None, cfg=self.model.yaml) + self.model = self.trainer.model + self.trainer.hub_session = self.session # attach optional HUB session + self.trainer.train() + # Update model and cfg after training + if RANK in (-1, 0): + self.model, _ = attempt_load_one_weight(str(self.trainer.best)) + self.overrides = self.model.args + self.metrics = getattr(self.trainer.validator, 'metrics', None) # TODO: no metrics returned by DDP + + def to(self, device): + """ + Sends the model to the given device. + + Args: + device (str): device + """ + self._check_is_pytorch_model() + self.model.to(device) + + def tune(self, *args, **kwargs): + """ + Runs hyperparameter tuning using Ray Tune. See ultralytics.yolo.utils.tuner.run_ray_tune for Args. + + Returns: + (dict): A dictionary containing the results of the hyperparameter search. + + Raises: + ModuleNotFoundError: If Ray Tune is not installed. 
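A compact sketch of the training and export paths above; values are placeholders and the calls follow the keyword-override pattern used throughout this class.

```python
from ultralytics import YOLO  # assumed top-level export

model = YOLO('yolov8n.pt')

# train: 'data' is required; all other keys fall back to DEFAULT_CFG values
model.train(data='coco128.yaml', epochs=10, imgsz=640, batch=16)

# export: batch defaults to 1 and imgsz falls back to the trained image size
model.export(format='onnx')
```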
+ """ + self._check_is_pytorch_model() + from ultralytics.yolo.utils.tuner import run_ray_tune + return run_ray_tune(self, *args, **kwargs) + + @property + def names(self): + """Returns class names of the loaded model.""" + return self.model.names if hasattr(self.model, 'names') else None + + @property + def device(self): + """Returns device if PyTorch model.""" + return next(self.model.parameters()).device if isinstance(self.model, nn.Module) else None + + @property + def transforms(self): + """Returns transform of the loaded model.""" + return self.model.transforms if hasattr(self.model, 'transforms') else None + + def add_callback(self, event: str, func): + """Add a callback.""" + self.callbacks[event].append(func) + + def clear_callback(self, event: str): + """Clear all event callbacks.""" + self.callbacks[event] = [] + + @staticmethod + def _reset_ckpt_args(args): + """Reset arguments when loading a PyTorch model.""" + include = {'imgsz', 'data', 'task', 'single_cls'} # only remember these arguments when loading a PyTorch model + return {k: v for k, v in args.items() if k in include} + + def _reset_callbacks(self): + """Reset all registered callbacks.""" + for event in callbacks.default_callbacks.keys(): + self.callbacks[event] = [callbacks.default_callbacks[event][0]] diff --git a/ultralytics/yolo/engine/predictor.py b/ultralytics/yolo/engine/predictor.py new file mode 100644 index 0000000..e326e5c --- /dev/null +++ b/ultralytics/yolo/engine/predictor.py @@ -0,0 +1,358 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +""" +Run prediction on images, videos, directories, globs, YouTube, webcam, streams, etc. + +Usage - sources: + $ yolo mode=predict model=yolov8n.pt source=0 # webcam + img.jpg # image + vid.mp4 # video + screen # screenshot + path/ # directory + list.txt # list of images + list.streams # list of streams + 'path/*.jpg' # glob + 'https://youtu.be/Zgi9g1ksQHc' # YouTube + 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream + +Usage - formats: + $ yolo mode=predict model=yolov8n.pt # PyTorch + yolov8n.torchscript # TorchScript + yolov8n.onnx # ONNX Runtime or OpenCV DNN with dnn=True + yolov8n_openvino_model # OpenVINO + yolov8n.engine # TensorRT + yolov8n.mlmodel # CoreML (macOS-only) + yolov8n_saved_model # TensorFlow SavedModel + yolov8n.pb # TensorFlow GraphDef + yolov8n.tflite # TensorFlow Lite + yolov8n_edgetpu.tflite # TensorFlow Edge TPU + yolov8n_paddle_model # PaddlePaddle +""" +import platform +from pathlib import Path + +import cv2 +import numpy as np +import torch + +from ultralytics.nn.autobackend import AutoBackend +from ultralytics.yolo.cfg import get_cfg +from ultralytics.yolo.data import load_inference_source +from ultralytics.yolo.data.augment import LetterBox, classify_transforms +from ultralytics.yolo.utils import DEFAULT_CFG, LOGGER, MACOS, SETTINGS, WINDOWS, callbacks, colorstr, ops +from ultralytics.yolo.utils.checks import check_imgsz, check_imshow +from ultralytics.yolo.utils.files import increment_path +from ultralytics.yolo.utils.torch_utils import select_device, smart_inference_mode + +STREAM_WARNING = """ + WARNING ⚠️ stream/video/webcam/dir predict source will accumulate results in RAM unless `stream=True` is passed, + causing potential out-of-memory errors for large sources or long-running streams/videos. 
+ + Usage: + results = model(source=..., stream=True) # generator of Results objects + for r in results: + boxes = r.boxes # Boxes object for bbox outputs + masks = r.masks # Masks object for segment masks outputs + probs = r.probs # Class probabilities for classification outputs +""" + + +class BasePredictor: + """ + BasePredictor + + A base class for creating predictors. + + Attributes: + args (SimpleNamespace): Configuration for the predictor. + save_dir (Path): Directory to save results. + done_warmup (bool): Whether the predictor has finished setup. + model (nn.Module): Model used for prediction. + data (dict): Data configuration. + device (torch.device): Device used for prediction. + dataset (Dataset): Dataset used for prediction. + vid_path (str): Path to video file. + vid_writer (cv2.VideoWriter): Video writer for saving video output. + data_path (str): Path to data. + """ + + def __init__(self, cfg=DEFAULT_CFG, overrides=None, _callbacks=None): + """ + Initializes the BasePredictor class. + + Args: + cfg (str, optional): Path to a configuration file. Defaults to DEFAULT_CFG. + overrides (dict, optional): Configuration overrides. Defaults to None. + """ + self.args = get_cfg(cfg, overrides) + self.save_dir = self.get_save_dir() + if self.args.conf is None: + self.args.conf = 0.25 # default conf=0.25 + self.done_warmup = False + if self.args.show: + self.args.show = check_imshow(warn=True) + + # Usable if setup is done + self.model = None + self.data = self.args.data # data_dict + self.imgsz = None + self.device = None + self.dataset = None + self.vid_path, self.vid_writer = None, None + self.plotted_img = None + self.data_path = None + self.source_type = None + self.batch = None + self.results = None + self.transforms = None + self.callbacks = _callbacks or callbacks.get_default_callbacks() + callbacks.add_integration_callbacks(self) + + def get_save_dir(self): + project = self.args.project or Path(SETTINGS['runs_dir']) / self.args.task + name = self.args.name or f'{self.args.mode}' + return increment_path(Path(project) / name, exist_ok=self.args.exist_ok) + + def preprocess(self, im): + """Prepares input image before inference. + + Args: + im (torch.Tensor | List(np.ndarray)): BCHW for tensor, [(HWC) x B] for list. + """ + not_tensor = not isinstance(im, torch.Tensor) + if not_tensor: + im = np.stack(self.pre_transform(im)) + im = im[..., ::-1].transpose((0, 3, 1, 2)) # BGR to RGB, BHWC to BCHW, (n, 3, h, w) + im = np.ascontiguousarray(im) # contiguous + im = torch.from_numpy(im) + + img = im.to(self.device) + img = img.half() if self.model.fp16 else img.float() # uint8 to fp16/32 + if not_tensor: + img /= 255 # 0 - 255 to 0.0 - 1.0 + return img + + def inference(self, im, *args, **kwargs): + visualize = increment_path(self.save_dir / Path(self.batch[0][0]).stem, + mkdir=True) if self.args.visualize and (not self.source_type.tensor) else False + return self.model(im, augment=self.args.augment, visualize=visualize) + + def pre_transform(self, im): + """Pre-transform input image before inference. + + Args: + im (List(np.ndarray)): (N, 3, h, w) for tensor, [(h, w, 3) x N] for list. + + Return: A list of transformed imgs. 
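`preprocess` above is a fixed pipeline: stack, BGR to RGB, HWC to CHW, make contiguous, then scale to [0, 1] (and cast to fp16 when the backend supports it). A standalone numpy/torch sketch of the same steps, using a random frame as a stand-in for a real image:

```python
import numpy as np
import torch

frames = [np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8)]  # list of HWC BGR frames
im = np.stack(frames)                          # (B, H, W, C)
im = im[..., ::-1].transpose((0, 3, 1, 2))     # BGR -> RGB, BHWC -> BCHW
im = np.ascontiguousarray(im)                  # contiguous memory for torch.from_numpy
img = torch.from_numpy(im).float() / 255       # uint8 [0, 255] -> float32 [0.0, 1.0]
print(img.shape)                               # torch.Size([1, 3, 480, 640])
```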
+ """ + same_shapes = all(x.shape == im[0].shape for x in im) + auto = same_shapes and self.model.pt + return [LetterBox(self.imgsz, auto=auto, stride=self.model.stride)(image=x) for x in im] + + def write_results(self, idx, results, batch): + """Write inference results to a file or directory.""" + p, im, _ = batch + log_string = '' + if len(im.shape) == 3: + im = im[None] # expand for batch dim + if self.source_type.webcam or self.source_type.from_img or self.source_type.tensor: # batch_size >= 1 + log_string += f'{idx}: ' + frame = self.dataset.count + else: + frame = getattr(self.dataset, 'frame', 0) + self.data_path = p + self.txt_path = str(self.save_dir / 'labels' / p.stem) + ('' if self.dataset.mode == 'image' else f'_{frame}') + log_string += '%gx%g ' % im.shape[2:] # print string + result = results[idx] + log_string += result.verbose() + + if self.args.save or self.args.show: # Add bbox to image + plot_args = { + 'line_width': self.args.line_width, + 'boxes': self.args.boxes, + 'conf': self.args.show_conf, + 'labels': self.args.show_labels} + if not self.args.retina_masks: + plot_args['im_gpu'] = im[idx] + self.plotted_img = result.plot(**plot_args) + # Write + if self.args.save_txt: + result.save_txt(f'{self.txt_path}.txt', save_conf=self.args.save_conf) + if self.args.save_crop: + result.save_crop(save_dir=self.save_dir / 'crops', file_name=self.data_path.stem) + + return log_string + + def postprocess(self, preds, img, orig_imgs): + """Post-processes predictions for an image and returns them.""" + return preds + + def __call__(self, source=None, model=None, stream=False, *args, **kwargs): + """Performs inference on an image or stream.""" + self.stream = stream + if stream: + return self.stream_inference(source, model, *args, **kwargs) + else: + return list(self.stream_inference(source, model, *args, **kwargs)) # merge list of Result into one + + def predict_cli(self, source=None, model=None): + """Method used for CLI prediction. 
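`pre_transform` above letterboxes every frame to the inference size, enabling rectangular (minimal-padding) shapes only when all frames share one shape and the backend is a native PyTorch model. The function below is a generic, hedged re-implementation of the resize-and-pad idea, not the repo's `LetterBox` class from `ultralytics.yolo.data.augment`.

```python
import cv2
import numpy as np

def letterbox(img, new_shape=(640, 640), color=(114, 114, 114)):
    """Resize with unchanged aspect ratio, then pad to new_shape (generic sketch)."""
    h, w = img.shape[:2]
    r = min(new_shape[0] / h, new_shape[1] / w)       # scale ratio
    nh, nw = round(h * r), round(w * r)               # unpadded target size
    img = cv2.resize(img, (nw, nh), interpolation=cv2.INTER_LINEAR)
    dh, dw = new_shape[0] - nh, new_shape[1] - nw     # total padding
    top, bottom = dh // 2, dh - dh // 2
    left, right = dw // 2, dw - dw // 2
    return cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)

padded = letterbox(np.zeros((480, 640, 3), dtype=np.uint8))
print(padded.shape)  # (640, 640, 3)
```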
It uses always generator as outputs as not required by CLI mode.""" + gen = self.stream_inference(source, model) + for _ in gen: # running CLI inference without accumulating any outputs (do not modify) + pass + + def setup_source(self, source): + """Sets up source and inference mode.""" + self.imgsz = check_imgsz(self.args.imgsz, stride=self.model.stride, min_dim=2) # check image size + self.transforms = getattr(self.model.model, 'transforms', classify_transforms( + self.imgsz[0])) if self.args.task == 'classify' else None + self.dataset = load_inference_source(source=source, imgsz=self.imgsz, vid_stride=self.args.vid_stride) + self.source_type = self.dataset.source_type + if not getattr(self, 'stream', True) and (self.dataset.mode == 'stream' or # streams + len(self.dataset) > 1000 or # images + any(getattr(self.dataset, 'video_flag', [False]))): # videos + LOGGER.warning(STREAM_WARNING) + self.vid_path, self.vid_writer = [None] * self.dataset.bs, [None] * self.dataset.bs + + @smart_inference_mode() + def stream_inference(self, source=None, model=None, *args, **kwargs): + """Streams real-time inference on camera feed and saves results to file.""" + if self.args.verbose: + LOGGER.info('') + + # Setup model + if not self.model: + self.setup_model(model) + + # Setup source every time predict is called + self.setup_source(source if source is not None else self.args.source) + + # Check if save_dir/ label file exists + if self.args.save or self.args.save_txt: + (self.save_dir / 'labels' if self.args.save_txt else self.save_dir).mkdir(parents=True, exist_ok=True) + + # Warmup model + if not self.done_warmup: + self.model.warmup(imgsz=(1 if self.model.pt or self.model.triton else self.dataset.bs, 3, *self.imgsz)) + self.done_warmup = True + + self.seen, self.windows, self.batch, profilers = 0, [], None, (ops.Profile(), ops.Profile(), ops.Profile()) + self.run_callbacks('on_predict_start') + for batch in self.dataset: + self.run_callbacks('on_predict_batch_start') + self.batch = batch + path, im0s, vid_cap, s = batch + + # Preprocess + with profilers[0]: + im = self.preprocess(im0s) + + # Inference + with profilers[1]: + preds = self.inference(im, *args, **kwargs) + + # Postprocess + with profilers[2]: + self.results = self.postprocess(preds, im, im0s) + self.run_callbacks('on_predict_postprocess_end') + + # Visualize, save, write results + n = len(im0s) + for i in range(n): + self.seen += 1 + self.results[i].speed = { + 'preprocess': profilers[0].dt * 1E3 / n, + 'inference': profilers[1].dt * 1E3 / n, + 'postprocess': profilers[2].dt * 1E3 / n} + p, im0 = path[i], None if self.source_type.tensor else im0s[i].copy() + p = Path(p) + + if self.args.verbose or self.args.save or self.args.save_txt or self.args.show: + s += self.write_results(i, self.results, (p, im, im0)) + if self.args.save or self.args.save_txt: + self.results[i].save_dir = self.save_dir.__str__() + if self.args.show and self.plotted_img is not None: + self.show(p) + if self.args.save and self.plotted_img is not None: + self.save_preds(vid_cap, i, str(self.save_dir / p.name)) + + self.run_callbacks('on_predict_batch_end') + yield from self.results + + # Print time (inference-only) + if self.args.verbose: + LOGGER.info(f'{s}{profilers[1].dt * 1E3:.1f}ms') + + # Release assets + if isinstance(self.vid_writer[-1], cv2.VideoWriter): + self.vid_writer[-1].release() # release final video writer + + # Print results + if self.args.verbose and self.seen: + t = tuple(x.t / self.seen * 1E3 for x in profilers) # speeds per image + 
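+            # Note: ops.Profile() accumulates total elapsed time in x.t, so dividing by self.seen and
+            # multiplying by 1E3 yields the average per-image latency in milliseconds for each stage.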
LOGGER.info(f'Speed: %.1fms preprocess, %.1fms inference, %.1fms postprocess per image at shape ' + f'{(1, 3, *im.shape[2:])}' % t) + if self.args.save or self.args.save_txt or self.args.save_crop: + nl = len(list(self.save_dir.glob('labels/*.txt'))) # number of labels + s = f"\n{nl} label{'s' * (nl > 1)} saved to {self.save_dir / 'labels'}" if self.args.save_txt else '' + LOGGER.info(f"Results saved to {colorstr('bold', self.save_dir)}{s}") + + self.run_callbacks('on_predict_end') + + def setup_model(self, model, verbose=True): + """Initialize YOLO model with given parameters and set it to evaluation mode.""" + device = select_device(self.args.device, verbose=verbose) + model = model or self.args.model + self.args.half &= device.type != 'cpu' # half precision only supported on CUDA + self.model = AutoBackend(model, + device=device, + dnn=self.args.dnn, + data=self.args.data, + fp16=self.args.half, + fuse=True, + verbose=verbose) + self.device = device + self.model.eval() + + def show(self, p): + """Display an image in a window using OpenCV imshow().""" + im0 = self.plotted_img + if platform.system() == 'Linux' and p not in self.windows: + self.windows.append(p) + cv2.namedWindow(str(p), cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO) # allow window resize (Linux) + cv2.resizeWindow(str(p), im0.shape[1], im0.shape[0]) + cv2.imshow(str(p), im0) + cv2.waitKey(500 if self.batch[3].startswith('image') else 1) # 1 millisecond + + def save_preds(self, vid_cap, idx, save_path): + """Save video predictions as mp4 at specified path.""" + im0 = self.plotted_img + # Save imgs + if self.dataset.mode == 'image': + cv2.imwrite(save_path, im0) + else: # 'video' or 'stream' + if self.vid_path[idx] != save_path: # new video + self.vid_path[idx] = save_path + if isinstance(self.vid_writer[idx], cv2.VideoWriter): + self.vid_writer[idx].release() # release previous video writer + if vid_cap: # video + fps = int(vid_cap.get(cv2.CAP_PROP_FPS)) # integer required, floats produce error in MP4 codec + w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) + h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) + else: # stream + fps, w, h = 30, im0.shape[1], im0.shape[0] + suffix = '.mp4' if MACOS else '.avi' if WINDOWS else '.avi' + fourcc = 'avc1' if MACOS else 'WMV2' if WINDOWS else 'MJPG' + save_path = str(Path(save_path).with_suffix(suffix)) + self.vid_writer[idx] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*fourcc), fps, (w, h)) + self.vid_writer[idx].write(im0) + + def run_callbacks(self, event: str): + """Runs all registered callbacks for a specific event.""" + for callback in self.callbacks.get(event, []): + callback(self) + + def add_callback(self, event: str, func): + """ + Add callback + """ + self.callbacks[event].append(func) diff --git a/ultralytics/yolo/engine/results.py b/ultralytics/yolo/engine/results.py new file mode 100644 index 0000000..e934730 --- /dev/null +++ b/ultralytics/yolo/engine/results.py @@ -0,0 +1,614 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +""" +Ultralytics Results, Boxes and Masks classes for handling inference results + +Usage: See https://docs.ultralytics.com/modes/predict/ +""" + +from copy import deepcopy +from functools import lru_cache +from pathlib import Path + +import numpy as np +import torch + +from ultralytics.yolo.data.augment import LetterBox +from ultralytics.yolo.utils import LOGGER, SimpleClass, deprecation_warn, ops +from ultralytics.yolo.utils.plotting import Annotator, colors, save_one_box + + +class BaseTensor(SimpleClass): + """ + Base tensor class with additional 
methods for easy manipulation and device handling. + """ + + def __init__(self, data, orig_shape) -> None: + """Initialize BaseTensor with data and original shape. + + Args: + data (torch.Tensor | np.ndarray): Predictions, such as bboxes, masks and keypoints. + orig_shape (tuple): Original shape of image. + """ + assert isinstance(data, (torch.Tensor, np.ndarray)) + self.data = data + self.orig_shape = orig_shape + + @property + def shape(self): + """Return the shape of the data tensor.""" + return self.data.shape + + def cpu(self): + """Return a copy of the tensor on CPU memory.""" + return self if isinstance(self.data, np.ndarray) else self.__class__(self.data.cpu(), self.orig_shape) + + def numpy(self): + """Return a copy of the tensor as a numpy array.""" + return self if isinstance(self.data, np.ndarray) else self.__class__(self.data.numpy(), self.orig_shape) + + def cuda(self): + """Return a copy of the tensor on GPU memory.""" + return self.__class__(torch.as_tensor(self.data).cuda(), self.orig_shape) + + def to(self, *args, **kwargs): + """Return a copy of the tensor with the specified device and dtype.""" + return self.__class__(torch.as_tensor(self.data).to(*args, **kwargs), self.orig_shape) + + def __len__(self): # override len(results) + """Return the length of the data tensor.""" + return len(self.data) + + def __getitem__(self, idx): + """Return a BaseTensor with the specified index of the data tensor.""" + return self.__class__(self.data[idx], self.orig_shape) + + +class Results(SimpleClass): + """ + A class for storing and manipulating inference results. + + Args: + orig_img (numpy.ndarray): The original image as a numpy array. + path (str): The path to the image file. + names (dict): A dictionary of class names. + boxes (torch.tensor, optional): A 2D tensor of bounding box coordinates for each detection. + masks (torch.tensor, optional): A 3D tensor of detection masks, where each mask is a binary image. + probs (torch.tensor, optional): A 1D tensor of probabilities of each class for classification task. + keypoints (List[List[float]], optional): A list of detected keypoints for each object. + + + Attributes: + orig_img (numpy.ndarray): The original image as a numpy array. + orig_shape (tuple): The original image shape in (height, width) format. + boxes (Boxes, optional): A Boxes object containing the detection bounding boxes. + masks (Masks, optional): A Masks object containing the detection masks. + probs (Probs, optional): A Probs object containing probabilities of each class for classification task. + names (dict): A dictionary of class names. + path (str): The path to the image file. + keypoints (Keypoints, optional): A Keypoints object containing detected keypoints for each object. + speed (dict): A dictionary of preprocess, inference and postprocess speeds in milliseconds per image. + _keys (tuple): A tuple of attribute names for non-empty attributes. 
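Given the container classes above, a typical consumption pattern looks like the hedged sketch below (it assumes the tensors are torch tensors, as returned by the predictor; classification results expose `probs` instead of `boxes`).

```python
from ultralytics import YOLO  # assumed top-level export

results = YOLO('yolov8n.pt').predict('bus.jpg')  # List[Results]
for r in results:
    if r.boxes is not None:                      # detect/segment/pose tasks
        xyxy = r.boxes.xyxy.cpu().numpy()        # (N, 4) pixel corner coordinates
        cls = r.boxes.cls.cpu().numpy()          # (N,) class indices
        conf = r.boxes.conf.cpu().numpy()        # (N,) confidences
    if r.probs is not None:                      # classification task
        print(r.names[r.probs.top1], float(r.probs.top1conf))
    first = r[0] if len(r) else None             # Results supports len() and indexing
```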
+ """ + + def __init__(self, orig_img, path, names, boxes=None, masks=None, probs=None, keypoints=None) -> None: + """Initialize the Results class.""" + self.orig_img = orig_img + self.orig_shape = orig_img.shape[:2] + self.boxes = Boxes(boxes, self.orig_shape) if boxes is not None else None # native size boxes + self.masks = Masks(masks, self.orig_shape) if masks is not None else None # native size or imgsz masks + self.probs = Probs(probs) if probs is not None else None + self.keypoints = Keypoints(keypoints, self.orig_shape) if keypoints is not None else None + self.speed = {'preprocess': None, 'inference': None, 'postprocess': None} # milliseconds per image + self.names = names + self.path = path + self.save_dir = None + self._keys = ('boxes', 'masks', 'probs', 'keypoints') + + def __getitem__(self, idx): + """Return a Results object for the specified index.""" + r = self.new() + for k in self.keys: + setattr(r, k, getattr(self, k)[idx]) + return r + + def update(self, boxes=None, masks=None, probs=None): + """Update the boxes, masks, and probs attributes of the Results object.""" + if boxes is not None: + self.boxes = Boxes(boxes, self.orig_shape) + if masks is not None: + self.masks = Masks(masks, self.orig_shape) + if probs is not None: + self.probs = probs + + def cpu(self): + """Return a copy of the Results object with all tensors on CPU memory.""" + r = self.new() + for k in self.keys: + setattr(r, k, getattr(self, k).cpu()) + return r + + def numpy(self): + """Return a copy of the Results object with all tensors as numpy arrays.""" + r = self.new() + for k in self.keys: + setattr(r, k, getattr(self, k).numpy()) + return r + + def cuda(self): + """Return a copy of the Results object with all tensors on GPU memory.""" + r = self.new() + for k in self.keys: + setattr(r, k, getattr(self, k).cuda()) + return r + + def to(self, *args, **kwargs): + """Return a copy of the Results object with tensors on the specified device and dtype.""" + r = self.new() + for k in self.keys: + setattr(r, k, getattr(self, k).to(*args, **kwargs)) + return r + + def __len__(self): + """Return the number of detections in the Results object.""" + for k in self.keys: + return len(getattr(self, k)) + + def new(self): + """Return a new Results object with the same image, path, and names.""" + return Results(orig_img=self.orig_img, path=self.path, names=self.names) + + @property + def keys(self): + """Return a list of non-empty attribute names.""" + return [k for k in self._keys if getattr(self, k) is not None] + + def plot( + self, + conf=True, + line_width=None, + font_size=None, + font='Arial.ttf', + pil=False, + img=None, + im_gpu=None, + kpt_line=True, + labels=True, + boxes=True, + masks=True, + probs=True, + **kwargs # deprecated args TODO: remove support in 8.2 + ): + """ + Plots the detection results on an input RGB image. Accepts a numpy array (cv2) or a PIL Image. + + Args: + conf (bool): Whether to plot the detection confidence score. + line_width (float, optional): The line width of the bounding boxes. If None, it is scaled to the image size. + font_size (float, optional): The font size of the text. If None, it is scaled to the image size. + font (str): The font to use for the text. + pil (bool): Whether to return the image as a PIL Image. + img (numpy.ndarray): Plot to another image. if not, plot to original image. + im_gpu (torch.Tensor): Normalized image in gpu with shape (1, 3, 640, 640), for faster mask plotting. + kpt_line (bool): Whether to draw lines connecting keypoints. 
+ labels (bool): Whether to plot the label of bounding boxes. + boxes (bool): Whether to plot the bounding boxes. + masks (bool): Whether to plot the masks. + probs (bool): Whether to plot classification probability + + Returns: + (numpy.ndarray): A numpy array of the annotated image. + """ + if img is None and isinstance(self.orig_img, torch.Tensor): + img = np.ascontiguousarray(self.orig_img[0].permute(1, 2, 0).cpu().detach().numpy()) * 255 + + # Deprecation warn TODO: remove in 8.2 + if 'show_conf' in kwargs: + deprecation_warn('show_conf', 'conf') + conf = kwargs['show_conf'] + assert type(conf) == bool, '`show_conf` should be of boolean type, i.e, show_conf=True/False' + + if 'line_thickness' in kwargs: + deprecation_warn('line_thickness', 'line_width') + line_width = kwargs['line_thickness'] + assert type(line_width) == int, '`line_width` should be of int type, i.e, line_width=3' + + names = self.names + pred_boxes, show_boxes = self.boxes, boxes + pred_masks, show_masks = self.masks, masks + pred_probs, show_probs = self.probs, probs + annotator = Annotator( + deepcopy(self.orig_img if img is None else img), + line_width, + font_size, + font, + pil or (pred_probs is not None and show_probs), # Classify tasks default to pil=True + example=names) + + # Plot Segment results + if pred_masks and show_masks: + if im_gpu is None: + img = LetterBox(pred_masks.shape[1:])(image=annotator.result()) + im_gpu = torch.as_tensor(img, dtype=torch.float16, device=pred_masks.data.device).permute( + 2, 0, 1).flip(0).contiguous() / 255 + idx = pred_boxes.cls if pred_boxes else range(len(pred_masks)) + annotator.masks(pred_masks.data, colors=[colors(x, True) for x in idx], im_gpu=im_gpu) + + # Plot Detect results + if pred_boxes and show_boxes: + for d in reversed(pred_boxes): + c, conf, id = int(d.cls), float(d.conf) if conf else None, None if d.id is None else int(d.id.item()) + name = ('' if id is None else f'id:{id} ') + names[c] + label = (f'{name} {conf:.2f}' if conf else name) if labels else None + annotator.box_label(d.xyxy.squeeze(), label, color=colors(c, True)) + + # Plot Classify results + if pred_probs is not None and show_probs: + text = ',\n'.join(f'{names[j] if names else j} {pred_probs.data[j]:.2f}' for j in pred_probs.top5) + x = round(self.orig_shape[0] * 0.03) + annotator.text([x, x], text, txt_color=(255, 255, 255)) # TODO: allow setting colors + + # Plot Pose results + if self.keypoints is not None: + for k in reversed(self.keypoints.data): + annotator.kpts(k, self.orig_shape, kpt_line=kpt_line) + + return annotator.result() + + def verbose(self): + """ + Return log string for each task. + """ + log_string = '' + probs = self.probs + boxes = self.boxes + if len(self) == 0: + return log_string if probs is not None else f'{log_string}(no detections), ' + if probs is not None: + log_string += f"{', '.join(f'{self.names[j]} {probs.data[j]:.2f}' for j in probs.top5)}, " + if boxes: + for c in boxes.cls.unique(): + n = (boxes.cls == c).sum() # detections per class + log_string += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, " + return log_string + + def save_txt(self, txt_file, save_conf=False): + """ + Save predictions into txt file. + + Args: + txt_file (str): txt file path. + save_conf (bool): save confidence score or not. 
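A hedged sketch of the visualisation and saving helpers above; `plot()` returns the annotated image as a numpy array, and the paths used here are placeholders.

```python
import cv2
from ultralytics import YOLO  # assumed top-level export

results = YOLO('yolov8n.pt').predict('bus.jpg')

annotated = results[0].plot(conf=True, line_width=2)      # numpy array with boxes/masks/keypoints drawn
cv2.imwrite('annotated.jpg', annotated)

results[0].save_txt('predictions.txt', save_conf=True)    # one normalized 'cls x y w h conf' row per box
results[0].save_crop(save_dir='crops', file_name='im')    # crops written to crops/<class name>/im.jpg
```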
+ """ + boxes = self.boxes + masks = self.masks + probs = self.probs + kpts = self.keypoints + texts = [] + if probs is not None: + # Classify + [texts.append(f'{probs.data[j]:.2f} {self.names[j]}') for j in probs.top5] + elif boxes: + # Detect/segment/pose + for j, d in enumerate(boxes): + c, conf, id = int(d.cls), float(d.conf), None if d.id is None else int(d.id.item()) + line = (c, *d.xywhn.view(-1)) + if masks: + seg = masks[j].xyn[0].copy().reshape(-1) # reversed mask.xyn, (n,2) to (n*2) + line = (c, *seg) + if kpts is not None: + kpt = torch.cat((kpts[j].xyn, kpts[j].conf[..., None]), 2) if kpts[j].has_visible else kpts[j].xyn + line += (*kpt.reshape(-1).tolist(), ) + line += (conf, ) * save_conf + (() if id is None else (id, )) + texts.append(('%g ' * len(line)).rstrip() % line) + + if texts: + with open(txt_file, 'a') as f: + f.writelines(text + '\n' for text in texts) + + def save_crop(self, save_dir, file_name=Path('im.jpg')): + """ + Save cropped predictions to `save_dir/cls/file_name.jpg`. + + Args: + save_dir (str | pathlib.Path): Save path. + file_name (str | pathlib.Path): File name. + """ + if self.probs is not None: + LOGGER.warning('WARNING ⚠️ Classify task do not support `save_crop`.') + return + if isinstance(save_dir, str): + save_dir = Path(save_dir) + if isinstance(file_name, str): + file_name = Path(file_name) + for d in self.boxes: + save_one_box(d.xyxy, + self.orig_img.copy(), + file=save_dir / self.names[int(d.cls)] / f'{file_name.stem}.jpg', + BGR=True) + + def pandas(self): + """Convert the object to a pandas DataFrame (not yet implemented).""" + LOGGER.warning("WARNING ⚠️ 'Results.pandas' method is not yet implemented.") + + def tojson(self, normalize=False): + """Convert the object to JSON format.""" + if self.probs is not None: + LOGGER.warning('Warning: Classify task do not support `tojson` yet.') + return + + import json + + # Create list of detection dictionaries + results = [] + data = self.boxes.data.cpu().tolist() + h, w = self.orig_shape if normalize else (1, 1) + for i, row in enumerate(data): + box = {'x1': row[0] / w, 'y1': row[1] / h, 'x2': row[2] / w, 'y2': row[3] / h} + conf = row[4] + id = int(row[5]) + name = self.names[id] + result = {'name': name, 'class': id, 'confidence': conf, 'box': box} + if self.masks: + x, y = self.masks.xy[i][:, 0], self.masks.xy[i][:, 1] # numpy array + result['segments'] = {'x': (x / w).tolist(), 'y': (y / h).tolist()} + if self.keypoints is not None: + x, y, visible = self.keypoints[i].data[0].cpu().unbind(dim=1) # torch Tensor + result['keypoints'] = {'x': (x / w).tolist(), 'y': (y / h).tolist(), 'visible': visible.tolist()} + results.append(result) + + # Convert detections to JSON + return json.dumps(results, indent=2) + + +class Boxes(BaseTensor): + """ + A class for storing and manipulating detection boxes. + + Args: + boxes (torch.Tensor | numpy.ndarray): A tensor or numpy array containing the detection boxes, + with shape (num_boxes, 6). The last two columns should contain confidence and class values. + orig_shape (tuple): Original image size, in the format (height, width). + + Attributes: + boxes (torch.Tensor | numpy.ndarray): The detection boxes with shape (num_boxes, 6). + orig_shape (torch.Tensor | numpy.ndarray): Original image size, in the format (height, width). + is_track (bool): True if the boxes also include track IDs, False otherwise. + + Properties: + xyxy (torch.Tensor | numpy.ndarray): The boxes in xyxy format. + conf (torch.Tensor | numpy.ndarray): The confidence values of the boxes. 
+ cls (torch.Tensor | numpy.ndarray): The class values of the boxes. + id (torch.Tensor | numpy.ndarray): The track IDs of the boxes (if available). + xywh (torch.Tensor | numpy.ndarray): The boxes in xywh format. + xyxyn (torch.Tensor | numpy.ndarray): The boxes in xyxy format normalized by original image size. + xywhn (torch.Tensor | numpy.ndarray): The boxes in xywh format normalized by original image size. + data (torch.Tensor): The raw bboxes tensor + + Methods: + cpu(): Move the object to CPU memory. + numpy(): Convert the object to a numpy array. + cuda(): Move the object to CUDA memory. + to(*args, **kwargs): Move the object to the specified device. + pandas(): Convert the object to a pandas DataFrame (not yet implemented). + """ + + def __init__(self, boxes, orig_shape) -> None: + """Initialize the Boxes class.""" + if boxes.ndim == 1: + boxes = boxes[None, :] + n = boxes.shape[-1] + assert n in (6, 7), f'expected `n` in [6, 7], but got {n}' # xyxy, (track_id), conf, cls + super().__init__(boxes, orig_shape) + self.is_track = n == 7 + self.orig_shape = orig_shape + + @property + def xyxy(self): + """Return the boxes in xyxy format.""" + return self.data[:, :4] + + @property + def conf(self): + """Return the confidence values of the boxes.""" + return self.data[:, -2] + + @property + def cls(self): + """Return the class values of the boxes.""" + return self.data[:, -1] + + @property + def id(self): + """Return the track IDs of the boxes (if available).""" + return self.data[:, -3] if self.is_track else None + + @property + @lru_cache(maxsize=2) # maxsize 1 should suffice + def xywh(self): + """Return the boxes in xywh format.""" + return ops.xyxy2xywh(self.xyxy) + + @property + @lru_cache(maxsize=2) + def xyxyn(self): + """Return the boxes in xyxy format normalized by original image size.""" + xyxy = self.xyxy.clone() if isinstance(self.xyxy, torch.Tensor) else np.copy(self.xyxy) + xyxy[..., [0, 2]] /= self.orig_shape[1] + xyxy[..., [1, 3]] /= self.orig_shape[0] + return xyxy + + @property + @lru_cache(maxsize=2) + def xywhn(self): + """Return the boxes in xywh format normalized by original image size.""" + xywh = ops.xyxy2xywh(self.xyxy) + xywh[..., [0, 2]] /= self.orig_shape[1] + xywh[..., [1, 3]] /= self.orig_shape[0] + return xywh + + @property + def boxes(self): + """Return the raw bboxes tensor (deprecated).""" + LOGGER.warning("WARNING ⚠️ 'Boxes.boxes' is deprecated. Use 'Boxes.data' instead.") + return self.data + + +class Masks(BaseTensor): + """ + A class for storing and manipulating detection masks. + + Args: + masks (torch.Tensor | np.ndarray): A tensor containing the detection masks, with shape (num_masks, height, width). + orig_shape (tuple): Original image size, in the format (height, width). + + Attributes: + masks (torch.Tensor | np.ndarray): A tensor containing the detection masks, with shape (num_masks, height, width). + orig_shape (tuple): Original image size, in the format (height, width). + + Properties: + xy (list): A list of segments (pixels) which includes x, y segments of each detection. + xyn (list): A list of segments (normalized) which includes x, y segments of each detection. + + Methods: + cpu(): Returns a copy of the masks tensor on CPU memory. + numpy(): Returns a copy of the masks tensor as a numpy array. + cuda(): Returns a copy of the masks tensor on GPU memory. + to(): Returns a copy of the masks tensor with the specified device and dtype. 
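The coordinate properties of `Boxes` above are thin wrappers over `ops` helpers plus normalization by the original image shape; the standalone sketch below reproduces that arithmetic on a dummy box (`ops.xyxy2xywh` is the helper already imported in this module).

```python
import torch
from ultralytics.yolo.utils import ops  # same ops module used by Boxes above

xyxy = torch.tensor([[100., 200., 300., 400.]])  # one box as (x1, y1, x2, y2) pixels
xywh = ops.xyxy2xywh(xyxy)                       # -> (cx, cy, w, h) = (200, 300, 200, 200)

h, w = 480, 640                                  # orig_shape = (height, width)
xyxyn = xyxy.clone()
xyxyn[..., [0, 2]] /= w                          # normalize x coordinates by image width
xyxyn[..., [1, 3]] /= h                          # normalize y coordinates by image height
print(xywh, xyxyn)
```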
+ """ + + def __init__(self, masks, orig_shape) -> None: + """Initialize the Masks class.""" + if masks.ndim == 2: + masks = masks[None, :] + super().__init__(masks, orig_shape) + + @property + @lru_cache(maxsize=1) + def segments(self): + """Return segments (deprecated; normalized).""" + LOGGER.warning("WARNING ⚠️ 'Masks.segments' is deprecated. Use 'Masks.xyn' for segments (normalized) and " + "'Masks.xy' for segments (pixels) instead.") + return self.xyn + + @property + @lru_cache(maxsize=1) + def xyn(self): + """Return segments (normalized).""" + return [ + ops.scale_coords(self.data.shape[1:], x, self.orig_shape, normalize=True) + for x in ops.masks2segments(self.data)] + + @property + @lru_cache(maxsize=1) + def xy(self): + """Return segments (pixels).""" + return [ + ops.scale_coords(self.data.shape[1:], x, self.orig_shape, normalize=False) + for x in ops.masks2segments(self.data)] + + @property + def masks(self): + """Return the raw masks tensor (deprecated).""" + LOGGER.warning("WARNING ⚠️ 'Masks.masks' is deprecated. Use 'Masks.data' instead.") + return self.data + + def pandas(self): + """Convert the object to a pandas DataFrame (not yet implemented).""" + LOGGER.warning("WARNING ⚠️ 'Masks.pandas' method is not yet implemented.") + + +class Keypoints(BaseTensor): + """ + A class for storing and manipulating detection keypoints. + + Args: + keypoints (torch.Tensor | np.ndarray): A tensor containing the detection keypoints, with shape (num_dets, num_kpts, 2/3). + orig_shape (tuple): Original image size, in the format (height, width). + + Attributes: + keypoints (torch.Tensor | np.ndarray): A tensor containing the detection keypoints, with shape (num_dets, num_kpts, 2/3). + orig_shape (tuple): Original image size, in the format (height, width). + + Properties: + xy (list): A list of keypoints (pixels) which includes x, y keypoints of each detection. + xyn (list): A list of keypoints (normalized) which includes x, y keypoints of each detection. + + Methods: + cpu(): Returns a copy of the keypoints tensor on CPU memory. + numpy(): Returns a copy of the keypoints tensor as a numpy array. + cuda(): Returns a copy of the keypoints tensor on GPU memory. + to(): Returns a copy of the keypoints tensor with the specified device and dtype. + """ + + def __init__(self, keypoints, orig_shape) -> None: + if keypoints.ndim == 2: + keypoints = keypoints[None, :] + super().__init__(keypoints, orig_shape) + self.has_visible = self.data.shape[-1] == 3 + + @property + @lru_cache(maxsize=1) + def xy(self): + return self.data[..., :2] + + @property + @lru_cache(maxsize=1) + def xyn(self): + xy = self.xy.clone() if isinstance(self.xy, torch.Tensor) else np.copy(self.xy) + xy[..., 0] /= self.orig_shape[1] + xy[..., 1] /= self.orig_shape[0] + return xy + + @property + @lru_cache(maxsize=1) + def conf(self): + return self.data[..., 2] if self.has_visible else None + + +class Probs(BaseTensor): + """ + A class for storing and manipulating classify predictions. + + Args: + probs (torch.Tensor | np.ndarray): A tensor containing the detection keypoints, with shape (num_class, ). + + Attributes: + probs (torch.Tensor | np.ndarray): A tensor containing the detection keypoints, with shape (num_class). + + Properties: + top5 (list[int]): Top 1 indice. + top1 (int): Top 5 indices. + + Methods: + cpu(): Returns a copy of the probs tensor on CPU memory. + numpy(): Returns a copy of the probs tensor as a numpy array. + cuda(): Returns a copy of the probs tensor on GPU memory. 
+ to(): Returns a copy of the probs tensor with the specified device and dtype. + """ + + def __init__(self, probs, orig_shape=None) -> None: + super().__init__(probs, orig_shape) + + @property + @lru_cache(maxsize=1) + def top5(self): + """Return the indices of top 5.""" + return (-self.data).argsort(0)[:5].tolist() # this way works with both torch and numpy. + + @property + @lru_cache(maxsize=1) + def top1(self): + """Return the indices of top 1.""" + return int(self.data.argmax()) + + @property + @lru_cache(maxsize=1) + def top5conf(self): + """Return the confidences of top 5.""" + return self.data[self.top5] + + @property + @lru_cache(maxsize=1) + def top1conf(self): + """Return the confidences of top 1.""" + return self.data[self.top1] diff --git a/ultralytics/yolo/engine/trainer.py b/ultralytics/yolo/engine/trainer.py new file mode 100644 index 0000000..144be9c --- /dev/null +++ b/ultralytics/yolo/engine/trainer.py @@ -0,0 +1,664 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +""" +Train a model on a dataset + +Usage: + $ yolo mode=train model=yolov8n.pt data=coco128.yaml imgsz=640 epochs=100 batch=16 +""" +import math +import os +import subprocess +import time +from copy import deepcopy +from datetime import datetime, timedelta +from pathlib import Path + +import numpy as np +import torch +from torch import distributed as dist +from torch import nn, optim +from torch.cuda import amp +from torch.nn.parallel import DistributedDataParallel as DDP +from tqdm import tqdm + +from ultralytics.nn.tasks import attempt_load_one_weight, attempt_load_weights +from ultralytics.yolo.cfg import get_cfg +from ultralytics.yolo.data.utils import check_cls_dataset, check_det_dataset +from ultralytics.yolo.utils import (DEFAULT_CFG, LOGGER, RANK, SETTINGS, TQDM_BAR_FORMAT, __version__, callbacks, + clean_url, colorstr, emojis, yaml_save) +from ultralytics.yolo.utils.autobatch import check_train_batch_size +from ultralytics.yolo.utils.checks import check_amp, check_file, check_imgsz, print_args +from ultralytics.yolo.utils.dist import ddp_cleanup, generate_ddp_command +from ultralytics.yolo.utils.files import get_latest_run, increment_path +from ultralytics.yolo.utils.torch_utils import (EarlyStopping, ModelEMA, de_parallel, init_seeds, one_cycle, + select_device, strip_optimizer) + + +class BaseTrainer: + """ + BaseTrainer + + A base class for creating trainers. + + Attributes: + args (SimpleNamespace): Configuration for the trainer. + check_resume (method): Method to check if training should be resumed from a saved checkpoint. + validator (BaseValidator): Validator instance. + model (nn.Module): Model instance. + callbacks (defaultdict): Dictionary of callbacks. + save_dir (Path): Directory to save results. + wdir (Path): Directory to save weights. + last (Path): Path to last checkpoint. + best (Path): Path to best checkpoint. + save_period (int): Save checkpoint every x epochs (disabled if < 1). + batch_size (int): Batch size for training. + epochs (int): Number of epochs to train for. + start_epoch (int): Starting epoch for training. + device (torch.device): Device to use for training. + amp (bool): Flag to enable AMP (Automatic Mixed Precision). + scaler (amp.GradScaler): Gradient scaler for AMP. + data (str): Path to data. + trainset (torch.utils.data.Dataset): Training dataset. + testset (torch.utils.data.Dataset): Testing dataset. + ema (nn.Module): EMA (Exponential Moving Average) of the model. + lf (nn.Module): Loss function. 
+ scheduler (torch.optim.lr_scheduler._LRScheduler): Learning rate scheduler. + best_fitness (float): The best fitness value achieved. + fitness (float): Current fitness value. + loss (float): Current loss value. + tloss (float): Total loss value. + loss_names (list): List of loss names. + csv (Path): Path to results CSV file. + """ + + def __init__(self, cfg=DEFAULT_CFG, overrides=None, _callbacks=None): + """ + Initializes the BaseTrainer class. + + Args: + cfg (str, optional): Path to a configuration file. Defaults to DEFAULT_CFG. + overrides (dict, optional): Configuration overrides. Defaults to None. + """ + self.args = get_cfg(cfg, overrides) + self.device = select_device(self.args.device, self.args.batch) + self.check_resume() + self.validator = None + self.model = None + self.metrics = None + self.plots = {} + init_seeds(self.args.seed + 1 + RANK, deterministic=self.args.deterministic) + + # Dirs + project = self.args.project or Path(SETTINGS['runs_dir']) / self.args.task + name = self.args.name or f'{self.args.mode}' + if hasattr(self.args, 'save_dir'): + self.save_dir = Path(self.args.save_dir) + else: + self.save_dir = Path( + increment_path(Path(project) / name, exist_ok=self.args.exist_ok if RANK in (-1, 0) else True)) + self.wdir = self.save_dir / 'weights' # weights dir + if RANK in (-1, 0): + self.wdir.mkdir(parents=True, exist_ok=True) # make dir + self.args.save_dir = str(self.save_dir) + yaml_save(self.save_dir / 'args.yaml', vars(self.args)) # save run args + self.last, self.best = self.wdir / 'last.pt', self.wdir / 'best.pt' # checkpoint paths + self.save_period = self.args.save_period + + self.batch_size = self.args.batch + self.epochs = self.args.epochs + self.start_epoch = 0 + if RANK == -1: + print_args(vars(self.args)) + + # Device + if self.device.type == 'cpu': + self.args.workers = 0 # faster CPU training as time dominated by inference, not dataloading + + # Model and Dataset + self.model = self.args.model + try: + if self.args.task == 'classify': + self.data = check_cls_dataset(self.args.data) + elif self.args.data.endswith('.yaml') or self.args.task in ('detect', 'segment'): + self.data = check_det_dataset(self.args.data) + if 'yaml_file' in self.data: + self.args.data = self.data['yaml_file'] # for validating 'yolo train data=url.zip' usage + except Exception as e: + raise RuntimeError(emojis(f"Dataset '{clean_url(self.args.data)}' error ❌ {e}")) from e + + self.trainset, self.testset = self.get_dataset(self.data) + self.ema = None + + # Optimization utils init + self.lf = None + self.scheduler = None + + # Epoch level metrics + self.best_fitness = None + self.fitness = None + self.loss = None + self.tloss = None + self.loss_names = ['Loss'] + self.csv = self.save_dir / 'results.csv' + self.plot_idx = [0, 1, 2] + + # Callbacks + self.callbacks = _callbacks or callbacks.get_default_callbacks() + if RANK in (-1, 0): + callbacks.add_integration_callbacks(self) + + def add_callback(self, event: str, callback): + """ + Appends the given callback. + """ + self.callbacks[event].append(callback) + + def set_callback(self, event: str, callback): + """ + Overrides the existing callbacks with the given callback. 
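Callbacks appended through `add_callback` (or replaced via `set_callback`) are invoked by `run_callbacks` at the event points used throughout training, e.g. `'on_fit_epoch_end'` after each epoch's validation. A hedged registration sketch, assuming the top-level `YOLO` import:

```python
from ultralytics import YOLO  # assumed top-level export

def on_fit_epoch_end(trainer):
    """Receives the trainer instance after each epoch's validation step."""
    print(f'epoch {trainer.epoch + 1}: fitness={trainer.fitness}')

model = YOLO('yolov8n.pt')
model.add_callback('on_fit_epoch_end', on_fit_epoch_end)  # appended next to the default callbacks
model.train(data='coco128.yaml', epochs=3)
```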
+ """ + self.callbacks[event] = [callback] + + def run_callbacks(self, event: str): + """Run all existing callbacks associated with a particular event.""" + for callback in self.callbacks.get(event, []): + callback(self) + + def train(self): + """Allow device='', device=None on Multi-GPU systems to default to device=0.""" + if isinstance(self.args.device, int) or self.args.device: # i.e. device=0 or device=[0,1,2,3] + world_size = torch.cuda.device_count() + elif torch.cuda.is_available(): # i.e. device=None or device='' + world_size = 1 # default to device 0 + else: # i.e. device='cpu' or 'mps' + world_size = 0 + + # Run subprocess if DDP training, else train normally + if world_size > 1 and 'LOCAL_RANK' not in os.environ: + # Argument checks + if self.args.rect: + LOGGER.warning("WARNING ⚠️ 'rect=True' is incompatible with Multi-GPU training, setting rect=False") + self.args.rect = False + # Command + cmd, file = generate_ddp_command(world_size, self) + try: + LOGGER.info(f'DDP command: {cmd}') + subprocess.run(cmd, check=True) + except Exception as e: + raise e + finally: + ddp_cleanup(self, str(file)) + else: + self._do_train(world_size) + + def _setup_ddp(self, world_size): + """Initializes and sets the DistributedDataParallel parameters for training.""" + torch.cuda.set_device(RANK) + self.device = torch.device('cuda', RANK) + LOGGER.info(f'DDP info: RANK {RANK}, WORLD_SIZE {world_size}, DEVICE {self.device}') + os.environ['NCCL_BLOCKING_WAIT'] = '1' # set to enforce timeout + dist.init_process_group( + 'nccl' if dist.is_nccl_available() else 'gloo', + timeout=timedelta(seconds=10800), # 3 hours + rank=RANK, + world_size=world_size) + + def _setup_train(self, world_size): + """ + Builds dataloaders and optimizer on correct rank process. + """ + # Model + self.run_callbacks('on_pretrain_routine_start') + ckpt = self.setup_model() + self.model = self.model.to(self.device) + self.set_model_attributes() + # Check AMP + self.amp = torch.tensor(self.args.amp).to(self.device) # True or False + if self.amp and RANK in (-1, 0): # Single-GPU and DDP + callbacks_backup = callbacks.default_callbacks.copy() # backup callbacks as check_amp() resets them + self.amp = torch.tensor(check_amp(self.model), device=self.device) + callbacks.default_callbacks = callbacks_backup # restore callbacks + if RANK > -1 and world_size > 1: # DDP + dist.broadcast(self.amp, src=0) # broadcast the tensor from rank 0 to all other ranks (returns None) + self.amp = bool(self.amp) # as boolean + self.scaler = amp.GradScaler(enabled=self.amp) + if world_size > 1: + self.model = DDP(self.model, device_ids=[RANK]) + # Check imgsz + gs = max(int(self.model.stride.max() if hasattr(self.model, 'stride') else 32), 32) # grid size (max stride) + self.args.imgsz = check_imgsz(self.args.imgsz, stride=gs, floor=gs, max_dim=1) + # Batch size + if self.batch_size == -1: + if RANK == -1: # single-GPU only, estimate best batch size + self.args.batch = self.batch_size = check_train_batch_size(self.model, self.args.imgsz, self.amp) + else: + SyntaxError('batch=-1 to use AutoBatch is only available in Single-GPU training. ' + 'Please pass a valid batch size value for Multi-GPU DDP training, i.e. 
batch=16') + + # Dataloaders + batch_size = self.batch_size // max(world_size, 1) + self.train_loader = self.get_dataloader(self.trainset, batch_size=batch_size, rank=RANK, mode='train') + if RANK in (-1, 0): + self.test_loader = self.get_dataloader(self.testset, batch_size=batch_size * 2, rank=-1, mode='val') + self.validator = self.get_validator() + metric_keys = self.validator.metrics.keys + self.label_loss_items(prefix='val') + self.metrics = dict(zip(metric_keys, [0] * len(metric_keys))) # TODO: init metrics for plot_results()? + self.ema = ModelEMA(self.model) + if self.args.plots and not self.args.v5loader: + self.plot_training_labels() + + # Optimizer + self.accumulate = max(round(self.args.nbs / self.batch_size), 1) # accumulate loss before optimizing + weight_decay = self.args.weight_decay * self.batch_size * self.accumulate / self.args.nbs # scale weight_decay + iterations = math.ceil(len(self.train_loader.dataset) / max(self.batch_size, self.args.nbs)) * self.epochs + self.optimizer = self.build_optimizer(model=self.model, + name=self.args.optimizer, + lr=self.args.lr0, + momentum=self.args.momentum, + decay=weight_decay, + iterations=iterations) + # Scheduler + if self.args.cos_lr: + self.lf = one_cycle(1, self.args.lrf, self.epochs) # cosine 1->hyp['lrf'] + else: + self.lf = lambda x: (1 - x / self.epochs) * (1.0 - self.args.lrf) + self.args.lrf # linear + self.scheduler = optim.lr_scheduler.LambdaLR(self.optimizer, lr_lambda=self.lf) + self.stopper, self.stop = EarlyStopping(patience=self.args.patience), False + self.resume_training(ckpt) + self.scheduler.last_epoch = self.start_epoch - 1 # do not move + self.run_callbacks('on_pretrain_routine_end') + + def _do_train(self, world_size=1): + """Train completed, evaluate and plot if specified by arguments.""" + if world_size > 1: + self._setup_ddp(world_size) + + self._setup_train(world_size) + + self.epoch_time = None + self.epoch_time_start = time.time() + self.train_time_start = time.time() + nb = len(self.train_loader) # number of batches + nw = max(round(self.args.warmup_epochs * + nb), 100) if self.args.warmup_epochs > 0 else -1 # number of warmup iterations + last_opt_step = -1 + self.run_callbacks('on_train_start') + LOGGER.info(f'Image sizes {self.args.imgsz} train, {self.args.imgsz} val\n' + f'Using {self.train_loader.num_workers * (world_size or 1)} dataloader workers\n' + f"Logging results to {colorstr('bold', self.save_dir)}\n" + f'Starting training for {self.epochs} epochs...') + if self.args.close_mosaic: + base_idx = (self.epochs - self.args.close_mosaic) * nb + self.plot_idx.extend([base_idx, base_idx + 1, base_idx + 2]) + epoch = self.epochs # predefine for resume fully trained model edge cases + for epoch in range(self.start_epoch, self.epochs): + self.epoch = epoch + self.run_callbacks('on_train_epoch_start') + self.model.train() + if RANK != -1: + self.train_loader.sampler.set_epoch(epoch) + pbar = enumerate(self.train_loader) + # Update dataloader attributes (optional) + if epoch == (self.epochs - self.args.close_mosaic): + LOGGER.info('Closing dataloader mosaic') + if hasattr(self.train_loader.dataset, 'mosaic'): + self.train_loader.dataset.mosaic = False + if hasattr(self.train_loader.dataset, 'close_mosaic'): + self.train_loader.dataset.close_mosaic(hyp=self.args) + self.train_loader.reset() + + if RANK in (-1, 0): + LOGGER.info(self.progress_string()) + pbar = tqdm(enumerate(self.train_loader), total=nb, bar_format=TQDM_BAR_FORMAT) + self.tloss = None + self.optimizer.zero_grad() + for i, batch in 
pbar: + self.run_callbacks('on_train_batch_start') + # Warmup + ni = i + nb * epoch + if ni <= nw: + xi = [0, nw] # x interp + self.accumulate = max(1, np.interp(ni, xi, [1, self.args.nbs / self.batch_size]).round()) + for j, x in enumerate(self.optimizer.param_groups): + # Bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0 + x['lr'] = np.interp( + ni, xi, [self.args.warmup_bias_lr if j == 0 else 0.0, x['initial_lr'] * self.lf(epoch)]) + if 'momentum' in x: + x['momentum'] = np.interp(ni, xi, [self.args.warmup_momentum, self.args.momentum]) + + # Forward + with torch.cuda.amp.autocast(self.amp): + batch = self.preprocess_batch(batch) + self.loss, self.loss_items = self.model(batch) + if RANK != -1: + self.loss *= world_size + self.tloss = (self.tloss * i + self.loss_items) / (i + 1) if self.tloss is not None \ + else self.loss_items + + # Backward + self.scaler.scale(self.loss).backward() + + # Optimize - https://pytorch.org/docs/master/notes/amp_examples.html + if ni - last_opt_step >= self.accumulate: + self.optimizer_step() + last_opt_step = ni + + # Log + mem = f'{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G' # (GB) + loss_len = self.tloss.shape[0] if len(self.tloss.size()) else 1 + losses = self.tloss if loss_len > 1 else torch.unsqueeze(self.tloss, 0) + if RANK in (-1, 0): + pbar.set_description( + ('%11s' * 2 + '%11.4g' * (2 + loss_len)) % + (f'{epoch + 1}/{self.epochs}', mem, *losses, batch['cls'].shape[0], batch['img'].shape[-1])) + self.run_callbacks('on_batch_end') + if self.args.plots and ni in self.plot_idx: + self.plot_training_samples(batch, ni) + + self.run_callbacks('on_train_batch_end') + + self.lr = {f'lr/pg{ir}': x['lr'] for ir, x in enumerate(self.optimizer.param_groups)} # for loggers + + self.scheduler.step() + self.run_callbacks('on_train_epoch_end') + + if RANK in (-1, 0): + + # Validation + self.ema.update_attr(self.model, include=['yaml', 'nc', 'args', 'names', 'stride', 'class_weights']) + final_epoch = (epoch + 1 == self.epochs) or self.stopper.possible_stop + + if self.args.val or final_epoch: + self.metrics, self.fitness = self.validate() + self.save_metrics(metrics={**self.label_loss_items(self.tloss), **self.metrics, **self.lr}) + self.stop = self.stopper(epoch + 1, self.fitness) + + # Save model + if self.args.save or (epoch + 1 == self.epochs): + self.save_model() + self.run_callbacks('on_model_save') + + tnow = time.time() + self.epoch_time = tnow - self.epoch_time_start + self.epoch_time_start = tnow + self.run_callbacks('on_fit_epoch_end') + torch.cuda.empty_cache() # clears GPU vRAM at end of epoch, can help with out of memory errors + + # Early Stopping + if RANK != -1: # if DDP training + broadcast_list = [self.stop if RANK == 0 else None] + dist.broadcast_object_list(broadcast_list, 0) # broadcast 'stop' to all ranks + if RANK != 0: + self.stop = broadcast_list[0] + if self.stop: + break # must break all DDP ranks + + if RANK in (-1, 0): + # Do final val with best.pt + LOGGER.info(f'\n{epoch - self.start_epoch + 1} epochs completed in ' + f'{(time.time() - self.train_time_start) / 3600:.3f} hours.') + self.final_eval() + if self.args.plots: + self.plot_metrics() + self.run_callbacks('on_train_end') + torch.cuda.empty_cache() + self.run_callbacks('teardown') + + def save_model(self): + """Save model checkpoints based on various conditions.""" + ckpt = { + 'epoch': self.epoch, + 'best_fitness': self.best_fitness, + 'model': deepcopy(de_parallel(self.model)).half(), + 'ema': deepcopy(self.ema.ema).half(), 
+ 'updates': self.ema.updates, + 'optimizer': self.optimizer.state_dict(), + 'train_args': vars(self.args), # save as dict + 'date': datetime.now().isoformat(), + 'version': __version__} + + # Use dill (if exists) to serialize the lambda functions where pickle does not do this + try: + import dill as pickle + except ImportError: + import pickle + + # Save last, best and delete + torch.save(ckpt, self.last, pickle_module=pickle) + if self.best_fitness == self.fitness: + torch.save(ckpt, self.best, pickle_module=pickle) + if (self.epoch > 0) and (self.save_period > 0) and (self.epoch % self.save_period == 0): + torch.save(ckpt, self.wdir / f'epoch{self.epoch}.pt', pickle_module=pickle) + del ckpt + + @staticmethod + def get_dataset(data): + """ + Get train, val path from data dict if it exists. Returns None if data format is not recognized. + """ + return data['train'], data.get('val') or data.get('test') + + def setup_model(self): + """ + load/create/download model for any task. + """ + if isinstance(self.model, torch.nn.Module): # if model is loaded beforehand. No setup needed + return + + model, weights = self.model, None + ckpt = None + if str(model).endswith('.pt'): + weights, ckpt = attempt_load_one_weight(model) + cfg = ckpt['model'].yaml + else: + cfg = model + self.model = self.get_model(cfg=cfg, weights=weights, verbose=RANK == -1) # calls Model(cfg, weights) + return ckpt + + def optimizer_step(self): + """Perform a single step of the training optimizer with gradient clipping and EMA update.""" + self.scaler.unscale_(self.optimizer) # unscale gradients + torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=10.0) # clip gradients + self.scaler.step(self.optimizer) + self.scaler.update() + self.optimizer.zero_grad() + if self.ema: + self.ema.update(self.model) + + def preprocess_batch(self, batch): + """ + Allows custom preprocessing model inputs and ground truths depending on task type. + """ + return batch + + def validate(self): + """ + Runs validation on test set using self.validator. The returned dict is expected to contain "fitness" key. + """ + metrics = self.validator(self) + fitness = metrics.pop('fitness', -self.loss.detach().cpu().numpy()) # use loss as fitness measure if not found + if not self.best_fitness or self.best_fitness < fitness: + self.best_fitness = fitness + return metrics, fitness + + def get_model(self, cfg=None, weights=None, verbose=True): + """Get model and raise NotImplementedError for loading cfg files.""" + raise NotImplementedError("This task trainer doesn't support loading cfg files") + + def get_validator(self): + """Returns a NotImplementedError when the get_validator function is called.""" + raise NotImplementedError('get_validator function not implemented in trainer') + + def get_dataloader(self, dataset_path, batch_size=16, rank=0, mode='train'): + """ + Returns dataloader derived from torch.data.Dataloader. 
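+ Subclasses must override this method; the base implementation below raises NotImplementedError.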
+ """ + raise NotImplementedError('get_dataloader function not implemented in trainer') + + def build_dataset(self, img_path, mode='train', batch=None): + """Build dataset""" + raise NotImplementedError('build_dataset function not implemented in trainer') + + def label_loss_items(self, loss_items=None, prefix='train'): + """ + Returns a loss dict with labelled training loss items tensor + """ + # Not needed for classification but necessary for segmentation & detection + return {'loss': loss_items} if loss_items is not None else ['loss'] + + def set_model_attributes(self): + """ + To set or update model parameters before training. + """ + self.model.names = self.data['names'] + + def build_targets(self, preds, targets): + """Builds target tensors for training YOLO model.""" + pass + + def progress_string(self): + """Returns a string describing training progress.""" + return '' + + # TODO: may need to put these following functions into callback + def plot_training_samples(self, batch, ni): + """Plots training samples during YOLOv5 training.""" + pass + + def plot_training_labels(self): + """Plots training labels for YOLO model.""" + pass + + def save_metrics(self, metrics): + """Saves training metrics to a CSV file.""" + keys, vals = list(metrics.keys()), list(metrics.values()) + n = len(metrics) + 1 # number of cols + s = '' if self.csv.exists() else (('%23s,' * n % tuple(['epoch'] + keys)).rstrip(',') + '\n') # header + with open(self.csv, 'a') as f: + f.write(s + ('%23.5g,' * n % tuple([self.epoch] + vals)).rstrip(',') + '\n') + + def plot_metrics(self): + """Plot and display metrics visually.""" + pass + + def on_plot(self, name, data=None): + """Registers plots (e.g. to be consumed in callbacks)""" + self.plots[name] = {'data': data, 'timestamp': time.time()} + + def final_eval(self): + """Performs final evaluation and validation for object detection YOLO model.""" + for f in self.last, self.best: + if f.exists(): + strip_optimizer(f) # strip optimizers + if f is self.best: + LOGGER.info(f'\nValidating {f}...') + self.metrics = self.validator(model=f) + self.metrics.pop('fitness', None) + self.run_callbacks('on_fit_epoch_end') + + def check_resume(self): + """Check if resume checkpoint exists and update arguments accordingly.""" + resume = self.args.resume + if resume: + try: + exists = isinstance(resume, (str, Path)) and Path(resume).exists() + last = Path(check_file(resume) if exists else get_latest_run()) + + # Check that resume data YAML exists, otherwise strip to force re-download of dataset + ckpt_args = attempt_load_weights(last).args + if not Path(ckpt_args['data']).exists(): + ckpt_args['data'] = self.args.data + + self.args = get_cfg(ckpt_args) + self.args.model, resume = str(last), True # reinstate + except Exception as e: + raise FileNotFoundError('Resume checkpoint not found. Please pass a valid checkpoint to resume from, ' + "i.e. 
'yolo train resume model=path/to/last.pt'") from e + self.resume = resume + + def resume_training(self, ckpt): + """Resume YOLO training from given epoch and best fitness.""" + if ckpt is None: + return + best_fitness = 0.0 + start_epoch = ckpt['epoch'] + 1 + if ckpt['optimizer'] is not None: + self.optimizer.load_state_dict(ckpt['optimizer']) # optimizer + best_fitness = ckpt['best_fitness'] + if self.ema and ckpt.get('ema'): + self.ema.ema.load_state_dict(ckpt['ema'].float().state_dict()) # EMA + self.ema.updates = ckpt['updates'] + if self.resume: + assert start_epoch > 0, \ + f'{self.args.model} training to {self.epochs} epochs is finished, nothing to resume.\n' \ + f"Start a new training without resuming, i.e. 'yolo train model={self.args.model}'" + LOGGER.info( + f'Resuming training from {self.args.model} from epoch {start_epoch + 1} to {self.epochs} total epochs') + if self.epochs < start_epoch: + LOGGER.info( + f"{self.model} has been trained for {ckpt['epoch']} epochs. Fine-tuning for {self.epochs} more epochs.") + self.epochs += ckpt['epoch'] # finetune additional epochs + self.best_fitness = best_fitness + self.start_epoch = start_epoch + if start_epoch > (self.epochs - self.args.close_mosaic): + LOGGER.info('Closing dataloader mosaic') + if hasattr(self.train_loader.dataset, 'mosaic'): + self.train_loader.dataset.mosaic = False + if hasattr(self.train_loader.dataset, 'close_mosaic'): + self.train_loader.dataset.close_mosaic(hyp=self.args) + + def build_optimizer(self, model, name='auto', lr=0.001, momentum=0.9, decay=1e-5, iterations=1e5): + """ + Constructs an optimizer for the given model, based on the specified optimizer name, learning rate, + momentum, weight decay, and number of iterations. + + Args: + model (torch.nn.Module): The model for which to build an optimizer. + name (str, optional): The name of the optimizer to use. If 'auto', the optimizer is selected + based on the number of iterations. Default: 'auto'. + lr (float, optional): The learning rate for the optimizer. Default: 0.001. + momentum (float, optional): The momentum factor for the optimizer. Default: 0.9. + decay (float, optional): The weight decay for the optimizer. Default: 1e-5. + iterations (float, optional): The number of iterations, which determines the optimizer if + name is 'auto'. Default: 1e5. + + Returns: + (torch.optim.Optimizer): The constructed optimizer. + """ + + g = [], [], [] # optimizer parameter groups + bn = tuple(v for k, v in nn.__dict__.items() if 'Norm' in k) # normalization layers, i.e. 
BatchNorm2d() + if name == 'auto': + nc = getattr(model, 'nc', 10) # number of classes + lr_fit = round(0.002 * 5 / (4 + nc), 6) # lr0 fit equation to 6 decimal places + name, lr, momentum = ('SGD', 0.01, 0.9) if iterations > 10000 else ('AdamW', lr_fit, 0.9) + self.args.warmup_bias_lr = 0.0 # no higher than 0.01 for Adam + + for module_name, module in model.named_modules(): + for param_name, param in module.named_parameters(recurse=False): + fullname = f'{module_name}.{param_name}' if module_name else param_name + if 'bias' in fullname: # bias (no decay) + g[2].append(param) + elif isinstance(module, bn): # weight (no decay) + g[1].append(param) + else: # weight (with decay) + g[0].append(param) + + if name in ('Adam', 'Adamax', 'AdamW', 'NAdam', 'RAdam'): + optimizer = getattr(optim, name, optim.Adam)(g[2], lr=lr, betas=(momentum, 0.999), weight_decay=0.0) + elif name == 'RMSProp': + optimizer = optim.RMSprop(g[2], lr=lr, momentum=momentum) + elif name == 'SGD': + optimizer = optim.SGD(g[2], lr=lr, momentum=momentum, nesterov=True) + else: + raise NotImplementedError( + f"Optimizer '{name}' not found in list of available optimizers " + f'[Adam, AdamW, NAdam, RAdam, RMSProp, SGD, auto].' + 'To request support for addition optimizers please visit https://github.com/ultralytics/ultralytics.') + + optimizer.add_param_group({'params': g[0], 'weight_decay': decay}) # add g0 with weight_decay + optimizer.add_param_group({'params': g[1], 'weight_decay': 0.0}) # add g1 (BatchNorm2d weights) + LOGGER.info( + f"{colorstr('optimizer:')} {type(optimizer).__name__}(lr={lr}, momentum={momentum}) with parameter groups " + f'{len(g[1])} weight(decay=0.0), {len(g[0])} weight(decay={decay}), {len(g[2])} bias(decay=0.0)') + return optimizer diff --git a/ultralytics/yolo/engine/validator.py b/ultralytics/yolo/engine/validator.py new file mode 100644 index 0000000..f84c8d0 --- /dev/null +++ b/ultralytics/yolo/engine/validator.py @@ -0,0 +1,276 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +""" +Check a model's accuracy on a test or val split of a dataset + +Usage: + $ yolo mode=val model=yolov8n.pt data=coco128.yaml imgsz=640 + +Usage - formats: + $ yolo mode=val model=yolov8n.pt # PyTorch + yolov8n.torchscript # TorchScript + yolov8n.onnx # ONNX Runtime or OpenCV DNN with dnn=True + yolov8n_openvino_model # OpenVINO + yolov8n.engine # TensorRT + yolov8n.mlmodel # CoreML (macOS-only) + yolov8n_saved_model # TensorFlow SavedModel + yolov8n.pb # TensorFlow GraphDef + yolov8n.tflite # TensorFlow Lite + yolov8n_edgetpu.tflite # TensorFlow Edge TPU + yolov8n_paddle_model # PaddlePaddle +""" +import json +import time +from pathlib import Path + +import torch +from tqdm import tqdm + +from ultralytics.nn.autobackend import AutoBackend +from ultralytics.yolo.cfg import get_cfg +from ultralytics.yolo.data.utils import check_cls_dataset, check_det_dataset +from ultralytics.yolo.utils import DEFAULT_CFG, LOGGER, RANK, SETTINGS, TQDM_BAR_FORMAT, callbacks, colorstr, emojis +from ultralytics.yolo.utils.checks import check_imgsz +from ultralytics.yolo.utils.files import increment_path +from ultralytics.yolo.utils.ops import Profile +from ultralytics.yolo.utils.torch_utils import de_parallel, select_device, smart_inference_mode + + +class BaseValidator: + """ + BaseValidator + + A base class for creating validators. + + Attributes: + dataloader (DataLoader): Dataloader to use for validation. + pbar (tqdm): Progress bar to update during validation. + args (SimpleNamespace): Configuration for the validator. 
+ model (nn.Module): Model to validate. + data (dict): Data dictionary. + device (torch.device): Device to use for validation. + batch_i (int): Current batch index. + training (bool): Whether the model is in training mode. + speed (float): Batch processing speed in seconds. + jdict (dict): Dictionary to store validation results. + save_dir (Path): Directory to save results. + """ + + def __init__(self, dataloader=None, save_dir=None, pbar=None, args=None, _callbacks=None): + """ + Initializes a BaseValidator instance. + + Args: + dataloader (torch.utils.data.DataLoader): Dataloader to be used for validation. + save_dir (Path): Directory to save results. + pbar (tqdm.tqdm): Progress bar for displaying progress. + args (SimpleNamespace): Configuration for the validator. + """ + self.dataloader = dataloader + self.pbar = pbar + self.args = args or get_cfg(DEFAULT_CFG) + self.model = None + self.data = None + self.device = None + self.batch_i = None + self.training = True + self.speed = {'preprocess': 0.0, 'inference': 0.0, 'loss': 0.0, 'postprocess': 0.0} + self.jdict = None + + project = self.args.project or Path(SETTINGS['runs_dir']) / self.args.task + name = self.args.name or f'{self.args.mode}' + self.save_dir = save_dir or increment_path(Path(project) / name, + exist_ok=self.args.exist_ok if RANK in (-1, 0) else True) + (self.save_dir / 'labels' if self.args.save_txt else self.save_dir).mkdir(parents=True, exist_ok=True) + + if self.args.conf is None: + self.args.conf = 0.001 # default conf=0.001 + + self.plots = {} + self.callbacks = _callbacks or callbacks.get_default_callbacks() + + @smart_inference_mode() + def __call__(self, trainer=None, model=None): + """ + Supports validation of a pre-trained model if passed or a model being trained + if trainer is passed (trainer gets priority). 
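+ In standalone mode the model is wrapped in AutoBackend, so exported formats (ONNX, TensorRT, etc.) can be validated as well.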
+ """ + self.training = trainer is not None + if self.training: + self.device = trainer.device + self.data = trainer.data + model = trainer.ema.ema or trainer.model + self.args.half = self.device.type != 'cpu' # force FP16 val during training + model = model.half() if self.args.half else model.float() + self.model = model + self.loss = torch.zeros_like(trainer.loss_items, device=trainer.device) + self.args.plots = trainer.stopper.possible_stop or (trainer.epoch == trainer.epochs - 1) + model.eval() + else: + callbacks.add_integration_callbacks(self) + self.run_callbacks('on_val_start') + assert model is not None, 'Either trainer or model is needed for validation' + self.device = select_device(self.args.device, self.args.batch) + self.args.half &= self.device.type != 'cpu' + model = AutoBackend(model, device=self.device, dnn=self.args.dnn, data=self.args.data, fp16=self.args.half) + self.model = model + stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine + imgsz = check_imgsz(self.args.imgsz, stride=stride) + if engine: + self.args.batch = model.batch_size + else: + self.device = model.device + if not pt and not jit: + self.args.batch = 1 # export.py models default to batch-size 1 + LOGGER.info(f'Forcing batch=1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models') + + if isinstance(self.args.data, str) and self.args.data.endswith('.yaml'): + self.data = check_det_dataset(self.args.data) + elif self.args.task == 'classify': + self.data = check_cls_dataset(self.args.data, split=self.args.split) + else: + raise FileNotFoundError(emojis(f"Dataset '{self.args.data}' for task={self.args.task} not found ❌")) + + if self.device.type == 'cpu': + self.args.workers = 0 # faster CPU val as time dominated by inference, not dataloading + if not pt: + self.args.rect = False + self.dataloader = self.dataloader or self.get_dataloader(self.data.get(self.args.split), self.args.batch) + + model.eval() + model.warmup(imgsz=(1 if pt else self.args.batch, 3, imgsz, imgsz)) # warmup + + dt = Profile(), Profile(), Profile(), Profile() + n_batches = len(self.dataloader) + desc = self.get_desc() + # NOTE: keeping `not self.training` in tqdm will eliminate pbar after segmentation evaluation during training, + # which may affect classification task since this arg is in yolov5/classify/val.py. 
+ # bar = tqdm(self.dataloader, desc, n_batches, not self.training, bar_format=TQDM_BAR_FORMAT) + bar = tqdm(self.dataloader, desc, n_batches, bar_format=TQDM_BAR_FORMAT) + self.init_metrics(de_parallel(model)) + self.jdict = [] # empty before each val + for batch_i, batch in enumerate(bar): + self.run_callbacks('on_val_batch_start') + self.batch_i = batch_i + # Preprocess + with dt[0]: + batch = self.preprocess(batch) + + # Inference + with dt[1]: + preds = model(batch['img'], augment=self.args.augment) + + # Loss + with dt[2]: + if self.training: + self.loss += model.loss(batch, preds)[1] + + # Postprocess + with dt[3]: + preds = self.postprocess(preds) + + self.update_metrics(preds, batch) + if self.args.plots and batch_i < 3: + self.plot_val_samples(batch, batch_i) + self.plot_predictions(batch, preds, batch_i) + + self.run_callbacks('on_val_batch_end') + stats = self.get_stats() + self.check_stats(stats) + self.speed = dict(zip(self.speed.keys(), (x.t / len(self.dataloader.dataset) * 1E3 for x in dt))) + self.finalize_metrics() + self.print_results() + self.run_callbacks('on_val_end') + if self.training: + model.float() + results = {**stats, **trainer.label_loss_items(self.loss.cpu() / len(self.dataloader), prefix='val')} + return {k: round(float(v), 5) for k, v in results.items()} # return results as 5 decimal place floats + else: + LOGGER.info('Speed: %.1fms preprocess, %.1fms inference, %.1fms loss, %.1fms postprocess per image' % + tuple(self.speed.values())) + if self.args.save_json and self.jdict: + with open(str(self.save_dir / 'predictions.json'), 'w') as f: + LOGGER.info(f'Saving {f.name}...') + json.dump(self.jdict, f) # flatten and save + stats = self.eval_json(stats) # update stats + if self.args.plots or self.args.save_json: + LOGGER.info(f"Results saved to {colorstr('bold', self.save_dir)}") + return stats + + def add_callback(self, event: str, callback): + """Appends the given callback.""" + self.callbacks[event].append(callback) + + def run_callbacks(self, event: str): + """Runs all callbacks associated with a specified event.""" + for callback in self.callbacks.get(event, []): + callback(self) + + def get_dataloader(self, dataset_path, batch_size): + """Get data loader from dataset path and batch size.""" + raise NotImplementedError('get_dataloader function not implemented for this validator') + + def build_dataset(self, img_path): + """Build dataset""" + raise NotImplementedError('build_dataset function not implemented in validator') + + def preprocess(self, batch): + """Preprocesses an input batch.""" + return batch + + def postprocess(self, preds): + """Describes and summarizes the purpose of 'postprocess()' but no details mentioned.""" + return preds + + def init_metrics(self, model): + """Initialize performance metrics for the YOLO model.""" + pass + + def update_metrics(self, preds, batch): + """Updates metrics based on predictions and batch.""" + pass + + def finalize_metrics(self, *args, **kwargs): + """Finalizes and returns all metrics.""" + pass + + def get_stats(self): + """Returns statistics about the model's performance.""" + return {} + + def check_stats(self, stats): + """Checks statistics.""" + pass + + def print_results(self): + """Prints the results of the model's predictions.""" + pass + + def get_desc(self): + """Get description of the YOLO model.""" + pass + + @property + def metric_keys(self): + """Returns the metric keys used in YOLO training/validation.""" + return [] + + def on_plot(self, name, data=None): + """Registers plots (e.g. 
to be consumed in callbacks)""" + self.plots[name] = {'data': data, 'timestamp': time.time()} + + # TODO: may need to put these following functions into callback + def plot_val_samples(self, batch, ni): + """Plots validation samples during training.""" + pass + + def plot_predictions(self, batch, preds, ni): + """Plots YOLO model predictions on batch images.""" + pass + + def pred_to_json(self, preds, batch): + """Convert predictions to JSON format.""" + pass + + def eval_json(self, stats): + """Evaluate and return JSON format of prediction statistics.""" + pass diff --git a/ultralytics/yolo/fastsam/__init__.py b/ultralytics/yolo/fastsam/__init__.py new file mode 100644 index 0000000..8f47772 --- /dev/null +++ b/ultralytics/yolo/fastsam/__init__.py @@ -0,0 +1,8 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +from .model import FastSAM +from .predict import FastSAMPredictor +from .prompt import FastSAMPrompt +from .val import FastSAMValidator + +__all__ = 'FastSAMPredictor', 'FastSAM', 'FastSAMPrompt', 'FastSAMValidator' diff --git a/ultralytics/yolo/fastsam/model.py b/ultralytics/yolo/fastsam/model.py new file mode 100644 index 0000000..36c7d42 --- /dev/null +++ b/ultralytics/yolo/fastsam/model.py @@ -0,0 +1,111 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +""" +FastSAM model interface. + +Usage - Predict: + from ultralytics import FastSAM + + model = FastSAM('last.pt') + results = model.predict('ultralytics/assets/bus.jpg') +""" + +from ultralytics.yolo.cfg import get_cfg +from ultralytics.yolo.engine.exporter import Exporter +from ultralytics.yolo.engine.model import YOLO +from ultralytics.yolo.utils import DEFAULT_CFG, LOGGER, ROOT, is_git_dir +from ultralytics.yolo.utils.checks import check_imgsz + +from ...yolo.utils.torch_utils import model_info, smart_inference_mode +from .predict import FastSAMPredictor + + +class FastSAM(YOLO): + + def __init__(self, model='FastSAM-x.pt'): + """Call the __init__ method of the parent class (YOLO) with the updated default model""" + if model == 'FastSAM.pt': + model = 'FastSAM-x.pt' + super().__init__(model=model) + # any additional initialization code for FastSAM + + @smart_inference_mode() + def predict(self, source=None, stream=False, **kwargs): + """ + Perform prediction using the YOLO model. + + Args: + source (str | int | PIL | np.ndarray): The source of the image to make predictions on. + Accepts all source types accepted by the YOLO model. + stream (bool): Whether to stream the predictions or not. Defaults to False. + **kwargs : Additional keyword arguments passed to the predictor. + Check the 'configuration' section in the documentation for all available options. + + Returns: + (List[ultralytics.yolo.engine.results.Results]): The prediction results. + """ + if source is None: + source = ROOT / 'assets' if is_git_dir() else 'https://ultralytics.com/images/bus.jpg' + LOGGER.warning(f"WARNING ⚠️ 'source' is missing. 
Using 'source={source}'.") + overrides = self.overrides.copy() + overrides['conf'] = 0.25 + overrides.update(kwargs) # prefer kwargs + overrides['mode'] = kwargs.get('mode', 'predict') + assert overrides['mode'] in ['track', 'predict'] + overrides['save'] = kwargs.get('save', False) # do not save by default if called in Python + self.predictor = FastSAMPredictor(overrides=overrides) + self.predictor.setup_model(model=self.model, verbose=False) + + return self.predictor(source, stream=stream) + + def train(self, **kwargs): + """Function trains models but raises an error as FastSAM models do not support training.""" + raise NotImplementedError("FastSAM models don't support training") + + def val(self, **kwargs): + """Run validation given dataset.""" + overrides = dict(task='segment', mode='val') + overrides.update(kwargs) # prefer kwargs + args = get_cfg(cfg=DEFAULT_CFG, overrides=overrides) + args.imgsz = check_imgsz(args.imgsz, max_dim=1) + validator = FastSAM(args=args) + validator(model=self.model) + self.metrics = validator.metrics + return validator.metrics + + @smart_inference_mode() + def export(self, **kwargs): + """ + Export model. + + Args: + **kwargs : Any other args accepted by the predictors. To see all args check 'configuration' section in docs + """ + overrides = dict(task='detect') + overrides.update(kwargs) + overrides['mode'] = 'export' + args = get_cfg(cfg=DEFAULT_CFG, overrides=overrides) + args.task = self.task + if args.imgsz == DEFAULT_CFG.imgsz: + args.imgsz = self.model.args['imgsz'] # use trained imgsz unless custom value is passed + if args.batch == DEFAULT_CFG.batch: + args.batch = 1 # default to 1 if not modified + return Exporter(overrides=args)(model=self.model) + + def info(self, detailed=False, verbose=True): + """ + Logs model info. + + Args: + detailed (bool): Show detailed information about model. + verbose (bool): Controls verbosity. + """ + return model_info(self.model, detailed=detailed, verbose=verbose, imgsz=640) + + def __call__(self, source=None, stream=False, **kwargs): + """Calls the 'predict' function with given arguments to perform object detection.""" + return self.predict(source, stream, **kwargs) + + def __getattr__(self, attr): + """Raises error if object has no requested attribute.""" + name = self.__class__.__name__ + raise AttributeError(f"'{name}' object has no attribute '{attr}'. 
See valid attributes below.\n{self.__doc__}") diff --git a/ultralytics/yolo/fastsam/predict.py b/ultralytics/yolo/fastsam/predict.py new file mode 100644 index 0000000..0a6ac27 --- /dev/null +++ b/ultralytics/yolo/fastsam/predict.py @@ -0,0 +1,53 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +import torch + +from ultralytics.yolo.engine.results import Results +from ultralytics.yolo.fastsam.utils import bbox_iou +from ultralytics.yolo.utils import DEFAULT_CFG, ops +from ultralytics.yolo.v8.detect.predict import DetectionPredictor + + +class FastSAMPredictor(DetectionPredictor): + + def __init__(self, cfg=DEFAULT_CFG, overrides=None, _callbacks=None): + super().__init__(cfg, overrides, _callbacks) + self.args.task = 'segment' + + def postprocess(self, preds, img, orig_imgs): + """TODO: filter by classes.""" + p = ops.non_max_suppression(preds[0], + self.args.conf, + self.args.iou, + agnostic=self.args.agnostic_nms, + max_det=self.args.max_det, + nc=len(self.model.names), + classes=self.args.classes) + full_box = torch.zeros_like(p[0][0]) + full_box[2], full_box[3], full_box[4], full_box[6:] = img.shape[3], img.shape[2], 1.0, 1.0 + full_box = full_box.view(1, -1) + critical_iou_index = bbox_iou(full_box[0][:4], p[0][:, :4], iou_thres=0.9, image_shape=img.shape[2:]) + if critical_iou_index.numel() != 0: + full_box[0][4] = p[0][critical_iou_index][:, 4] + full_box[0][6:] = p[0][critical_iou_index][:, 6:] + p[0][critical_iou_index] = full_box + results = [] + proto = preds[1][-1] if len(preds[1]) == 3 else preds[1] # second output is len 3 if pt, but only 1 if exported + for i, pred in enumerate(p): + orig_img = orig_imgs[i] if isinstance(orig_imgs, list) else orig_imgs + path = self.batch[0] + img_path = path[i] if isinstance(path, list) else path + if not len(pred): # save empty boxes + results.append(Results(orig_img=orig_img, path=img_path, names=self.model.names, boxes=pred[:, :6])) + continue + if self.args.retina_masks: + if not isinstance(orig_imgs, torch.Tensor): + pred[:, :4] = ops.scale_boxes(img.shape[2:], pred[:, :4], orig_img.shape) + masks = ops.process_mask_native(proto[i], pred[:, 6:], pred[:, :4], orig_img.shape[:2]) # HWC + else: + masks = ops.process_mask(proto[i], pred[:, 6:], pred[:, :4], img.shape[2:], upsample=True) # HWC + if not isinstance(orig_imgs, torch.Tensor): + pred[:, :4] = ops.scale_boxes(img.shape[2:], pred[:, :4], orig_img.shape) + results.append( + Results(orig_img=orig_img, path=img_path, names=self.model.names, boxes=pred[:, :6], masks=masks)) + return results diff --git a/ultralytics/yolo/fastsam/prompt.py b/ultralytics/yolo/fastsam/prompt.py new file mode 100644 index 0000000..d34968d --- /dev/null +++ b/ultralytics/yolo/fastsam/prompt.py @@ -0,0 +1,406 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +import os + +import cv2 +import matplotlib.pyplot as plt +import numpy as np +import torch +from PIL import Image + + +class FastSAMPrompt: + + def __init__(self, img_path, results, device='cuda') -> None: + # self.img_path = img_path + self.device = device + self.results = results + self.img_path = img_path + self.ori_img = cv2.imread(img_path) + + # Import and assign clip + try: + import clip # for linear_assignment + except ImportError: + from ultralytics.yolo.utils.checks import check_requirements + check_requirements('git+https://github.com/openai/CLIP.git') # required before installing lap from source + import clip + self.clip = clip + + @staticmethod + def _segment_image(image, bbox): + image_array = np.array(image) + segmented_image_array = 
np.zeros_like(image_array) + x1, y1, x2, y2 = bbox + segmented_image_array[y1:y2, x1:x2] = image_array[y1:y2, x1:x2] + segmented_image = Image.fromarray(segmented_image_array) + black_image = Image.new('RGB', image.size, (255, 255, 255)) + # transparency_mask = np.zeros_like((), dtype=np.uint8) + transparency_mask = np.zeros((image_array.shape[0], image_array.shape[1]), dtype=np.uint8) + transparency_mask[y1:y2, x1:x2] = 255 + transparency_mask_image = Image.fromarray(transparency_mask, mode='L') + black_image.paste(segmented_image, mask=transparency_mask_image) + return black_image + + @staticmethod + def _format_results(result, filter=0): + annotations = [] + n = len(result.masks.data) + for i in range(n): + mask = result.masks.data[i] == 1.0 + + if torch.sum(mask) < filter: + continue + annotation = { + 'id': i, + 'segmentation': mask.cpu().numpy(), + 'bbox': result.boxes.data[i], + 'score': result.boxes.conf[i]} + annotation['area'] = annotation['segmentation'].sum() + annotations.append(annotation) + return annotations + + @staticmethod + def filter_masks(annotations): # filter the overlap mask + annotations.sort(key=lambda x: x['area'], reverse=True) + to_remove = set() + for i in range(len(annotations)): + a = annotations[i] + for j in range(i + 1, len(annotations)): + b = annotations[j] + if i != j and j not in to_remove and b['area'] < a['area'] and \ + (a['segmentation'] & b['segmentation']).sum() / b['segmentation'].sum() > 0.8: + to_remove.add(j) + + return [a for i, a in enumerate(annotations) if i not in to_remove], to_remove + + @staticmethod + def _get_bbox_from_mask(mask): + mask = mask.astype(np.uint8) + contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) + x1, y1, w, h = cv2.boundingRect(contours[0]) + x2, y2 = x1 + w, y1 + h + if len(contours) > 1: + for b in contours: + x_t, y_t, w_t, h_t = cv2.boundingRect(b) + # 将多个bbox合并成一个 + x1 = min(x1, x_t) + y1 = min(y1, y_t) + x2 = max(x2, x_t + w_t) + y2 = max(y2, y_t + h_t) + h = y2 - y1 + w = x2 - x1 + return [x1, y1, x2, y2] + + def plot(self, + annotations, + output, + bbox=None, + points=None, + point_label=None, + mask_random_color=True, + better_quality=True, + retina=False, + withContours=True): + if isinstance(annotations[0], dict): + annotations = [annotation['segmentation'] for annotation in annotations] + result_name = os.path.basename(self.img_path) + image = self.ori_img + image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) + original_h = image.shape[0] + original_w = image.shape[1] + # for macOS only + # plt.switch_backend('TkAgg') + plt.figure(figsize=(original_w / 100, original_h / 100)) + # Add subplot with no margin. 
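+ # (top/bottom/right/left of 1/0/1/0 remove all figure margins so the image fills the entire canvas)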
+ plt.subplots_adjust(top=1, bottom=0, right=1, left=0, hspace=0, wspace=0) + plt.margins(0, 0) + plt.gca().xaxis.set_major_locator(plt.NullLocator()) + plt.gca().yaxis.set_major_locator(plt.NullLocator()) + + plt.imshow(image) + if better_quality: + if isinstance(annotations[0], torch.Tensor): + annotations = np.array(annotations.cpu()) + for i, mask in enumerate(annotations): + mask = cv2.morphologyEx(mask.astype(np.uint8), cv2.MORPH_CLOSE, np.ones((3, 3), np.uint8)) + annotations[i] = cv2.morphologyEx(mask.astype(np.uint8), cv2.MORPH_OPEN, np.ones((8, 8), np.uint8)) + if self.device == 'cpu': + annotations = np.array(annotations) + self.fast_show_mask( + annotations, + plt.gca(), + random_color=mask_random_color, + bbox=bbox, + points=points, + pointlabel=point_label, + retinamask=retina, + target_height=original_h, + target_width=original_w, + ) + else: + if isinstance(annotations[0], np.ndarray): + annotations = torch.from_numpy(annotations) + self.fast_show_mask_gpu( + annotations, + plt.gca(), + random_color=mask_random_color, + bbox=bbox, + points=points, + pointlabel=point_label, + retinamask=retina, + target_height=original_h, + target_width=original_w, + ) + if isinstance(annotations, torch.Tensor): + annotations = annotations.cpu().numpy() + if withContours: + contour_all = [] + temp = np.zeros((original_h, original_w, 1)) + for i, mask in enumerate(annotations): + if type(mask) == dict: + mask = mask['segmentation'] + annotation = mask.astype(np.uint8) + if not retina: + annotation = cv2.resize( + annotation, + (original_w, original_h), + interpolation=cv2.INTER_NEAREST, + ) + contours, hierarchy = cv2.findContours(annotation, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) + contour_all.extend(iter(contours)) + cv2.drawContours(temp, contour_all, -1, (255, 255, 255), 2) + color = np.array([0 / 255, 0 / 255, 1.0, 0.8]) + contour_mask = temp / 255 * color.reshape(1, 1, -1) + plt.imshow(contour_mask) + + save_path = output + if not os.path.exists(save_path): + os.makedirs(save_path) + plt.axis('off') + fig = plt.gcf() + plt.draw() + + try: + buf = fig.canvas.tostring_rgb() + except AttributeError: + fig.canvas.draw() + buf = fig.canvas.tostring_rgb() + cols, rows = fig.canvas.get_width_height() + img_array = np.frombuffer(buf, dtype=np.uint8).reshape(rows, cols, 3) + cv2.imwrite(os.path.join(save_path, result_name), cv2.cvtColor(img_array, cv2.COLOR_RGB2BGR)) + + # CPU post process + def fast_show_mask( + self, + annotation, + ax, + random_color=False, + bbox=None, + points=None, + pointlabel=None, + retinamask=True, + target_height=960, + target_width=960, + ): + msak_sum = annotation.shape[0] + height = annotation.shape[1] + weight = annotation.shape[2] + # 将annotation 按照面积 排序 + areas = np.sum(annotation, axis=(1, 2)) + sorted_indices = np.argsort(areas) + annotation = annotation[sorted_indices] + + index = (annotation != 0).argmax(axis=0) + if random_color: + color = np.random.random((msak_sum, 1, 1, 3)) + else: + color = np.ones((msak_sum, 1, 1, 3)) * np.array([30 / 255, 144 / 255, 1.0]) + transparency = np.ones((msak_sum, 1, 1, 1)) * 0.6 + visual = np.concatenate([color, transparency], axis=-1) + mask_image = np.expand_dims(annotation, -1) * visual + + show = np.zeros((height, weight, 4)) + h_indices, w_indices = np.meshgrid(np.arange(height), np.arange(weight), indexing='ij') + indices = (index[h_indices, w_indices], h_indices, w_indices, slice(None)) + # 使用向量化索引更新show的值 + show[h_indices, w_indices, :] = mask_image[indices] + if bbox is not None: + x1, y1, x2, y2 = bbox + 
ax.add_patch(plt.Rectangle((x1, y1), x2 - x1, y2 - y1, fill=False, edgecolor='b', linewidth=1)) + # draw point + if points is not None: + plt.scatter( + [point[0] for i, point in enumerate(points) if pointlabel[i] == 1], + [point[1] for i, point in enumerate(points) if pointlabel[i] == 1], + s=20, + c='y', + ) + plt.scatter( + [point[0] for i, point in enumerate(points) if pointlabel[i] == 0], + [point[1] for i, point in enumerate(points) if pointlabel[i] == 0], + s=20, + c='m', + ) + + if not retinamask: + show = cv2.resize(show, (target_width, target_height), interpolation=cv2.INTER_NEAREST) + ax.imshow(show) + + def fast_show_mask_gpu( + self, + annotation, + ax, + random_color=False, + bbox=None, + points=None, + pointlabel=None, + retinamask=True, + target_height=960, + target_width=960, + ): + msak_sum = annotation.shape[0] + height = annotation.shape[1] + weight = annotation.shape[2] + areas = torch.sum(annotation, dim=(1, 2)) + sorted_indices = torch.argsort(areas, descending=False) + annotation = annotation[sorted_indices] + # 找每个位置第一个非零值下标 + index = (annotation != 0).to(torch.long).argmax(dim=0) + if random_color: + color = torch.rand((msak_sum, 1, 1, 3)).to(annotation.device) + else: + color = torch.ones((msak_sum, 1, 1, 3)).to(annotation.device) * torch.tensor([30 / 255, 144 / 255, 1.0]).to( + annotation.device) + transparency = torch.ones((msak_sum, 1, 1, 1)).to(annotation.device) * 0.6 + visual = torch.cat([color, transparency], dim=-1) + mask_image = torch.unsqueeze(annotation, -1) * visual + # 按index取数,index指每个位置选哪个batch的数,把mask_image转成一个batch的形式 + show = torch.zeros((height, weight, 4)).to(annotation.device) + h_indices, w_indices = torch.meshgrid(torch.arange(height), torch.arange(weight), indexing='ij') + indices = (index[h_indices, w_indices], h_indices, w_indices, slice(None)) + # 使用向量化索引更新show的值 + show[h_indices, w_indices, :] = mask_image[indices] + show_cpu = show.cpu().numpy() + if bbox is not None: + x1, y1, x2, y2 = bbox + ax.add_patch(plt.Rectangle((x1, y1), x2 - x1, y2 - y1, fill=False, edgecolor='b', linewidth=1)) + # draw point + if points is not None: + plt.scatter( + [point[0] for i, point in enumerate(points) if pointlabel[i] == 1], + [point[1] for i, point in enumerate(points) if pointlabel[i] == 1], + s=20, + c='y', + ) + plt.scatter( + [point[0] for i, point in enumerate(points) if pointlabel[i] == 0], + [point[1] for i, point in enumerate(points) if pointlabel[i] == 0], + s=20, + c='m', + ) + if not retinamask: + show_cpu = cv2.resize(show_cpu, (target_width, target_height), interpolation=cv2.INTER_NEAREST) + ax.imshow(show_cpu) + + # clip + @torch.no_grad() + def retrieve(self, model, preprocess, elements, search_text: str, device) -> int: + preprocessed_images = [preprocess(image).to(device) for image in elements] + tokenized_text = self.clip.tokenize([search_text]).to(device) + stacked_images = torch.stack(preprocessed_images) + image_features = model.encode_image(stacked_images) + text_features = model.encode_text(tokenized_text) + image_features /= image_features.norm(dim=-1, keepdim=True) + text_features /= text_features.norm(dim=-1, keepdim=True) + probs = 100.0 * image_features @ text_features.T + return probs[:, 0].softmax(dim=0) + + def _crop_image(self, format_results): + + image = Image.fromarray(cv2.cvtColor(self.ori_img, cv2.COLOR_BGR2RGB)) + ori_w, ori_h = image.size + annotations = format_results + mask_h, mask_w = annotations[0]['segmentation'].shape + if ori_w != mask_w or ori_h != mask_h: + image = image.resize((mask_w, mask_h)) + 
cropped_boxes = [] + cropped_images = [] + not_crop = [] + filter_id = [] + # annotations, _ = filter_masks(annotations) + # filter_id = list(_) + for _, mask in enumerate(annotations): + if np.sum(mask['segmentation']) <= 100: + filter_id.append(_) + continue + bbox = self._get_bbox_from_mask(mask['segmentation']) # mask 的 bbox + cropped_boxes.append(self._segment_image(image, bbox)) # 保存裁剪的图片 + # cropped_boxes.append(segment_image(image,mask["segmentation"])) + cropped_images.append(bbox) # 保存裁剪的图片的bbox + + return cropped_boxes, cropped_images, not_crop, filter_id, annotations + + def box_prompt(self, bbox): + + assert (bbox[2] != 0 and bbox[3] != 0) + masks = self.results[0].masks.data + target_height = self.ori_img.shape[0] + target_width = self.ori_img.shape[1] + h = masks.shape[1] + w = masks.shape[2] + if h != target_height or w != target_width: + bbox = [ + int(bbox[0] * w / target_width), + int(bbox[1] * h / target_height), + int(bbox[2] * w / target_width), + int(bbox[3] * h / target_height), ] + bbox[0] = max(round(bbox[0]), 0) + bbox[1] = max(round(bbox[1]), 0) + bbox[2] = min(round(bbox[2]), w) + bbox[3] = min(round(bbox[3]), h) + + # IoUs = torch.zeros(len(masks), dtype=torch.float32) + bbox_area = (bbox[3] - bbox[1]) * (bbox[2] - bbox[0]) + + masks_area = torch.sum(masks[:, bbox[1]:bbox[3], bbox[0]:bbox[2]], dim=(1, 2)) + orig_masks_area = torch.sum(masks, dim=(1, 2)) + + union = bbox_area + orig_masks_area - masks_area + IoUs = masks_area / union + max_iou_index = torch.argmax(IoUs) + + return np.array([masks[max_iou_index].cpu().numpy()]) + + def point_prompt(self, points, pointlabel): # numpy 处理 + + masks = self._format_results(self.results[0], 0) + target_height = self.ori_img.shape[0] + target_width = self.ori_img.shape[1] + h = masks[0]['segmentation'].shape[0] + w = masks[0]['segmentation'].shape[1] + if h != target_height or w != target_width: + points = [[int(point[0] * w / target_width), int(point[1] * h / target_height)] for point in points] + onemask = np.zeros((h, w)) + for i, annotation in enumerate(masks): + mask = annotation['segmentation'] if type(annotation) == dict else annotation + for i, point in enumerate(points): + if mask[point[1], point[0]] == 1 and pointlabel[i] == 1: + onemask += mask + if mask[point[1], point[0]] == 1 and pointlabel[i] == 0: + onemask -= mask + onemask = onemask >= 1 + return np.array([onemask]) + + def text_prompt(self, text): + format_results = self._format_results(self.results[0], 0) + cropped_boxes, cropped_images, not_crop, filter_id, annotations = self._crop_image(format_results) + clip_model, preprocess = self.clip.load('ViT-B/32', device=self.device) + scores = self.retrieve(clip_model, preprocess, cropped_boxes, text, device=self.device) + max_idx = scores.argsort() + max_idx = max_idx[-1] + max_idx += sum(np.array(filter_id) <= int(max_idx)) + return np.array([annotations[max_idx]['segmentation']]) + + def everything_prompt(self): + return self.results[0].masks.data diff --git a/ultralytics/yolo/fastsam/utils.py b/ultralytics/yolo/fastsam/utils.py new file mode 100644 index 0000000..dcc71dc --- /dev/null +++ b/ultralytics/yolo/fastsam/utils.py @@ -0,0 +1,64 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +import torch + + +def adjust_bboxes_to_image_border(boxes, image_shape, threshold=20): + """ + Adjust bounding boxes to stick to image border if they are within a certain threshold. 
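+ Box edges lying within `threshold` pixels of the image border are snapped to the border; the tensor is modified in place.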
+ + Args: + boxes (torch.Tensor): (n, 4) + image_shape (tuple): (height, width) + threshold (int): pixel threshold + + Returns: + adjusted_boxes (torch.Tensor): adjusted bounding boxes + """ + + # Image dimensions + h, w = image_shape + + # Adjust boxes + boxes[boxes[:, 0] < threshold, 0] = 0 # x1 + boxes[boxes[:, 1] < threshold, 1] = 0 # y1 + boxes[boxes[:, 2] > w - threshold, 2] = w # x2 + boxes[boxes[:, 3] > h - threshold, 3] = h # y2 + return boxes + + +def bbox_iou(box1, boxes, iou_thres=0.9, image_shape=(640, 640), raw_output=False): + """ + Compute the Intersection-Over-Union of a bounding box with respect to an array of other bounding boxes. + + Args: + box1 (torch.Tensor): (4, ) + boxes (torch.Tensor): (n, 4) + + Returns: + high_iou_indices (torch.Tensor): Indices of boxes with IoU > thres + """ + boxes = adjust_bboxes_to_image_border(boxes, image_shape) + # obtain coordinates for intersections + x1 = torch.max(box1[0], boxes[:, 0]) + y1 = torch.max(box1[1], boxes[:, 1]) + x2 = torch.min(box1[2], boxes[:, 2]) + y2 = torch.min(box1[3], boxes[:, 3]) + + # compute the area of intersection + intersection = (x2 - x1).clamp(0) * (y2 - y1).clamp(0) + + # compute the area of both individual boxes + box1_area = (box1[2] - box1[0]) * (box1[3] - box1[1]) + box2_area = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) + + # compute the area of union + union = box1_area + box2_area - intersection + + # compute the IoU + iou = intersection / union # Should be shape (n, ) + if raw_output: + return 0 if iou.numel() == 0 else iou + + # return indices of boxes with IoU > thres + return torch.nonzero(iou > iou_thres).flatten() diff --git a/ultralytics/yolo/fastsam/val.py b/ultralytics/yolo/fastsam/val.py new file mode 100644 index 0000000..250bd5e --- /dev/null +++ b/ultralytics/yolo/fastsam/val.py @@ -0,0 +1,244 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +from multiprocessing.pool import ThreadPool +from pathlib import Path + +import numpy as np +import torch +import torch.nn.functional as F + +from ultralytics.yolo.utils import LOGGER, NUM_THREADS, ops +from ultralytics.yolo.utils.checks import check_requirements +from ultralytics.yolo.utils.metrics import SegmentMetrics, box_iou, mask_iou +from ultralytics.yolo.utils.plotting import output_to_target, plot_images +from ultralytics.yolo.v8.detect import DetectionValidator + + +class FastSAMValidator(DetectionValidator): + + def __init__(self, dataloader=None, save_dir=None, pbar=None, args=None, _callbacks=None): + """Initialize SegmentationValidator and set task to 'segment', metrics to SegmentMetrics.""" + super().__init__(dataloader, save_dir, pbar, args, _callbacks) + self.args.task = 'segment' + self.metrics = SegmentMetrics(save_dir=self.save_dir, on_plot=self.on_plot) + + def preprocess(self, batch): + """Preprocesses batch by converting masks to float and sending to device.""" + batch = super().preprocess(batch) + batch['masks'] = batch['masks'].to(self.device).float() + return batch + + def init_metrics(self, model): + """Initialize metrics and select mask processing function based on save_json flag.""" + super().init_metrics(model) + self.plot_masks = [] + if self.args.save_json: + check_requirements('pycocotools>=2.0.6') + self.process = ops.process_mask_upsample # more accurate + else: + self.process = ops.process_mask # faster + + def get_desc(self): + """Return a formatted description of evaluation metrics.""" + return ('%22s' + '%11s' * 10) % ('Class', 'Images', 'Instances', 'Box(P', 'R', 'mAP50', 'mAP50-95)', 'Mask(P', + 
'R', 'mAP50', 'mAP50-95)') + + def postprocess(self, preds): + """Postprocesses YOLO predictions and returns output detections with proto.""" + p = ops.non_max_suppression(preds[0], + self.args.conf, + self.args.iou, + labels=self.lb, + multi_label=True, + agnostic=self.args.single_cls, + max_det=self.args.max_det, + nc=self.nc) + proto = preds[1][-1] if len(preds[1]) == 3 else preds[1] # second output is len 3 if pt, but only 1 if exported + return p, proto + + def update_metrics(self, preds, batch): + """Metrics.""" + for si, (pred, proto) in enumerate(zip(preds[0], preds[1])): + idx = batch['batch_idx'] == si + cls = batch['cls'][idx] + bbox = batch['bboxes'][idx] + nl, npr = cls.shape[0], pred.shape[0] # number of labels, predictions + shape = batch['ori_shape'][si] + correct_masks = torch.zeros(npr, self.niou, dtype=torch.bool, device=self.device) # init + correct_bboxes = torch.zeros(npr, self.niou, dtype=torch.bool, device=self.device) # init + self.seen += 1 + + if npr == 0: + if nl: + self.stats.append((correct_bboxes, correct_masks, *torch.zeros( + (2, 0), device=self.device), cls.squeeze(-1))) + if self.args.plots: + self.confusion_matrix.process_batch(detections=None, labels=cls.squeeze(-1)) + continue + + # Masks + midx = [si] if self.args.overlap_mask else idx + gt_masks = batch['masks'][midx] + pred_masks = self.process(proto, pred[:, 6:], pred[:, :4], shape=batch['img'][si].shape[1:]) + + # Predictions + if self.args.single_cls: + pred[:, 5] = 0 + predn = pred.clone() + ops.scale_boxes(batch['img'][si].shape[1:], predn[:, :4], shape, + ratio_pad=batch['ratio_pad'][si]) # native-space pred + + # Evaluate + if nl: + height, width = batch['img'].shape[2:] + tbox = ops.xywh2xyxy(bbox) * torch.tensor( + (width, height, width, height), device=self.device) # target boxes + ops.scale_boxes(batch['img'][si].shape[1:], tbox, shape, + ratio_pad=batch['ratio_pad'][si]) # native-space labels + labelsn = torch.cat((cls, tbox), 1) # native-space labels + correct_bboxes = self._process_batch(predn, labelsn) + # TODO: maybe remove these `self.` arguments as they already are member variable + correct_masks = self._process_batch(predn, + labelsn, + pred_masks, + gt_masks, + overlap=self.args.overlap_mask, + masks=True) + if self.args.plots: + self.confusion_matrix.process_batch(predn, labelsn) + + # Append correct_masks, correct_boxes, pconf, pcls, tcls + self.stats.append((correct_bboxes, correct_masks, pred[:, 4], pred[:, 5], cls.squeeze(-1))) + + pred_masks = torch.as_tensor(pred_masks, dtype=torch.uint8) + if self.args.plots and self.batch_i < 3: + self.plot_masks.append(pred_masks[:15].cpu()) # filter top 15 to plot + + # Save + if self.args.save_json: + pred_masks = ops.scale_image(pred_masks.permute(1, 2, 0).contiguous().cpu().numpy(), + shape, + ratio_pad=batch['ratio_pad'][si]) + self.pred_to_json(predn, batch['im_file'][si], pred_masks) + # if self.args.save_txt: + # save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / f'{path.stem}.txt') + + def finalize_metrics(self, *args, **kwargs): + """Sets speed and confusion matrix for evaluation metrics.""" + self.metrics.speed = self.speed + self.metrics.confusion_matrix = self.confusion_matrix + + def _process_batch(self, detections, labels, pred_masks=None, gt_masks=None, overlap=False, masks=False): + """ + Return correct prediction matrix + Arguments: + detections (array[N, 6]), x1, y1, x2, y2, conf, class + labels (array[M, 5]), class, x1, y1, x2, y2 + Returns: + correct (array[N, 10]), for 10 IoU levels + """ + if masks: 
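+ # mask branch: expand ground-truth masks to one per instance, match their resolution to the predictions, then compute mask IoU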
+ if overlap: + nl = len(labels) + index = torch.arange(nl, device=gt_masks.device).view(nl, 1, 1) + 1 + gt_masks = gt_masks.repeat(nl, 1, 1) # shape(1,640,640) -> (n,640,640) + gt_masks = torch.where(gt_masks == index, 1.0, 0.0) + if gt_masks.shape[1:] != pred_masks.shape[1:]: + gt_masks = F.interpolate(gt_masks[None], pred_masks.shape[1:], mode='bilinear', align_corners=False)[0] + gt_masks = gt_masks.gt_(0.5) + iou = mask_iou(gt_masks.view(gt_masks.shape[0], -1), pred_masks.view(pred_masks.shape[0], -1)) + else: # boxes + iou = box_iou(labels[:, 1:], detections[:, :4]) + + correct = np.zeros((detections.shape[0], self.iouv.shape[0])).astype(bool) + correct_class = labels[:, 0:1] == detections[:, 5] + for i in range(len(self.iouv)): + x = torch.where((iou >= self.iouv[i]) & correct_class) # IoU > threshold and classes match + if x[0].shape[0]: + matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), + 1).cpu().numpy() # [label, detect, iou] + if x[0].shape[0] > 1: + matches = matches[matches[:, 2].argsort()[::-1]] + matches = matches[np.unique(matches[:, 1], return_index=True)[1]] + # matches = matches[matches[:, 2].argsort()[::-1]] + matches = matches[np.unique(matches[:, 0], return_index=True)[1]] + correct[matches[:, 1].astype(int), i] = True + return torch.tensor(correct, dtype=torch.bool, device=detections.device) + + def plot_val_samples(self, batch, ni): + """Plots validation samples with bounding box labels.""" + plot_images(batch['img'], + batch['batch_idx'], + batch['cls'].squeeze(-1), + batch['bboxes'], + batch['masks'], + paths=batch['im_file'], + fname=self.save_dir / f'val_batch{ni}_labels.jpg', + names=self.names, + on_plot=self.on_plot) + + def plot_predictions(self, batch, preds, ni): + """Plots batch predictions with masks and bounding boxes.""" + plot_images( + batch['img'], + *output_to_target(preds[0], max_det=15), # not set to self.args.max_det due to slow plotting speed + torch.cat(self.plot_masks, dim=0) if len(self.plot_masks) else self.plot_masks, + paths=batch['im_file'], + fname=self.save_dir / f'val_batch{ni}_pred.jpg', + names=self.names, + on_plot=self.on_plot) # pred + self.plot_masks.clear() + + def pred_to_json(self, predn, filename, pred_masks): + """Save one JSON result.""" + # Example result = {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236} + from pycocotools.mask import encode # noqa + + def single_encode(x): + """Encode predicted masks as RLE and append results to jdict.""" + rle = encode(np.asarray(x[:, :, None], order='F', dtype='uint8'))[0] + rle['counts'] = rle['counts'].decode('utf-8') + return rle + + stem = Path(filename).stem + image_id = int(stem) if stem.isnumeric() else stem + box = ops.xyxy2xywh(predn[:, :4]) # xywh + box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner + pred_masks = np.transpose(pred_masks, (2, 0, 1)) + with ThreadPool(NUM_THREADS) as pool: + rles = pool.map(single_encode, pred_masks) + for i, (p, b) in enumerate(zip(predn.tolist(), box.tolist())): + self.jdict.append({ + 'image_id': image_id, + 'category_id': self.class_map[int(p[5])], + 'bbox': [round(x, 3) for x in b], + 'score': round(p[4], 5), + 'segmentation': rles[i]}) + + def eval_json(self, stats): + """Return COCO-style object detection evaluation metrics.""" + if self.args.save_json and self.is_coco and len(self.jdict): + anno_json = self.data['path'] / 'annotations/instances_val2017.json' # annotations + pred_json = self.save_dir / 'predictions.json' # predictions + LOGGER.info(f'\nEvaluating 
pycocotools mAP using {pred_json} and {anno_json}...') + try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb + check_requirements('pycocotools>=2.0.6') + from pycocotools.coco import COCO # noqa + from pycocotools.cocoeval import COCOeval # noqa + + for x in anno_json, pred_json: + assert x.is_file(), f'{x} file not found' + anno = COCO(str(anno_json)) # init annotations api + pred = anno.loadRes(str(pred_json)) # init predictions api (must pass string, not Path) + for i, eval in enumerate([COCOeval(anno, pred, 'bbox'), COCOeval(anno, pred, 'segm')]): + if self.is_coco: + eval.params.imgIds = [int(Path(x).stem) for x in self.dataloader.dataset.im_files] # im to eval + eval.evaluate() + eval.accumulate() + eval.summarize() + idx = i * 4 + 2 + stats[self.metrics.keys[idx + 1]], stats[ + self.metrics.keys[idx]] = eval.stats[:2] # update mAP50-95 and mAP50 + except Exception as e: + LOGGER.warning(f'pycocotools unable to run: {e}') + return stats diff --git a/ultralytics/yolo/nas/__init__.py b/ultralytics/yolo/nas/__init__.py new file mode 100644 index 0000000..eec3837 --- /dev/null +++ b/ultralytics/yolo/nas/__init__.py @@ -0,0 +1,7 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +from .model import NAS +from .predict import NASPredictor +from .val import NASValidator + +__all__ = 'NASPredictor', 'NASValidator', 'NAS' diff --git a/ultralytics/yolo/nas/model.py b/ultralytics/yolo/nas/model.py new file mode 100644 index 0000000..bfe7dcd --- /dev/null +++ b/ultralytics/yolo/nas/model.py @@ -0,0 +1,133 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +""" +YOLO-NAS model interface. + +Usage - Predict: + from ultralytics import NAS + + model = NAS('yolo_nas_s') + results = model.predict('ultralytics/assets/bus.jpg') +""" + +from pathlib import Path + +import torch + +from ultralytics.yolo.cfg import get_cfg +from ultralytics.yolo.engine.exporter import Exporter +from ultralytics.yolo.utils import DEFAULT_CFG, DEFAULT_CFG_DICT, LOGGER, ROOT, is_git_dir +from ultralytics.yolo.utils.checks import check_imgsz + +from ...yolo.utils.torch_utils import model_info, smart_inference_mode +from .predict import NASPredictor +from .val import NASValidator + + +class NAS: + + def __init__(self, model='yolo_nas_s.pt') -> None: + # Load or create new NAS model + import super_gradients + + self.predictor = None + suffix = Path(model).suffix + if suffix == '.pt': + self._load(model) + elif suffix == '': + self.model = super_gradients.training.models.get(model, pretrained_weights='coco') + self.task = 'detect' + self.model.args = DEFAULT_CFG_DICT # attach args to model + + # Standardize model + self.model.fuse = lambda verbose=True: self.model + self.model.stride = torch.tensor([32]) + self.model.names = dict(enumerate(self.model._class_names)) + self.model.is_fused = lambda: False # for info() + self.model.yaml = {} # for info() + self.model.pt_path = model # for export() + self.model.task = 'detect' # for export() + self.info() + + @smart_inference_mode() + def _load(self, weights: str): + self.model = torch.load(weights) + + @smart_inference_mode() + def predict(self, source=None, stream=False, **kwargs): + """ + Perform prediction using the YOLO model. + + Args: + source (str | int | PIL | np.ndarray): The source of the image to make predictions on. + Accepts all source types accepted by the YOLO model. + stream (bool): Whether to stream the predictions or not. Defaults to False. + **kwargs : Additional keyword arguments passed to the predictor. 
+ Check the 'configuration' section in the documentation for all available options. + + Returns: + (List[ultralytics.yolo.engine.results.Results]): The prediction results. + """ + if source is None: + source = ROOT / 'assets' if is_git_dir() else 'https://ultralytics.com/images/bus.jpg' + LOGGER.warning(f"WARNING ⚠️ 'source' is missing. Using 'source={source}'.") + overrides = dict(conf=0.25, task='detect', mode='predict') + overrides.update(kwargs) # prefer kwargs + if not self.predictor: + self.predictor = NASPredictor(overrides=overrides) + self.predictor.setup_model(model=self.model) + else: # only update args if predictor is already setup + self.predictor.args = get_cfg(self.predictor.args, overrides) + return self.predictor(source, stream=stream) + + def train(self, **kwargs): + """Function trains models but raises an error as NAS models do not support training.""" + raise NotImplementedError("NAS models don't support training") + + def val(self, **kwargs): + """Run validation given dataset.""" + overrides = dict(task='detect', mode='val') + overrides.update(kwargs) # prefer kwargs + args = get_cfg(cfg=DEFAULT_CFG, overrides=overrides) + args.imgsz = check_imgsz(args.imgsz, max_dim=1) + validator = NASValidator(args=args) + validator(model=self.model) + self.metrics = validator.metrics + return validator.metrics + + @smart_inference_mode() + def export(self, **kwargs): + """ + Export model. + + Args: + **kwargs : Any other args accepted by the predictors. To see all args check 'configuration' section in docs + """ + overrides = dict(task='detect') + overrides.update(kwargs) + overrides['mode'] = 'export' + args = get_cfg(cfg=DEFAULT_CFG, overrides=overrides) + args.task = self.task + if args.imgsz == DEFAULT_CFG.imgsz: + args.imgsz = self.model.args['imgsz'] # use trained imgsz unless custom value is passed + if args.batch == DEFAULT_CFG.batch: + args.batch = 1 # default to 1 if not modified + return Exporter(overrides=args)(model=self.model) + + def info(self, detailed=False, verbose=True): + """ + Logs model info. + + Args: + detailed (bool): Show detailed information about model. + verbose (bool): Controls verbosity. + """ + return model_info(self.model, detailed=detailed, verbose=verbose, imgsz=640) + + def __call__(self, source=None, stream=False, **kwargs): + """Calls the 'predict' function with given arguments to perform object detection.""" + return self.predict(source, stream, **kwargs) + + def __getattr__(self, attr): + """Raises error if object has no requested attribute.""" + name = self.__class__.__name__ + raise AttributeError(f"'{name}' object has no attribute '{attr}'. 
See valid attributes below.\n{self.__doc__}") diff --git a/ultralytics/yolo/nas/predict.py b/ultralytics/yolo/nas/predict.py new file mode 100644 index 0000000..e135bc1 --- /dev/null +++ b/ultralytics/yolo/nas/predict.py @@ -0,0 +1,35 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +import torch + +from ultralytics.yolo.engine.predictor import BasePredictor +from ultralytics.yolo.engine.results import Results +from ultralytics.yolo.utils import ops +from ultralytics.yolo.utils.ops import xyxy2xywh + + +class NASPredictor(BasePredictor): + + def postprocess(self, preds_in, img, orig_imgs): + """Postprocesses predictions and returns a list of Results objects.""" + + # Cat boxes and class scores + boxes = xyxy2xywh(preds_in[0][0]) + preds = torch.cat((boxes, preds_in[0][1]), -1).permute(0, 2, 1) + + preds = ops.non_max_suppression(preds, + self.args.conf, + self.args.iou, + agnostic=self.args.agnostic_nms, + max_det=self.args.max_det, + classes=self.args.classes) + + results = [] + for i, pred in enumerate(preds): + orig_img = orig_imgs[i] if isinstance(orig_imgs, list) else orig_imgs + if not isinstance(orig_imgs, torch.Tensor): + pred[:, :4] = ops.scale_boxes(img.shape[2:], pred[:, :4], orig_img.shape) + path = self.batch[0] + img_path = path[i] if isinstance(path, list) else path + results.append(Results(orig_img=orig_img, path=img_path, names=self.model.names, boxes=pred)) + return results diff --git a/ultralytics/yolo/nas/val.py b/ultralytics/yolo/nas/val.py new file mode 100644 index 0000000..474cf6b --- /dev/null +++ b/ultralytics/yolo/nas/val.py @@ -0,0 +1,25 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +import torch + +from ultralytics.yolo.utils import ops +from ultralytics.yolo.utils.ops import xyxy2xywh +from ultralytics.yolo.v8.detect import DetectionValidator + +__all__ = ['NASValidator'] + + +class NASValidator(DetectionValidator): + + def postprocess(self, preds_in): + """Apply Non-maximum suppression to prediction outputs.""" + boxes = xyxy2xywh(preds_in[0][0]) + preds = torch.cat((boxes, preds_in[0][1]), -1).permute(0, 2, 1) + return ops.non_max_suppression(preds, + self.args.conf, + self.args.iou, + labels=self.lb, + multi_label=False, + agnostic=self.args.single_cls, + max_det=self.args.max_det, + max_time_img=0.5) diff --git a/ultralytics/yolo/utils/__init__.py b/ultralytics/yolo/utils/__init__.py new file mode 100644 index 0000000..5160322 --- /dev/null +++ b/ultralytics/yolo/utils/__init__.py @@ -0,0 +1,809 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +import contextlib +import inspect +import logging.config +import os +import platform +import re +import subprocess +import sys +import threading +import urllib +import uuid +from pathlib import Path +from types import SimpleNamespace +from typing import Union + +import cv2 +import matplotlib.pyplot as plt +import numpy as np +import torch +import yaml + +from ultralytics import __version__ + +# PyTorch Multi-GPU DDP Constants +RANK = int(os.getenv('RANK', -1)) +LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html +WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) + +# Other Constants +FILE = Path(__file__).resolve() +ROOT = FILE.parents[2] # YOLO +DEFAULT_CFG_PATH = ROOT / 'yolo/cfg/default.yaml' +NUM_THREADS = min(8, max(1, os.cpu_count() - 1)) # number of YOLOv5 multiprocessing threads +AUTOINSTALL = str(os.getenv('YOLO_AUTOINSTALL', True)).lower() == 'true' # global auto-install mode +VERBOSE = str(os.getenv('YOLO_VERBOSE', True)).lower() == 'true' # global verbose mode 
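# Illustrative sketch (not part of the module above): AUTOINSTALL and VERBOSE are evaluated once at
# import time from the YOLO_AUTOINSTALL / YOLO_VERBOSE environment variables, so any override is
# assumed to happen before 'import ultralytics', e.g.
#   YOLO_VERBOSE=false YOLO_AUTOINSTALL=false python my_script.py   # shell (my_script.py is hypothetical)
# or, from Python:
#   import os
#   os.environ['YOLO_VERBOSE'] = 'false'   # any value whose lowercase form is not 'true' disables verbose mode
#   import ultralytics                      # the constants above pick up the override when the package is imported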
+TQDM_BAR_FORMAT = '{l_bar}{bar:10}{r_bar}' # tqdm bar format +LOGGING_NAME = 'ultralytics' +MACOS, LINUX, WINDOWS = (platform.system() == x for x in ['Darwin', 'Linux', 'Windows']) # environment booleans +ARM64 = platform.machine() in ('arm64', 'aarch64') # ARM64 booleans +HELP_MSG = \ + """ + Usage examples for running YOLOv8: + + 1. Install the ultralytics package: + + pip install ultralytics + + 2. Use the Python SDK: + + from ultralytics import YOLO + + # Load a model + model = YOLO('yolov8n.yaml') # build a new model from scratch + model = YOLO("yolov8n.pt") # load a pretrained model (recommended for training) + + # Use the model + results = model.train(data="coco128.yaml", epochs=3) # train the model + results = model.val() # evaluate model performance on the validation set + results = model('https://ultralytics.com/images/bus.jpg') # predict on an image + success = model.export(format='onnx') # export the model to ONNX format + + 3. Use the command line interface (CLI): + + YOLOv8 'yolo' CLI commands use the following syntax: + + yolo TASK MODE ARGS + + Where TASK (optional) is one of [detect, segment, classify] + MODE (required) is one of [train, val, predict, export] + ARGS (optional) are any number of custom 'arg=value' pairs like 'imgsz=320' that override defaults. + See all ARGS at https://docs.ultralytics.com/usage/cfg or with 'yolo cfg' + + - Train a detection model for 10 epochs with an initial learning_rate of 0.01 + yolo detect train data=coco128.yaml model=yolov8n.pt epochs=10 lr0=0.01 + + - Predict a YouTube video using a pretrained segmentation model at image size 320: + yolo segment predict model=yolov8n-seg.pt source='https://youtu.be/Zgi9g1ksQHc' imgsz=320 + + - Val a pretrained detection model at batch-size 1 and image size 640: + yolo detect val model=yolov8n.pt data=coco128.yaml batch=1 imgsz=640 + + - Export a YOLOv8n classification model to ONNX format at image size 224 by 128 (no TASK required) + yolo export model=yolov8n-cls.pt format=onnx imgsz=224,128 + + - Run special commands: + yolo help + yolo checks + yolo version + yolo settings + yolo copy-cfg + yolo cfg + + Docs: https://docs.ultralytics.com + Community: https://community.ultralytics.com + GitHub: https://github.com/ultralytics/ultralytics + """ + +# Settings +torch.set_printoptions(linewidth=320, precision=4, profile='default') +np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format}) # format short g, %precision=5 +cv2.setNumThreads(0) # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader) +os.environ['NUMEXPR_MAX_THREADS'] = str(NUM_THREADS) # NumExpr max threads +os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8' # for deterministic training +os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # suppress verbose TF compiler warnings in Colab + + +class SimpleClass: + """ + Ultralytics SimpleClass is a base class providing helpful string representation, error reporting, and attribute + access methods for easier debugging and usage. 
+ """ + + def __str__(self): + """Return a human-readable string representation of the object.""" + attr = [] + for a in dir(self): + v = getattr(self, a) + if not callable(v) and not a.startswith('_'): + if isinstance(v, SimpleClass): + # Display only the module and class name for subclasses + s = f'{a}: {v.__module__}.{v.__class__.__name__} object' + else: + s = f'{a}: {repr(v)}' + attr.append(s) + return f'{self.__module__}.{self.__class__.__name__} object with attributes:\n\n' + '\n'.join(attr) + + def __repr__(self): + """Return a machine-readable string representation of the object.""" + return self.__str__() + + def __getattr__(self, attr): + """Custom attribute access error message with helpful information.""" + name = self.__class__.__name__ + raise AttributeError(f"'{name}' object has no attribute '{attr}'. See valid attributes below.\n{self.__doc__}") + + +class IterableSimpleNamespace(SimpleNamespace): + """ + Ultralytics IterableSimpleNamespace is an extension class of SimpleNamespace that adds iterable functionality and + enables usage with dict() and for loops. + """ + + def __iter__(self): + """Return an iterator of key-value pairs from the namespace's attributes.""" + return iter(vars(self).items()) + + def __str__(self): + """Return a human-readable string representation of the object.""" + return '\n'.join(f'{k}={v}' for k, v in vars(self).items()) + + def __getattr__(self, attr): + """Custom attribute access error message with helpful information.""" + name = self.__class__.__name__ + raise AttributeError(f""" + '{name}' object has no attribute '{attr}'. This may be caused by a modified or out of date ultralytics + 'default.yaml' file.\nPlease update your code with 'pip install -U ultralytics' and if necessary replace + {DEFAULT_CFG_PATH} with the latest version from + https://github.com/ultralytics/ultralytics/blob/main/ultralytics/yolo/cfg/default.yaml + """) + + def get(self, key, default=None): + """Return the value of the specified key if it exists; otherwise, return the default value.""" + return getattr(self, key, default) + + +def plt_settings(rcparams=None, backend='Agg'): + """ + Decorator to temporarily set rc parameters and the backend for a plotting function. + + Usage: + decorator: @plt_settings({"font.size": 12}) + context manager: with plt_settings({"font.size": 12}): + + Args: + rcparams (dict): Dictionary of rc parameters to set. + backend (str, optional): Name of the backend to use. Defaults to 'Agg'. + + Returns: + (Callable): Decorated function with temporarily set rc parameters and backend. This decorator can be + applied to any function that needs to have specific matplotlib rc parameters and backend for its execution. 
+ """ + + if rcparams is None: + rcparams = {'font.size': 11} + + def decorator(func): + """Decorator to apply temporary rc parameters and backend to a function.""" + + def wrapper(*args, **kwargs): + """Sets rc parameters and backend, calls the original function, and restores the settings.""" + original_backend = plt.get_backend() + plt.switch_backend(backend) + + with plt.rc_context(rcparams): + result = func(*args, **kwargs) + + plt.switch_backend(original_backend) + return result + + return wrapper + + return decorator + + +def set_logging(name=LOGGING_NAME, verbose=True): + """Sets up logging for the given name.""" + rank = int(os.getenv('RANK', -1)) # rank in world for Multi-GPU trainings + level = logging.INFO if verbose and rank in {-1, 0} else logging.ERROR + logging.config.dictConfig({ + 'version': 1, + 'disable_existing_loggers': False, + 'formatters': { + name: { + 'format': '%(message)s'}}, + 'handlers': { + name: { + 'class': 'logging.StreamHandler', + 'formatter': name, + 'level': level}}, + 'loggers': { + name: { + 'level': level, + 'handlers': [name], + 'propagate': False}}}) + + +def emojis(string=''): + """Return platform-dependent emoji-safe version of string.""" + return string.encode().decode('ascii', 'ignore') if WINDOWS else string + + +class EmojiFilter(logging.Filter): + """ + A custom logging filter class for removing emojis in log messages. + + This filter is particularly useful for ensuring compatibility with Windows terminals + that may not support the display of emojis in log messages. + """ + + def filter(self, record): + """Filter logs by emoji unicode characters on windows.""" + record.msg = emojis(record.msg) + return super().filter(record) + + +# Set logger +set_logging(LOGGING_NAME, verbose=VERBOSE) # run before defining LOGGER +LOGGER = logging.getLogger(LOGGING_NAME) # define globally (used in train.py, val.py, detect.py, etc.) +if WINDOWS: # emoji-safe logging + LOGGER.addFilter(EmojiFilter()) + + +class ThreadingLocked: + """ + A decorator class for ensuring thread-safe execution of a function or method. + This class can be used as a decorator to make sure that if the decorated function + is called from multiple threads, only one thread at a time will be able to execute the function. + + Attributes: + lock (threading.Lock): A lock object used to manage access to the decorated function. + + Usage: + @ThreadingLocked() + def my_function(): + # Your code here + pass + """ + + def __init__(self): + self.lock = threading.Lock() + + def __call__(self, f): + from functools import wraps + + @wraps(f) + def decorated(*args, **kwargs): + with self.lock: + return f(*args, **kwargs) + + return decorated + + +def yaml_save(file='data.yaml', data=None): + """ + Save YAML data to a file. + + Args: + file (str, optional): File name. Default is 'data.yaml'. + data (dict): Data to save in YAML format. + + Returns: + (None): Data is saved to the specified file. + """ + if data is None: + data = {} + file = Path(file) + if not file.parent.exists(): + # Create parent directories if they don't exist + file.parent.mkdir(parents=True, exist_ok=True) + + # Convert Path objects to strings + for k, v in data.items(): + if isinstance(v, Path): + data[k] = str(v) + + # Dump data to file in YAML format + with open(file, 'w') as f: + yaml.safe_dump(data, f, sort_keys=False, allow_unicode=True) + + +def yaml_load(file='data.yaml', append_filename=False): + """ + Load YAML data from a file. + + Args: + file (str, optional): File name. Default is 'data.yaml'. 
+ append_filename (bool): Add the YAML filename to the YAML dictionary. Default is False. + + Returns: + (dict): YAML data and file name. + """ + with open(file, errors='ignore', encoding='utf-8') as f: + s = f.read() # string + + # Remove special characters + if not s.isprintable(): + s = re.sub(r'[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD\U00010000-\U0010ffff]+', '', s) + + # Add YAML filename to dict and return + return {**yaml.safe_load(s), 'yaml_file': str(file)} if append_filename else yaml.safe_load(s) + + +def yaml_print(yaml_file: Union[str, Path, dict]) -> None: + """ + Pretty prints a yaml file or a yaml-formatted dictionary. + + Args: + yaml_file: The file path of the yaml file or a yaml-formatted dictionary. + + Returns: + None + """ + yaml_dict = yaml_load(yaml_file) if isinstance(yaml_file, (str, Path)) else yaml_file + dump = yaml.dump(yaml_dict, sort_keys=False, allow_unicode=True) + LOGGER.info(f"Printing '{colorstr('bold', 'black', yaml_file)}'\n\n{dump}") + + +# Default configuration +DEFAULT_CFG_DICT = yaml_load(DEFAULT_CFG_PATH) +for k, v in DEFAULT_CFG_DICT.items(): + if isinstance(v, str) and v.lower() == 'none': + DEFAULT_CFG_DICT[k] = None +DEFAULT_CFG_KEYS = DEFAULT_CFG_DICT.keys() +DEFAULT_CFG = IterableSimpleNamespace(**DEFAULT_CFG_DICT) + + +def is_colab(): + """ + Check if the current script is running inside a Google Colab notebook. + + Returns: + (bool): True if running inside a Colab notebook, False otherwise. + """ + return 'COLAB_RELEASE_TAG' in os.environ or 'COLAB_BACKEND_VERSION' in os.environ + + +def is_kaggle(): + """ + Check if the current script is running inside a Kaggle kernel. + + Returns: + (bool): True if running inside a Kaggle kernel, False otherwise. + """ + return os.environ.get('PWD') == '/kaggle/working' and os.environ.get('KAGGLE_URL_BASE') == 'https://www.kaggle.com' + + +def is_jupyter(): + """ + Check if the current script is running inside a Jupyter Notebook. + Verified on Colab, Jupyterlab, Kaggle, Paperspace. + + Returns: + (bool): True if running inside a Jupyter Notebook, False otherwise. + """ + with contextlib.suppress(Exception): + from IPython import get_ipython + return get_ipython() is not None + return False + + +def is_docker() -> bool: + """ + Determine if the script is running inside a Docker container. + + Returns: + (bool): True if the script is running inside a Docker container, False otherwise. + """ + file = Path('/proc/self/cgroup') + if file.exists(): + with open(file) as f: + return 'docker' in f.read() + else: + return False + + +def is_online() -> bool: + """ + Check internet connectivity by attempting to connect to a known online host. + + Returns: + (bool): True if connection is successful, False otherwise. + """ + import socket + + for host in '1.1.1.1', '8.8.8.8', '223.5.5.5': # Cloudflare, Google, AliDNS: + try: + test_connection = socket.create_connection(address=(host, 53), timeout=2) + except (socket.timeout, socket.gaierror, OSError): + continue + else: + # If the connection was successful, close it to avoid a ResourceWarning + test_connection.close() + return True + return False + + +ONLINE = is_online() + + +def is_pip_package(filepath: str = __name__) -> bool: + """ + Determines if the file at the given filepath is part of a pip package. + + Args: + filepath (str): The filepath to check. + + Returns: + (bool): True if the file is part of a pip package, False otherwise. 
+ """ + import importlib.util + + # Get the spec for the module + spec = importlib.util.find_spec(filepath) + + # Return whether the spec is not None and the origin is not None (indicating it is a package) + return spec is not None and spec.origin is not None + + +def is_dir_writeable(dir_path: Union[str, Path]) -> bool: + """ + Check if a directory is writeable. + + Args: + dir_path (str | Path): The path to the directory. + + Returns: + (bool): True if the directory is writeable, False otherwise. + """ + return os.access(str(dir_path), os.W_OK) + + +def is_pytest_running(): + """ + Determines whether pytest is currently running or not. + + Returns: + (bool): True if pytest is running, False otherwise. + """ + return ('PYTEST_CURRENT_TEST' in os.environ) or ('pytest' in sys.modules) or ('pytest' in Path(sys.argv[0]).stem) + + +def is_github_actions_ci() -> bool: + """ + Determine if the current environment is a GitHub Actions CI Python runner. + + Returns: + (bool): True if the current environment is a GitHub Actions CI Python runner, False otherwise. + """ + return 'GITHUB_ACTIONS' in os.environ and 'RUNNER_OS' in os.environ and 'RUNNER_TOOL_CACHE' in os.environ + + +def is_git_dir(): + """ + Determines whether the current file is part of a git repository. + If the current file is not part of a git repository, returns None. + + Returns: + (bool): True if current file is part of a git repository. + """ + return get_git_dir() is not None + + +def get_git_dir(): + """ + Determines whether the current file is part of a git repository and if so, returns the repository root directory. + If the current file is not part of a git repository, returns None. + + Returns: + (Path | None): Git root directory if found or None if not found. + """ + for d in Path(__file__).parents: + if (d / '.git').is_dir(): + return d + return None # no .git dir found + + +def get_git_origin_url(): + """ + Retrieves the origin URL of a git repository. + + Returns: + (str | None): The origin URL of the git repository. + """ + if is_git_dir(): + with contextlib.suppress(subprocess.CalledProcessError): + origin = subprocess.check_output(['git', 'config', '--get', 'remote.origin.url']) + return origin.decode().strip() + return None # if not git dir or on error + + +def get_git_branch(): + """ + Returns the current git branch name. If not in a git repository, returns None. + + Returns: + (str | None): The current git branch name. + """ + if is_git_dir(): + with contextlib.suppress(subprocess.CalledProcessError): + origin = subprocess.check_output(['git', 'rev-parse', '--abbrev-ref', 'HEAD']) + return origin.decode().strip() + return None # if not git dir or on error + + +def get_default_args(func): + """Returns a dictionary of default arguments for a function. + + Args: + func (callable): The function to inspect. + + Returns: + (dict): A dictionary where each key is a parameter name, and each value is the default value of that parameter. + """ + signature = inspect.signature(func) + return {k: v.default for k, v in signature.parameters.items() if v.default is not inspect.Parameter.empty} + + +def get_user_config_dir(sub_dir='Ultralytics'): + """ + Get the user config directory. + + Args: + sub_dir (str): The name of the subdirectory to create. + + Returns: + (Path): The path to the user config directory. 
+ """ + # Return the appropriate config directory for each operating system + if WINDOWS: + path = Path.home() / 'AppData' / 'Roaming' / sub_dir + elif MACOS: # macOS + path = Path.home() / 'Library' / 'Application Support' / sub_dir + elif LINUX: + path = Path.home() / '.config' / sub_dir + else: + raise ValueError(f'Unsupported operating system: {platform.system()}') + + # GCP and AWS lambda fix, only /tmp is writeable + if not is_dir_writeable(str(path.parent)): + path = Path('/tmp') / sub_dir + LOGGER.warning(f"WARNING ⚠️ user config directory is not writeable, defaulting to '{path}'.") + + # Create the subdirectory if it does not exist + path.mkdir(parents=True, exist_ok=True) + + return path + + +USER_CONFIG_DIR = Path(os.getenv('YOLO_CONFIG_DIR', get_user_config_dir())) # Ultralytics settings dir +SETTINGS_YAML = USER_CONFIG_DIR / 'settings.yaml' + + +def colorstr(*input): + """Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. colorstr('blue', 'hello world').""" + *args, string = input if len(input) > 1 else ('blue', 'bold', input[0]) # color arguments, string + colors = { + 'black': '\033[30m', # basic colors + 'red': '\033[31m', + 'green': '\033[32m', + 'yellow': '\033[33m', + 'blue': '\033[34m', + 'magenta': '\033[35m', + 'cyan': '\033[36m', + 'white': '\033[37m', + 'bright_black': '\033[90m', # bright colors + 'bright_red': '\033[91m', + 'bright_green': '\033[92m', + 'bright_yellow': '\033[93m', + 'bright_blue': '\033[94m', + 'bright_magenta': '\033[95m', + 'bright_cyan': '\033[96m', + 'bright_white': '\033[97m', + 'end': '\033[0m', # misc + 'bold': '\033[1m', + 'underline': '\033[4m'} + return ''.join(colors[x] for x in args) + f'{string}' + colors['end'] + + +class TryExcept(contextlib.ContextDecorator): + """YOLOv8 TryExcept class. Usage: @TryExcept() decorator or 'with TryExcept():' context manager.""" + + def __init__(self, msg='', verbose=True): + """Initialize TryExcept class with optional message and verbosity settings.""" + self.msg = msg + self.verbose = verbose + + def __enter__(self): + """Executes when entering TryExcept context, initializes instance.""" + pass + + def __exit__(self, exc_type, value, traceback): + """Defines behavior when exiting a 'with' block, prints error message if necessary.""" + if self.verbose and value: + print(emojis(f"{self.msg}{': ' if self.msg else ''}{value}")) + return True + + +def threaded(func): + """Multi-threads a target function and returns thread. Usage: @threaded decorator.""" + + def wrapper(*args, **kwargs): + """Multi-threads a given function and returns the thread.""" + thread = threading.Thread(target=func, args=args, kwargs=kwargs, daemon=True) + thread.start() + return thread + + return wrapper + + +def set_sentry(): + """ + Initialize the Sentry SDK for error tracking and reporting. Only used if sentry_sdk package is installed and + sync=True in settings. Run 'yolo settings' to see and update settings YAML file. + + Conditions required to send errors (ALL conditions must be met or no errors will be reported): + - sentry_sdk package is installed + - sync=True in YOLO settings + - pytest is not running + - running in a pip package installation + - running in a non-git directory + - running with rank -1 or 0 + - online environment + - CLI used to run package (checked with 'yolo' as the name of the main CLI command) + + The function also configures Sentry SDK to ignore KeyboardInterrupt and FileNotFoundError + exceptions and to exclude events with 'out of memory' in their exception message. 
+ + Additionally, the function sets custom tags and user information for Sentry events. + """ + + def before_send(event, hint): + """ + Modify the event before sending it to Sentry based on specific exception types and messages. + + Args: + event (dict): The event dictionary containing information about the error. + hint (dict): A dictionary containing additional information about the error. + + Returns: + dict: The modified event or None if the event should not be sent to Sentry. + """ + if 'exc_info' in hint: + exc_type, exc_value, tb = hint['exc_info'] + if exc_type in (KeyboardInterrupt, FileNotFoundError) \ + or 'out of memory' in str(exc_value): + return None # do not send event + + event['tags'] = { + 'sys_argv': sys.argv[0], + 'sys_argv_name': Path(sys.argv[0]).name, + 'install': 'git' if is_git_dir() else 'pip' if is_pip_package() else 'other', + 'os': ENVIRONMENT} + return event + + if SETTINGS['sync'] and \ + RANK in (-1, 0) and \ + Path(sys.argv[0]).name == 'yolo' and \ + not TESTS_RUNNING and \ + ONLINE and \ + is_pip_package() and \ + not is_git_dir(): + + # If sentry_sdk package is not installed then return and do not use Sentry + try: + import sentry_sdk # noqa + except ImportError: + return + + sentry_sdk.init( + dsn='https://5ff1556b71594bfea135ff0203a0d290@o4504521589325824.ingest.sentry.io/4504521592406016', + debug=False, + traces_sample_rate=1.0, + release=__version__, + environment='production', # 'dev' or 'production' + before_send=before_send, + ignore_errors=[KeyboardInterrupt, FileNotFoundError]) + sentry_sdk.set_user({'id': SETTINGS['uuid']}) # SHA-256 anonymized UUID hash + + # Disable all sentry logging + for logger in 'sentry_sdk', 'sentry_sdk.errors': + logging.getLogger(logger).setLevel(logging.CRITICAL) + + +def get_settings(file=SETTINGS_YAML, version='0.0.3'): + """ + Loads a global Ultralytics settings YAML file or creates one with default values if it does not exist. + + Args: + file (Path): Path to the Ultralytics settings YAML file. Defaults to 'settings.yaml' in the USER_CONFIG_DIR. + version (str): Settings version. If min settings version not met, new default settings will be saved. + + Returns: + (dict): Dictionary of settings key-value pairs. + """ + import hashlib + + from ultralytics.yolo.utils.checks import check_version + from ultralytics.yolo.utils.torch_utils import torch_distributed_zero_first + + git_dir = get_git_dir() + root = git_dir or Path() + datasets_root = (root.parent if git_dir and is_dir_writeable(root.parent) else root).resolve() + defaults = { + 'datasets_dir': str(datasets_root / 'datasets'), # default datasets directory. + 'weights_dir': str(root / 'weights'), # default weights directory. + 'runs_dir': str(root / 'runs'), # default runs directory. + 'uuid': hashlib.sha256(str(uuid.getnode()).encode()).hexdigest(), # SHA-256 anonymized UUID hash + 'sync': True, # sync analytics to help with YOLO development + 'api_key': '', # Ultralytics HUB API key (https://hub.ultralytics.com/) + 'settings_version': version} # Ultralytics settings version + + with torch_distributed_zero_first(RANK): + if not file.exists(): + yaml_save(file, defaults) + settings = yaml_load(file) + + # Check that settings keys and types match defaults + correct = \ + settings \ + and settings.keys() == defaults.keys() \ + and all(type(a) == type(b) for a, b in zip(settings.values(), defaults.values())) \ + and check_version(settings['settings_version'], version) + if not correct: + LOGGER.warning('WARNING ⚠️ Ultralytics settings reset to defaults. 
This is normal and may be due to a ' + 'recent ultralytics package update, but may have overwritten previous settings. ' + f"\nView and update settings with 'yolo settings' or at '{file}'") + settings = defaults # merge **defaults with **settings (prefer **settings) + yaml_save(file, settings) # save updated defaults + + return settings + + +def set_settings(kwargs, file=SETTINGS_YAML): + """ + Function that runs on a first-time ultralytics package installation to set up global settings and create necessary + directories. + """ + SETTINGS.update(kwargs) + yaml_save(file, SETTINGS) + + +def deprecation_warn(arg, new_arg, version=None): + """Issue a deprecation warning when a deprecated argument is used, suggesting an updated argument.""" + if not version: + version = float(__version__[:3]) + 0.2 # deprecate after 2nd major release + LOGGER.warning(f"WARNING ⚠️ '{arg}' is deprecated and will be removed in 'ultralytics {version}' in the future. " + f"Please use '{new_arg}' instead.") + + +def clean_url(url): + """Strip auth from URL, i.e. https://url.com/file.txt?auth -> https://url.com/file.txt.""" + url = str(Path(url)).replace(':/', '://') # Pathlib turns :// -> :/ + return urllib.parse.unquote(url).split('?')[0] # '%2F' to '/', split https://url.com/file.txt?auth + + +def url2file(url): + """Convert URL to filename, i.e. https://url.com/file.txt?auth -> file.txt.""" + return Path(clean_url(url)).name + + +# Run below code on yolo/utils init ------------------------------------------------------------------------------------ + +# Check first-install steps +PREFIX = colorstr('Ultralytics: ') +SETTINGS = get_settings() +DATASETS_DIR = Path(SETTINGS['datasets_dir']) # global datasets directory +ENVIRONMENT = 'Colab' if is_colab() else 'Kaggle' if is_kaggle() else 'Jupyter' if is_jupyter() else \ + 'Docker' if is_docker() else platform.system() +TESTS_RUNNING = is_pytest_running() or is_github_actions_ci() +set_sentry() + +# Apply monkey patches if the script is being run from within the parent directory of the script's location +from .patches import imread, imshow, imwrite + +# torch.save = torch_save +if Path(inspect.stack()[0].filename).parent.parent.as_posix() in inspect.stack()[-1].filename: + cv2.imread, cv2.imwrite, cv2.imshow = imread, imwrite, imshow diff --git a/ultralytics/yolo/utils/autobatch.py b/ultralytics/yolo/utils/autobatch.py new file mode 100644 index 0000000..0645f81 --- /dev/null +++ b/ultralytics/yolo/utils/autobatch.py @@ -0,0 +1,90 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +""" +Functions for estimating the best YOLO batch size to use a fraction of the available CUDA memory in PyTorch. +""" + +from copy import deepcopy + +import numpy as np +import torch + +from ultralytics.yolo.utils import DEFAULT_CFG, LOGGER, colorstr +from ultralytics.yolo.utils.torch_utils import profile + + +def check_train_batch_size(model, imgsz=640, amp=True): + """ + Check YOLO training batch size using the autobatch() function. + + Args: + model (torch.nn.Module): YOLO model to check batch size for. + imgsz (int): Image size used for training. + amp (bool): If True, use automatic mixed precision (AMP) for training. + + Returns: + (int): Optimal batch size computed using the autobatch() function. 
+ """ + + with torch.cuda.amp.autocast(amp): + return autobatch(deepcopy(model).train(), imgsz) # compute optimal batch size + + +def autobatch(model, imgsz=640, fraction=0.67, batch_size=DEFAULT_CFG.batch): + """ + Automatically estimate the best YOLO batch size to use a fraction of the available CUDA memory. + + Args: + model (torch.nn.module): YOLO model to compute batch size for. + imgsz (int, optional): The image size used as input for the YOLO model. Defaults to 640. + fraction (float, optional): The fraction of available CUDA memory to use. Defaults to 0.67. + batch_size (int, optional): The default batch size to use if an error is detected. Defaults to 16. + + Returns: + (int): The optimal batch size. + """ + + # Check device + prefix = colorstr('AutoBatch: ') + LOGGER.info(f'{prefix}Computing optimal batch size for imgsz={imgsz}') + device = next(model.parameters()).device # get model device + if device.type == 'cpu': + LOGGER.info(f'{prefix}CUDA not detected, using default CPU batch-size {batch_size}') + return batch_size + if torch.backends.cudnn.benchmark: + LOGGER.info(f'{prefix} ⚠️ Requires torch.backends.cudnn.benchmark=False, using default batch-size {batch_size}') + return batch_size + + # Inspect CUDA memory + gb = 1 << 30 # bytes to GiB (1024 ** 3) + d = str(device).upper() # 'CUDA:0' + properties = torch.cuda.get_device_properties(device) # device properties + t = properties.total_memory / gb # GiB total + r = torch.cuda.memory_reserved(device) / gb # GiB reserved + a = torch.cuda.memory_allocated(device) / gb # GiB allocated + f = t - (r + a) # GiB free + LOGGER.info(f'{prefix}{d} ({properties.name}) {t:.2f}G total, {r:.2f}G reserved, {a:.2f}G allocated, {f:.2f}G free') + + # Profile batch sizes + batch_sizes = [1, 2, 4, 8, 16] + try: + img = [torch.empty(b, 3, imgsz, imgsz) for b in batch_sizes] + results = profile(img, model, n=3, device=device) + + # Fit a solution + y = [x[2] for x in results if x] # memory [2] + p = np.polyfit(batch_sizes[:len(y)], y, deg=1) # first degree polynomial fit + b = int((f * fraction - p[1]) / p[0]) # y intercept (optimal batch size) + if None in results: # some sizes failed + i = results.index(None) # first fail index + if b >= batch_sizes[i]: # y intercept above failure point + b = batch_sizes[max(i - 1, 0)] # select prior safe point + if b < 1 or b > 1024: # b outside of safe range + b = batch_size + LOGGER.info(f'{prefix}WARNING ⚠️ CUDA anomaly detected, using default batch-size {batch_size}.') + + fraction = (np.polyval(p, b) + r + a) / t # actual fraction predicted + LOGGER.info(f'{prefix}Using batch-size {b} for {d} {t * fraction:.2f}G/{t:.2f}G ({fraction * 100:.0f}%) ✅') + return b + except Exception as e: + LOGGER.warning(f'{prefix}WARNING ⚠️ error detected: {e}, using default batch-size {batch_size}.') + return batch_size diff --git a/ultralytics/yolo/utils/benchmarks.py b/ultralytics/yolo/utils/benchmarks.py new file mode 100644 index 0000000..e84a1a6 --- /dev/null +++ b/ultralytics/yolo/utils/benchmarks.py @@ -0,0 +1,358 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +""" +Benchmark a YOLO model formats for speed and accuracy + +Usage: + from ultralytics.yolo.utils.benchmarks import ProfileModels, benchmark + ProfileModels(['yolov8n.yaml', 'yolov8s.yaml']).profile() + run_benchmarks(model='yolov8n.pt', imgsz=160) + +Format | `format=argument` | Model +--- | --- | --- +PyTorch | - | yolov8n.pt +TorchScript | `torchscript` | yolov8n.torchscript +ONNX | `onnx` | yolov8n.onnx +OpenVINO | `openvino` | yolov8n_openvino_model/ 
+TensorRT | `engine` | yolov8n.engine +CoreML | `coreml` | yolov8n.mlmodel +TensorFlow SavedModel | `saved_model` | yolov8n_saved_model/ +TensorFlow GraphDef | `pb` | yolov8n.pb +TensorFlow Lite | `tflite` | yolov8n.tflite +TensorFlow Edge TPU | `edgetpu` | yolov8n_edgetpu.tflite +TensorFlow.js | `tfjs` | yolov8n_web_model/ +PaddlePaddle | `paddle` | yolov8n_paddle_model/ +ncnn | `ncnn` | yolov8n_ncnn_model/ +""" + +import glob +import platform +import time +from pathlib import Path + +import numpy as np +import torch.cuda +from tqdm import tqdm + +from ultralytics import YOLO +from ultralytics.yolo.cfg import TASK2DATA, TASK2METRIC +from ultralytics.yolo.engine.exporter import export_formats +from ultralytics.yolo.utils import LINUX, LOGGER, MACOS, ROOT, SETTINGS +from ultralytics.yolo.utils.checks import check_requirements, check_yolo +from ultralytics.yolo.utils.downloads import download +from ultralytics.yolo.utils.files import file_size +from ultralytics.yolo.utils.torch_utils import select_device + + +def benchmark(model=Path(SETTINGS['weights_dir']) / 'yolov8n.pt', + imgsz=160, + half=False, + int8=False, + device='cpu', + hard_fail=False): + """ + Benchmark a YOLO model across different formats for speed and accuracy. + + Args: + model (str | Path | optional): Path to the model file or directory. Default is + Path(SETTINGS['weights_dir']) / 'yolov8n.pt'. + imgsz (int, optional): Image size for the benchmark. Default is 160. + half (bool, optional): Use half-precision for the model if True. Default is False. + int8 (bool, optional): Use int8-precision for the model if True. Default is False. + device (str, optional): Device to run the benchmark on, either 'cpu' or 'cuda'. Default is 'cpu'. + hard_fail (bool | float | optional): If True or a float, assert benchmarks pass with given metric. + Default is False. + + Returns: + df (pandas.DataFrame): A pandas DataFrame with benchmark results for each format, including file size, + metric, and inference time. 
+ """ + + import pandas as pd + pd.options.display.max_columns = 10 + pd.options.display.width = 120 + device = select_device(device, verbose=False) + if isinstance(model, (str, Path)): + model = YOLO(model) + + y = [] + t0 = time.time() + for i, (name, format, suffix, cpu, gpu) in export_formats().iterrows(): # index, (name, format, suffix, CPU, GPU) + emoji, filename = '❌', None # export defaults + try: + assert i != 9 or LINUX, 'Edge TPU export only supported on Linux' + if i == 10: + assert MACOS or LINUX, 'TF.js export only supported on macOS and Linux' + if 'cpu' in device.type: + assert cpu, 'inference not supported on CPU' + if 'cuda' in device.type: + assert gpu, 'inference not supported on GPU' + + # Export + if format == '-': + filename = model.ckpt_path or model.cfg + export = model # PyTorch format + else: + filename = model.export(imgsz=imgsz, format=format, half=half, int8=int8, device=device, verbose=False) + export = YOLO(filename, task=model.task) + assert suffix in str(filename), 'export failed' + emoji = '❎' # indicates export succeeded + + # Predict + assert model.task != 'pose' or i != 7, 'GraphDef Pose inference is not supported' + assert i not in (9, 10), 'inference not supported' # Edge TPU and TF.js are unsupported + assert i != 5 or platform.system() == 'Darwin', 'inference only supported on macOS>=10.13' # CoreML + if not (ROOT / 'assets/bus.jpg').exists(): + download(url='https://ultralytics.com/images/bus.jpg', dir=ROOT / 'assets') + export.predict(ROOT / 'assets/bus.jpg', imgsz=imgsz, device=device, half=half) + + # Validate + data = TASK2DATA[model.task] # task to dataset, i.e. coco8.yaml for task=detect + key = TASK2METRIC[model.task] # task to metric, i.e. metrics/mAP50-95(B) for task=detect + results = export.val(data=data, + batch=1, + imgsz=imgsz, + plots=False, + device=device, + half=half, + int8=int8, + verbose=False) + metric, speed = results.results_dict[key], results.speed['inference'] + y.append([name, '✅', round(file_size(filename), 1), round(metric, 4), round(speed, 2)]) + except Exception as e: + if hard_fail: + assert type(e) is AssertionError, f'Benchmark hard_fail for {name}: {e}' + LOGGER.warning(f'ERROR ❌️ Benchmark failure for {name}: {e}') + y.append([name, emoji, round(file_size(filename), 1), None, None]) # mAP, t_inference + + # Print results + check_yolo(device=device) # print system info + df = pd.DataFrame(y, columns=['Format', 'Status❔', 'Size (MB)', key, 'Inference time (ms/im)']) + + name = Path(model.ckpt_path).name + s = f'\nBenchmarks complete for {name} on {data} at imgsz={imgsz} ({time.time() - t0:.2f}s)\n{df}\n' + LOGGER.info(s) + with open('benchmarks.log', 'a', errors='ignore', encoding='utf-8') as f: + f.write(s) + + if hard_fail and isinstance(hard_fail, float): + metrics = df[key].array # values to compare to floor + floor = hard_fail # minimum metric floor to pass, i.e. = 0.29 mAP for YOLOv5n + assert all(x > floor for x in metrics if pd.notna(x)), f'HARD FAIL: one or more metric(s) < floor {floor}' + + return df + + +class ProfileModels: + """ + ProfileModels class for profiling different models on ONNX and TensorRT. + + This class profiles the performance of different models, provided their paths. The profiling includes parameters such as + model speed and FLOPs. + + Attributes: + paths (list): Paths of the models to profile. + num_timed_runs (int): Number of timed runs for the profiling. Default is 100. + num_warmup_runs (int): Number of warmup runs before profiling. Default is 10. 
+ min_time (float): Minimum number of seconds to profile for. Default is 60. + imgsz (int): Image size used in the models. Default is 640. + + Methods: + profile(): Profiles the models and prints the result. + """ + + def __init__(self, + paths: list, + num_timed_runs=100, + num_warmup_runs=10, + min_time=60, + imgsz=640, + trt=True, + device=None): + self.paths = paths + self.num_timed_runs = num_timed_runs + self.num_warmup_runs = num_warmup_runs + self.min_time = min_time + self.imgsz = imgsz + self.trt = trt # run TensorRT profiling + self.device = device or torch.device(0 if torch.cuda.is_available() else 'cpu') + + def profile(self): + files = self.get_files() + + if not files: + print('No matching *.pt or *.onnx files found.') + return + + table_rows = [] + output = [] + for file in files: + engine_file = file.with_suffix('.engine') + if file.suffix in ('.pt', '.yaml'): + model = YOLO(str(file)) + model.fuse() # to report correct params and GFLOPs in model.info() + model_info = model.info() + if self.trt and self.device.type != 'cpu' and not engine_file.is_file(): + engine_file = model.export(format='engine', + half=True, + imgsz=self.imgsz, + device=self.device, + verbose=False) + onnx_file = model.export(format='onnx', + half=True, + imgsz=self.imgsz, + simplify=True, + device=self.device, + verbose=False) + elif file.suffix == '.onnx': + model_info = self.get_onnx_model_info(file) + onnx_file = file + else: + continue + + t_engine = self.profile_tensorrt_model(str(engine_file)) + t_onnx = self.profile_onnx_model(str(onnx_file)) + table_rows.append(self.generate_table_row(file.stem, t_onnx, t_engine, model_info)) + output.append(self.generate_results_dict(file.stem, t_onnx, t_engine, model_info)) + + self.print_table(table_rows) + return output + + def get_files(self): + files = [] + for path in self.paths: + path = Path(path) + if path.is_dir(): + extensions = ['*.pt', '*.onnx', '*.yaml'] + files.extend([file for ext in extensions for file in glob.glob(str(path / ext))]) + elif path.suffix in {'.pt', '.yaml'}: # add non-existing + files.append(str(path)) + else: + files.extend(glob.glob(str(path))) + + print(f'Profiling: {sorted(files)}') + return [Path(file) for file in sorted(files)] + + def get_onnx_model_info(self, onnx_file: str): + # return (num_layers, num_params, num_gradients, num_flops) + return 0.0, 0.0, 0.0, 0.0 + + def iterative_sigma_clipping(self, data, sigma=2, max_iters=3): + data = np.array(data) + for _ in range(max_iters): + mean, std = np.mean(data), np.std(data) + clipped_data = data[(data > mean - sigma * std) & (data < mean + sigma * std)] + if len(clipped_data) == len(data): + break + data = clipped_data + return data + + def profile_tensorrt_model(self, engine_file: str): + if not self.trt or not Path(engine_file).is_file(): + return 0.0, 0.0 + + # Model and input + model = YOLO(engine_file) + input_data = np.random.rand(self.imgsz, self.imgsz, 3).astype(np.float32) # must be FP32 + + # Warmup runs + elapsed = 0.0 + for _ in range(3): + start_time = time.time() + for _ in range(self.num_warmup_runs): + model(input_data, imgsz=self.imgsz, verbose=False) + elapsed = time.time() - start_time + + # Compute number of runs as higher of min_time or num_timed_runs + num_runs = max(round(self.min_time / elapsed * self.num_warmup_runs), self.num_timed_runs * 50) + + # Timed runs + run_times = [] + for _ in tqdm(range(num_runs), desc=engine_file): + results = model(input_data, imgsz=self.imgsz, verbose=False) + run_times.append(results[0].speed['inference']) # 
Convert to milliseconds + + run_times = self.iterative_sigma_clipping(np.array(run_times), sigma=2, max_iters=3) # sigma clipping + return np.mean(run_times), np.std(run_times) + + def profile_onnx_model(self, onnx_file: str): + check_requirements('onnxruntime') + import onnxruntime as ort + + # Session with either 'TensorrtExecutionProvider', 'CUDAExecutionProvider', 'CPUExecutionProvider' + sess_options = ort.SessionOptions() + sess_options.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_ALL + sess_options.intra_op_num_threads = 8 # Limit the number of threads + sess = ort.InferenceSession(onnx_file, sess_options, providers=['CPUExecutionProvider']) + + input_tensor = sess.get_inputs()[0] + input_type = input_tensor.type + + # Mapping ONNX datatype to numpy datatype + if 'float16' in input_type: + input_dtype = np.float16 + elif 'float' in input_type: + input_dtype = np.float32 + elif 'double' in input_type: + input_dtype = np.float64 + elif 'int64' in input_type: + input_dtype = np.int64 + elif 'int32' in input_type: + input_dtype = np.int32 + else: + raise ValueError(f'Unsupported ONNX datatype {input_type}') + + input_data = np.random.rand(*input_tensor.shape).astype(input_dtype) + input_name = input_tensor.name + output_name = sess.get_outputs()[0].name + + # Warmup runs + elapsed = 0.0 + for _ in range(3): + start_time = time.time() + for _ in range(self.num_warmup_runs): + sess.run([output_name], {input_name: input_data}) + elapsed = time.time() - start_time + + # Compute number of runs as higher of min_time or num_timed_runs + num_runs = max(round(self.min_time / elapsed * self.num_warmup_runs), self.num_timed_runs) + + # Timed runs + run_times = [] + for _ in tqdm(range(num_runs), desc=onnx_file): + start_time = time.time() + sess.run([output_name], {input_name: input_data}) + run_times.append((time.time() - start_time) * 1000) # Convert to milliseconds + + run_times = self.iterative_sigma_clipping(np.array(run_times), sigma=2, max_iters=5) # sigma clipping + return np.mean(run_times), np.std(run_times) + + def generate_table_row(self, model_name, t_onnx, t_engine, model_info): + layers, params, gradients, flops = model_info + return f'| {model_name:18s} | {self.imgsz} | - | {t_onnx[0]:.2f} ± {t_onnx[1]:.2f} ms | {t_engine[0]:.2f} ± {t_engine[1]:.2f} ms | {params / 1e6:.1f} | {flops:.1f} |' + + def generate_results_dict(self, model_name, t_onnx, t_engine, model_info): + layers, params, gradients, flops = model_info + return { + 'model/name': model_name, + 'model/parameters': params, + 'model/GFLOPs': round(flops, 3), + 'model/speed_ONNX(ms)': round(t_onnx[0], 3), + 'model/speed_TensorRT(ms)': round(t_engine[0], 3)} + + def print_table(self, table_rows): + gpu = torch.cuda.get_device_name(0) if torch.cuda.is_available() else 'GPU' + header = f'| Model | size
(pixels) | mAPval 50-95 | Speed CPU ONNX (ms) | Speed {gpu} TensorRT (ms) | params (M) | FLOPs
(B) |' + separator = '|-------------|---------------------|--------------------|------------------------------|-----------------------------------|------------------|-----------------|' + + print(f'\n\n{header}') + print(separator) + for row in table_rows: + print(row) + + +if __name__ == '__main__': + # Benchmark all export formats + benchmark() + + # Profiling models on ONNX and TensorRT + ProfileModels(['yolov8n.yaml', 'yolov8s.yaml']) diff --git a/ultralytics/yolo/utils/callbacks/__init__.py b/ultralytics/yolo/utils/callbacks/__init__.py new file mode 100644 index 0000000..8ad4ad6 --- /dev/null +++ b/ultralytics/yolo/utils/callbacks/__init__.py @@ -0,0 +1,5 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +from .base import add_integration_callbacks, default_callbacks, get_default_callbacks + +__all__ = 'add_integration_callbacks', 'default_callbacks', 'get_default_callbacks' diff --git a/ultralytics/yolo/utils/callbacks/base.py b/ultralytics/yolo/utils/callbacks/base.py new file mode 100644 index 0000000..0b17347 --- /dev/null +++ b/ultralytics/yolo/utils/callbacks/base.py @@ -0,0 +1,212 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +""" +Base callbacks +""" + +from collections import defaultdict +from copy import deepcopy + +# Trainer callbacks ---------------------------------------------------------------------------------------------------- + + +def on_pretrain_routine_start(trainer): + """Called before the pretraining routine starts.""" + pass + + +def on_pretrain_routine_end(trainer): + """Called after the pretraining routine ends.""" + pass + + +def on_train_start(trainer): + """Called when the training starts.""" + pass + + +def on_train_epoch_start(trainer): + """Called at the start of each training epoch.""" + pass + + +def on_train_batch_start(trainer): + """Called at the start of each training batch.""" + pass + + +def optimizer_step(trainer): + """Called when the optimizer takes a step.""" + pass + + +def on_before_zero_grad(trainer): + """Called before the gradients are set to zero.""" + pass + + +def on_train_batch_end(trainer): + """Called at the end of each training batch.""" + pass + + +def on_train_epoch_end(trainer): + """Called at the end of each training epoch.""" + pass + + +def on_fit_epoch_end(trainer): + """Called at the end of each fit epoch (train + val).""" + pass + + +def on_model_save(trainer): + """Called when the model is saved.""" + pass + + +def on_train_end(trainer): + """Called when the training ends.""" + pass + + +def on_params_update(trainer): + """Called when the model parameters are updated.""" + pass + + +def teardown(trainer): + """Called during the teardown of the training process.""" + pass + + +# Validator callbacks -------------------------------------------------------------------------------------------------- + + +def on_val_start(validator): + """Called when the validation starts.""" + pass + + +def on_val_batch_start(validator): + """Called at the start of each validation batch.""" + pass + + +def on_val_batch_end(validator): + """Called at the end of each validation batch.""" + pass + + +def on_val_end(validator): + """Called when the validation ends.""" + pass + + +# Predictor callbacks -------------------------------------------------------------------------------------------------- + + +def on_predict_start(predictor): + """Called when the prediction starts.""" + pass + + +def on_predict_batch_start(predictor): + """Called at the start of each prediction batch.""" + pass + + +def on_predict_batch_end(predictor): + """Called at the end 
of each prediction batch.""" + pass + + +def on_predict_postprocess_end(predictor): + """Called after the post-processing of the prediction ends.""" + pass + + +def on_predict_end(predictor): + """Called when the prediction ends.""" + pass + + +# Exporter callbacks --------------------------------------------------------------------------------------------------- + + +def on_export_start(exporter): + """Called when the model export starts.""" + pass + + +def on_export_end(exporter): + """Called when the model export ends.""" + pass + + +default_callbacks = { + # Run in trainer + 'on_pretrain_routine_start': [on_pretrain_routine_start], + 'on_pretrain_routine_end': [on_pretrain_routine_end], + 'on_train_start': [on_train_start], + 'on_train_epoch_start': [on_train_epoch_start], + 'on_train_batch_start': [on_train_batch_start], + 'optimizer_step': [optimizer_step], + 'on_before_zero_grad': [on_before_zero_grad], + 'on_train_batch_end': [on_train_batch_end], + 'on_train_epoch_end': [on_train_epoch_end], + 'on_fit_epoch_end': [on_fit_epoch_end], # fit = train + val + 'on_model_save': [on_model_save], + 'on_train_end': [on_train_end], + 'on_params_update': [on_params_update], + 'teardown': [teardown], + + # Run in validator + 'on_val_start': [on_val_start], + 'on_val_batch_start': [on_val_batch_start], + 'on_val_batch_end': [on_val_batch_end], + 'on_val_end': [on_val_end], + + # Run in predictor + 'on_predict_start': [on_predict_start], + 'on_predict_batch_start': [on_predict_batch_start], + 'on_predict_postprocess_end': [on_predict_postprocess_end], + 'on_predict_batch_end': [on_predict_batch_end], + 'on_predict_end': [on_predict_end], + + # Run in exporter + 'on_export_start': [on_export_start], + 'on_export_end': [on_export_end]} + + +def get_default_callbacks(): + """ + Return a copy of the default_callbacks dictionary with lists as default values. + + Returns: + (defaultdict): A defaultdict with keys from default_callbacks and empty lists as default values. + """ + return defaultdict(list, deepcopy(default_callbacks)) + + +def add_integration_callbacks(instance): + """ + Add integration callbacks from various sources to the instance's callbacks. + + Args: + instance (Trainer, Predictor, Validator, Exporter): An object with a 'callbacks' attribute that is a dictionary + of callback lists. 
+ """ + from .clearml import callbacks as clearml_cb + from .comet import callbacks as comet_cb + from .dvc import callbacks as dvc_cb + from .hub import callbacks as hub_cb + from .mlflow import callbacks as mlflow_cb + from .neptune import callbacks as neptune_cb + from .raytune import callbacks as tune_cb + from .tensorboard import callbacks as tensorboard_cb + from .wb import callbacks as wb_cb + + for x in clearml_cb, comet_cb, hub_cb, mlflow_cb, neptune_cb, tune_cb, tensorboard_cb, wb_cb, dvc_cb: + for k, v in x.items(): + if v not in instance.callbacks[k]: # prevent duplicate callbacks addition + instance.callbacks[k].append(v) # callback[name].append(func) diff --git a/ultralytics/yolo/utils/callbacks/clearml.py b/ultralytics/yolo/utils/callbacks/clearml.py new file mode 100644 index 0000000..2cfdd73 --- /dev/null +++ b/ultralytics/yolo/utils/callbacks/clearml.py @@ -0,0 +1,143 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +import re + +import matplotlib.image as mpimg +import matplotlib.pyplot as plt + +from ultralytics.yolo.utils import LOGGER, TESTS_RUNNING +from ultralytics.yolo.utils.torch_utils import model_info_for_loggers + +try: + import clearml + from clearml import Task + from clearml.binding.frameworks.pytorch_bind import PatchPyTorchModelIO + from clearml.binding.matplotlib_bind import PatchedMatplotlib + + assert hasattr(clearml, '__version__') # verify package is not directory + assert not TESTS_RUNNING # do not log pytest +except (ImportError, AssertionError): + clearml = None + + +def _log_debug_samples(files, title='Debug Samples') -> None: + """ + Log files (images) as debug samples in the ClearML task. + + Args: + files (list): A list of file paths in PosixPath format. + title (str): A title that groups together images with the same values. + """ + task = Task.current_task() + if task: + for f in files: + if f.exists(): + it = re.search(r'_batch(\d+)', f.name) + iteration = int(it.groups()[0]) if it else 0 + task.get_logger().report_image(title=title, + series=f.name.replace(it.group(), ''), + local_path=str(f), + iteration=iteration) + + +def _log_plot(title, plot_path) -> None: + """ + Log an image as a plot in the plot section of ClearML. + + Args: + title (str): The title of the plot. + plot_path (str): The path to the saved image file. + """ + img = mpimg.imread(plot_path) + fig = plt.figure() + ax = fig.add_axes([0, 0, 1, 1], frameon=False, aspect='auto', xticks=[], yticks=[]) # no ticks + ax.imshow(img) + + Task.current_task().get_logger().report_matplotlib_figure(title=title, + series='', + figure=fig, + report_interactive=False) + + +def on_pretrain_routine_start(trainer): + """Runs at start of pretraining routine; initializes and connects/ logs task to ClearML.""" + try: + task = Task.current_task() + if task: + # Make sure the automatic pytorch and matplotlib bindings are disabled! + # We are logging these plots and model files manually in the integration + PatchPyTorchModelIO.update_current_task(None) + PatchedMatplotlib.update_current_task(None) + else: + task = Task.init(project_name=trainer.args.project or 'YOLOv8', + task_name=trainer.args.name, + tags=['YOLOv8'], + output_uri=True, + reuse_last_task_id=False, + auto_connect_frameworks={ + 'pytorch': False, + 'matplotlib': False}) + LOGGER.warning('ClearML Initialized a new task. 
If you want to run remotely, ' + 'please add clearml-init and connect your arguments before initializing YOLO.') + task.connect(vars(trainer.args), name='General') + except Exception as e: + LOGGER.warning(f'WARNING ⚠️ ClearML installed but not initialized correctly, not logging this run. {e}') + + +def on_train_epoch_end(trainer): + task = Task.current_task() + + if task: + """Logs debug samples for the first epoch of YOLO training.""" + if trainer.epoch == 1: + _log_debug_samples(sorted(trainer.save_dir.glob('train_batch*.jpg')), 'Mosaic') + """Report the current training progress.""" + for k, v in trainer.validator.metrics.results_dict.items(): + task.get_logger().report_scalar('train', k, v, iteration=trainer.epoch) + + +def on_fit_epoch_end(trainer): + """Reports model information to logger at the end of an epoch.""" + task = Task.current_task() + if task: + # You should have access to the validation bboxes under jdict + task.get_logger().report_scalar(title='Epoch Time', + series='Epoch Time', + value=trainer.epoch_time, + iteration=trainer.epoch) + if trainer.epoch == 0: + for k, v in model_info_for_loggers(trainer).items(): + task.get_logger().report_single_value(k, v) + + +def on_val_end(validator): + """Logs validation results including labels and predictions.""" + if Task.current_task(): + # Log val_labels and val_pred + _log_debug_samples(sorted(validator.save_dir.glob('val*.jpg')), 'Validation') + + +def on_train_end(trainer): + """Logs final model and its name on training completion.""" + task = Task.current_task() + if task: + # Log final results, CM matrix + PR plots + files = [ + 'results.png', 'confusion_matrix.png', 'confusion_matrix_normalized.png', + *(f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R'))] + files = [(trainer.save_dir / f) for f in files if (trainer.save_dir / f).exists()] # filter + for f in files: + _log_plot(title=f.stem, plot_path=f) + # Report final metrics + for k, v in trainer.validator.metrics.results_dict.items(): + task.get_logger().report_single_value(k, v) + # Log the final model + task.update_output_model(model_path=str(trainer.best), model_name=trainer.args.name, auto_delete_file=False) + + +callbacks = { + 'on_pretrain_routine_start': on_pretrain_routine_start, + 'on_train_epoch_end': on_train_epoch_end, + 'on_fit_epoch_end': on_fit_epoch_end, + 'on_val_end': on_val_end, + 'on_train_end': on_train_end} if clearml else {} diff --git a/ultralytics/yolo/utils/callbacks/comet.py b/ultralytics/yolo/utils/callbacks/comet.py new file mode 100644 index 0000000..94aeb8f --- /dev/null +++ b/ultralytics/yolo/utils/callbacks/comet.py @@ -0,0 +1,368 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +import os +from pathlib import Path + +from ultralytics.yolo.utils import LOGGER, RANK, TESTS_RUNNING, ops +from ultralytics.yolo.utils.torch_utils import model_info_for_loggers + +try: + import comet_ml + + assert not TESTS_RUNNING # do not log pytest + assert hasattr(comet_ml, '__version__') # verify package is not directory +except (ImportError, AssertionError): + comet_ml = None + +# Ensures certain logging functions only run for supported tasks +COMET_SUPPORTED_TASKS = ['detect'] + +# Names of plots created by YOLOv8 that are logged to Comet +EVALUATION_PLOT_NAMES = 'F1_curve', 'P_curve', 'R_curve', 'PR_curve', 'confusion_matrix' +LABEL_PLOT_NAMES = 'labels', 'labels_correlogram' + +_comet_image_prediction_count = 0 + + +def _get_comet_mode(): + return os.getenv('COMET_MODE', 'online') + + +def _get_comet_model_name(): + return os.getenv('COMET_MODEL_NAME', 
'YOLOv8') + + +def _get_eval_batch_logging_interval(): + return int(os.getenv('COMET_EVAL_BATCH_LOGGING_INTERVAL', 1)) + + +def _get_max_image_predictions_to_log(): + return int(os.getenv('COMET_MAX_IMAGE_PREDICTIONS', 100)) + + +def _scale_confidence_score(score): + scale = float(os.getenv('COMET_MAX_CONFIDENCE_SCORE', 100.0)) + return score * scale + + +def _should_log_confusion_matrix(): + return os.getenv('COMET_EVAL_LOG_CONFUSION_MATRIX', 'false').lower() == 'true' + + +def _should_log_image_predictions(): + return os.getenv('COMET_EVAL_LOG_IMAGE_PREDICTIONS', 'true').lower() == 'true' + + +def _get_experiment_type(mode, project_name): + """Return an experiment based on mode and project name.""" + if mode == 'offline': + return comet_ml.OfflineExperiment(project_name=project_name) + + return comet_ml.Experiment(project_name=project_name) + + +def _create_experiment(args): + """Ensures that the experiment object is only created in a single process during distributed training.""" + if RANK not in (-1, 0): + return + try: + comet_mode = _get_comet_mode() + _project_name = os.getenv('COMET_PROJECT_NAME', args.project) + experiment = _get_experiment_type(comet_mode, _project_name) + experiment.log_parameters(vars(args)) + experiment.log_others({ + 'eval_batch_logging_interval': _get_eval_batch_logging_interval(), + 'log_confusion_matrix_on_eval': _should_log_confusion_matrix(), + 'log_image_predictions': _should_log_image_predictions(), + 'max_image_predictions': _get_max_image_predictions_to_log(), }) + experiment.log_other('Created from', 'yolov8') + + except Exception as e: + LOGGER.warning(f'WARNING ⚠️ Comet installed but not initialized correctly, not logging this run. {e}') + + +def _fetch_trainer_metadata(trainer): + """Returns metadata for YOLO training including epoch and asset saving status.""" + curr_epoch = trainer.epoch + 1 + + train_num_steps_per_epoch = len(trainer.train_loader.dataset) // trainer.batch_size + curr_step = curr_epoch * train_num_steps_per_epoch + final_epoch = curr_epoch == trainer.epochs + + save = trainer.args.save + save_period = trainer.args.save_period + save_interval = curr_epoch % save_period == 0 + save_assets = save and save_period > 0 and save_interval and not final_epoch + + return dict( + curr_epoch=curr_epoch, + curr_step=curr_step, + save_assets=save_assets, + final_epoch=final_epoch, + ) + + +def _scale_bounding_box_to_original_image_shape(box, resized_image_shape, original_image_shape, ratio_pad): + """YOLOv8 resizes images during training and the label values + are normalized based on this resized shape. This function rescales the + bounding box labels to the original image shape. 
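+
+    Args:
+        box (torch.Tensor | np.ndarray): A single bounding box in normalized xywh format.
+        resized_image_shape (tuple): (height, width) of the resized image used during training.
+        original_image_shape (tuple): (height, width) of the original image.
+        ratio_pad (tuple): Ratio and padding information passed to ops.scale_boxes to undo the resize.
+
+    Returns:
+        (list): The box as [x, y, width, height] with a top-left origin, in original image coordinates.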
+ """ + + resized_image_height, resized_image_width = resized_image_shape + + # Convert normalized xywh format predictions to xyxy in resized scale format + box = ops.xywhn2xyxy(box, h=resized_image_height, w=resized_image_width) + # Scale box predictions from resized image scale back to original image scale + box = ops.scale_boxes(resized_image_shape, box, original_image_shape, ratio_pad) + # Convert bounding box format from xyxy to xywh for Comet logging + box = ops.xyxy2xywh(box) + # Adjust xy center to correspond top-left corner + box[:2] -= box[2:] / 2 + box = box.tolist() + + return box + + +def _format_ground_truth_annotations_for_detection(img_idx, image_path, batch, class_name_map=None): + """Format ground truth annotations for detection.""" + indices = batch['batch_idx'] == img_idx + bboxes = batch['bboxes'][indices] + if len(bboxes) == 0: + LOGGER.debug(f'COMET WARNING: Image: {image_path} has no bounding boxes labels') + return None + + cls_labels = batch['cls'][indices].squeeze(1).tolist() + if class_name_map: + cls_labels = [str(class_name_map[label]) for label in cls_labels] + + original_image_shape = batch['ori_shape'][img_idx] + resized_image_shape = batch['resized_shape'][img_idx] + ratio_pad = batch['ratio_pad'][img_idx] + + data = [] + for box, label in zip(bboxes, cls_labels): + box = _scale_bounding_box_to_original_image_shape(box, resized_image_shape, original_image_shape, ratio_pad) + data.append({ + 'boxes': [box], + 'label': f'gt_{label}', + 'score': _scale_confidence_score(1.0), }) + + return {'name': 'ground_truth', 'data': data} + + +def _format_prediction_annotations_for_detection(image_path, metadata, class_label_map=None): + """Format YOLO predictions for object detection visualization.""" + stem = image_path.stem + image_id = int(stem) if stem.isnumeric() else stem + + predictions = metadata.get(image_id) + if not predictions: + LOGGER.debug(f'COMET WARNING: Image: {image_path} has no bounding boxes predictions') + return None + + data = [] + for prediction in predictions: + boxes = prediction['bbox'] + score = _scale_confidence_score(prediction['score']) + cls_label = prediction['category_id'] + if class_label_map: + cls_label = str(class_label_map[cls_label]) + + data.append({'boxes': [boxes], 'label': cls_label, 'score': score}) + + return {'name': 'prediction', 'data': data} + + +def _fetch_annotations(img_idx, image_path, batch, prediction_metadata_map, class_label_map): + """Join the ground truth and prediction annotations if they exist.""" + ground_truth_annotations = _format_ground_truth_annotations_for_detection(img_idx, image_path, batch, + class_label_map) + prediction_annotations = _format_prediction_annotations_for_detection(image_path, prediction_metadata_map, + class_label_map) + + annotations = [ + annotation for annotation in [ground_truth_annotations, prediction_annotations] if annotation is not None] + return [annotations] if annotations else None + + +def _create_prediction_metadata_map(model_predictions): + """Create metadata map for model predictions by groupings them based on image ID.""" + pred_metadata_map = {} + for prediction in model_predictions: + pred_metadata_map.setdefault(prediction['image_id'], []) + pred_metadata_map[prediction['image_id']].append(prediction) + + return pred_metadata_map + + +def _log_confusion_matrix(experiment, trainer, curr_step, curr_epoch): + """Log the confusion matrix to Comet experiment.""" + conf_mat = trainer.validator.confusion_matrix.matrix + names = list(trainer.data['names'].values()) + 
['background'] + experiment.log_confusion_matrix( + matrix=conf_mat, + labels=names, + max_categories=len(names), + epoch=curr_epoch, + step=curr_step, + ) + + +def _log_images(experiment, image_paths, curr_step, annotations=None): + """Logs images to the experiment with optional annotations.""" + if annotations: + for image_path, annotation in zip(image_paths, annotations): + experiment.log_image(image_path, name=image_path.stem, step=curr_step, annotations=annotation) + + else: + for image_path in image_paths: + experiment.log_image(image_path, name=image_path.stem, step=curr_step) + + +def _log_image_predictions(experiment, validator, curr_step): + """Logs predicted boxes for a single image during training.""" + global _comet_image_prediction_count + + task = validator.args.task + if task not in COMET_SUPPORTED_TASKS: + return + + jdict = validator.jdict + if not jdict: + return + + predictions_metadata_map = _create_prediction_metadata_map(jdict) + dataloader = validator.dataloader + class_label_map = validator.names + + batch_logging_interval = _get_eval_batch_logging_interval() + max_image_predictions = _get_max_image_predictions_to_log() + + for batch_idx, batch in enumerate(dataloader): + if (batch_idx + 1) % batch_logging_interval != 0: + continue + + image_paths = batch['im_file'] + for img_idx, image_path in enumerate(image_paths): + if _comet_image_prediction_count >= max_image_predictions: + return + + image_path = Path(image_path) + annotations = _fetch_annotations( + img_idx, + image_path, + batch, + predictions_metadata_map, + class_label_map, + ) + _log_images( + experiment, + [image_path], + curr_step, + annotations=annotations, + ) + _comet_image_prediction_count += 1 + + +def _log_plots(experiment, trainer): + """Logs evaluation plots and label plots for the experiment.""" + plot_filenames = [trainer.save_dir / f'{plots}.png' for plots in EVALUATION_PLOT_NAMES] + _log_images(experiment, plot_filenames, None) + + label_plot_filenames = [trainer.save_dir / f'{labels}.jpg' for labels in LABEL_PLOT_NAMES] + _log_images(experiment, label_plot_filenames, None) + + +def _log_model(experiment, trainer): + """Log the best-trained model to Comet.ml.""" + model_name = _get_comet_model_name() + experiment.log_model( + model_name, + file_or_folder=str(trainer.best), + file_name='best.pt', + overwrite=True, + ) + + +def on_pretrain_routine_start(trainer): + """Creates or resumes a CometML experiment at the start of a YOLO pre-training routine.""" + experiment = comet_ml.get_global_experiment() + is_alive = getattr(experiment, 'alive', False) + if not experiment or not is_alive: + _create_experiment(trainer.args) + + +def on_train_epoch_end(trainer): + """Log metrics and save batch images at the end of training epochs.""" + experiment = comet_ml.get_global_experiment() + if not experiment: + return + + metadata = _fetch_trainer_metadata(trainer) + curr_epoch = metadata['curr_epoch'] + curr_step = metadata['curr_step'] + + experiment.log_metrics( + trainer.label_loss_items(trainer.tloss, prefix='train'), + step=curr_step, + epoch=curr_epoch, + ) + + if curr_epoch == 1: + _log_images(experiment, trainer.save_dir.glob('train_batch*.jpg'), curr_step) + + +def on_fit_epoch_end(trainer): + """Logs model assets at the end of each epoch.""" + experiment = comet_ml.get_global_experiment() + if not experiment: + return + + metadata = _fetch_trainer_metadata(trainer) + curr_epoch = metadata['curr_epoch'] + curr_step = metadata['curr_step'] + save_assets = metadata['save_assets'] + + 
experiment.log_metrics(trainer.metrics, step=curr_step, epoch=curr_epoch) + experiment.log_metrics(trainer.lr, step=curr_step, epoch=curr_epoch) + if curr_epoch == 1: + experiment.log_metrics(model_info_for_loggers(trainer), step=curr_step, epoch=curr_epoch) + + if not save_assets: + return + + _log_model(experiment, trainer) + if _should_log_confusion_matrix(): + _log_confusion_matrix(experiment, trainer, curr_step, curr_epoch) + if _should_log_image_predictions(): + _log_image_predictions(experiment, trainer.validator, curr_step) + + +def on_train_end(trainer): + """Perform operations at the end of training.""" + experiment = comet_ml.get_global_experiment() + if not experiment: + return + + metadata = _fetch_trainer_metadata(trainer) + curr_epoch = metadata['curr_epoch'] + curr_step = metadata['curr_step'] + plots = trainer.args.plots + + _log_model(experiment, trainer) + if plots: + _log_plots(experiment, trainer) + + _log_confusion_matrix(experiment, trainer, curr_step, curr_epoch) + _log_image_predictions(experiment, trainer.validator, curr_step) + experiment.end() + + global _comet_image_prediction_count + _comet_image_prediction_count = 0 + + +callbacks = { + 'on_pretrain_routine_start': on_pretrain_routine_start, + 'on_train_epoch_end': on_train_epoch_end, + 'on_fit_epoch_end': on_fit_epoch_end, + 'on_train_end': on_train_end} if comet_ml else {} diff --git a/ultralytics/yolo/utils/callbacks/dvc.py b/ultralytics/yolo/utils/callbacks/dvc.py new file mode 100644 index 0000000..138100c --- /dev/null +++ b/ultralytics/yolo/utils/callbacks/dvc.py @@ -0,0 +1,136 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license +import os + +import pkg_resources as pkg + +from ultralytics.yolo.utils import LOGGER, TESTS_RUNNING +from ultralytics.yolo.utils.torch_utils import model_info_for_loggers + +try: + from importlib.metadata import version + + import dvclive + + assert not TESTS_RUNNING # do not log pytest + + ver = version('dvclive') + if pkg.parse_version(ver) < pkg.parse_version('2.11.0'): + LOGGER.debug(f'DVCLive is detected but version {ver} is incompatible (>=2.11 required).') + dvclive = None # noqa: F811 +except (ImportError, AssertionError, TypeError): + dvclive = None + +# DVCLive logger instance +live = None +_processed_plots = {} + +# `on_fit_epoch_end` is called on final validation (probably need to be fixed) +# for now this is the way we distinguish final evaluation of the best model vs +# last epoch validation +_training_epoch = False + + +def _logger_disabled(): + return os.getenv('ULTRALYTICS_DVC_DISABLED', 'false').lower() == 'true' + + +def _log_images(image_path, prefix=''): + if live: + live.log_image(os.path.join(prefix, image_path.name), image_path) + + +def _log_plots(plots, prefix=''): + for name, params in plots.items(): + timestamp = params['timestamp'] + if _processed_plots.get(name) != timestamp: + _log_images(name, prefix) + _processed_plots[name] = timestamp + + +def _log_confusion_matrix(validator): + targets = [] + preds = [] + matrix = validator.confusion_matrix.matrix + names = list(validator.names.values()) + if validator.confusion_matrix.task == 'detect': + names += ['background'] + + for ti, pred in enumerate(matrix.T.astype(int)): + for pi, num in enumerate(pred): + targets.extend([names[ti]] * num) + preds.extend([names[pi]] * num) + + live.log_sklearn_plot('confusion_matrix', targets, preds, name='cf.json', normalized=True) + + +def on_pretrain_routine_start(trainer): + try: + global live + if not _logger_disabled(): + live = dvclive.Live(save_dvc_exp=True, 
cache_images=True) + LOGGER.info( + 'DVCLive is detected and auto logging is enabled (can be disabled with `ULTRALYTICS_DVC_DISABLED=true`).' + ) + else: + LOGGER.debug('DVCLive is detected and auto logging is disabled via `ULTRALYTICS_DVC_DISABLED`.') + live = None + except Exception as e: + LOGGER.warning(f'WARNING ⚠️ DVCLive installed but not initialized correctly, not logging this run. {e}') + + +def on_pretrain_routine_end(trainer): + _log_plots(trainer.plots, 'train') + + +def on_train_start(trainer): + if live: + live.log_params(trainer.args) + + +def on_train_epoch_start(trainer): + global _training_epoch + _training_epoch = True + + +def on_fit_epoch_end(trainer): + global _training_epoch + if live and _training_epoch: + all_metrics = {**trainer.label_loss_items(trainer.tloss, prefix='train'), **trainer.metrics, **trainer.lr} + for metric, value in all_metrics.items(): + live.log_metric(metric, value) + + if trainer.epoch == 0: + for metric, value in model_info_for_loggers(trainer).items(): + live.log_metric(metric, value, plot=False) + + _log_plots(trainer.plots, 'train') + _log_plots(trainer.validator.plots, 'val') + + live.next_step() + _training_epoch = False + + +def on_train_end(trainer): + if live: + # At the end log the best metrics. It runs validator on the best model internally. + all_metrics = {**trainer.label_loss_items(trainer.tloss, prefix='train'), **trainer.metrics, **trainer.lr} + for metric, value in all_metrics.items(): + live.log_metric(metric, value, plot=False) + + _log_plots(trainer.plots, 'eval') + _log_plots(trainer.validator.plots, 'eval') + _log_confusion_matrix(trainer.validator) + + if trainer.best.exists(): + live.log_artifact(trainer.best, copy=True) + + live.end() + + +callbacks = { + 'on_pretrain_routine_start': on_pretrain_routine_start, + 'on_pretrain_routine_end': on_pretrain_routine_end, + 'on_train_start': on_train_start, + 'on_train_epoch_start': on_train_epoch_start, + 'on_fit_epoch_end': on_fit_epoch_end, + 'on_train_end': on_train_end} if dvclive else {} diff --git a/ultralytics/yolo/utils/callbacks/hub.py b/ultralytics/yolo/utils/callbacks/hub.py new file mode 100644 index 0000000..e3b3427 --- /dev/null +++ b/ultralytics/yolo/utils/callbacks/hub.py @@ -0,0 +1,87 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +import json +from time import time + +from ultralytics.hub.utils import PREFIX, events +from ultralytics.yolo.utils import LOGGER +from ultralytics.yolo.utils.torch_utils import model_info_for_loggers + + +def on_pretrain_routine_end(trainer): + """Logs info before starting timer for upload rate limit.""" + session = getattr(trainer, 'hub_session', None) + if session: + # Start timer for upload rate limit + LOGGER.info(f'{PREFIX}View model at https://hub.ultralytics.com/models/{session.model_id} 🚀') + session.timers = {'metrics': time(), 'ckpt': time()} # start timer on session.rate_limit + + +def on_fit_epoch_end(trainer): + """Uploads training progress metrics at the end of each epoch.""" + session = getattr(trainer, 'hub_session', None) + if session: + # Upload metrics after val end + all_plots = {**trainer.label_loss_items(trainer.tloss, prefix='train'), **trainer.metrics} + if trainer.epoch == 0: + all_plots = {**all_plots, **model_info_for_loggers(trainer)} + session.metrics_queue[trainer.epoch] = json.dumps(all_plots) + if time() - session.timers['metrics'] > session.rate_limits['metrics']: + session.upload_metrics() + session.timers['metrics'] = time() # reset timer + session.metrics_queue = {} # reset queue + + +def 
on_model_save(trainer): + """Saves checkpoints to Ultralytics HUB with rate limiting.""" + session = getattr(trainer, 'hub_session', None) + if session: + # Upload checkpoints with rate limiting + is_best = trainer.best_fitness == trainer.fitness + if time() - session.timers['ckpt'] > session.rate_limits['ckpt']: + LOGGER.info(f'{PREFIX}Uploading checkpoint https://hub.ultralytics.com/models/{session.model_id}') + session.upload_model(trainer.epoch, trainer.last, is_best) + session.timers['ckpt'] = time() # reset timer + + +def on_train_end(trainer): + """Upload final model and metrics to Ultralytics HUB at the end of training.""" + session = getattr(trainer, 'hub_session', None) + if session: + # Upload final model and metrics with exponential standoff + LOGGER.info(f'{PREFIX}Syncing final model...') + session.upload_model(trainer.epoch, trainer.best, map=trainer.metrics.get('metrics/mAP50-95(B)', 0), final=True) + session.alive = False # stop heartbeats + LOGGER.info(f'{PREFIX}Done ✅\n' + f'{PREFIX}View model at https://hub.ultralytics.com/models/{session.model_id} 🚀') + + +def on_train_start(trainer): + """Run events on train start.""" + events(trainer.args) + + +def on_val_start(validator): + """Runs events on validation start.""" + events(validator.args) + + +def on_predict_start(predictor): + """Run events on predict start.""" + events(predictor.args) + + +def on_export_start(exporter): + """Run events on export start.""" + events(exporter.args) + + +callbacks = { + 'on_pretrain_routine_end': on_pretrain_routine_end, + 'on_fit_epoch_end': on_fit_epoch_end, + 'on_model_save': on_model_save, + 'on_train_end': on_train_end, + 'on_train_start': on_train_start, + 'on_val_start': on_val_start, + 'on_predict_start': on_predict_start, + 'on_export_start': on_export_start} diff --git a/ultralytics/yolo/utils/callbacks/mlflow.py b/ultralytics/yolo/utils/callbacks/mlflow.py new file mode 100644 index 0000000..6c4b798 --- /dev/null +++ b/ultralytics/yolo/utils/callbacks/mlflow.py @@ -0,0 +1,71 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +import os +import re +from pathlib import Path + +from ultralytics.yolo.utils import LOGGER, TESTS_RUNNING, colorstr + +try: + import mlflow + + assert not TESTS_RUNNING # do not log pytest + assert hasattr(mlflow, '__version__') # verify package is not directory +except (ImportError, AssertionError): + mlflow = None + + +def on_pretrain_routine_end(trainer): + """Logs training parameters to MLflow.""" + global mlflow, run, run_id, experiment_name + + if os.environ.get('MLFLOW_TRACKING_URI') is None: + mlflow = None + + if mlflow: + mlflow_location = os.environ['MLFLOW_TRACKING_URI'] # "http://192.168.xxx.xxx:5000" + mlflow.set_tracking_uri(mlflow_location) + + experiment_name = os.environ.get('MLFLOW_EXPERIMENT_NAME') or trainer.args.project or '/Shared/YOLOv8' + run_name = os.environ.get('MLFLOW_RUN') or trainer.args.name + experiment = mlflow.get_experiment_by_name(experiment_name) + if experiment is None: + mlflow.create_experiment(experiment_name) + mlflow.set_experiment(experiment_name) + + prefix = colorstr('MLFlow: ') + try: + run, active_run = mlflow, mlflow.active_run() + if not active_run: + active_run = mlflow.start_run(experiment_id=experiment.experiment_id, run_name=run_name) + run_id = active_run.info.run_id + LOGGER.info(f'{prefix}Using run_id({run_id}) at {mlflow_location}') + run.log_params(vars(trainer.model.args)) + except Exception as err: + LOGGER.error(f'{prefix}Failing init - {repr(err)}') + LOGGER.warning(f'{prefix}Continuing without 
Mlflow') + + +def on_fit_epoch_end(trainer): + """Logs training metrics to Mlflow.""" + if mlflow: + metrics_dict = {f"{re.sub('[()]', '', k)}": float(v) for k, v in trainer.metrics.items()} + run.log_metrics(metrics=metrics_dict, step=trainer.epoch) + + +def on_train_end(trainer): + """Called at end of train loop to log model artifact info.""" + if mlflow: + root_dir = Path(__file__).resolve().parents[3] + run.log_artifact(trainer.last) + run.log_artifact(trainer.best) + run.pyfunc.log_model(artifact_path=experiment_name, + code_path=[str(root_dir)], + artifacts={'model_path': str(trainer.save_dir)}, + python_model=run.pyfunc.PythonModel()) + + +callbacks = { + 'on_pretrain_routine_end': on_pretrain_routine_end, + 'on_fit_epoch_end': on_fit_epoch_end, + 'on_train_end': on_train_end} if mlflow else {} diff --git a/ultralytics/yolo/utils/callbacks/neptune.py b/ultralytics/yolo/utils/callbacks/neptune.py new file mode 100644 index 0000000..be64341 --- /dev/null +++ b/ultralytics/yolo/utils/callbacks/neptune.py @@ -0,0 +1,103 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +import matplotlib.image as mpimg +import matplotlib.pyplot as plt + +from ultralytics.yolo.utils import LOGGER, TESTS_RUNNING +from ultralytics.yolo.utils.torch_utils import model_info_for_loggers + +try: + import neptune + from neptune.types import File + + assert not TESTS_RUNNING # do not log pytest + assert hasattr(neptune, '__version__') +except (ImportError, AssertionError): + neptune = None + +run = None # NeptuneAI experiment logger instance + + +def _log_scalars(scalars, step=0): + """Log scalars to the NeptuneAI experiment logger.""" + if run: + for k, v in scalars.items(): + run[k].append(value=v, step=step) + + +def _log_images(imgs_dict, group=''): + """Log scalars to the NeptuneAI experiment logger.""" + if run: + for k, v in imgs_dict.items(): + run[f'{group}/{k}'].upload(File(v)) + + +def _log_plot(title, plot_path): + """Log plots to the NeptuneAI experiment logger.""" + """ + Log image as plot in the plot section of NeptuneAI + + arguments: + title (str) Title of the plot + plot_path (PosixPath or str) Path to the saved image file + """ + img = mpimg.imread(plot_path) + fig = plt.figure() + ax = fig.add_axes([0, 0, 1, 1], frameon=False, aspect='auto', xticks=[], yticks=[]) # no ticks + ax.imshow(img) + run[f'Plots/{title}'].upload(fig) + + +def on_pretrain_routine_start(trainer): + """Callback function called before the training routine starts.""" + try: + global run + run = neptune.init_run(project=trainer.args.project or 'YOLOv8', name=trainer.args.name, tags=['YOLOv8']) + run['Configuration/Hyperparameters'] = {k: '' if v is None else v for k, v in vars(trainer.args).items()} + except Exception as e: + LOGGER.warning(f'WARNING ⚠️ NeptuneAI installed but not initialized correctly, not logging this run. 
{e}') + + +def on_train_epoch_end(trainer): + """Callback function called at end of each training epoch.""" + _log_scalars(trainer.label_loss_items(trainer.tloss, prefix='train'), trainer.epoch + 1) + _log_scalars(trainer.lr, trainer.epoch + 1) + if trainer.epoch == 1: + _log_images({f.stem: str(f) for f in trainer.save_dir.glob('train_batch*.jpg')}, 'Mosaic') + + +def on_fit_epoch_end(trainer): + """Callback function called at end of each fit (train+val) epoch.""" + if run and trainer.epoch == 0: + run['Configuration/Model'] = model_info_for_loggers(trainer) + _log_scalars(trainer.metrics, trainer.epoch + 1) + + +def on_val_end(validator): + """Callback function called at end of each validation.""" + if run: + # Log val_labels and val_pred + _log_images({f.stem: str(f) for f in validator.save_dir.glob('val*.jpg')}, 'Validation') + + +def on_train_end(trainer): + """Callback function called at end of training.""" + if run: + # Log final results, CM matrix + PR plots + files = [ + 'results.png', 'confusion_matrix.png', 'confusion_matrix_normalized.png', + *(f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R'))] + files = [(trainer.save_dir / f) for f in files if (trainer.save_dir / f).exists()] # filter + for f in files: + _log_plot(title=f.stem, plot_path=f) + # Log the final model + run[f'weights/{trainer.args.name or trainer.args.task}/{str(trainer.best.name)}'].upload(File(str( + trainer.best))) + + +callbacks = { + 'on_pretrain_routine_start': on_pretrain_routine_start, + 'on_train_epoch_end': on_train_epoch_end, + 'on_fit_epoch_end': on_fit_epoch_end, + 'on_val_end': on_val_end, + 'on_train_end': on_train_end} if neptune else {} diff --git a/ultralytics/yolo/utils/callbacks/raytune.py b/ultralytics/yolo/utils/callbacks/raytune.py new file mode 100644 index 0000000..1f53225 --- /dev/null +++ b/ultralytics/yolo/utils/callbacks/raytune.py @@ -0,0 +1,20 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +try: + import ray + from ray import tune + from ray.air import session +except (ImportError, AssertionError): + tune = None + + +def on_fit_epoch_end(trainer): + """Sends training metrics to Ray Tune at end of each epoch.""" + if ray.tune.is_session_enabled(): + metrics = trainer.metrics + metrics['epoch'] = trainer.epoch + session.report(metrics) + + +callbacks = { + 'on_fit_epoch_end': on_fit_epoch_end, } if tune else {} diff --git a/ultralytics/yolo/utils/callbacks/tensorboard.py b/ultralytics/yolo/utils/callbacks/tensorboard.py new file mode 100644 index 0000000..a436b9c --- /dev/null +++ b/ultralytics/yolo/utils/callbacks/tensorboard.py @@ -0,0 +1,47 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +from ultralytics.yolo.utils import LOGGER, TESTS_RUNNING, colorstr + +try: + from torch.utils.tensorboard import SummaryWriter + + assert not TESTS_RUNNING # do not log pytest +except (ImportError, AssertionError): + SummaryWriter = None + +writer = None # TensorBoard SummaryWriter instance + + +def _log_scalars(scalars, step=0): + """Logs scalar values to TensorBoard.""" + if writer: + for k, v in scalars.items(): + writer.add_scalar(k, v, step) + + +def on_pretrain_routine_start(trainer): + """Initialize TensorBoard logging with SummaryWriter.""" + if SummaryWriter: + try: + global writer + writer = SummaryWriter(str(trainer.save_dir)) + prefix = colorstr('TensorBoard: ') + LOGGER.info(f"{prefix}Start with 'tensorboard --logdir {trainer.save_dir}', view at http://localhost:6006/") + except Exception as e: + LOGGER.warning(f'WARNING ⚠️ TensorBoard not initialized correctly, not logging this run. 
{e}') + + +def on_batch_end(trainer): + """Logs scalar statistics at the end of a training batch.""" + _log_scalars(trainer.label_loss_items(trainer.tloss, prefix='train'), trainer.epoch + 1) + + +def on_fit_epoch_end(trainer): + """Logs epoch metrics at end of training epoch.""" + _log_scalars(trainer.metrics, trainer.epoch + 1) + + +callbacks = { + 'on_pretrain_routine_start': on_pretrain_routine_start, + 'on_fit_epoch_end': on_fit_epoch_end, + 'on_batch_end': on_batch_end} diff --git a/ultralytics/yolo/utils/callbacks/wb.py b/ultralytics/yolo/utils/callbacks/wb.py new file mode 100644 index 0000000..4b4c29b --- /dev/null +++ b/ultralytics/yolo/utils/callbacks/wb.py @@ -0,0 +1,60 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +from ultralytics.yolo.utils import TESTS_RUNNING +from ultralytics.yolo.utils.torch_utils import model_info_for_loggers + +try: + import wandb as wb + + assert hasattr(wb, '__version__') + assert not TESTS_RUNNING # do not log pytest +except (ImportError, AssertionError): + wb = None + +_processed_plots = {} + + +def _log_plots(plots, step): + for name, params in plots.items(): + timestamp = params['timestamp'] + if _processed_plots.get(name, None) != timestamp: + wb.run.log({name.stem: wb.Image(str(name))}, step=step) + _processed_plots[name] = timestamp + + +def on_pretrain_routine_start(trainer): + """Initiate and start project if module is present.""" + wb.run or wb.init(project=trainer.args.project or 'YOLOv8', name=trainer.args.name, config=vars(trainer.args)) + + +def on_fit_epoch_end(trainer): + """Logs training metrics and model information at the end of an epoch.""" + wb.run.log(trainer.metrics, step=trainer.epoch + 1) + _log_plots(trainer.plots, step=trainer.epoch + 1) + _log_plots(trainer.validator.plots, step=trainer.epoch + 1) + if trainer.epoch == 0: + wb.run.log(model_info_for_loggers(trainer), step=trainer.epoch + 1) + + +def on_train_epoch_end(trainer): + """Log metrics and save images at the end of each training epoch.""" + wb.run.log(trainer.label_loss_items(trainer.tloss, prefix='train'), step=trainer.epoch + 1) + wb.run.log(trainer.lr, step=trainer.epoch + 1) + if trainer.epoch == 1: + _log_plots(trainer.plots, step=trainer.epoch + 1) + + +def on_train_end(trainer): + """Save the best model as an artifact at end of training.""" + _log_plots(trainer.validator.plots, step=trainer.epoch + 1) + _log_plots(trainer.plots, step=trainer.epoch + 1) + art = wb.Artifact(type='model', name=f'run_{wb.run.id}_model') + if trainer.best.exists(): + art.add_file(trainer.best) + wb.run.log_artifact(art, aliases=['best']) + + +callbacks = { + 'on_pretrain_routine_start': on_pretrain_routine_start, + 'on_train_epoch_end': on_train_epoch_end, + 'on_fit_epoch_end': on_fit_epoch_end, + 'on_train_end': on_train_end} if wb else {} diff --git a/ultralytics/yolo/utils/checks.py b/ultralytics/yolo/utils/checks.py new file mode 100644 index 0000000..5908d3b --- /dev/null +++ b/ultralytics/yolo/utils/checks.py @@ -0,0 +1,460 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +import contextlib +import glob +import inspect +import math +import os +import platform +import re +import shutil +import subprocess +import time +from pathlib import Path +from typing import Optional + +import cv2 +import numpy as np +import pkg_resources as pkg +import psutil +import requests +import torch +from matplotlib import font_manager + +from ultralytics.yolo.utils import (AUTOINSTALL, LOGGER, ONLINE, ROOT, USER_CONFIG_DIR, ThreadingLocked, TryExcept, + clean_url, colorstr, downloads, emojis, is_colab, 
is_docker, is_jupyter, is_kaggle, + is_online, is_pip_package, url2file) + + +def is_ascii(s) -> bool: + """ + Check if a string is composed of only ASCII characters. + + Args: + s (str): String to be checked. + + Returns: + bool: True if the string is composed only of ASCII characters, False otherwise. + """ + # Convert list, tuple, None, etc. to string + s = str(s) + + # Check if the string is composed of only ASCII characters + return all(ord(c) < 128 for c in s) + + +def check_imgsz(imgsz, stride=32, min_dim=1, max_dim=2, floor=0): + """ + Verify image size is a multiple of the given stride in each dimension. If the image size is not a multiple of the + stride, update it to the nearest multiple of the stride that is greater than or equal to the given floor value. + + Args: + imgsz (int | cList[int]): Image size. + stride (int): Stride value. + min_dim (int): Minimum number of dimensions. + floor (int): Minimum allowed value for image size. + + Returns: + (List[int]): Updated image size. + """ + # Convert stride to integer if it is a tensor + stride = int(stride.max() if isinstance(stride, torch.Tensor) else stride) + + # Convert image size to list if it is an integer + if isinstance(imgsz, int): + imgsz = [imgsz] + elif isinstance(imgsz, (list, tuple)): + imgsz = list(imgsz) + else: + raise TypeError(f"'imgsz={imgsz}' is of invalid type {type(imgsz).__name__}. " + f"Valid imgsz types are int i.e. 'imgsz=640' or list i.e. 'imgsz=[640,640]'") + + # Apply max_dim + if len(imgsz) > max_dim: + msg = "'train' and 'val' imgsz must be an integer, while 'predict' and 'export' imgsz may be a [h, w] list " \ + "or an integer, i.e. 'yolo export imgsz=640,480' or 'yolo export imgsz=640'" + if max_dim != 1: + raise ValueError(f'imgsz={imgsz} is not a valid image size. {msg}') + LOGGER.warning(f"WARNING ⚠️ updating to 'imgsz={max(imgsz)}'. {msg}") + imgsz = [max(imgsz)] + # Make image size a multiple of the stride + sz = [max(math.ceil(x / stride) * stride, floor) for x in imgsz] + + # Print warning message if image size was updated + if sz != imgsz: + LOGGER.warning(f'WARNING ⚠️ imgsz={imgsz} must be multiple of max stride {stride}, updating to {sz}') + + # Add missing dimensions if necessary + sz = [sz[0], sz[0]] if min_dim == 2 and len(sz) == 1 else sz[0] if min_dim == 1 and len(sz) == 1 else sz + + return sz + + +def check_version(current: str = '0.0.0', + minimum: str = '0.0.0', + name: str = 'version ', + pinned: bool = False, + hard: bool = False, + verbose: bool = False) -> bool: + """ + Check current version against the required minimum version. + + Args: + current (str): Current version. + minimum (str): Required minimum version. + name (str): Name to be used in warning message. + pinned (bool): If True, versions must match exactly. If False, minimum version must be satisfied. + hard (bool): If True, raise an AssertionError if the minimum version is not met. + verbose (bool): If True, print warning message if minimum version is not met. + + Returns: + (bool): True if minimum version is met, False otherwise. 
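+
+    Example (illustrative):
+        >>> check_version(current='22.04', minimum='20.04')
+        True
+        >>> check_version(current='1.11.0', minimum='1.12.0')
+        False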
+ """ + current, minimum = (pkg.parse_version(x) for x in (current, minimum)) + result = (current == minimum) if pinned else (current >= minimum) # bool + warning_message = f'WARNING ⚠️ {name}{minimum} is required by YOLOv8, but {name}{current} is currently installed' + if hard: + assert result, emojis(warning_message) # assert min requirements met + if verbose and not result: + LOGGER.warning(warning_message) + return result + + +def check_latest_pypi_version(package_name='ultralytics'): + """ + Returns the latest version of a PyPI package without downloading or installing it. + + Parameters: + package_name (str): The name of the package to find the latest version for. + + Returns: + (str): The latest version of the package. + """ + with contextlib.suppress(Exception): + requests.packages.urllib3.disable_warnings() # Disable the InsecureRequestWarning + response = requests.get(f'https://pypi.org/pypi/{package_name}/json', timeout=3) + if response.status_code == 200: + return response.json()['info']['version'] + return None + + +def check_pip_update_available(): + """ + Checks if a new version of the ultralytics package is available on PyPI. + + Returns: + (bool): True if an update is available, False otherwise. + """ + if ONLINE and is_pip_package(): + with contextlib.suppress(Exception): + from ultralytics import __version__ + latest = check_latest_pypi_version() + if pkg.parse_version(__version__) < pkg.parse_version(latest): # update is available + LOGGER.info(f'New https://pypi.org/project/ultralytics/{latest} available 😃 ' + f"Update with 'pip install -U ultralytics'") + return True + return False + + +@ThreadingLocked() +def check_font(font='Arial.ttf'): + """ + Find font locally or download to user's configuration directory if it does not already exist. + + Args: + font (str): Path or name of font. + + Returns: + file (Path): Resolved font file path. + """ + name = Path(font).name + + # Check USER_CONFIG_DIR + file = USER_CONFIG_DIR / name + if file.exists(): + return file + + # Check system fonts + matches = [s for s in font_manager.findSystemFonts() if font in s] + if any(matches): + return matches[0] + + # Download to USER_CONFIG_DIR if missing + url = f'https://ultralytics.com/assets/{name}' + if downloads.is_url(url): + downloads.safe_download(url=url, file=file) + return file + + +def check_python(minimum: str = '3.7.0') -> bool: + """ + Check current python version against the required minimum version. + + Args: + minimum (str): Required minimum version of python. + + Returns: + None + """ + return check_version(platform.python_version(), minimum, name='Python ', hard=True) + + +@TryExcept() +def check_requirements(requirements=ROOT.parent / 'requirements.txt', exclude=(), install=True, cmds=''): + """ + Check if installed dependencies meet YOLOv8 requirements and attempt to auto-update if needed. + + Args: + requirements (Union[Path, str, List[str]]): Path to a requirements.txt file, a single package requirement as a + string, or a list of package requirements as strings. + exclude (Tuple[str]): Tuple of package names to exclude from checking. + install (bool): If True, attempt to auto-update packages that don't meet requirements. + cmds (str): Additional commands to pass to the pip install command when auto-updating. 
+ """ + prefix = colorstr('red', 'bold', 'requirements:') + check_python() # check python version + check_torchvision() # check torch-torchvision compatibility + file = None + if isinstance(requirements, Path): # requirements.txt file + file = requirements.resolve() + assert file.exists(), f'{prefix} {file} not found, check failed.' + with file.open() as f: + requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(f) if x.name not in exclude] + elif isinstance(requirements, str): + requirements = [requirements] + + s = '' # console string + n = 0 # number of packages updates + for r in requirements: + rmin = r.split('/')[-1].replace('.git', '') # replace git+https://org/repo.git -> 'repo' + try: + pkg.require(rmin) + except (pkg.VersionConflict, pkg.DistributionNotFound): # exception if requirements not met + try: # attempt to import (slower but more accurate) + import importlib + importlib.import_module(next(pkg.parse_requirements(rmin)).name) + except ImportError: + s += f'"{r}" ' + n += 1 + + if s: + if install and AUTOINSTALL: # check environment variable + pkgs = file or requirements # missing packages + LOGGER.info(f"{prefix} Ultralytics requirement{'s' * (n > 1)} {pkgs} not found, attempting AutoUpdate...") + try: + t = time.time() + assert is_online(), 'AutoUpdate skipped (offline)' + LOGGER.info(subprocess.check_output(f'pip install --no-cache {s} {cmds}', shell=True).decode()) + dt = time.time() - t + LOGGER.info( + f"{prefix} AutoUpdate success ✅ {dt:.1f}s, installed {n} package{'s' * (n > 1)}: {pkgs}\n" + f"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n") + except Exception as e: + LOGGER.warning(f'{prefix} ❌ {e}') + return False + else: + return False + + return True + + +def check_torchvision(): + """ + Checks the installed versions of PyTorch and Torchvision to ensure they're compatible. + + This function checks the installed versions of PyTorch and Torchvision, and warns if they're incompatible according + to the provided compatibility table based on https://github.com/pytorch/vision#installation. The + compatibility table is a dictionary where the keys are PyTorch versions and the values are lists of compatible + Torchvision versions. 
+ """ + + import torchvision + + # Compatibility table + compatibility_table = {'2.0': ['0.15'], '1.13': ['0.14'], '1.12': ['0.13']} + + # Extract only the major and minor versions + v_torch = '.'.join(torch.__version__.split('+')[0].split('.')[:2]) + v_torchvision = '.'.join(torchvision.__version__.split('+')[0].split('.')[:2]) + + if v_torch in compatibility_table: + compatible_versions = compatibility_table[v_torch] + if all(pkg.parse_version(v_torchvision) != pkg.parse_version(v) for v in compatible_versions): + print(f'WARNING ⚠️ torchvision=={v_torchvision} is incompatible with torch=={v_torch}.\n' + f"Run 'pip install torchvision=={compatible_versions[0]}' to fix torchvision or " + "'pip install -U torch torchvision' to update both.\n" + 'For a full compatibility table see https://github.com/pytorch/vision#installation') + + +def check_suffix(file='yolov8n.pt', suffix='.pt', msg=''): + """Check file(s) for acceptable suffix.""" + if file and suffix: + if isinstance(suffix, str): + suffix = (suffix, ) + for f in file if isinstance(file, (list, tuple)) else [file]: + s = Path(f).suffix.lower().strip() # file suffix + if len(s): + assert s in suffix, f'{msg}{f} acceptable suffix is {suffix}, not {s}' + + +def check_yolov5u_filename(file: str, verbose: bool = True): + """Replace legacy YOLOv5 filenames with updated YOLOv5u filenames.""" + if ('yolov3' in file or 'yolov5' in file) and 'u' not in file: + original_file = file + file = re.sub(r'(.*yolov5([nsmlx]))\.pt', '\\1u.pt', file) # i.e. yolov5n.pt -> yolov5nu.pt + file = re.sub(r'(.*yolov5([nsmlx])6)\.pt', '\\1u.pt', file) # i.e. yolov5n6.pt -> yolov5n6u.pt + file = re.sub(r'(.*yolov3(|-tiny|-spp))\.pt', '\\1u.pt', file) # i.e. yolov3-spp.pt -> yolov3-sppu.pt + if file != original_file and verbose: + LOGGER.info(f"PRO TIP 💡 Replace 'model={original_file}' with new 'model={file}'.\nYOLOv5 'u' models are " + f'trained with https://github.com/ultralytics/ultralytics and feature improved performance vs ' + f'standard YOLOv5 models trained with https://github.com/ultralytics/yolov5.\n') + return file + + +def check_file(file, suffix='', download=True, hard=True): + """Search/download file (if necessary) and return path.""" + check_suffix(file, suffix) # optional + file = str(file).strip() # convert to string and strip spaces + file = check_yolov5u_filename(file) # yolov5n -> yolov5nu + if not file or ('://' not in file and Path(file).exists()): # exists ('://' check required in Windows Python<3.10) + return file + elif download and file.lower().startswith(('https://', 'http://', 'rtsp://', 'rtmp://')): # download + url = file # warning: Pathlib turns :// -> :/ + file = url2file(file) # '%2F' to '/', split https://url.com/file.txt?auth + if Path(file).exists(): + LOGGER.info(f'Found {clean_url(url)} locally at {file}') # file already exists + else: + downloads.safe_download(url=url, file=file, unzip=False) + return file + else: # search + files = [] + for d in 'models', 'datasets', 'tracker/cfg', 'yolo/cfg': # search directories + files.extend(glob.glob(str(ROOT / d / '**' / file), recursive=True)) # find file + if not files and hard: + raise FileNotFoundError(f"'{file}' does not exist") + elif len(files) > 1 and hard: + raise FileNotFoundError(f"Multiple files match '{file}', specify exact path: {files}") + return files[0] if len(files) else [] # return file + + +def check_yaml(file, suffix=('.yaml', '.yml'), hard=True): + """Search/download YAML file (if necessary) and return path, checking suffix.""" + return check_file(file, suffix, 
hard=hard) + + +def check_imshow(warn=False): + """Check if environment supports image displays.""" + try: + assert not any((is_colab(), is_kaggle(), is_docker())) + cv2.imshow('test', np.zeros((1, 1, 3))) + cv2.waitKey(1) + cv2.destroyAllWindows() + cv2.waitKey(1) + return True + except Exception as e: + if warn: + LOGGER.warning(f'WARNING ⚠️ Environment does not support cv2.imshow() or PIL Image.show()\n{e}') + return False + + +def check_yolo(verbose=True, device=''): + """Return a human-readable YOLO software and hardware summary.""" + from ultralytics.yolo.utils.torch_utils import select_device + + if is_jupyter(): + if check_requirements('wandb', install=False): + os.system('pip uninstall -y wandb') # uninstall wandb: unwanted account creation prompt with infinite hang + if is_colab(): + shutil.rmtree('sample_data', ignore_errors=True) # remove colab /sample_data directory + + if verbose: + # System info + gib = 1 << 30 # bytes per GiB + ram = psutil.virtual_memory().total + total, used, free = shutil.disk_usage('/') + s = f'({os.cpu_count()} CPUs, {ram / gib:.1f} GB RAM, {(total - free) / gib:.1f}/{total / gib:.1f} GB disk)' + with contextlib.suppress(Exception): # clear display if ipython is installed + from IPython import display + display.clear_output() + else: + s = '' + + select_device(device=device, newline=False) + LOGGER.info(f'Setup complete ✅ {s}') + + +def check_amp(model): + """ + This function checks the PyTorch Automatic Mixed Precision (AMP) functionality of a YOLOv8 model. + If the checks fail, it means there are anomalies with AMP on the system that may cause NaN losses or zero-mAP + results, so AMP will be disabled during training. + + Args: + model (nn.Module): A YOLOv8 model instance. + + Returns: + (bool): Returns True if the AMP functionality works correctly with YOLOv8 model, else False. + + Raises: + AssertionError: If the AMP checks fail, indicating anomalies with the AMP functionality on the system. + """ + device = next(model.parameters()).device # get model device + if device.type in ('cpu', 'mps'): + return False # AMP only used on CUDA devices + + def amp_allclose(m, im): + """All close FP32 vs AMP results.""" + a = m(im, device=device, verbose=False)[0].boxes.data # FP32 inference + with torch.cuda.amp.autocast(True): + b = m(im, device=device, verbose=False)[0].boxes.data # AMP inference + del m + return a.shape == b.shape and torch.allclose(a, b.float(), atol=0.5) # close to 0.5 absolute tolerance + + f = ROOT / 'assets/bus.jpg' # image to check + im = f if f.exists() else 'https://ultralytics.com/images/bus.jpg' if ONLINE else np.ones((640, 640, 3)) + prefix = colorstr('AMP: ') + LOGGER.info(f'{prefix}running Automatic Mixed Precision (AMP) checks with YOLOv8n...') + warning_msg = "Setting 'amp=True'. If you experience zero-mAP or NaN losses you can disable AMP with amp=False." + try: + from ultralytics import YOLO + assert amp_allclose(YOLO('yolov8n.pt'), im) + LOGGER.info(f'{prefix}checks passed ✅') + except ConnectionError: + LOGGER.warning(f'{prefix}checks skipped ⚠️, offline and unable to download YOLOv8n. {warning_msg}') + except (AttributeError, ModuleNotFoundError): + LOGGER.warning( + f'{prefix}checks skipped ⚠️. Unable to load YOLOv8n due to possible Ultralytics package modifications. {warning_msg}' + ) + except AssertionError: + LOGGER.warning(f'{prefix}checks failed ❌. 
Anomalies were detected with AMP on your system that may lead to ' + f'NaN losses or zero-mAP results, so AMP will be disabled during training.') + return False + return True + + +def git_describe(path=ROOT): # path must be a directory + """Return human-readable git description, i.e. v5.0-5-g3e25f1e https://git-scm.com/docs/git-describe.""" + try: + assert (Path(path) / '.git').is_dir() + return subprocess.check_output(f'git -C {path} describe --tags --long --always', shell=True).decode()[:-1] + except AssertionError: + return '' + + +def print_args(args: Optional[dict] = None, show_file=True, show_func=False): + """Print function arguments (optional args dict).""" + + def strip_auth(v): + """Clean longer Ultralytics HUB URLs by stripping potential authentication information.""" + return clean_url(v) if (isinstance(v, str) and v.startswith('http') and len(v) > 100) else v + + x = inspect.currentframe().f_back # previous frame + file, _, func, _, _ = inspect.getframeinfo(x) + if args is None: # get args automatically + args, _, _, frm = inspect.getargvalues(x) + args = {k: v for k, v in frm.items() if k in args} + try: + file = Path(file).resolve().relative_to(ROOT).with_suffix('') + except ValueError: + file = Path(file).stem + s = (f'{file}: ' if show_file else '') + (f'{func}: ' if show_func else '') + LOGGER.info(colorstr(s) + ', '.join(f'{k}={strip_auth(v)}' for k, v in args.items())) diff --git a/ultralytics/yolo/utils/dist.py b/ultralytics/yolo/utils/dist.py new file mode 100644 index 0000000..6de029f --- /dev/null +++ b/ultralytics/yolo/utils/dist.py @@ -0,0 +1,67 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +import os +import re +import shutil +import socket +import sys +import tempfile +from pathlib import Path + +from . import USER_CONFIG_DIR +from .torch_utils import TORCH_1_9 + + +def find_free_network_port() -> int: + """Finds a free port on localhost. + + It is useful in single-node training when we don't want to connect to a real main node but have to set the + `MASTER_PORT` environment variable. + """ + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: + s.bind(('127.0.0.1', 0)) + return s.getsockname()[1] # port + + +def generate_ddp_file(trainer): + """Generates a DDP file and returns its file name.""" + module, name = f'{trainer.__class__.__module__}.{trainer.__class__.__name__}'.rsplit('.', 1) + + content = f'''overrides = {vars(trainer.args)} \nif __name__ == "__main__": + from {module} import {name} + from ultralytics.yolo.utils import DEFAULT_CFG_DICT + + cfg = DEFAULT_CFG_DICT.copy() + cfg.update(save_dir='') # handle the extra key 'save_dir' + trainer = {name}(cfg=cfg, overrides=overrides) + trainer.train()''' + (USER_CONFIG_DIR / 'DDP').mkdir(exist_ok=True) + with tempfile.NamedTemporaryFile(prefix='_temp_', + suffix=f'{id(trainer)}.py', + mode='w+', + encoding='utf-8', + dir=USER_CONFIG_DIR / 'DDP', + delete=False) as file: + file.write(content) + return file.name + + +def generate_ddp_command(world_size, trainer): + """Generates and returns command for distributed training.""" + import __main__ # noqa local import to avoid https://github.com/Lightning-AI/lightning/issues/15218 + if not trainer.resume: + shutil.rmtree(trainer.save_dir) # remove the save_dir + file = str(Path(sys.argv[0]).resolve()) + safe_pattern = re.compile(r'^[a-zA-Z0-9_. 
/\\-]{1,128}$') # allowed characters and maximum of 100 characters + if not (safe_pattern.match(file) and Path(file).exists() and file.endswith('.py')): # using CLI + file = generate_ddp_file(trainer) + dist_cmd = 'torch.distributed.run' if TORCH_1_9 else 'torch.distributed.launch' + port = find_free_network_port() + cmd = [sys.executable, '-m', dist_cmd, '--nproc_per_node', f'{world_size}', '--master_port', f'{port}', file] + return cmd, file + + +def ddp_cleanup(trainer, file): + """Delete temp file if created.""" + if f'{id(trainer)}.py' in file: # if temp_file suffix in file + os.remove(file) diff --git a/ultralytics/yolo/utils/downloads.py b/ultralytics/yolo/utils/downloads.py new file mode 100644 index 0000000..c131921 --- /dev/null +++ b/ultralytics/yolo/utils/downloads.py @@ -0,0 +1,271 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +import contextlib +import shutil +import subprocess +from itertools import repeat +from multiprocessing.pool import ThreadPool +from pathlib import Path +from urllib import parse, request +from zipfile import BadZipFile, ZipFile, is_zipfile + +import requests +import torch +from tqdm import tqdm + +from ultralytics.yolo.utils import LOGGER, checks, clean_url, emojis, is_online, url2file + +GITHUB_ASSET_NAMES = [f'yolov8{k}{suffix}.pt' for k in 'nsmlx' for suffix in ('', '6', '-cls', '-seg', '-pose')] + \ + [f'yolov5{k}u.pt' for k in 'nsmlx'] + \ + [f'yolov3{k}u.pt' for k in ('', '-spp', '-tiny')] + \ + [f'yolo_nas_{k}.pt' for k in 'sml'] + \ + [f'sam_{k}.pt' for k in 'bl'] + \ + [f'FastSAM-{k}.pt' for k in 'sx'] + \ + [f'rtdetr-{k}.pt' for k in 'lx'] + \ + ['mobile_sam.pt'] +GITHUB_ASSET_STEMS = [Path(k).stem for k in GITHUB_ASSET_NAMES] + + +def is_url(url, check=True): + """Check if string is URL and check if URL exists.""" + with contextlib.suppress(Exception): + url = str(url) + result = parse.urlparse(url) + assert all([result.scheme, result.netloc]) # check if is url + if check: + with request.urlopen(url) as response: + return response.getcode() == 200 # check if exists online + return True + return False + + +def unzip_file(file, path=None, exclude=('.DS_Store', '__MACOSX'), exist_ok=False): + """ + Unzips a *.zip file to the specified path, excluding files containing strings in the exclude list. + + If the zipfile does not contain a single top-level directory, the function will create a new + directory with the same name as the zipfile (without the extension) to extract its contents. + If a path is not provided, the function will use the parent directory of the zipfile as the default path. + + Args: + file (str): The path to the zipfile to be extracted. + path (str, optional): The path to extract the zipfile to. Defaults to None. + exclude (tuple, optional): A tuple of filename strings to be excluded. Defaults to ('.DS_Store', '__MACOSX'). + exist_ok (bool, optional): Whether to overwrite existing contents if they exist. Defaults to False. + + Raises: + BadZipFile: If the provided file does not exist or is not a valid zipfile. + + Returns: + (Path): The path to the directory where the zipfile was extracted. 
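+
+    Example (illustrative):
+        from ultralytics.yolo.utils.downloads import unzip_file
+
+        dir = unzip_file('path/to/coco128.zip')  # extracts next to the archive and returns the extraction directory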
+ """ + if not (Path(file).exists() and is_zipfile(file)): + raise BadZipFile(f"File '{file}' does not exist or is a bad zip file.") + if path is None: + path = Path(file).parent # default path + + # Unzip the file contents + with ZipFile(file) as zipObj: + file_list = [f for f in zipObj.namelist() if all(x not in f for x in exclude)] + top_level_dirs = {Path(f).parts[0] for f in file_list} + + if len(top_level_dirs) > 1 or not file_list[0].endswith('/'): + path = Path(path) / Path(file).stem # define new unzip directory + + # Check if destination directory already exists and contains files + extract_path = Path(path) / list(top_level_dirs)[0] + if extract_path.exists() and any(extract_path.iterdir()) and not exist_ok: + # If it exists and is not empty, return the path without unzipping + LOGGER.info(f'Skipping {file} unzip (already unzipped)') + return path + + for f in file_list: + zipObj.extract(f, path=path) + + return path # return unzip dir + + +def check_disk_space(url='https://ultralytics.com/assets/coco128.zip', sf=1.5, hard=True): + """ + Check if there is sufficient disk space to download and store a file. + + Args: + url (str, optional): The URL to the file. Defaults to 'https://ultralytics.com/assets/coco128.zip'. + sf (float, optional): Safety factor, the multiplier for the required free space. Defaults to 2.0. + hard (bool, optional): Whether to throw an error or not on insufficient disk space. Defaults to True. + + Returns: + (bool): True if there is sufficient disk space, False otherwise. + """ + with contextlib.suppress(Exception): + gib = 1 << 30 # bytes per GiB + data = int(requests.head(url).headers['Content-Length']) / gib # file size (GB) + total, used, free = (x / gib for x in shutil.disk_usage('/')) # bytes + if data * sf < free: + return True # sufficient space + + # Insufficient space + text = (f'WARNING ⚠️ Insufficient free disk space {free:.1f} GB < {data * sf:.3f} GB required, ' + f'Please free {data * sf - free:.1f} GB additional disk space and try again.') + if hard: + raise MemoryError(text) + else: + LOGGER.warning(text) + return False + + # Pass if error + return True + + +def safe_download(url, + file=None, + dir=None, + unzip=True, + delete=False, + curl=False, + retry=3, + min_bytes=1E0, + progress=True): + """ + Downloads files from a URL, with options for retrying, unzipping, and deleting the downloaded file. + + Args: + url (str): The URL of the file to be downloaded. + file (str, optional): The filename of the downloaded file. + If not provided, the file will be saved with the same name as the URL. + dir (str, optional): The directory to save the downloaded file. + If not provided, the file will be saved in the current working directory. + unzip (bool, optional): Whether to unzip the downloaded file. Default: True. + delete (bool, optional): Whether to delete the downloaded file after unzipping. Default: False. + curl (bool, optional): Whether to use curl command line tool for downloading. Default: False. + retry (int, optional): The number of times to retry the download in case of failure. Default: 3. + min_bytes (float, optional): The minimum number of bytes that the downloaded file should have, to be considered + a successful download. Default: 1E0. + progress (bool, optional): Whether to display a progress bar during the download. Default: True. 
+ """ + f = dir / url2file(url) if dir else Path(file) # URL converted to filename + if '://' not in str(url) and Path(url).is_file(): # URL exists ('://' check required in Windows Python<3.10) + f = Path(url) # filename + elif not f.is_file(): # URL and file do not exist + assert dir or file, 'dir or file required for download' + f = dir / url2file(url) if dir else Path(file) + desc = f'Downloading {clean_url(url)} to {f}' + LOGGER.info(f'{desc}...') + f.parent.mkdir(parents=True, exist_ok=True) # make directory if missing + check_disk_space(url) + for i in range(retry + 1): + try: + if curl or i > 0: # curl download with retry, continue + s = 'sS' * (not progress) # silent + r = subprocess.run(['curl', '-#', f'-{s}L', url, '-o', f, '--retry', '3', '-C', '-']).returncode + assert r == 0, f'Curl return value {r}' + else: # urllib download + method = 'torch' + if method == 'torch': + torch.hub.download_url_to_file(url, f, progress=progress) + else: + from ultralytics.yolo.utils import TQDM_BAR_FORMAT + with request.urlopen(url) as response, tqdm(total=int(response.getheader('Content-Length', 0)), + desc=desc, + disable=not progress, + unit='B', + unit_scale=True, + unit_divisor=1024, + bar_format=TQDM_BAR_FORMAT) as pbar: + with open(f, 'wb') as f_opened: + for data in response: + f_opened.write(data) + pbar.update(len(data)) + + if f.exists(): + if f.stat().st_size > min_bytes: + break # success + f.unlink() # remove partial downloads + except Exception as e: + if i == 0 and not is_online(): + raise ConnectionError(emojis(f'❌ Download failure for {url}. Environment is not online.')) from e + elif i >= retry: + raise ConnectionError(emojis(f'❌ Download failure for {url}. Retry limit reached.')) from e + LOGGER.warning(f'⚠️ Download failure, retrying {i + 1}/{retry} {url}...') + + if unzip and f.exists() and f.suffix in ('', '.zip', '.tar', '.gz'): + unzip_dir = dir or f.parent # unzip to dir if provided else unzip in place + LOGGER.info(f'Unzipping {f} to {unzip_dir.absolute()}...') + if is_zipfile(f): + unzip_dir = unzip_file(file=f, path=unzip_dir) # unzip + elif f.suffix == '.tar': + subprocess.run(['tar', 'xf', f, '--directory', unzip_dir], check=True) # unzip + elif f.suffix == '.gz': + subprocess.run(['tar', 'xfz', f, '--directory', unzip_dir], check=True) # unzip + if delete: + f.unlink() # remove zip + return unzip_dir + + +def get_github_assets(repo='ultralytics/assets', version='latest'): + """Return GitHub repo tag and assets (i.e. ['yolov8n.pt', 'yolov8s.pt', ...]).""" + if version != 'latest': + version = f'tags/{version}' # i.e. tags/v6.2 + response = requests.get(f'https://api.github.com/repos/{repo}/releases/{version}').json() # github api + return response['tag_name'], [x['name'] for x in response['assets']] # tag, assets + + +def attempt_download_asset(file, repo='ultralytics/assets', release='v0.0.0'): + """Attempt file download from GitHub release assets if not found locally. release = 'latest', 'v6.2', etc.""" + from ultralytics.yolo.utils import SETTINGS # scoped for circular import + + # YOLOv3/5u updates + file = str(file) + file = checks.check_yolov5u_filename(file) + file = Path(file.strip().replace("'", '')) + if file.exists(): + return str(file) + elif (SETTINGS['weights_dir'] / file).exists(): + return str(SETTINGS['weights_dir'] / file) + else: + # URL specified + name = Path(parse.unquote(str(file))).name # decode '%2F' to '/' etc. 
+ if str(file).startswith(('http:/', 'https:/')): # download + url = str(file).replace(':/', '://') # Pathlib turns :// -> :/ + file = url2file(name) # parse authentication https://url.com/file.txt?auth... + if Path(file).is_file(): + LOGGER.info(f'Found {clean_url(url)} locally at {file}') # file already exists + else: + safe_download(url=url, file=file, min_bytes=1E5) + return file + + # GitHub assets + assets = GITHUB_ASSET_NAMES + try: + tag, assets = get_github_assets(repo, release) + except Exception: + try: + tag, assets = get_github_assets(repo) # latest release + except Exception: + try: + tag = subprocess.check_output(['git', 'tag']).decode().split()[-1] + except Exception: + tag = release + + file.parent.mkdir(parents=True, exist_ok=True) # make parent dir (if required) + if name in assets: + safe_download(url=f'https://github.com/{repo}/releases/download/{tag}/{name}', file=file, min_bytes=1E5) + + return str(file) + + +def download(url, dir=Path.cwd(), unzip=True, delete=False, curl=False, threads=1, retry=3): + """Downloads and unzips files concurrently if threads > 1, else sequentially.""" + dir = Path(dir) + dir.mkdir(parents=True, exist_ok=True) # make directory + if threads > 1: + with ThreadPool(threads) as pool: + pool.map( + lambda x: safe_download( + url=x[0], dir=x[1], unzip=unzip, delete=delete, curl=curl, retry=retry, progress=threads <= 1), + zip(url, repeat(dir))) + pool.close() + pool.join() + else: + for u in [url] if isinstance(url, (str, Path)) else url: + safe_download(url=u, dir=dir, unzip=unzip, delete=delete, curl=curl, retry=retry) diff --git a/ultralytics/yolo/utils/errors.py b/ultralytics/yolo/utils/errors.py new file mode 100644 index 0000000..7163d4d --- /dev/null +++ b/ultralytics/yolo/utils/errors.py @@ -0,0 +1,10 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +from ultralytics.yolo.utils import emojis + + +class HUBModelError(Exception): + + def __init__(self, message='Model not found. Please check model URL and try again.'): + """Create an exception for when a model is not found.""" + super().__init__(emojis(message)) diff --git a/ultralytics/yolo/utils/files.py b/ultralytics/yolo/utils/files.py new file mode 100644 index 0000000..6359947 --- /dev/null +++ b/ultralytics/yolo/utils/files.py @@ -0,0 +1,100 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +import contextlib +import glob +import os +import shutil +from datetime import datetime +from pathlib import Path + + +class WorkingDirectory(contextlib.ContextDecorator): + """Usage: @WorkingDirectory(dir) decorator or 'with WorkingDirectory(dir):' context manager.""" + + def __init__(self, new_dir): + """Sets the working directory to 'new_dir' upon instantiation.""" + self.dir = new_dir # new dir + self.cwd = Path.cwd().resolve() # current dir + + def __enter__(self): + """Changes the current directory to the specified directory.""" + os.chdir(self.dir) + + def __exit__(self, exc_type, exc_val, exc_tb): + """Restore the current working directory on context exit.""" + os.chdir(self.cwd) + + +def increment_path(path, exist_ok=False, sep='', mkdir=False): + """ + Increments a file or directory path, i.e. runs/exp --> runs/exp{sep}2, runs/exp{sep}3, ... etc. + + If the path exists and exist_ok is not set to True, the path will be incremented by appending a number and sep to + the end of the path. If the path is a file, the file extension will be preserved. If the path is a directory, the + number will be appended directly to the end of the path. 
If mkdir is set to True, the path will be created as a + directory if it does not already exist. + + Args: + path (str, pathlib.Path): Path to increment. + exist_ok (bool, optional): If True, the path will not be incremented and returned as-is. Defaults to False. + sep (str, optional): Separator to use between the path and the incrementation number. Defaults to ''. + mkdir (bool, optional): Create a directory if it does not exist. Defaults to False. + + Returns: + (pathlib.Path): Incremented path. + """ + path = Path(path) # os-agnostic + if path.exists() and not exist_ok: + path, suffix = (path.with_suffix(''), path.suffix) if path.is_file() else (path, '') + + # Method 1 + for n in range(2, 9999): + p = f'{path}{sep}{n}{suffix}' # increment path + if not os.path.exists(p): # + break + path = Path(p) + + if mkdir: + path.mkdir(parents=True, exist_ok=True) # make directory + + return path + + +def file_age(path=__file__): + """Return days since last file update.""" + dt = (datetime.now() - datetime.fromtimestamp(Path(path).stat().st_mtime)) # delta + return dt.days # + dt.seconds / 86400 # fractional days + + +def file_date(path=__file__): + """Return human-readable file modification date, i.e. '2021-3-26'.""" + t = datetime.fromtimestamp(Path(path).stat().st_mtime) + return f'{t.year}-{t.month}-{t.day}' + + +def file_size(path): + """Return file/dir size (MB).""" + if isinstance(path, (str, Path)): + mb = 1 << 20 # bytes to MiB (1024 ** 2) + path = Path(path) + if path.is_file(): + return path.stat().st_size / mb + elif path.is_dir(): + return sum(f.stat().st_size for f in path.glob('**/*') if f.is_file()) / mb + return 0.0 + + +def get_latest_run(search_dir='.'): + """Return path to most recent 'last.pt' in /runs (i.e. to --resume from).""" + last_list = glob.glob(f'{search_dir}/**/last*.pt', recursive=True) + return max(last_list, key=os.path.getctime) if last_list else '' + + +def make_dirs(dir='new_dir/'): + """Create directories.""" + dir = Path(dir) + if dir.exists(): + shutil.rmtree(dir) # delete dir + for p in dir, dir / 'labels', dir / 'images': + p.mkdir(parents=True, exist_ok=True) # make dir + return dir diff --git a/ultralytics/yolo/utils/instance.py b/ultralytics/yolo/utils/instance.py new file mode 100644 index 0000000..68f9613 --- /dev/null +++ b/ultralytics/yolo/utils/instance.py @@ -0,0 +1,392 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +from collections import abc +from itertools import repeat +from numbers import Number +from typing import List + +import numpy as np + +from .ops import ltwh2xywh, ltwh2xyxy, resample_segments, xywh2ltwh, xywh2xyxy, xyxy2ltwh, xyxy2xywh + + +def _ntuple(n): + """From PyTorch internals.""" + + def parse(x): + """Parse bounding boxes format between XYWH and LTWH.""" + return x if isinstance(x, abc.Iterable) else tuple(repeat(x, n)) + + return parse + + +to_2tuple = _ntuple(2) +to_4tuple = _ntuple(4) + +# `xyxy` means left top and right bottom +# `xywh` means center x, center y and width, height(yolo format) +# `ltwh` means left top and width, height(coco format) +_formats = ['xyxy', 'xywh', 'ltwh'] + +__all__ = 'Bboxes', # tuple or list + + +class Bboxes: + """Now only numpy is supported.""" + + def __init__(self, bboxes, format='xyxy') -> None: + assert format in _formats, f'Invalid bounding box format: {format}, format must be one of {_formats}' + bboxes = bboxes[None, :] if bboxes.ndim == 1 else bboxes + assert bboxes.ndim == 2 + assert bboxes.shape[1] == 4 + self.bboxes = bboxes + self.format = format + # self.normalized = normalized 
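+
+    # Usage sketch (illustrative values): Bboxes(np.array([[25., 30., 75., 90.]]), format='xyxy').areas()
+    # returns array([3000.]) since (75 - 25) * (90 - 30) = 3000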
+ + # def convert(self, format): + # assert format in _formats + # if self.format == format: + # bboxes = self.bboxes + # elif self.format == "xyxy": + # if format == "xywh": + # bboxes = xyxy2xywh(self.bboxes) + # else: + # bboxes = xyxy2ltwh(self.bboxes) + # elif self.format == "xywh": + # if format == "xyxy": + # bboxes = xywh2xyxy(self.bboxes) + # else: + # bboxes = xywh2ltwh(self.bboxes) + # else: + # if format == "xyxy": + # bboxes = ltwh2xyxy(self.bboxes) + # else: + # bboxes = ltwh2xywh(self.bboxes) + # + # return Bboxes(bboxes, format) + + def convert(self, format): + """Converts bounding box format from one type to another.""" + assert format in _formats, f'Invalid bounding box format: {format}, format must be one of {_formats}' + if self.format == format: + return + elif self.format == 'xyxy': + bboxes = xyxy2xywh(self.bboxes) if format == 'xywh' else xyxy2ltwh(self.bboxes) + elif self.format == 'xywh': + bboxes = xywh2xyxy(self.bboxes) if format == 'xyxy' else xywh2ltwh(self.bboxes) + else: + bboxes = ltwh2xyxy(self.bboxes) if format == 'xyxy' else ltwh2xywh(self.bboxes) + self.bboxes = bboxes + self.format = format + + def areas(self): + """Return box areas.""" + self.convert('xyxy') + return (self.bboxes[:, 2] - self.bboxes[:, 0]) * (self.bboxes[:, 3] - self.bboxes[:, 1]) + + # def denormalize(self, w, h): + # if not self.normalized: + # return + # assert (self.bboxes <= 1.0).all() + # self.bboxes[:, 0::2] *= w + # self.bboxes[:, 1::2] *= h + # self.normalized = False + # + # def normalize(self, w, h): + # if self.normalized: + # return + # assert (self.bboxes > 1.0).any() + # self.bboxes[:, 0::2] /= w + # self.bboxes[:, 1::2] /= h + # self.normalized = True + + def mul(self, scale): + """ + Args: + scale (tuple | list | int): the scale for four coords. + """ + if isinstance(scale, Number): + scale = to_4tuple(scale) + assert isinstance(scale, (tuple, list)) + assert len(scale) == 4 + self.bboxes[:, 0] *= scale[0] + self.bboxes[:, 1] *= scale[1] + self.bboxes[:, 2] *= scale[2] + self.bboxes[:, 3] *= scale[3] + + def add(self, offset): + """ + Args: + offset (tuple | list | int): the offset for four coords. + """ + if isinstance(offset, Number): + offset = to_4tuple(offset) + assert isinstance(offset, (tuple, list)) + assert len(offset) == 4 + self.bboxes[:, 0] += offset[0] + self.bboxes[:, 1] += offset[1] + self.bboxes[:, 2] += offset[2] + self.bboxes[:, 3] += offset[3] + + def __len__(self): + """Return the number of boxes.""" + return len(self.bboxes) + + @classmethod + def concatenate(cls, boxes_list: List['Bboxes'], axis=0) -> 'Bboxes': + """ + Concatenate a list of Bboxes objects into a single Bboxes object. + + Args: + boxes_list (List[Bboxes]): A list of Bboxes objects to concatenate. + axis (int, optional): The axis along which to concatenate the bounding boxes. + Defaults to 0. + + Returns: + Bboxes: A new Bboxes object containing the concatenated bounding boxes. + + Note: + The input should be a list or tuple of Bboxes objects. + """ + assert isinstance(boxes_list, (list, tuple)) + if not boxes_list: + return cls(np.empty(0)) + assert all(isinstance(box, Bboxes) for box in boxes_list) + + if len(boxes_list) == 1: + return boxes_list[0] + return cls(np.concatenate([b.bboxes for b in boxes_list], axis=axis)) + + def __getitem__(self, index) -> 'Bboxes': + """ + Retrieve a specific bounding box or a set of bounding boxes using indexing. + + Args: + index (int, slice, or np.ndarray): The index, slice, or boolean array to select + the desired bounding boxes. 
+ + Returns: + Bboxes: A new Bboxes object containing the selected bounding boxes. + + Raises: + AssertionError: If the indexed bounding boxes do not form a 2-dimensional matrix. + + Note: + When using boolean indexing, make sure to provide a boolean array with the same + length as the number of bounding boxes. + """ + if isinstance(index, int): + return Bboxes(self.bboxes[index].view(1, -1)) + b = self.bboxes[index] + assert b.ndim == 2, f'Indexing on Bboxes with {index} failed to return a matrix!' + return Bboxes(b) + + +class Instances: + + def __init__(self, bboxes, segments=None, keypoints=None, bbox_format='xywh', normalized=True) -> None: + """ + Args: + bboxes (ndarray): bboxes with shape [N, 4]. + segments (list | ndarray): segments. + keypoints (ndarray): keypoints(x, y, visible) with shape [N, 17, 3]. + """ + if segments is None: + segments = [] + self._bboxes = Bboxes(bboxes=bboxes, format=bbox_format) + self.keypoints = keypoints + self.normalized = normalized + + if len(segments) > 0: + # list[np.array(1000, 2)] * num_samples + segments = resample_segments(segments) + # (N, 1000, 2) + segments = np.stack(segments, axis=0) + else: + segments = np.zeros((0, 1000, 2), dtype=np.float32) + self.segments = segments + + def convert_bbox(self, format): + """Convert bounding box format.""" + self._bboxes.convert(format=format) + + @property + def bbox_areas(self): + """Calculate the area of bounding boxes.""" + return self._bboxes.areas() + + def scale(self, scale_w, scale_h, bbox_only=False): + """this might be similar with denormalize func but without normalized sign.""" + self._bboxes.mul(scale=(scale_w, scale_h, scale_w, scale_h)) + if bbox_only: + return + self.segments[..., 0] *= scale_w + self.segments[..., 1] *= scale_h + if self.keypoints is not None: + self.keypoints[..., 0] *= scale_w + self.keypoints[..., 1] *= scale_h + + def denormalize(self, w, h): + """Denormalizes boxes, segments, and keypoints from normalized coordinates.""" + if not self.normalized: + return + self._bboxes.mul(scale=(w, h, w, h)) + self.segments[..., 0] *= w + self.segments[..., 1] *= h + if self.keypoints is not None: + self.keypoints[..., 0] *= w + self.keypoints[..., 1] *= h + self.normalized = False + + def normalize(self, w, h): + """Normalize bounding boxes, segments, and keypoints to image dimensions.""" + if self.normalized: + return + self._bboxes.mul(scale=(1 / w, 1 / h, 1 / w, 1 / h)) + self.segments[..., 0] /= w + self.segments[..., 1] /= h + if self.keypoints is not None: + self.keypoints[..., 0] /= w + self.keypoints[..., 1] /= h + self.normalized = True + + def add_padding(self, padw, padh): + """Handle rect and mosaic situation.""" + assert not self.normalized, 'you should add padding with absolute coordinates.' + self._bboxes.add(offset=(padw, padh, padw, padh)) + self.segments[..., 0] += padw + self.segments[..., 1] += padh + if self.keypoints is not None: + self.keypoints[..., 0] += padw + self.keypoints[..., 1] += padh + + def __getitem__(self, index) -> 'Instances': + """ + Retrieve a specific instance or a set of instances using indexing. + + Args: + index (int, slice, or np.ndarray): The index, slice, or boolean array to select + the desired instances. + + Returns: + Instances: A new Instances object containing the selected bounding boxes, + segments, and keypoints if present. + + Note: + When using boolean indexing, make sure to provide a boolean array with the same + length as the number of instances. 
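+
+        Example:
+            A minimal sketch (illustrative; assumes `instances` is an existing Instances object):
+            ```python
+            keep = instances.bbox_areas > 100  # boolean mask over instances
+            subset = instances[keep]  # new Instances with the selected boxes, segments and keypoints
+            ```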
+ """ + segments = self.segments[index] if len(self.segments) else self.segments + keypoints = self.keypoints[index] if self.keypoints is not None else None + bboxes = self.bboxes[index] + bbox_format = self._bboxes.format + return Instances( + bboxes=bboxes, + segments=segments, + keypoints=keypoints, + bbox_format=bbox_format, + normalized=self.normalized, + ) + + def flipud(self, h): + """Flips the coordinates of bounding boxes, segments, and keypoints vertically.""" + if self._bboxes.format == 'xyxy': + y1 = self.bboxes[:, 1].copy() + y2 = self.bboxes[:, 3].copy() + self.bboxes[:, 1] = h - y2 + self.bboxes[:, 3] = h - y1 + else: + self.bboxes[:, 1] = h - self.bboxes[:, 1] + self.segments[..., 1] = h - self.segments[..., 1] + if self.keypoints is not None: + self.keypoints[..., 1] = h - self.keypoints[..., 1] + + def fliplr(self, w): + """Reverses the order of the bounding boxes and segments horizontally.""" + if self._bboxes.format == 'xyxy': + x1 = self.bboxes[:, 0].copy() + x2 = self.bboxes[:, 2].copy() + self.bboxes[:, 0] = w - x2 + self.bboxes[:, 2] = w - x1 + else: + self.bboxes[:, 0] = w - self.bboxes[:, 0] + self.segments[..., 0] = w - self.segments[..., 0] + if self.keypoints is not None: + self.keypoints[..., 0] = w - self.keypoints[..., 0] + + def clip(self, w, h): + """Clips bounding boxes, segments, and keypoints values to stay within image boundaries.""" + ori_format = self._bboxes.format + self.convert_bbox(format='xyxy') + self.bboxes[:, [0, 2]] = self.bboxes[:, [0, 2]].clip(0, w) + self.bboxes[:, [1, 3]] = self.bboxes[:, [1, 3]].clip(0, h) + if ori_format != 'xyxy': + self.convert_bbox(format=ori_format) + self.segments[..., 0] = self.segments[..., 0].clip(0, w) + self.segments[..., 1] = self.segments[..., 1].clip(0, h) + if self.keypoints is not None: + self.keypoints[..., 0] = self.keypoints[..., 0].clip(0, w) + self.keypoints[..., 1] = self.keypoints[..., 1].clip(0, h) + + def remove_zero_area_boxes(self): + """Remove zero-area boxes, i.e. after clipping some boxes may have zero width or height. This removes them.""" + good = self.bbox_areas > 0 + if not all(good): + self._bboxes = self._bboxes[good] + if len(self.segments): + self.segments = self.segments[good] + if self.keypoints is not None: + self.keypoints = self.keypoints[good] + return good + + def update(self, bboxes, segments=None, keypoints=None): + """Updates instance variables.""" + self._bboxes = Bboxes(bboxes, format=self._bboxes.format) + if segments is not None: + self.segments = segments + if keypoints is not None: + self.keypoints = keypoints + + def __len__(self): + """Return the length of the instance list.""" + return len(self.bboxes) + + @classmethod + def concatenate(cls, instances_list: List['Instances'], axis=0) -> 'Instances': + """ + Concatenates a list of Instances objects into a single Instances object. + + Args: + instances_list (List[Instances]): A list of Instances objects to concatenate. + axis (int, optional): The axis along which the arrays will be concatenated. Defaults to 0. + + Returns: + Instances: A new Instances object containing the concatenated bounding boxes, + segments, and keypoints if present. + + Note: + The `Instances` objects in the list should have the same properties, such as + the format of the bounding boxes, whether keypoints are present, and if the + coordinates are normalized. 
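+
+        Example:
+            A minimal sketch (illustrative; `a` and `b` are assumed to be compatible Instances objects with the
+            same bbox format and normalization state):
+            ```python
+            merged = Instances.concatenate([a, b], axis=0)
+            ```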
+ """ + assert isinstance(instances_list, (list, tuple)) + if not instances_list: + return cls(np.empty(0)) + assert all(isinstance(instance, Instances) for instance in instances_list) + + if len(instances_list) == 1: + return instances_list[0] + + use_keypoint = instances_list[0].keypoints is not None + bbox_format = instances_list[0]._bboxes.format + normalized = instances_list[0].normalized + + cat_boxes = np.concatenate([ins.bboxes for ins in instances_list], axis=axis) + cat_segments = np.concatenate([b.segments for b in instances_list], axis=axis) + cat_keypoints = np.concatenate([b.keypoints for b in instances_list], axis=axis) if use_keypoint else None + return cls(cat_boxes, cat_segments, cat_keypoints, bbox_format, normalized) + + @property + def bboxes(self): + """Return bounding boxes.""" + return self._bboxes.bboxes diff --git a/ultralytics/yolo/utils/loss.py b/ultralytics/yolo/utils/loss.py new file mode 100644 index 0000000..c63ea13 --- /dev/null +++ b/ultralytics/yolo/utils/loss.py @@ -0,0 +1,411 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from ultralytics.yolo.utils.metrics import OKS_SIGMA +from ultralytics.yolo.utils.ops import crop_mask, xywh2xyxy, xyxy2xywh +from ultralytics.yolo.utils.tal import TaskAlignedAssigner, dist2bbox, make_anchors + +from .metrics import bbox_iou +from .tal import bbox2dist +import numpy as np + +class VarifocalLoss(nn.Module): + """Varifocal loss by Zhang et al. https://arxiv.org/abs/2008.13367.""" + + def __init__(self): + """Initialize the VarifocalLoss class.""" + super().__init__() + + def forward(self, pred_score, gt_score, label, alpha=0.75, gamma=2.0): + """Computes varfocal loss.""" + weight = alpha * pred_score.sigmoid().pow(gamma) * (1 - label) + gt_score * label + with torch.cuda.amp.autocast(enabled=False): + loss = (F.binary_cross_entropy_with_logits(pred_score.float(), gt_score.float(), reduction='none') * + weight).mean(1).sum() + return loss + + +# Losses +class FocalLoss(nn.Module): + """Wraps focal loss around existing loss_fcn(), i.e. 
criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5).""" + + def __init__(self, ): + super().__init__() + + def forward(self, pred, label, gamma=1.5, alpha=0.25): + """Calculates and updates confusion matrix for object detection/classification tasks.""" + loss = F.binary_cross_entropy_with_logits(pred, label, reduction='none') + # p_t = torch.exp(-loss) + # loss *= self.alpha * (1.000001 - p_t) ** self.gamma # non-zero power for gradient stability + + # TF implementation https://github.com/tensorflow/addons/blob/v0.7.1/tensorflow_addons/losses/focal_loss.py + pred_prob = pred.sigmoid() # prob from logits + p_t = label * pred_prob + (1 - label) * (1 - pred_prob) + modulating_factor = (1.0 - p_t) ** gamma + loss *= modulating_factor + if alpha > 0: + alpha_factor = label * alpha + (1 - label) * (1 - alpha) + loss *= alpha_factor + return loss.mean(1).sum() + + +class BboxLoss(nn.Module): + + def __init__(self, reg_max, use_dfl=False): + """Initialize the BboxLoss module with regularization maximum and DFL settings.""" + super().__init__() + self.reg_max = reg_max + self.use_dfl = use_dfl + + def forward(self, pred_dist, pred_bboxes, anchor_points, target_bboxes, target_scores, target_scores_sum, fg_mask): + """IoU loss.""" + weight = target_scores.sum(-1)[fg_mask].unsqueeze(-1) + iou = bbox_iou(pred_bboxes[fg_mask], target_bboxes[fg_mask], xywh=False, CIoU=True) + loss_iou = ((1.0 - iou) * weight).sum() / target_scores_sum + + # DFL loss + if self.use_dfl: + target_ltrb = bbox2dist(anchor_points, target_bboxes, self.reg_max) + loss_dfl = self._df_loss(pred_dist[fg_mask].view(-1, self.reg_max + 1), target_ltrb[fg_mask]) * weight + loss_dfl = loss_dfl.sum() / target_scores_sum + else: + loss_dfl = torch.tensor(0.0).to(pred_dist.device) + + return loss_iou, loss_dfl + + @staticmethod + def _df_loss(pred_dist, target): + """Return sum of left and right DFL losses.""" + # Distribution Focal Loss (DFL) proposed in Generalized Focal Loss https://ieeexplore.ieee.org/document/9792391 + tl = target.long() # target left + tr = tl + 1 # target right + wl = tr - target # weight left + wr = 1 - wl # weight right + return (F.cross_entropy(pred_dist, tl.view(-1), reduction='none').view(tl.shape) * wl + + F.cross_entropy(pred_dist, tr.view(-1), reduction='none').view(tl.shape) * wr).mean(-1, keepdim=True) + + +class WingLoss(nn.Module): + def __init__(self, w=10, e=2): + super(WingLoss, self).__init__() + # https://arxiv.org/pdf/1711.06753v4.pdf Figure 5 + self.w = w + self.e = e + self.C = self.w - self.w * np.log(1 + self.w / self.e) + + def forward(self, x, t, sigma=1): + weight = torch.ones_like(t) + weight[torch.where(t==-1)] = 0 + diff = weight * (x - t) + abs_diff = diff.abs() + flag = (abs_diff.data < self.w).float() + y = flag * self.w * torch.log(1 + abs_diff / self.e) + (1 - flag) * (abs_diff - self.C) + return y + +class KeypointLoss(nn.Module): + def __init__(self, sigmas) -> None: + super().__init__() + self.sigmas = sigmas + self.loss_fcn = WingLoss() + + def forward(self, pred_kpts, gt_kpts, kpt_mask, area): + """Calculates keypoint loss factor and Euclidean distance loss for predicted and actual keypoints.""" + #d = (pred_kpts[..., 0] - gt_kpts[..., 0]) ** 2 + (pred_kpts[..., 1] - gt_kpts[..., 1]) ** 2 + #print(pred_kpts[..., 0].shape, gt_kpts[..., 0].shape, pred_kpts[..., 1].shape, gt_kpts[..., 1].shape) + d = self.loss_fcn(pred_kpts[..., 0], gt_kpts[..., 0]) + self.loss_fcn(pred_kpts[..., 1], gt_kpts[..., 1]) + kpt_loss_factor = (torch.sum(kpt_mask != 0) + torch.sum(kpt_mask == 0)) / 
(torch.sum(kpt_mask != 0) + 1e-9) + # e = d / (2 * (area * self.sigmas) ** 2 + 1e-9) # from formula + e = d / (2 * self.sigmas) ** 2 / (area + 1e-9) / 2 # from cocoeval + return kpt_loss_factor * ((1 - torch.exp(-e)) * kpt_mask).mean() + + +# Criterion class for computing Detection training losses +class v8DetectionLoss: + + def __init__(self, model): # model must be de-paralleled + + device = next(model.parameters()).device # get model device + h = model.args # hyperparameters + + m = model.model[-1] # Detect() module + self.bce = nn.BCEWithLogitsLoss(reduction='none') + self.hyp = h + self.stride = m.stride # model strides + self.nc = m.nc # number of classes + self.no = m.no + self.reg_max = m.reg_max + self.device = device + + self.use_dfl = m.reg_max > 1 + + self.assigner = TaskAlignedAssigner(topk=10, num_classes=self.nc, alpha=0.5, beta=6.0) + self.bbox_loss = BboxLoss(m.reg_max - 1, use_dfl=self.use_dfl).to(device) + self.proj = torch.arange(m.reg_max, dtype=torch.float, device=device) + + def preprocess(self, targets, batch_size, scale_tensor): + """Preprocesses the target counts and matches with the input batch size to output a tensor.""" + if targets.shape[0] == 0: + out = torch.zeros(batch_size, 0, 5, device=self.device) + else: + i = targets[:, 0] # image index + _, counts = i.unique(return_counts=True) + counts = counts.to(dtype=torch.int32) + out = torch.zeros(batch_size, counts.max(), 5, device=self.device) + for j in range(batch_size): + matches = i == j + n = matches.sum() + if n: + out[j, :n] = targets[matches, 1:] + out[..., 1:5] = xywh2xyxy(out[..., 1:5].mul_(scale_tensor)) + return out + + def bbox_decode(self, anchor_points, pred_dist): + """Decode predicted object bounding box coordinates from anchor points and distribution.""" + if self.use_dfl: + b, a, c = pred_dist.shape # batch, anchors, channels + pred_dist = pred_dist.view(b, a, 4, c // 4).softmax(3).matmul(self.proj.type(pred_dist.dtype)) + # pred_dist = pred_dist.view(b, a, c // 4, 4).transpose(2,3).softmax(3).matmul(self.proj.type(pred_dist.dtype)) + # pred_dist = (pred_dist.view(b, a, c // 4, 4).softmax(2) * self.proj.type(pred_dist.dtype).view(1, 1, -1, 1)).sum(2) + return dist2bbox(pred_dist, anchor_points, xywh=False) + + def __call__(self, preds, batch): + """Calculate the sum of the loss for box, cls and dfl multiplied by batch size.""" + loss = torch.zeros(3, device=self.device) # box, cls, dfl + feats = preds[1] if isinstance(preds, tuple) else preds + pred_distri, pred_scores = torch.cat([xi.view(feats[0].shape[0], self.no, -1) for xi in feats], 2).split( + (self.reg_max * 4, self.nc), 1) + + pred_scores = pred_scores.permute(0, 2, 1).contiguous() + pred_distri = pred_distri.permute(0, 2, 1).contiguous() + + dtype = pred_scores.dtype + batch_size = pred_scores.shape[0] + imgsz = torch.tensor(feats[0].shape[2:], device=self.device, dtype=dtype) * self.stride[0] # image size (h,w) + anchor_points, stride_tensor = make_anchors(feats, self.stride, 0.5) + + # targets + targets = torch.cat((batch['batch_idx'].view(-1, 1), batch['cls'].view(-1, 1), batch['bboxes']), 1) + targets = self.preprocess(targets.to(self.device), batch_size, scale_tensor=imgsz[[1, 0, 1, 0]]) + gt_labels, gt_bboxes = targets.split((1, 4), 2) # cls, xyxy + mask_gt = gt_bboxes.sum(2, keepdim=True).gt_(0) + + # pboxes + pred_bboxes = self.bbox_decode(anchor_points, pred_distri) # xyxy, (b, h*w, 4) + + _, target_bboxes, target_scores, fg_mask, _ = self.assigner( + pred_scores.detach().sigmoid(), (pred_bboxes.detach() * 
stride_tensor).type(gt_bboxes.dtype), + anchor_points * stride_tensor, gt_labels, gt_bboxes, mask_gt) + + target_scores_sum = max(target_scores.sum(), 1) + + # cls loss + # loss[1] = self.varifocal_loss(pred_scores, target_scores, target_labels) / target_scores_sum # VFL way + loss[1] = self.bce(pred_scores, target_scores.to(dtype)).sum() / target_scores_sum # BCE + + # bbox loss + if fg_mask.sum(): + target_bboxes /= stride_tensor + loss[0], loss[2] = self.bbox_loss(pred_distri, pred_bboxes, anchor_points, target_bboxes, target_scores, + target_scores_sum, fg_mask) + + loss[0] *= self.hyp.box # box gain + loss[1] *= self.hyp.cls # cls gain + loss[2] *= self.hyp.dfl # dfl gain + + return loss.sum() * batch_size, loss.detach() # loss(box, cls, dfl) + + +# Criterion class for computing training losses +class v8SegmentationLoss(v8DetectionLoss): + + def __init__(self, model): # model must be de-paralleled + super().__init__(model) + self.nm = model.model[-1].nm # number of masks + self.overlap = model.args.overlap_mask + + def __call__(self, preds, batch): + """Calculate and return the loss for the YOLO model.""" + loss = torch.zeros(4, device=self.device) # box, cls, dfl + feats, pred_masks, proto = preds if len(preds) == 3 else preds[1] + batch_size, _, mask_h, mask_w = proto.shape # batch size, number of masks, mask height, mask width + pred_distri, pred_scores = torch.cat([xi.view(feats[0].shape[0], self.no, -1) for xi in feats], 2).split( + (self.reg_max * 4, self.nc), 1) + + # b, grids, .. + pred_scores = pred_scores.permute(0, 2, 1).contiguous() + pred_distri = pred_distri.permute(0, 2, 1).contiguous() + pred_masks = pred_masks.permute(0, 2, 1).contiguous() + + dtype = pred_scores.dtype + imgsz = torch.tensor(feats[0].shape[2:], device=self.device, dtype=dtype) * self.stride[0] # image size (h,w) + anchor_points, stride_tensor = make_anchors(feats, self.stride, 0.5) + + # targets + try: + batch_idx = batch['batch_idx'].view(-1, 1) + targets = torch.cat((batch_idx, batch['cls'].view(-1, 1), batch['bboxes']), 1) + targets = self.preprocess(targets.to(self.device), batch_size, scale_tensor=imgsz[[1, 0, 1, 0]]) + gt_labels, gt_bboxes = targets.split((1, 4), 2) # cls, xyxy + mask_gt = gt_bboxes.sum(2, keepdim=True).gt_(0) + except RuntimeError as e: + raise TypeError('ERROR ❌ segment dataset incorrectly formatted or not a segment dataset.\n' + "This error can occur when incorrectly training a 'segment' model on a 'detect' dataset, " + "i.e. 
'yolo train model=yolov8n-seg.pt data=coco128.yaml'.\nVerify your dataset is a " + "correctly formatted 'segment' dataset using 'data=coco128-seg.yaml' " + 'as an example.\nSee https://docs.ultralytics.com/tasks/segment/ for help.') from e + + # pboxes + pred_bboxes = self.bbox_decode(anchor_points, pred_distri) # xyxy, (b, h*w, 4) + + _, target_bboxes, target_scores, fg_mask, target_gt_idx = self.assigner( + pred_scores.detach().sigmoid(), (pred_bboxes.detach() * stride_tensor).type(gt_bboxes.dtype), + anchor_points * stride_tensor, gt_labels, gt_bboxes, mask_gt) + + target_scores_sum = max(target_scores.sum(), 1) + + # cls loss + # loss[1] = self.varifocal_loss(pred_scores, target_scores, target_labels) / target_scores_sum # VFL way + loss[2] = self.bce(pred_scores, target_scores.to(dtype)).sum() / target_scores_sum # BCE + + if fg_mask.sum(): + # bbox loss + loss[0], loss[3] = self.bbox_loss(pred_distri, pred_bboxes, anchor_points, target_bboxes / stride_tensor, + target_scores, target_scores_sum, fg_mask) + # masks loss + masks = batch['masks'].to(self.device).float() + if tuple(masks.shape[-2:]) != (mask_h, mask_w): # downsample + masks = F.interpolate(masks[None], (mask_h, mask_w), mode='nearest')[0] + + for i in range(batch_size): + if fg_mask[i].sum(): + mask_idx = target_gt_idx[i][fg_mask[i]] + if self.overlap: + gt_mask = torch.where(masks[[i]] == (mask_idx + 1).view(-1, 1, 1), 1.0, 0.0) + else: + gt_mask = masks[batch_idx.view(-1) == i][mask_idx] + xyxyn = target_bboxes[i][fg_mask[i]] / imgsz[[1, 0, 1, 0]] + marea = xyxy2xywh(xyxyn)[:, 2:].prod(1) + mxyxy = xyxyn * torch.tensor([mask_w, mask_h, mask_w, mask_h], device=self.device) + loss[1] += self.single_mask_loss(gt_mask, pred_masks[i][fg_mask[i]], proto[i], mxyxy, marea) # seg + + # WARNING: lines below prevents Multi-GPU DDP 'unused gradient' PyTorch errors, do not remove + else: + loss[1] += (proto * 0).sum() + (pred_masks * 0).sum() # inf sums may lead to nan loss + + # WARNING: lines below prevent Multi-GPU DDP 'unused gradient' PyTorch errors, do not remove + else: + loss[1] += (proto * 0).sum() + (pred_masks * 0).sum() # inf sums may lead to nan loss + + loss[0] *= self.hyp.box # box gain + loss[1] *= self.hyp.box / batch_size # seg gain + loss[2] *= self.hyp.cls # cls gain + loss[3] *= self.hyp.dfl # dfl gain + + return loss.sum() * batch_size, loss.detach() # loss(box, cls, dfl) + + def single_mask_loss(self, gt_mask, pred, proto, xyxy, area): + """Mask loss for one image.""" + pred_mask = (pred @ proto.view(self.nm, -1)).view(-1, *proto.shape[1:]) # (n, 32) @ (32,80,80) -> (n,80,80) + loss = F.binary_cross_entropy_with_logits(pred_mask, gt_mask, reduction='none') + return (crop_mask(loss, xyxy).mean(dim=(1, 2)) / area).mean() + + +# Criterion class for computing training losses +class v8PoseLoss(v8DetectionLoss): + + def __init__(self, model): # model must be de-paralleled + super().__init__(model) + self.kpt_shape = model.model[-1].kpt_shape + self.bce_pose = nn.BCEWithLogitsLoss() + is_pose = self.kpt_shape == [17, 3] + nkpt = self.kpt_shape[0] # number of keypoints + sigmas = torch.from_numpy(OKS_SIGMA).to(self.device) if is_pose else torch.ones(nkpt, device=self.device) / nkpt + self.keypoint_loss = KeypointLoss(sigmas=sigmas) + + def __call__(self, preds, batch): + """Calculate the total loss and detach it.""" + loss = torch.zeros(5, device=self.device) # box, cls, dfl, kpt_location, kpt_visibility + feats, pred_kpts = preds if isinstance(preds[0], list) else preds[1] + pred_distri, pred_scores = 
torch.cat([xi.view(feats[0].shape[0], self.no, -1) for xi in feats], 2).split( + (self.reg_max * 4, self.nc), 1) + + # b, grids, .. + pred_scores = pred_scores.permute(0, 2, 1).contiguous() + pred_distri = pred_distri.permute(0, 2, 1).contiguous() + pred_kpts = pred_kpts.permute(0, 2, 1).contiguous() + + dtype = pred_scores.dtype + imgsz = torch.tensor(feats[0].shape[2:], device=self.device, dtype=dtype) * self.stride[0] # image size (h,w) + anchor_points, stride_tensor = make_anchors(feats, self.stride, 0.5) + + # targets + batch_size = pred_scores.shape[0] + batch_idx = batch['batch_idx'].view(-1, 1) + targets = torch.cat((batch_idx, batch['cls'].view(-1, 1), batch['bboxes']), 1) + targets = self.preprocess(targets.to(self.device), batch_size, scale_tensor=imgsz[[1, 0, 1, 0]]) + gt_labels, gt_bboxes = targets.split((1, 4), 2) # cls, xyxy + mask_gt = gt_bboxes.sum(2, keepdim=True).gt_(0) + + # pboxes + pred_bboxes = self.bbox_decode(anchor_points, pred_distri) # xyxy, (b, h*w, 4) + pred_kpts = self.kpts_decode(anchor_points, pred_kpts.view(batch_size, -1, *self.kpt_shape)) # (b, h*w, 17, 3) + + _, target_bboxes, target_scores, fg_mask, target_gt_idx = self.assigner( + pred_scores.detach().sigmoid(), (pred_bboxes.detach() * stride_tensor).type(gt_bboxes.dtype), + anchor_points * stride_tensor, gt_labels, gt_bboxes, mask_gt) + + target_scores_sum = max(target_scores.sum(), 1) + + # cls loss + # loss[1] = self.varifocal_loss(pred_scores, target_scores, target_labels) / target_scores_sum # VFL way + loss[3] = self.bce(pred_scores, target_scores.to(dtype)).sum() / target_scores_sum # BCE + + # bbox loss + if fg_mask.sum(): + target_bboxes /= stride_tensor + loss[0], loss[4] = self.bbox_loss(pred_distri, pred_bboxes, anchor_points, target_bboxes, target_scores, + target_scores_sum, fg_mask) + keypoints = batch['keypoints'].to(self.device).float().clone() + keypoints[..., 0] *= imgsz[1] + keypoints[..., 1] *= imgsz[0] + for i in range(batch_size): + if fg_mask[i].sum(): + idx = target_gt_idx[i][fg_mask[i]] + gt_kpt = keypoints[batch_idx.view(-1) == i][idx] # (n, 51) + gt_kpt[..., 0] /= stride_tensor[fg_mask[i]] + gt_kpt[..., 1] /= stride_tensor[fg_mask[i]] + area = xyxy2xywh(target_bboxes[i][fg_mask[i]])[:, 2:].prod(1, keepdim=True) + pred_kpt = pred_kpts[i][fg_mask[i]] + kpt_mask = gt_kpt[..., 2] != 0 + loss[1] += self.keypoint_loss(pred_kpt, gt_kpt, kpt_mask, area) # pose loss + # kpt_score loss + if pred_kpt.shape[-1] == 3: + loss[2] += self.bce_pose(pred_kpt[..., 2], kpt_mask.float()) # keypoint obj loss + + loss[0] *= self.hyp.box # box gain + loss[1] *= self.hyp.pose / batch_size # pose gain + loss[2] *= self.hyp.kobj / batch_size # kobj gain + loss[3] *= self.hyp.cls # cls gain + loss[4] *= self.hyp.dfl # dfl gain + + return loss.sum() * batch_size, loss.detach() # loss(box, cls, dfl) + + def kpts_decode(self, anchor_points, pred_kpts): + """Decodes predicted keypoints to image coordinates.""" + y = pred_kpts.clone() + y[..., :2] *= 2.0 + y[..., 0] += anchor_points[:, [0]] - 0.5 + y[..., 1] += anchor_points[:, [1]] - 0.5 + return y + + +class v8ClassificationLoss: + + def __call__(self, preds, batch): + """Compute the classification loss between predictions and true labels.""" + loss = torch.nn.functional.cross_entropy(preds, batch['cls'], reduction='sum') / 64 + loss_items = loss.detach() + return loss, loss_items diff --git a/ultralytics/yolo/utils/metrics.py b/ultralytics/yolo/utils/metrics.py new file mode 100644 index 0000000..cd90321 --- /dev/null +++ 
b/ultralytics/yolo/utils/metrics.py @@ -0,0 +1,977 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +""" +Model validation metrics +""" +import math +import warnings +from pathlib import Path + +import matplotlib.pyplot as plt +import numpy as np +import torch + +from ultralytics.yolo.utils import LOGGER, SimpleClass, TryExcept, plt_settings + +OKS_SIGMA = np.array([.26, .25, .25, .35, .35, .79, .79, .72, .72, .62, .62, 1.07, 1.07, .87, .87, .89, .89]) / 10.0 + + +# Boxes +def box_area(box): + """Return box area, where box shape is xyxy(4,n).""" + return (box[2] - box[0]) * (box[3] - box[1]) + + +def bbox_ioa(box1, box2, eps=1e-7): + """ + Calculate the intersection over box2 area given box1 and box2. Boxes are in x1y1x2y2 format. + + Args: + box1 (np.array): A numpy array of shape (n, 4) representing n bounding boxes. + box2 (np.array): A numpy array of shape (m, 4) representing m bounding boxes. + eps (float, optional): A small value to avoid division by zero. Defaults to 1e-7. + + Returns: + (np.array): A numpy array of shape (n, m) representing the intersection over box2 area. + """ + + # Get the coordinates of bounding boxes + b1_x1, b1_y1, b1_x2, b1_y2 = box1.T + b2_x1, b2_y1, b2_x2, b2_y2 = box2.T + + # Intersection area + inter_area = (np.minimum(b1_x2[:, None], b2_x2) - np.maximum(b1_x1[:, None], b2_x1)).clip(0) * \ + (np.minimum(b1_y2[:, None], b2_y2) - np.maximum(b1_y1[:, None], b2_y1)).clip(0) + + # box2 area + box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + eps + + # Intersection over box2 area + return inter_area / box2_area + + +def box_iou(box1, box2, eps=1e-7): + """ + Calculate intersection-over-union (IoU) of boxes. + Both sets of boxes are expected to be in (x1, y1, x2, y2) format. + Based on https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py + + Args: + box1 (torch.Tensor): A tensor of shape (N, 4) representing N bounding boxes. + box2 (torch.Tensor): A tensor of shape (M, 4) representing M bounding boxes. + eps (float, optional): A small value to avoid division by zero. Defaults to 1e-7. + + Returns: + (torch.Tensor): An NxM tensor containing the pairwise IoU values for every element in box1 and box2. + """ + + # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2) + (a1, a2), (b1, b2) = box1.unsqueeze(1).chunk(2, 2), box2.unsqueeze(0).chunk(2, 2) + inter = (torch.min(a2, b2) - torch.max(a1, b1)).clamp_(0).prod(2) + + # IoU = inter / (area1 + area2 - inter) + return inter / ((a2 - a1).prod(2) + (b2 - b1).prod(2) - inter + eps) + + +def bbox_iou(box1, box2, xywh=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7): + """ + Calculate Intersection over Union (IoU) of box1(1, 4) to box2(n, 4). + + Args: + box1 (torch.Tensor): A tensor representing a single bounding box with shape (1, 4). + box2 (torch.Tensor): A tensor representing n bounding boxes with shape (n, 4). + xywh (bool, optional): If True, input boxes are in (x, y, w, h) format. If False, input boxes are in + (x1, y1, x2, y2) format. Defaults to True. + GIoU (bool, optional): If True, calculate Generalized IoU. Defaults to False. + DIoU (bool, optional): If True, calculate Distance IoU. Defaults to False. + CIoU (bool, optional): If True, calculate Complete IoU. Defaults to False. + eps (float, optional): A small value to avoid division by zero. Defaults to 1e-7. + + Returns: + (torch.Tensor): IoU, GIoU, DIoU, or CIoU values depending on the specified flags. 
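+
+    Example:
+        A minimal sketch (illustrative values):
+        ```python
+        import torch
+
+        box1 = torch.tensor([[50., 50., 20., 20.]])  # one box in xywh
+        box2 = torch.tensor([[50., 50., 20., 20.],
+                             [60., 60., 20., 20.]])  # two boxes in xywh
+        iou = bbox_iou(box1, box2, xywh=True, CIoU=True)  # CIoU of box1 against each box in box2
+        ```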
+ """ + + # Get the coordinates of bounding boxes + if xywh: # transform from xywh to xyxy + (x1, y1, w1, h1), (x2, y2, w2, h2) = box1.chunk(4, -1), box2.chunk(4, -1) + w1_, h1_, w2_, h2_ = w1 / 2, h1 / 2, w2 / 2, h2 / 2 + b1_x1, b1_x2, b1_y1, b1_y2 = x1 - w1_, x1 + w1_, y1 - h1_, y1 + h1_ + b2_x1, b2_x2, b2_y1, b2_y2 = x2 - w2_, x2 + w2_, y2 - h2_, y2 + h2_ + else: # x1, y1, x2, y2 = box1 + b1_x1, b1_y1, b1_x2, b1_y2 = box1.chunk(4, -1) + b2_x1, b2_y1, b2_x2, b2_y2 = box2.chunk(4, -1) + w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps + w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps + + # Intersection area + inter = (b1_x2.minimum(b2_x2) - b1_x1.maximum(b2_x1)).clamp_(0) * \ + (b1_y2.minimum(b2_y2) - b1_y1.maximum(b2_y1)).clamp_(0) + + # Union Area + union = w1 * h1 + w2 * h2 - inter + eps + + # IoU + iou = inter / union + if CIoU or DIoU or GIoU: + cw = b1_x2.maximum(b2_x2) - b1_x1.minimum(b2_x1) # convex (smallest enclosing box) width + ch = b1_y2.maximum(b2_y2) - b1_y1.minimum(b2_y1) # convex height + if CIoU or DIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1 + c2 = cw ** 2 + ch ** 2 + eps # convex diagonal squared + rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 + (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4 # center dist ** 2 + if CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47 + v = (4 / math.pi ** 2) * (torch.atan(w2 / h2) - torch.atan(w1 / h1)).pow(2) + with torch.no_grad(): + alpha = v / (v - iou + (1 + eps)) + return iou - (rho2 / c2 + v * alpha) # CIoU + return iou - rho2 / c2 # DIoU + c_area = cw * ch + eps # convex area + return iou - (c_area - union) / c_area # GIoU https://arxiv.org/pdf/1902.09630.pdf + return iou # IoU + + +def mask_iou(mask1, mask2, eps=1e-7): + """ + Calculate masks IoU. + + Args: + mask1 (torch.Tensor): A tensor of shape (N, n) where N is the number of ground truth objects and n is the + product of image width and height. + mask2 (torch.Tensor): A tensor of shape (M, n) where M is the number of predicted objects and n is the + product of image width and height. + eps (float, optional): A small value to avoid division by zero. Defaults to 1e-7. + + Returns: + (torch.Tensor): A tensor of shape (N, M) representing masks IoU. + """ + intersection = torch.matmul(mask1, mask2.T).clamp_(0) + union = (mask1.sum(1)[:, None] + mask2.sum(1)[None]) - intersection # (area1 + area2) - intersection + return intersection / (union + eps) + + +def kpt_iou(kpt1, kpt2, area, sigma, eps=1e-7): + """ + Calculate Object Keypoint Similarity (OKS). + + Args: + kpt1 (torch.Tensor): A tensor of shape (N, 17, 3) representing ground truth keypoints. + kpt2 (torch.Tensor): A tensor of shape (M, 17, 3) representing predicted keypoints. + area (torch.Tensor): A tensor of shape (N,) representing areas from ground truth. + sigma (list): A list containing 17 values representing keypoint scales. + eps (float, optional): A small value to avoid division by zero. Defaults to 1e-7. + + Returns: + (torch.Tensor): A tensor of shape (N, M) representing keypoint similarities. 
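+
+    Example:
+        A minimal sketch (illustrative shapes; random values stand in for real keypoints and areas):
+        ```python
+        import torch
+
+        gt = torch.rand(3, 17, 3)  # 3 ground-truth instances
+        pred = torch.rand(5, 17, 3)  # 5 predicted instances
+        area = torch.rand(3) * 100  # ground-truth box areas
+        oks = kpt_iou(gt, pred, area, sigma=OKS_SIGMA)  # (3, 5) OKS matrix
+        ```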
+ """ + d = (kpt1[:, None, :, 0] - kpt2[..., 0]) ** 2 + (kpt1[:, None, :, 1] - kpt2[..., 1]) ** 2 # (N, M, 17) + sigma = torch.tensor(sigma, device=kpt1.device, dtype=kpt1.dtype) # (17, ) + kpt_mask = kpt1[..., 2] != 0 # (N, 17) + e = d / (2 * sigma) ** 2 / (area[:, None, None] + eps) / 2 # from cocoeval + # e = d / ((area[None, :, None] + eps) * sigma) ** 2 / 2 # from formula + return (torch.exp(-e) * kpt_mask[:, None]).sum(-1) / (kpt_mask.sum(-1)[:, None] + eps) + + +def smooth_BCE(eps=0.1): # https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441 + # return positive, negative label smoothing BCE targets + return 1.0 - 0.5 * eps, 0.5 * eps + + +class ConfusionMatrix: + """ + A class for calculating and updating a confusion matrix for object detection and classification tasks. + + Attributes: + task (str): The type of task, either 'detect' or 'classify'. + matrix (np.array): The confusion matrix, with dimensions depending on the task. + nc (int): The number of classes. + conf (float): The confidence threshold for detections. + iou_thres (float): The Intersection over Union threshold. + """ + + def __init__(self, nc, conf=0.25, iou_thres=0.45, task='detect'): + """Initialize attributes for the YOLO model.""" + self.task = task + self.matrix = np.zeros((nc + 1, nc + 1)) if self.task == 'detect' else np.zeros((nc, nc)) + self.nc = nc # number of classes + self.conf = conf + self.iou_thres = iou_thres + + def process_cls_preds(self, preds, targets): + """ + Update confusion matrix for classification task + + Args: + preds (Array[N, min(nc,5)]): Predicted class labels. + targets (Array[N, 1]): Ground truth class labels. + """ + preds, targets = torch.cat(preds)[:, 0], torch.cat(targets) + for p, t in zip(preds.cpu().numpy(), targets.cpu().numpy()): + self.matrix[p][t] += 1 + + def process_batch(self, detections, labels): + """ + Update confusion matrix for object detection task. + + Args: + detections (Array[N, 6]): Detected bounding boxes and their associated information. + Each row should contain (x1, y1, x2, y2, conf, class). + labels (Array[M, 5]): Ground truth bounding boxes and their associated class labels. + Each row should contain (class, x1, y1, x2, y2). 
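+
+        Example:
+            A minimal sketch (illustrative values; one detection matched against one label):
+            ```python
+            import torch
+
+            cm = ConfusionMatrix(nc=80)
+            detections = torch.tensor([[10., 10., 50., 50., 0.9, 3.]])  # x1, y1, x2, y2, conf, class
+            labels = torch.tensor([[3., 12., 12., 48., 48.]])  # class, x1, y1, x2, y2
+            cm.process_batch(detections, labels)
+            ```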
+ """ + if detections is None: + gt_classes = labels.int() + for gc in gt_classes: + self.matrix[self.nc, gc] += 1 # background FN + return + + detections = detections[detections[:, 4] > self.conf] + gt_classes = labels[:, 0].int() + detection_classes = detections[:, 5].int() + iou = box_iou(labels[:, 1:], detections[:, :4]) + + x = torch.where(iou > self.iou_thres) + if x[0].shape[0]: + matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy() + if x[0].shape[0] > 1: + matches = matches[matches[:, 2].argsort()[::-1]] + matches = matches[np.unique(matches[:, 1], return_index=True)[1]] + matches = matches[matches[:, 2].argsort()[::-1]] + matches = matches[np.unique(matches[:, 0], return_index=True)[1]] + else: + matches = np.zeros((0, 3)) + + n = matches.shape[0] > 0 + m0, m1, _ = matches.transpose().astype(int) + for i, gc in enumerate(gt_classes): + j = m0 == i + if n and sum(j) == 1: + self.matrix[detection_classes[m1[j]], gc] += 1 # correct + else: + self.matrix[self.nc, gc] += 1 # true background + + if n: + for i, dc in enumerate(detection_classes): + if not any(m1 == i): + self.matrix[dc, self.nc] += 1 # predicted background + + def matrix(self): + """Returns the confusion matrix.""" + return self.matrix + + def tp_fp(self): + """Returns true positives and false positives.""" + tp = self.matrix.diagonal() # true positives + fp = self.matrix.sum(1) - tp # false positives + # fn = self.matrix.sum(0) - tp # false negatives (missed detections) + return (tp[:-1], fp[:-1]) if self.task == 'detect' else (tp, fp) # remove background class if task=detect + + @TryExcept('WARNING ⚠️ ConfusionMatrix plot failure') + @plt_settings() + def plot(self, normalize=True, save_dir='', names=(), on_plot=None): + """ + Plot the confusion matrix using seaborn and save it to a file. + + Args: + normalize (bool): Whether to normalize the confusion matrix. + save_dir (str): Directory where the plot will be saved. + names (tuple): Names of classes, used as labels on the plot. + on_plot (func): An optional callback to pass plots path and data when they are rendered. + """ + import seaborn as sn + + array = self.matrix / ((self.matrix.sum(0).reshape(1, -1) + 1E-9) if normalize else 1) # normalize columns + array[array < 0.005] = np.nan # don't annotate (would appear as 0.00) + + fig, ax = plt.subplots(1, 1, figsize=(12, 9), tight_layout=True) + nc, nn = self.nc, len(names) # number of classes, names + sn.set(font_scale=1.0 if nc < 50 else 0.8) # for label size + labels = (0 < nn < 99) and (nn == nc) # apply names to ticklabels + ticklabels = (list(names) + ['background']) if labels else 'auto' + with warnings.catch_warnings(): + warnings.simplefilter('ignore') # suppress empty matrix RuntimeWarning: All-NaN slice encountered + sn.heatmap(array, + ax=ax, + annot=nc < 30, + annot_kws={ + 'size': 8}, + cmap='Blues', + fmt='.2f' if normalize else '.0f', + square=True, + vmin=0.0, + xticklabels=ticklabels, + yticklabels=ticklabels).set_facecolor((1, 1, 1)) + title = 'Confusion Matrix' + ' Normalized' * normalize + ax.set_xlabel('True') + ax.set_ylabel('Predicted') + ax.set_title(title) + plot_fname = Path(save_dir) / f'{title.lower().replace(" ", "_")}.png' + fig.savefig(plot_fname, dpi=250) + plt.close(fig) + if on_plot: + on_plot(plot_fname) + + def print(self): + """ + Print the confusion matrix to the console. 
+ """ + for i in range(self.nc + 1): + LOGGER.info(' '.join(map(str, self.matrix[i]))) + + +def smooth(y, f=0.05): + """Box filter of fraction f.""" + nf = round(len(y) * f * 2) // 2 + 1 # number of filter elements (must be odd) + p = np.ones(nf // 2) # ones padding + yp = np.concatenate((p * y[0], y, p * y[-1]), 0) # y padded + return np.convolve(yp, np.ones(nf) / nf, mode='valid') # y-smoothed + + +@plt_settings() +def plot_pr_curve(px, py, ap, save_dir=Path('pr_curve.png'), names=(), on_plot=None): + """Plots a precision-recall curve.""" + fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True) + py = np.stack(py, axis=1) + + if 0 < len(names) < 21: # display per-class legend if < 21 classes + for i, y in enumerate(py.T): + ax.plot(px, y, linewidth=1, label=f'{names[i]} {ap[i, 0]:.3f}') # plot(recall, precision) + else: + ax.plot(px, py, linewidth=1, color='grey') # plot(recall, precision) + + ax.plot(px, py.mean(1), linewidth=3, color='blue', label='all classes %.3f mAP@0.5' % ap[:, 0].mean()) + ax.set_xlabel('Recall') + ax.set_ylabel('Precision') + ax.set_xlim(0, 1) + ax.set_ylim(0, 1) + ax.legend(bbox_to_anchor=(1.04, 1), loc='upper left') + ax.set_title('Precision-Recall Curve') + fig.savefig(save_dir, dpi=250) + plt.close(fig) + if on_plot: + on_plot(save_dir) + + +@plt_settings() +def plot_mc_curve(px, py, save_dir=Path('mc_curve.png'), names=(), xlabel='Confidence', ylabel='Metric', on_plot=None): + """Plots a metric-confidence curve.""" + fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True) + + if 0 < len(names) < 21: # display per-class legend if < 21 classes + for i, y in enumerate(py): + ax.plot(px, y, linewidth=1, label=f'{names[i]}') # plot(confidence, metric) + else: + ax.plot(px, py.T, linewidth=1, color='grey') # plot(confidence, metric) + + y = smooth(py.mean(0), 0.05) + ax.plot(px, y, linewidth=3, color='blue', label=f'all classes {y.max():.2f} at {px[y.argmax()]:.3f}') + ax.set_xlabel(xlabel) + ax.set_ylabel(ylabel) + ax.set_xlim(0, 1) + ax.set_ylim(0, 1) + ax.legend(bbox_to_anchor=(1.04, 1), loc='upper left') + ax.set_title(f'{ylabel}-Confidence Curve') + fig.savefig(save_dir, dpi=250) + plt.close(fig) + if on_plot: + on_plot(save_dir) + + +def compute_ap(recall, precision): + """ + Compute the average precision (AP) given the recall and precision curves. + + Arguments: + recall (list): The recall curve. + precision (list): The precision curve. + + Returns: + (float): Average precision. + (np.ndarray): Precision envelope curve. + (np.ndarray): Modified recall curve with sentinel values added at the beginning and end. + """ + + # Append sentinel values to beginning and end + mrec = np.concatenate(([0.0], recall, [1.0])) + mpre = np.concatenate(([1.0], precision, [0.0])) + + # Compute the precision envelope + mpre = np.flip(np.maximum.accumulate(np.flip(mpre))) + + # Integrate area under curve + method = 'interp' # methods: 'continuous', 'interp' + if method == 'interp': + x = np.linspace(0, 1, 101) # 101-point interp (COCO) + ap = np.trapz(np.interp(x, mrec, mpre), x) # integrate + else: # 'continuous' + i = np.where(mrec[1:] != mrec[:-1])[0] # points where x-axis (recall) changes + ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) # area under curve + + return ap, mpre, mrec + + +def ap_per_class(tp, + conf, + pred_cls, + target_cls, + plot=False, + on_plot=None, + save_dir=Path(), + names=(), + eps=1e-16, + prefix=''): + """ + Computes the average precision per class for object detection evaluation. 
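+    Predictions are sorted by confidence, per-class precision/recall curves are accumulated from cumulative
+    TP/FP counts, and AP is integrated with 101-point interpolation (see `compute_ap`).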
+ + Args: + tp (np.ndarray): Binary array indicating whether the detection is correct (True) or not (False). + conf (np.ndarray): Array of confidence scores of the detections. + pred_cls (np.ndarray): Array of predicted classes of the detections. + target_cls (np.ndarray): Array of true classes of the detections. + plot (bool, optional): Whether to plot PR curves or not. Defaults to False. + on_plot (func, optional): A callback to pass plots path and data when they are rendered. Defaults to None. + save_dir (Path, optional): Directory to save the PR curves. Defaults to an empty path. + names (tuple, optional): Tuple of class names to plot PR curves. Defaults to an empty tuple. + eps (float, optional): A small value to avoid division by zero. Defaults to 1e-16. + prefix (str, optional): A prefix string for saving the plot files. Defaults to an empty string. + + Returns: + (tuple): A tuple of six arrays and one array of unique classes, where: + tp (np.ndarray): True positive counts for each class. + fp (np.ndarray): False positive counts for each class. + p (np.ndarray): Precision values at each confidence threshold. + r (np.ndarray): Recall values at each confidence threshold. + f1 (np.ndarray): F1-score values at each confidence threshold. + ap (np.ndarray): Average precision for each class at different IoU thresholds. + unique_classes (np.ndarray): An array of unique classes that have data. + + """ + + # Sort by objectness + i = np.argsort(-conf) + tp, conf, pred_cls = tp[i], conf[i], pred_cls[i] + + # Find unique classes + unique_classes, nt = np.unique(target_cls, return_counts=True) + nc = unique_classes.shape[0] # number of classes, number of detections + + # Create Precision-Recall curve and compute AP for each class + px, py = np.linspace(0, 1, 1000), [] # for plotting + ap, p, r = np.zeros((nc, tp.shape[1])), np.zeros((nc, 1000)), np.zeros((nc, 1000)) + for ci, c in enumerate(unique_classes): + i = pred_cls == c + n_l = nt[ci] # number of labels + n_p = i.sum() # number of predictions + if n_p == 0 or n_l == 0: + continue + + # Accumulate FPs and TPs + fpc = (1 - tp[i]).cumsum(0) + tpc = tp[i].cumsum(0) + + # Recall + recall = tpc / (n_l + eps) # recall curve + r[ci] = np.interp(-px, -conf[i], recall[:, 0], left=0) # negative x, xp because xp decreases + + # Precision + precision = tpc / (tpc + fpc) # precision curve + p[ci] = np.interp(-px, -conf[i], precision[:, 0], left=1) # p at pr_score + + # AP from recall-precision curve + for j in range(tp.shape[1]): + ap[ci, j], mpre, mrec = compute_ap(recall[:, j], precision[:, j]) + if plot and j == 0: + py.append(np.interp(px, mrec, mpre)) # precision at mAP@0.5 + + # Compute F1 (harmonic mean of precision and recall) + f1 = 2 * p * r / (p + r + eps) + names = [v for k, v in names.items() if k in unique_classes] # list: only classes that have data + names = dict(enumerate(names)) # to dict + if plot: + plot_pr_curve(px, py, ap, save_dir / f'{prefix}PR_curve.png', names, on_plot=on_plot) + plot_mc_curve(px, f1, save_dir / f'{prefix}F1_curve.png', names, ylabel='F1', on_plot=on_plot) + plot_mc_curve(px, p, save_dir / f'{prefix}P_curve.png', names, ylabel='Precision', on_plot=on_plot) + plot_mc_curve(px, r, save_dir / f'{prefix}R_curve.png', names, ylabel='Recall', on_plot=on_plot) + + i = smooth(f1.mean(0), 0.1).argmax() # max F1 index + p, r, f1 = p[:, i], r[:, i], f1[:, i] + tp = (r * nt).round() # true positives + fp = (tp / (p + eps) - tp).round() # false positives + return tp, fp, p, r, f1, ap, unique_classes.astype(int) + + +class 
Metric(SimpleClass): + """ + Class for computing evaluation metrics for YOLOv8 model. + + Attributes: + p (list): Precision for each class. Shape: (nc,). + r (list): Recall for each class. Shape: (nc,). + f1 (list): F1 score for each class. Shape: (nc,). + all_ap (list): AP scores for all classes and all IoU thresholds. Shape: (nc, 10). + ap_class_index (list): Index of class for each AP score. Shape: (nc,). + nc (int): Number of classes. + + Methods: + ap50(): AP at IoU threshold of 0.5 for all classes. Returns: List of AP scores. Shape: (nc,) or []. + ap(): AP at IoU thresholds from 0.5 to 0.95 for all classes. Returns: List of AP scores. Shape: (nc,) or []. + mp(): Mean precision of all classes. Returns: Float. + mr(): Mean recall of all classes. Returns: Float. + map50(): Mean AP at IoU threshold of 0.5 for all classes. Returns: Float. + map75(): Mean AP at IoU threshold of 0.75 for all classes. Returns: Float. + map(): Mean AP at IoU thresholds from 0.5 to 0.95 for all classes. Returns: Float. + mean_results(): Mean of results, returns mp, mr, map50, map. + class_result(i): Class-aware result, returns p[i], r[i], ap50[i], ap[i]. + maps(): mAP of each class. Returns: Array of mAP scores, shape: (nc,). + fitness(): Model fitness as a weighted combination of metrics. Returns: Float. + update(results): Update metric attributes with new evaluation results. + + """ + + def __init__(self) -> None: + self.p = [] # (nc, ) + self.r = [] # (nc, ) + self.f1 = [] # (nc, ) + self.all_ap = [] # (nc, 10) + self.ap_class_index = [] # (nc, ) + self.nc = 0 + + @property + def ap50(self): + """ + Returns the Average Precision (AP) at an IoU threshold of 0.5 for all classes. + + Returns: + (np.ndarray, list): Array of shape (nc,) with AP50 values per class, or an empty list if not available. + """ + return self.all_ap[:, 0] if len(self.all_ap) else [] + + @property + def ap(self): + """ + Returns the Average Precision (AP) at an IoU threshold of 0.5-0.95 for all classes. + + Returns: + (np.ndarray, list): Array of shape (nc,) with AP50-95 values per class, or an empty list if not available. + """ + return self.all_ap.mean(1) if len(self.all_ap) else [] + + @property + def mp(self): + """ + Returns the Mean Precision of all classes. + + Returns: + (float): The mean precision of all classes. + """ + return self.p.mean() if len(self.p) else 0.0 + + @property + def mr(self): + """ + Returns the Mean Recall of all classes. + + Returns: + (float): The mean recall of all classes. + """ + return self.r.mean() if len(self.r) else 0.0 + + @property + def map50(self): + """ + Returns the mean Average Precision (mAP) at an IoU threshold of 0.5. + + Returns: + (float): The mAP50 at an IoU threshold of 0.5. + """ + return self.all_ap[:, 0].mean() if len(self.all_ap) else 0.0 + + @property + def map75(self): + """ + Returns the mean Average Precision (mAP) at an IoU threshold of 0.75. + + Returns: + (float): The mAP50 at an IoU threshold of 0.75. + """ + return self.all_ap[:, 5].mean() if len(self.all_ap) else 0.0 + + @property + def map(self): + """ + Returns the mean Average Precision (mAP) over IoU thresholds of 0.5 - 0.95 in steps of 0.05. + + Returns: + (float): The mAP over IoU thresholds of 0.5 - 0.95 in steps of 0.05. 
+ """ + return self.all_ap.mean() if len(self.all_ap) else 0.0 + + def mean_results(self): + """Mean of results, return mp, mr, map50, map.""" + return [self.mp, self.mr, self.map50, self.map] + + def class_result(self, i): + """class-aware result, return p[i], r[i], ap50[i], ap[i].""" + return self.p[i], self.r[i], self.ap50[i], self.ap[i] + + @property + def maps(self): + """mAP of each class.""" + maps = np.zeros(self.nc) + self.map + for i, c in enumerate(self.ap_class_index): + maps[c] = self.ap[i] + return maps + + def fitness(self): + """Model fitness as a weighted combination of metrics.""" + w = [0.0, 0.0, 0.1, 0.9] # weights for [P, R, mAP@0.5, mAP@0.5:0.95] + return (np.array(self.mean_results()) * w).sum() + + def update(self, results): + """ + Args: + results (tuple): A tuple of (p, r, ap, f1, ap_class) + """ + self.p, self.r, self.f1, self.all_ap, self.ap_class_index = results + + +class DetMetrics(SimpleClass): + """ + This class is a utility class for computing detection metrics such as precision, recall, and mean average precision + (mAP) of an object detection model. + + Args: + save_dir (Path): A path to the directory where the output plots will be saved. Defaults to current directory. + plot (bool): A flag that indicates whether to plot precision-recall curves for each class. Defaults to False. + on_plot (func): An optional callback to pass plots path and data when they are rendered. Defaults to None. + names (tuple of str): A tuple of strings that represents the names of the classes. Defaults to an empty tuple. + + Attributes: + save_dir (Path): A path to the directory where the output plots will be saved. + plot (bool): A flag that indicates whether to plot the precision-recall curves for each class. + on_plot (func): An optional callback to pass plots path and data when they are rendered. + names (tuple of str): A tuple of strings that represents the names of the classes. + box (Metric): An instance of the Metric class for storing the results of the detection metrics. + speed (dict): A dictionary for storing the execution time of different parts of the detection process. + + Methods: + process(tp, conf, pred_cls, target_cls): Updates the metric results with the latest batch of predictions. + keys: Returns a list of keys for accessing the computed detection metrics. + mean_results: Returns a list of mean values for the computed detection metrics. + class_result(i): Returns a list of values for the computed detection metrics for a specific class. + maps: Returns a dictionary of mean average precision (mAP) values for different IoU thresholds. + fitness: Computes the fitness score based on the computed detection metrics. + ap_class_index: Returns a list of class indices sorted by their average precision (AP) values. + results_dict: Returns a dictionary that maps detection metric keys to their computed values. 
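+
+    Example:
+        An illustrative sketch only; the class names are hypothetical and ``tp``, ``conf``,
+        ``pred_cls`` and ``target_cls`` are assumed to come from a validation loop. Note that
+        ``names`` is consumed as a dict mapping class index to class name:
+
+            >>> det = DetMetrics(names={0: 'plate', 1: 'car'})
+            >>> det.process(tp, conf, pred_cls, target_cls)
+            >>> det.keys
+            ['metrics/precision(B)', 'metrics/recall(B)', 'metrics/mAP50(B)', 'metrics/mAP50-95(B)']
+            >>> det.results_dict  # the keys above plus 'fitness'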
+ """ + + def __init__(self, save_dir=Path('.'), plot=False, on_plot=None, names=()) -> None: + self.save_dir = save_dir + self.plot = plot + self.on_plot = on_plot + self.names = names + self.box = Metric() + self.speed = {'preprocess': 0.0, 'inference': 0.0, 'loss': 0.0, 'postprocess': 0.0} + + def process(self, tp, conf, pred_cls, target_cls): + """Process predicted results for object detection and update metrics.""" + results = ap_per_class(tp, + conf, + pred_cls, + target_cls, + plot=self.plot, + save_dir=self.save_dir, + names=self.names, + on_plot=self.on_plot)[2:] + self.box.nc = len(self.names) + self.box.update(results) + + @property + def keys(self): + """Returns a list of keys for accessing specific metrics.""" + return ['metrics/precision(B)', 'metrics/recall(B)', 'metrics/mAP50(B)', 'metrics/mAP50-95(B)'] + + def mean_results(self): + """Calculate mean of detected objects & return precision, recall, mAP50, and mAP50-95.""" + return self.box.mean_results() + + def class_result(self, i): + """Return the result of evaluating the performance of an object detection model on a specific class.""" + return self.box.class_result(i) + + @property + def maps(self): + """Returns mean Average Precision (mAP) scores per class.""" + return self.box.maps + + @property + def fitness(self): + """Returns the fitness of box object.""" + return self.box.fitness() + + @property + def ap_class_index(self): + """Returns the average precision index per class.""" + return self.box.ap_class_index + + @property + def results_dict(self): + """Returns dictionary of computed performance metrics and statistics.""" + return dict(zip(self.keys + ['fitness'], self.mean_results() + [self.fitness])) + + +class SegmentMetrics(SimpleClass): + """ + Calculates and aggregates detection and segmentation metrics over a given set of classes. + + Args: + save_dir (Path): Path to the directory where the output plots should be saved. Default is the current directory. + plot (bool): Whether to save the detection and segmentation plots. Default is False. + on_plot (func): An optional callback to pass plots path and data when they are rendered. Defaults to None. + names (list): List of class names. Default is an empty list. + + Attributes: + save_dir (Path): Path to the directory where the output plots should be saved. + plot (bool): Whether to save the detection and segmentation plots. + on_plot (func): An optional callback to pass plots path and data when they are rendered. + names (list): List of class names. + box (Metric): An instance of the Metric class to calculate box detection metrics. + seg (Metric): An instance of the Metric class to calculate mask segmentation metrics. + speed (dict): Dictionary to store the time taken in different phases of inference. + + Methods: + process(tp_m, tp_b, conf, pred_cls, target_cls): Processes metrics over the given set of predictions. + mean_results(): Returns the mean of the detection and segmentation metrics over all the classes. + class_result(i): Returns the detection and segmentation metrics of class `i`. + maps: Returns the mean Average Precision (mAP) scores for IoU thresholds ranging from 0.50 to 0.95. + fitness: Returns the fitness scores, which are a single weighted combination of metrics. + ap_class_index: Returns the list of indices of classes used to compute Average Precision (AP). + results_dict: Returns the dictionary containing all the detection and segmentation metrics and fitness score. 
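+
+    Example:
+        An illustrative sketch only; ``tp_b``/``tp_m`` (box and mask true-positive arrays),
+        ``conf``, ``pred_cls`` and ``target_cls`` are assumed to come from a validation loop:
+
+            >>> seg = SegmentMetrics(names={0: 'plate'})
+            >>> seg.process(tp_b, tp_m, conf, pred_cls, target_cls)
+            >>> seg.mean_results()  # 4 box metrics followed by 4 mask metrics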
+ """ + + def __init__(self, save_dir=Path('.'), plot=False, on_plot=None, names=()) -> None: + self.save_dir = save_dir + self.plot = plot + self.on_plot = on_plot + self.names = names + self.box = Metric() + self.seg = Metric() + self.speed = {'preprocess': 0.0, 'inference': 0.0, 'loss': 0.0, 'postprocess': 0.0} + + def process(self, tp_b, tp_m, conf, pred_cls, target_cls): + """ + Processes the detection and segmentation metrics over the given set of predictions. + + Args: + tp_b (list): List of True Positive boxes. + tp_m (list): List of True Positive masks. + conf (list): List of confidence scores. + pred_cls (list): List of predicted classes. + target_cls (list): List of target classes. + """ + + results_mask = ap_per_class(tp_m, + conf, + pred_cls, + target_cls, + plot=self.plot, + on_plot=self.on_plot, + save_dir=self.save_dir, + names=self.names, + prefix='Mask')[2:] + self.seg.nc = len(self.names) + self.seg.update(results_mask) + results_box = ap_per_class(tp_b, + conf, + pred_cls, + target_cls, + plot=self.plot, + on_plot=self.on_plot, + save_dir=self.save_dir, + names=self.names, + prefix='Box')[2:] + self.box.nc = len(self.names) + self.box.update(results_box) + + @property + def keys(self): + """Returns a list of keys for accessing metrics.""" + return [ + 'metrics/precision(B)', 'metrics/recall(B)', 'metrics/mAP50(B)', 'metrics/mAP50-95(B)', + 'metrics/precision(M)', 'metrics/recall(M)', 'metrics/mAP50(M)', 'metrics/mAP50-95(M)'] + + def mean_results(self): + """Return the mean metrics for bounding box and segmentation results.""" + return self.box.mean_results() + self.seg.mean_results() + + def class_result(self, i): + """Returns classification results for a specified class index.""" + return self.box.class_result(i) + self.seg.class_result(i) + + @property + def maps(self): + """Returns mAP scores for object detection and semantic segmentation models.""" + return self.box.maps + self.seg.maps + + @property + def fitness(self): + """Get the fitness score for both segmentation and bounding box models.""" + return self.seg.fitness() + self.box.fitness() + + @property + def ap_class_index(self): + """Boxes and masks have the same ap_class_index.""" + return self.box.ap_class_index + + @property + def results_dict(self): + """Returns results of object detection model for evaluation.""" + return dict(zip(self.keys + ['fitness'], self.mean_results() + [self.fitness])) + + +class PoseMetrics(SegmentMetrics): + """ + Calculates and aggregates detection and pose metrics over a given set of classes. + + Args: + save_dir (Path): Path to the directory where the output plots should be saved. Default is the current directory. + plot (bool): Whether to save the detection and segmentation plots. Default is False. + on_plot (func): An optional callback to pass plots path and data when they are rendered. Defaults to None. + names (list): List of class names. Default is an empty list. + + Attributes: + save_dir (Path): Path to the directory where the output plots should be saved. + plot (bool): Whether to save the detection and segmentation plots. + on_plot (func): An optional callback to pass plots path and data when they are rendered. + names (list): List of class names. + box (Metric): An instance of the Metric class to calculate box detection metrics. + pose (Metric): An instance of the Metric class to calculate mask segmentation metrics. + speed (dict): Dictionary to store the time taken in different phases of inference. 
+
+    Methods:
+        process(tp_b, tp_p, conf, pred_cls, target_cls): Processes metrics over the given set of predictions.
+        mean_results(): Returns the mean of the detection and pose metrics over all the classes.
+        class_result(i): Returns the detection and pose metrics of class `i`.
+        maps: Returns the mean Average Precision (mAP) scores for IoU thresholds ranging from 0.50 to 0.95.
+        fitness: Returns the fitness scores, which are a single weighted combination of metrics.
+        ap_class_index: Returns the list of indices of classes used to compute Average Precision (AP).
+        results_dict: Returns the dictionary containing all the detection and pose metrics and fitness score.
+    """
+
+    def __init__(self, save_dir=Path('.'), plot=False, on_plot=None, names=()) -> None:
+        super().__init__(save_dir, plot, on_plot, names)
+        self.save_dir = save_dir
+        self.plot = plot
+        self.on_plot = on_plot
+        self.names = names
+        self.box = Metric()
+        self.pose = Metric()
+        self.speed = {'preprocess': 0.0, 'inference': 0.0, 'loss': 0.0, 'postprocess': 0.0}
+
+    def __getattr__(self, attr):
+        """Raises an AttributeError if an invalid attribute is accessed."""
+        name = self.__class__.__name__
+        raise AttributeError(f"'{name}' object has no attribute '{attr}'. See valid attributes below.\n{self.__doc__}")
+
+    def process(self, tp_b, tp_p, conf, pred_cls, target_cls):
+        """
+        Processes the detection and pose metrics over the given set of predictions.
+
+        Args:
+            tp_b (list): List of True Positive boxes.
+            tp_p (list): List of True Positive keypoints.
+            conf (list): List of confidence scores.
+            pred_cls (list): List of predicted classes.
+            target_cls (list): List of target classes.
+        """
+
+        results_pose = ap_per_class(tp_p,
+                                    conf,
+                                    pred_cls,
+                                    target_cls,
+                                    plot=self.plot,
+                                    on_plot=self.on_plot,
+                                    save_dir=self.save_dir,
+                                    names=self.names,
+                                    prefix='Pose')[2:]
+        self.pose.nc = len(self.names)
+        self.pose.update(results_pose)
+        results_box = ap_per_class(tp_b,
+                                   conf,
+                                   pred_cls,
+                                   target_cls,
+                                   plot=self.plot,
+                                   on_plot=self.on_plot,
+                                   save_dir=self.save_dir,
+                                   names=self.names,
+                                   prefix='Box')[2:]
+        self.box.nc = len(self.names)
+        self.box.update(results_box)
+
+    @property
+    def keys(self):
+        """Returns list of evaluation metric keys."""
+        return [
+            'metrics/precision(B)', 'metrics/recall(B)', 'metrics/mAP50(B)', 'metrics/mAP50-95(B)',
+            'metrics/precision(P)', 'metrics/recall(P)', 'metrics/mAP50(P)', 'metrics/mAP50-95(P)']
+
+    def mean_results(self):
+        """Return the mean results of box and pose."""
+        return self.box.mean_results() + self.pose.mean_results()
+
+    def class_result(self, i):
+        """Return the class-wise detection results for a specific class i."""
+        return self.box.class_result(i) + self.pose.class_result(i)
+
+    @property
+    def maps(self):
+        """Returns the mean average precision (mAP) per class for both box and pose detections."""
+        return self.box.maps + self.pose.maps
+
+    @property
+    def fitness(self):
+        """Returns the combined fitness of the pose and box metrics."""
+        return self.pose.fitness() + self.box.fitness()
+
+
+class ClassifyMetrics(SimpleClass):
+    """
+    Class for computing classification metrics including top-1 and top-5 accuracy.
+
+    Attributes:
+        top1 (float): The top-1 accuracy.
+        top5 (float): The top-5 accuracy.
+        speed (Dict[str, float]): A dictionary containing the time taken for each step in the pipeline.
+
+    Properties:
+        fitness (float): The fitness of the model, which is equal to top-5 accuracy.
+ results_dict (Dict[str, Union[float, str]]): A dictionary containing the classification metrics and fitness. + keys (List[str]): A list of keys for the results_dict. + + Methods: + process(targets, pred): Processes the targets and predictions to compute classification metrics. + """ + + def __init__(self) -> None: + self.top1 = 0 + self.top5 = 0 + self.speed = {'preprocess': 0.0, 'inference': 0.0, 'loss': 0.0, 'postprocess': 0.0} + + def process(self, targets, pred): + """Target classes and predicted classes.""" + pred, targets = torch.cat(pred), torch.cat(targets) + correct = (targets[:, None] == pred).float() + acc = torch.stack((correct[:, 0], correct.max(1).values), dim=1) # (top1, top5) accuracy + self.top1, self.top5 = acc.mean(0).tolist() + + @property + def fitness(self): + """Returns top-5 accuracy as fitness score.""" + return self.top5 + + @property + def results_dict(self): + """Returns a dictionary with model's performance metrics and fitness score.""" + return dict(zip(self.keys + ['fitness'], [self.top1, self.top5, self.fitness])) + + @property + def keys(self): + """Returns a list of keys for the results_dict property.""" + return ['metrics/accuracy_top1', 'metrics/accuracy_top5'] diff --git a/ultralytics/yolo/utils/ops.py b/ultralytics/yolo/utils/ops.py new file mode 100644 index 0000000..bb9ca49 --- /dev/null +++ b/ultralytics/yolo/utils/ops.py @@ -0,0 +1,739 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +import contextlib +import math +import re +import time + +import cv2 +import numpy as np +import torch +import torch.nn.functional as F +import torchvision + +from ultralytics.yolo.utils import LOGGER + +from .metrics import box_iou + + +class Profile(contextlib.ContextDecorator): + """ + YOLOv8 Profile class. + Usage: as a decorator with @Profile() or as a context manager with 'with Profile():' + """ + + def __init__(self, t=0.0): + """ + Initialize the Profile class. + + Args: + t (float): Initial time. Defaults to 0.0. + """ + self.t = t + self.cuda = torch.cuda.is_available() + + def __enter__(self): + """ + Start timing. + """ + self.start = self.time() + return self + + def __exit__(self, type, value, traceback): + """ + Stop timing. + """ + self.dt = self.time() - self.start # delta-time + self.t += self.dt # accumulate dt + + def time(self): + """ + Get current time. + """ + if self.cuda: + torch.cuda.synchronize() + return time.time() + + +def coco80_to_coco91_class(): # + """ + Converts 80-index (val2014) to 91-index (paper). + For details see https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/. + + Example: + a = np.loadtxt('data/coco.names', dtype='str', delimiter='\n') + b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\n') + x1 = [list(a[i] == b).index(True) + 1 for i in range(80)] # darknet to coco + x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)] # coco to darknet + """ + return [ + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34, + 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, + 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90] + + +def segment2box(segment, width=640, height=640): + """ + Convert 1 segment label to 1 box label, applying inside-image constraint, i.e. (xy1, xy2, ...) to (xyxy) + + Args: + segment (torch.Tensor): the segment label + width (int): the width of the image. 
Defaults to 640 + height (int): The height of the image. Defaults to 640 + + Returns: + (np.ndarray): the minimum and maximum x and y values of the segment. + """ + # Convert 1 segment label to 1 box label, applying inside-image constraint, i.e. (xy1, xy2, ...) to (xyxy) + x, y = segment.T # segment xy + inside = (x >= 0) & (y >= 0) & (x <= width) & (y <= height) + x, y, = x[inside], y[inside] + return np.array([x.min(), y.min(), x.max(), y.max()], dtype=segment.dtype) if any(x) else np.zeros( + 4, dtype=segment.dtype) # xyxy + + +def scale_boxes(img1_shape, boxes, img0_shape, ratio_pad=None, padding=True): + """ + Rescales bounding boxes (in the format of xyxy) from the shape of the image they were originally specified in + (img1_shape) to the shape of a different image (img0_shape). + + Args: + img1_shape (tuple): The shape of the image that the bounding boxes are for, in the format of (height, width). + boxes (torch.Tensor): the bounding boxes of the objects in the image, in the format of (x1, y1, x2, y2) + img0_shape (tuple): the shape of the target image, in the format of (height, width). + ratio_pad (tuple): a tuple of (ratio, pad) for scaling the boxes. If not provided, the ratio and pad will be + calculated based on the size difference between the two images. + padding (bool): If True, assuming the boxes is based on image augmented by yolo style. If False then do regular + rescaling. + + Returns: + boxes (torch.Tensor): The scaled bounding boxes, in the format of (x1, y1, x2, y2) + """ + if ratio_pad is None: # calculate from img0_shape + gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new + pad = round((img1_shape[1] - img0_shape[1] * gain) / 2 - 0.1), round( + (img1_shape[0] - img0_shape[0] * gain) / 2 - 0.1) # wh padding + else: + gain = ratio_pad[0][0] + pad = ratio_pad[1] + + if padding: + boxes[..., [0, 2]] -= pad[0] # x padding + boxes[..., [1, 3]] -= pad[1] # y padding + boxes[..., :4] /= gain + clip_boxes(boxes, img0_shape) + return boxes + + +def make_divisible(x, divisor): + """ + Returns the nearest number that is divisible by the given divisor. + + Args: + x (int): The number to make divisible. + divisor (int | torch.Tensor): The divisor. + + Returns: + (int): The nearest number divisible by the divisor. + """ + if isinstance(divisor, torch.Tensor): + divisor = int(divisor.max()) # to int + return math.ceil(x / divisor) * divisor + + +def non_max_suppression( + prediction, + conf_thres=0.25, + iou_thres=0.45, + classes=None, + agnostic=False, + multi_label=False, + labels=(), + max_det=300, + nc=0, # number of classes (optional) + max_time_img=0.05, + max_nms=30000, + max_wh=7680, +): + """ + Perform non-maximum suppression (NMS) on a set of boxes, with support for masks and multiple labels per box. + + Arguments: + prediction (torch.Tensor): A tensor of shape (batch_size, num_classes + 4 + num_masks, num_boxes) + containing the predicted boxes, classes, and masks. The tensor should be in the format + output by a model, such as YOLO. + conf_thres (float): The confidence threshold below which boxes will be filtered out. + Valid values are between 0.0 and 1.0. + iou_thres (float): The IoU threshold below which boxes will be filtered out during NMS. + Valid values are between 0.0 and 1.0. + classes (List[int]): A list of class indices to consider. If None, all classes will be considered. + agnostic (bool): If True, the model is agnostic to the number of classes, and all + classes will be considered as one. 
+ multi_label (bool): If True, each box may have multiple labels. + labels (List[List[Union[int, float, torch.Tensor]]]): A list of lists, where each inner + list contains the apriori labels for a given image. The list should be in the format + output by a dataloader, with each label being a tuple of (class_index, x1, y1, x2, y2). + max_det (int): The maximum number of boxes to keep after NMS. + nc (int, optional): The number of classes output by the model. Any indices after this will be considered masks. + max_time_img (float): The maximum time (seconds) for processing one image. + max_nms (int): The maximum number of boxes into torchvision.ops.nms(). + max_wh (int): The maximum box width and height in pixels + + Returns: + (List[torch.Tensor]): A list of length batch_size, where each element is a tensor of + shape (num_boxes, 6 + num_masks) containing the kept boxes, with columns + (x1, y1, x2, y2, confidence, class, mask1, mask2, ...). + """ + + # Checks + assert 0 <= conf_thres <= 1, f'Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0' + assert 0 <= iou_thres <= 1, f'Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0' + if isinstance(prediction, (list, tuple)): # YOLOv8 model in validation model, output = (inference_out, loss_out) + prediction = prediction[0] # select only inference output + + device = prediction.device + mps = 'mps' in device.type # Apple MPS + if mps: # MPS not fully supported yet, convert tensors to CPU before NMS + prediction = prediction.cpu() + bs = prediction.shape[0] # batch size + nc = nc or (prediction.shape[1] - 4) # number of classes + nm = prediction.shape[1] - nc - 4 + mi = 4 + nc # mask start index + xc = prediction[:, 4:mi].amax(1) > conf_thres # candidates + + # Settings + # min_wh = 2 # (pixels) minimum box width and height + time_limit = 0.5 + max_time_img * bs # seconds to quit after + redundant = True # require redundant detections + multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img) + merge = False # use merge-NMS + + prediction = prediction.transpose(-1, -2) # shape(1,84,6300) to shape(1,6300,84) + prediction[..., :4] = xywh2xyxy(prediction[..., :4]) # xywh to xyxy + + t = time.time() + output = [torch.zeros((0, 6 + nm), device=prediction.device)] * bs + for xi, x in enumerate(prediction): # image index, image inference + # Apply constraints + # x[((x[:, 2:4] < min_wh) | (x[:, 2:4] > max_wh)).any(1), 4] = 0 # width-height + x = x[xc[xi]] # confidence + + # Cat apriori labels if autolabelling + if labels and len(labels[xi]): + lb = labels[xi] + v = torch.zeros((len(lb), nc + nm + 5), device=x.device) + v[:, :4] = lb[:, 1:5] # box + v[range(len(lb)), lb[:, 0].long() + 4] = 1.0 # cls + x = torch.cat((x, v), 0) + + # If none remain process next image + if not x.shape[0]: + continue + + # Detections matrix nx6 (xyxy, conf, cls) + box, cls, mask = x.split((4, nc, nm), 1) + + if multi_label: + i, j = torch.where(cls > conf_thres) + x = torch.cat((box[i], x[i, 4 + j, None], j[:, None].float(), mask[i]), 1) + else: # best class only + conf, j = cls.max(1, keepdim=True) + x = torch.cat((box, conf, j.float(), mask), 1)[conf.view(-1) > conf_thres] + + # Filter by class + if classes is not None: + x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)] + + # Apply finite constraint + # if not torch.isfinite(x).all(): + # x = x[torch.isfinite(x).all(1)] + + # Check shape + n = x.shape[0] # number of boxes + if not n: # no boxes + continue + if n > max_nms: # excess boxes + x = x[x[:, 
4].argsort(descending=True)[:max_nms]] # sort by confidence and remove excess boxes + + # Batched NMS + c = x[:, 5:6] * (0 if agnostic else max_wh) # classes + boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores + i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS + i = i[:max_det] # limit detections + if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean) + # Update boxes as boxes(i,4) = weights(i,n) * boxes(n,4) + iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix + weights = iou * scores[None] # box weights + x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes + if redundant: + i = i[iou.sum(1) > 1] # require redundancy + + output[xi] = x[i] + if mps: + output[xi] = output[xi].to(device) + if (time.time() - t) > time_limit: + LOGGER.warning(f'WARNING ⚠️ NMS time limit {time_limit:.3f}s exceeded') + break # time limit exceeded + + return output + + +def clip_boxes(boxes, shape): + """ + It takes a list of bounding boxes and a shape (height, width) and clips the bounding boxes to the + shape + + Args: + boxes (torch.Tensor): the bounding boxes to clip + shape (tuple): the shape of the image + """ + if isinstance(boxes, torch.Tensor): # faster individually + boxes[..., 0].clamp_(0, shape[1]) # x1 + boxes[..., 1].clamp_(0, shape[0]) # y1 + boxes[..., 2].clamp_(0, shape[1]) # x2 + boxes[..., 3].clamp_(0, shape[0]) # y2 + else: # np.array (faster grouped) + boxes[..., [0, 2]] = boxes[..., [0, 2]].clip(0, shape[1]) # x1, x2 + boxes[..., [1, 3]] = boxes[..., [1, 3]].clip(0, shape[0]) # y1, y2 + + +def clip_coords(coords, shape): + """ + Clip line coordinates to the image boundaries. + + Args: + coords (torch.Tensor | numpy.ndarray): A list of line coordinates. + shape (tuple): A tuple of integers representing the size of the image in the format (height, width). + + Returns: + (None): The function modifies the input `coordinates` in place, by clipping each coordinate to the image boundaries. + """ + if isinstance(coords, torch.Tensor): # faster individually + coords[..., 0].clamp_(0, shape[1]) # x + coords[..., 1].clamp_(0, shape[0]) # y + else: # np.array (faster grouped) + coords[..., 0] = coords[..., 0].clip(0, shape[1]) # x + coords[..., 1] = coords[..., 1].clip(0, shape[0]) # y + + +def scale_image(masks, im0_shape, ratio_pad=None): + """ + Takes a mask, and resizes it to the original image size + + Args: + masks (torch.Tensor): resized and padded masks/images, [h, w, num]/[h, w, 3]. + im0_shape (tuple): the original image shape + ratio_pad (tuple): the ratio of the padding to the original image. + + Returns: + masks (torch.Tensor): The masks that are being returned. 
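+
+    Example:
+        Illustrative only (shapes are hypothetical): restore a 640x640 letterboxed mask to a
+        480x640 source image (a NumPy array is used here since the resize is done with OpenCV):
+
+            >>> masks = np.zeros((640, 640, 1), dtype=np.float32)
+            >>> scale_image(masks, (480, 640, 3)).shape
+            (480, 640, 1)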
+ """ + # Rescale coordinates (xyxy) from im1_shape to im0_shape + im1_shape = masks.shape + if im1_shape[:2] == im0_shape[:2]: + return masks + if ratio_pad is None: # calculate from im0_shape + gain = min(im1_shape[0] / im0_shape[0], im1_shape[1] / im0_shape[1]) # gain = old / new + pad = (im1_shape[1] - im0_shape[1] * gain) / 2, (im1_shape[0] - im0_shape[0] * gain) / 2 # wh padding + else: + gain = ratio_pad[0][0] + pad = ratio_pad[1] + top, left = int(pad[1]), int(pad[0]) # y, x + bottom, right = int(im1_shape[0] - pad[1]), int(im1_shape[1] - pad[0]) + + if len(masks.shape) < 2: + raise ValueError(f'"len of masks shape" should be 2 or 3, but got {len(masks.shape)}') + masks = masks[top:bottom, left:right] + # masks = masks.permute(2, 0, 1).contiguous() + # masks = F.interpolate(masks[None], im0_shape[:2], mode='bilinear', align_corners=False)[0] + # masks = masks.permute(1, 2, 0).contiguous() + masks = cv2.resize(masks, (im0_shape[1], im0_shape[0])) + if len(masks.shape) == 2: + masks = masks[:, :, None] + + return masks + + +def xyxy2xywh(x): + """ + Convert bounding box coordinates from (x1, y1, x2, y2) format to (x, y, width, height) format. + + Args: + x (np.ndarray | torch.Tensor): The input bounding box coordinates in (x1, y1, x2, y2) format. + Returns: + y (np.ndarray | torch.Tensor): The bounding box coordinates in (x, y, width, height) format. + """ + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[..., 0] = (x[..., 0] + x[..., 2]) / 2 # x center + y[..., 1] = (x[..., 1] + x[..., 3]) / 2 # y center + y[..., 2] = x[..., 2] - x[..., 0] # width + y[..., 3] = x[..., 3] - x[..., 1] # height + return y + + +def xywh2xyxy(x): + """ + Convert bounding box coordinates from (x, y, width, height) format to (x1, y1, x2, y2) format where (x1, y1) is the + top-left corner and (x2, y2) is the bottom-right corner. + + Args: + x (np.ndarray | torch.Tensor): The input bounding box coordinates in (x, y, width, height) format. + Returns: + y (np.ndarray | torch.Tensor): The bounding box coordinates in (x1, y1, x2, y2) format. + """ + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[..., 0] = x[..., 0] - x[..., 2] / 2 # top left x + y[..., 1] = x[..., 1] - x[..., 3] / 2 # top left y + y[..., 2] = x[..., 0] + x[..., 2] / 2 # bottom right x + y[..., 3] = x[..., 1] + x[..., 3] / 2 # bottom right y + return y + + +def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0): + """ + Convert normalized bounding box coordinates to pixel coordinates. + + Args: + x (np.ndarray | torch.Tensor): The bounding box coordinates. + w (int): Width of the image. Defaults to 640 + h (int): Height of the image. Defaults to 640 + padw (int): Padding width. Defaults to 0 + padh (int): Padding height. Defaults to 0 + Returns: + y (np.ndarray | torch.Tensor): The coordinates of the bounding box in the format [x1, y1, x2, y2] where + x1,y1 is the top-left corner, x2,y2 is the bottom-right corner of the bounding box. + """ + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[..., 0] = w * (x[..., 0] - x[..., 2] / 2) + padw # top left x + y[..., 1] = h * (x[..., 1] - x[..., 3] / 2) + padh # top left y + y[..., 2] = w * (x[..., 0] + x[..., 2] / 2) + padw # bottom right x + y[..., 3] = h * (x[..., 1] + x[..., 3] / 2) + padh # bottom right y + return y + + +def xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0): + """ + Convert bounding box coordinates from (x1, y1, x2, y2) format to (x, y, width, height, normalized) format. 
+ x, y, width and height are normalized to image dimensions + + Args: + x (np.ndarray | torch.Tensor): The input bounding box coordinates in (x1, y1, x2, y2) format. + w (int): The width of the image. Defaults to 640 + h (int): The height of the image. Defaults to 640 + clip (bool): If True, the boxes will be clipped to the image boundaries. Defaults to False + eps (float): The minimum value of the box's width and height. Defaults to 0.0 + Returns: + y (np.ndarray | torch.Tensor): The bounding box coordinates in (x, y, width, height, normalized) format + """ + if clip: + clip_boxes(x, (h - eps, w - eps)) # warning: inplace clip + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[..., 0] = ((x[..., 0] + x[..., 2]) / 2) / w # x center + y[..., 1] = ((x[..., 1] + x[..., 3]) / 2) / h # y center + y[..., 2] = (x[..., 2] - x[..., 0]) / w # width + y[..., 3] = (x[..., 3] - x[..., 1]) / h # height + return y + + +def xyn2xy(x, w=640, h=640, padw=0, padh=0): + """ + Convert normalized coordinates to pixel coordinates of shape (n,2) + + Args: + x (np.ndarray | torch.Tensor): The input tensor of normalized bounding box coordinates + w (int): The width of the image. Defaults to 640 + h (int): The height of the image. Defaults to 640 + padw (int): The width of the padding. Defaults to 0 + padh (int): The height of the padding. Defaults to 0 + Returns: + y (np.ndarray | torch.Tensor): The x and y coordinates of the top left corner of the bounding box + """ + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[..., 0] = w * x[..., 0] + padw # top left x + y[..., 1] = h * x[..., 1] + padh # top left y + return y + + +def xywh2ltwh(x): + """ + Convert the bounding box format from [x, y, w, h] to [x1, y1, w, h], where x1, y1 are the top-left coordinates. + + Args: + x (np.ndarray | torch.Tensor): The input tensor with the bounding box coordinates in the xywh format + Returns: + y (np.ndarray | torch.Tensor): The bounding box coordinates in the xyltwh format + """ + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x + y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y + return y + + +def xyxy2ltwh(x): + """ + Convert nx4 bounding boxes from [x1, y1, x2, y2] to [x1, y1, w, h], where xy1=top-left, xy2=bottom-right + + Args: + x (np.ndarray | torch.Tensor): The input tensor with the bounding boxes coordinates in the xyxy format + Returns: + y (np.ndarray | torch.Tensor): The bounding box coordinates in the xyltwh format. + """ + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[:, 2] = x[:, 2] - x[:, 0] # width + y[:, 3] = x[:, 3] - x[:, 1] # height + return y + + +def ltwh2xywh(x): + """ + Convert nx4 boxes from [x1, y1, w, h] to [x, y, w, h] where xy1=top-left, xy=center + + Args: + x (torch.Tensor): the input tensor + """ + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[:, 0] = x[:, 0] + x[:, 2] / 2 # center x + y[:, 1] = x[:, 1] + x[:, 3] / 2 # center y + return y + + +def ltwh2xyxy(x): + """ + It converts the bounding box from [x1, y1, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right + + Args: + x (np.ndarray | torch.Tensor): the input image + + Returns: + y (np.ndarray | torch.Tensor): the xyxy coordinates of the bounding boxes. + """ + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[:, 2] = x[:, 2] + x[:, 0] # width + y[:, 3] = x[:, 3] + x[:, 1] # height + return y + + +def segments2boxes(segments): + """ + It converts segment labels to box labels, i.e. 
(cls, xy1, xy2, ...) to (cls, xywh) + + Args: + segments (list): list of segments, each segment is a list of points, each point is a list of x, y coordinates + + Returns: + (np.ndarray): the xywh coordinates of the bounding boxes. + """ + boxes = [] + for s in segments: + x, y = s.T # segment xy + boxes.append([x.min(), y.min(), x.max(), y.max()]) # cls, xyxy + return xyxy2xywh(np.array(boxes)) # cls, xywh + + +def resample_segments(segments, n=1000): + """ + Inputs a list of segments (n,2) and returns a list of segments (n,2) up-sampled to n points each. + + Args: + segments (list): a list of (n,2) arrays, where n is the number of points in the segment. + n (int): number of points to resample the segment to. Defaults to 1000 + + Returns: + segments (list): the resampled segments. + """ + for i, s in enumerate(segments): + s = np.concatenate((s, s[0:1, :]), axis=0) + x = np.linspace(0, len(s) - 1, n) + xp = np.arange(len(s)) + segments[i] = np.concatenate([np.interp(x, xp, s[:, i]) for i in range(2)], + dtype=np.float32).reshape(2, -1).T # segment xy + return segments + + +def crop_mask(masks, boxes): + """ + It takes a mask and a bounding box, and returns a mask that is cropped to the bounding box + + Args: + masks (torch.Tensor): [n, h, w] tensor of masks + boxes (torch.Tensor): [n, 4] tensor of bbox coordinates in relative point form + + Returns: + (torch.Tensor): The masks are being cropped to the bounding box. + """ + n, h, w = masks.shape + x1, y1, x2, y2 = torch.chunk(boxes[:, :, None], 4, 1) # x1 shape(n,1,1) + r = torch.arange(w, device=masks.device, dtype=x1.dtype)[None, None, :] # rows shape(1,1,w) + c = torch.arange(h, device=masks.device, dtype=x1.dtype)[None, :, None] # cols shape(1,h,1) + + return masks * ((r >= x1) * (r < x2) * (c >= y1) * (c < y2)) + + +def process_mask_upsample(protos, masks_in, bboxes, shape): + """ + It takes the output of the mask head, and applies the mask to the bounding boxes. This produces masks of higher + quality but is slower. + + Args: + protos (torch.Tensor): [mask_dim, mask_h, mask_w] + masks_in (torch.Tensor): [n, mask_dim], n is number of masks after nms + bboxes (torch.Tensor): [n, 4], n is number of masks after nms + shape (tuple): the size of the input image (h,w) + + Returns: + (torch.Tensor): The upsampled masks. + """ + c, mh, mw = protos.shape # CHW + masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw) + masks = F.interpolate(masks[None], shape, mode='bilinear', align_corners=False)[0] # CHW + masks = crop_mask(masks, bboxes) # CHW + return masks.gt_(0.5) + + +def process_mask(protos, masks_in, bboxes, shape, upsample=False): + """ + Apply masks to bounding boxes using the output of the mask head. + + Args: + protos (torch.Tensor): A tensor of shape [mask_dim, mask_h, mask_w]. + masks_in (torch.Tensor): A tensor of shape [n, mask_dim], where n is the number of masks after NMS. + bboxes (torch.Tensor): A tensor of shape [n, 4], where n is the number of masks after NMS. + shape (tuple): A tuple of integers representing the size of the input image in the format (h, w). + upsample (bool): A flag to indicate whether to upsample the mask to the original image size. Default is False. + + Returns: + (torch.Tensor): A binary mask tensor of shape [n, h, w], where n is the number of masks after NMS, and h and w + are the height and width of the input image. The mask is applied to the bounding boxes. 
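+
+    Example:
+        Illustrative only (random prototypes and mask coefficients, hypothetical shapes):
+
+            >>> protos = torch.rand(32, 160, 160)
+            >>> masks_in = torch.rand(5, 32)
+            >>> bboxes = torch.tensor([[0., 0., 320., 320.]]).repeat(5, 1)
+            >>> process_mask(protos, masks_in, bboxes, (640, 640), upsample=True).shape
+            torch.Size([5, 640, 640])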
+ """ + + c, mh, mw = protos.shape # CHW + ih, iw = shape + masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw) # CHW + + downsampled_bboxes = bboxes.clone() + downsampled_bboxes[:, 0] *= mw / iw + downsampled_bboxes[:, 2] *= mw / iw + downsampled_bboxes[:, 3] *= mh / ih + downsampled_bboxes[:, 1] *= mh / ih + + masks = crop_mask(masks, downsampled_bboxes) # CHW + if upsample: + masks = F.interpolate(masks[None], shape, mode='bilinear', align_corners=False)[0] # CHW + return masks.gt_(0.5) + + +def process_mask_native(protos, masks_in, bboxes, shape): + """ + It takes the output of the mask head, and crops it after upsampling to the bounding boxes. + + Args: + protos (torch.Tensor): [mask_dim, mask_h, mask_w] + masks_in (torch.Tensor): [n, mask_dim], n is number of masks after nms + bboxes (torch.Tensor): [n, 4], n is number of masks after nms + shape (tuple): the size of the input image (h,w) + + Returns: + masks (torch.Tensor): The returned masks with dimensions [h, w, n] + """ + c, mh, mw = protos.shape # CHW + masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw) + masks = scale_masks(masks[None], shape)[0] # CHW + masks = crop_mask(masks, bboxes) # CHW + return masks.gt_(0.5) + + +def scale_masks(masks, shape, padding=True): + """ + Rescale segment masks to shape. + + Args: + masks (torch.Tensor): (N, C, H, W). + shape (tuple): Height and width. + padding (bool): If True, assuming the boxes is based on image augmented by yolo style. If False then do regular + rescaling. + """ + mh, mw = masks.shape[2:] + gain = min(mh / shape[0], mw / shape[1]) # gain = old / new + pad = [mw - shape[1] * gain, mh - shape[0] * gain] # wh padding + if padding: + pad[0] /= 2 + pad[1] /= 2 + top, left = (int(pad[1]), int(pad[0])) if padding else (0, 0) # y, x + bottom, right = (int(mh - pad[1]), int(mw - pad[0])) + masks = masks[..., top:bottom, left:right] + + masks = F.interpolate(masks, shape, mode='bilinear', align_corners=False) # NCHW + return masks + + +def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None, normalize=False, padding=True): + """ + Rescale segment coordinates (xyxy) from img1_shape to img0_shape + + Args: + img1_shape (tuple): The shape of the image that the coords are from. + coords (torch.Tensor): the coords to be scaled + img0_shape (tuple): the shape of the image that the segmentation is being applied to + ratio_pad (tuple): the ratio of the image size to the padded image size. + normalize (bool): If True, the coordinates will be normalized to the range [0, 1]. Defaults to False + padding (bool): If True, assuming the boxes is based on image augmented by yolo style. If False then do regular + rescaling. + + Returns: + coords (torch.Tensor): the segmented image. 
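+
+    Example:
+        Illustrative only: map a point predicted on a 640x640 letterboxed image back to its
+        480x640 original (80 px of vertical padding, gain of 1.0):
+
+            >>> pts = torch.tensor([[320., 320.]])
+            >>> scale_coords((640, 640), pts, (480, 640))
+            tensor([[320., 240.]])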
+ """ + if ratio_pad is None: # calculate from img0_shape + gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new + pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding + else: + gain = ratio_pad[0][0] + pad = ratio_pad[1] + + if padding: + coords[..., 0] -= pad[0] # x padding + coords[..., 1] -= pad[1] # y padding + coords[..., 0] /= gain + coords[..., 1] /= gain + clip_coords(coords, img0_shape) + if normalize: + coords[..., 0] /= img0_shape[1] # width + coords[..., 1] /= img0_shape[0] # height + return coords + + +def masks2segments(masks, strategy='largest'): + """ + It takes a list of masks(n,h,w) and returns a list of segments(n,xy) + + Args: + masks (torch.Tensor): the output of the model, which is a tensor of shape (batch_size, 160, 160) + strategy (str): 'concat' or 'largest'. Defaults to largest + + Returns: + segments (List): list of segment masks + """ + segments = [] + for x in masks.int().cpu().numpy().astype('uint8'): + c = cv2.findContours(x, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0] + if c: + if strategy == 'concat': # concatenate all segments + c = np.concatenate([x.reshape(-1, 2) for x in c]) + elif strategy == 'largest': # select largest segment + c = np.array(c[np.array([len(x) for x in c]).argmax()]).reshape(-1, 2) + else: + c = np.zeros((0, 2)) # no segments found + segments.append(c.astype('float32')) + return segments + + +def clean_str(s): + """ + Cleans a string by replacing special characters with underscore _ + + Args: + s (str): a string needing special characters replaced + + Returns: + (str): a string with special characters replaced by an underscore _ + """ + return re.sub(pattern='[|@#!¡·$€%&()=?¿^*;:,¨´><+]', repl='_', string=s) diff --git a/ultralytics/yolo/utils/patches.py b/ultralytics/yolo/utils/patches.py new file mode 100644 index 0000000..4cbebd0 --- /dev/null +++ b/ultralytics/yolo/utils/patches.py @@ -0,0 +1,45 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +""" +Monkey patches to update/extend functionality of existing functions +""" + +from pathlib import Path + +import cv2 +import numpy as np +import torch + +# OpenCV Multilanguage-friendly functions ------------------------------------------------------------------------------ +_imshow = cv2.imshow # copy to avoid recursion errors + + +def imread(filename, flags=cv2.IMREAD_COLOR): + return cv2.imdecode(np.fromfile(filename, np.uint8), flags) + + +def imwrite(filename, img): + try: + cv2.imencode(Path(filename).suffix, img)[1].tofile(filename) + return True + except Exception: + return False + + +def imshow(path, im): + _imshow(path.encode('unicode_escape').decode(), im) + + +# PyTorch functions ---------------------------------------------------------------------------------------------------- +_torch_save = torch.save # copy to avoid recursion errors + + +def torch_save(*args, **kwargs): + """Use dill (if exists) to serialize the lambda functions where pickle does not do this.""" + try: + import dill as pickle + except ImportError: + import pickle + + if 'pickle_module' not in kwargs: + kwargs['pickle_module'] = pickle + return _torch_save(*args, **kwargs) diff --git a/ultralytics/yolo/utils/plotting.py b/ultralytics/yolo/utils/plotting.py new file mode 100644 index 0000000..0236a79 --- /dev/null +++ b/ultralytics/yolo/utils/plotting.py @@ -0,0 +1,527 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +import contextlib +import math +import warnings +from pathlib import Path + +import cv2 +import 
matplotlib.pyplot as plt +import numpy as np +import torch +from PIL import Image, ImageDraw, ImageFont +from PIL import __version__ as pil_version +from scipy.ndimage import gaussian_filter1d + +from ultralytics.yolo.utils import LOGGER, TryExcept, plt_settings, threaded + +from .checks import check_font, check_version, is_ascii +from .files import increment_path +from .ops import clip_boxes, scale_image, xywh2xyxy, xyxy2xywh + + +class Colors: + """Ultralytics color palette https://ultralytics.com/.""" + + def __init__(self): + """Initialize colors as hex = matplotlib.colors.TABLEAU_COLORS.values().""" + hexs = ('FF3838', 'FF9D97', 'FF701F', 'FFB21D', 'CFD231', '48F90A', '92CC17', '3DDB86', '1A9334', '00D4BB', + '2C99A8', '00C2FF', '344593', '6473FF', '0018EC', '8438FF', '520085', 'CB38FF', 'FF95C8', 'FF37C7') + self.palette = [self.hex2rgb(f'#{c}') for c in hexs] + self.n = len(self.palette) + self.pose_palette = np.array([[255, 128, 0], [255, 153, 51], [255, 178, 102], [230, 230, 0], [255, 153, 255], + [153, 204, 255], [255, 102, 255], [255, 51, 255], [102, 178, 255], [51, 153, 255], + [255, 153, 153], [255, 102, 102], [255, 51, 51], [153, 255, 153], [102, 255, 102], + [51, 255, 51], [0, 255, 0], [0, 0, 255], [255, 0, 0], [255, 255, 255]], + dtype=np.uint8) + + def __call__(self, i, bgr=False): + """Converts hex color codes to rgb values.""" + c = self.palette[int(i) % self.n] + return (c[2], c[1], c[0]) if bgr else c + + @staticmethod + def hex2rgb(h): # rgb order (PIL) + return tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4)) + + +colors = Colors() # create instance for 'from utils.plots import colors' + + +class Annotator: + """YOLOv8 Annotator for train/val mosaics and jpgs and detect/hub inference annotations.""" + + def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=False, example='abc'): + """Initialize the Annotator class with image and line width along with color palette for keypoints and limbs.""" + assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to Annotator() input images.' + non_ascii = not is_ascii(example) # non-latin labels, i.e. 
asian, arabic, cyrillic + self.pil = pil or non_ascii + if self.pil: # use PIL + self.im = im if isinstance(im, Image.Image) else Image.fromarray(im) + self.draw = ImageDraw.Draw(self.im) + try: + font = check_font('Arial.Unicode.ttf' if non_ascii else font) + size = font_size or max(round(sum(self.im.size) / 2 * 0.035), 12) + self.font = ImageFont.truetype(str(font), size) + except Exception: + self.font = ImageFont.load_default() + # Deprecation fix for w, h = getsize(string) -> _, _, w, h = getbox(string) + if check_version(pil_version, '9.2.0'): + self.font.getsize = lambda x: self.font.getbbox(x)[2:4] # text width, height + else: # use cv2 + self.im = im + self.lw = line_width or max(round(sum(im.shape) / 2 * 0.003), 2) # line width + # Pose + self.skeleton = [[16, 14], [14, 12], [17, 15], [15, 13], [12, 13], [6, 12], [7, 13], [6, 7], [6, 8], [7, 9], + [8, 10], [9, 11], [2, 3], [1, 2], [1, 3], [2, 4], [3, 5], [4, 6], [5, 7]] + + self.limb_color = colors.pose_palette[[9, 9, 9, 9, 7, 7, 7, 0, 0, 0, 0, 0, 16, 16, 16, 16, 16, 16, 16]] + self.kpt_color = colors.pose_palette[[16, 16, 16, 16, 16, 0, 0, 0, 0, 0, 0, 9, 9, 9, 9, 9, 9]] + + def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 255)): + """Add one xyxy box to image with label.""" + if isinstance(box, torch.Tensor): + box = box.tolist() + if self.pil or not is_ascii(label): + self.draw.rectangle(box, width=self.lw, outline=color) # box + if label: + w, h = self.font.getsize(label) # text width, height + outside = box[1] - h >= 0 # label fits outside box + self.draw.rectangle( + (box[0], box[1] - h if outside else box[1], box[0] + w + 1, + box[1] + 1 if outside else box[1] + h + 1), + fill=color, + ) + # self.draw.text((box[0], box[1]), label, fill=txt_color, font=self.font, anchor='ls') # for PIL>8.0 + self.draw.text((box[0], box[1] - h if outside else box[1]), label, fill=txt_color, font=self.font) + else: # cv2 + p1, p2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3])) + cv2.rectangle(self.im, p1, p2, color, thickness=self.lw, lineType=cv2.LINE_AA) + if label: + tf = max(self.lw - 1, 1) # font thickness + w, h = cv2.getTextSize(label, 0, fontScale=self.lw / 3, thickness=tf)[0] # text width, height + outside = p1[1] - h >= 3 + p2 = p1[0] + w, p1[1] - h - 3 if outside else p1[1] + h + 3 + cv2.rectangle(self.im, p1, p2, color, -1, cv2.LINE_AA) # filled + cv2.putText(self.im, + label, (p1[0], p1[1] - 2 if outside else p1[1] + h + 2), + 0, + self.lw / 3, + txt_color, + thickness=tf, + lineType=cv2.LINE_AA) + + def masks(self, masks, colors, im_gpu, alpha=0.5, retina_masks=False): + """Plot masks at once. 
+ Args: + masks (tensor): predicted masks on cuda, shape: [n, h, w] + colors (List[List[Int]]): colors for predicted masks, [[r, g, b] * n] + im_gpu (tensor): img is in cuda, shape: [3, h, w], range: [0, 1] + alpha (float): mask transparency: 0.0 fully transparent, 1.0 opaque + """ + if self.pil: + # Convert to numpy first + self.im = np.asarray(self.im).copy() + if len(masks) == 0: + self.im[:] = im_gpu.permute(1, 2, 0).contiguous().cpu().numpy() * 255 + if im_gpu.device != masks.device: + im_gpu = im_gpu.to(masks.device) + colors = torch.tensor(colors, device=masks.device, dtype=torch.float32) / 255.0 # shape(n,3) + colors = colors[:, None, None] # shape(n,1,1,3) + masks = masks.unsqueeze(3) # shape(n,h,w,1) + masks_color = masks * (colors * alpha) # shape(n,h,w,3) + + inv_alph_masks = (1 - masks * alpha).cumprod(0) # shape(n,h,w,1) + mcs = masks_color.max(dim=0).values # shape(n,h,w,3) + + im_gpu = im_gpu.flip(dims=[0]) # flip channel + im_gpu = im_gpu.permute(1, 2, 0).contiguous() # shape(h,w,3) + im_gpu = im_gpu * inv_alph_masks[-1] + mcs + im_mask = (im_gpu * 255) + im_mask_np = im_mask.byte().cpu().numpy() + self.im[:] = im_mask_np if retina_masks else scale_image(im_mask_np, self.im.shape) + if self.pil: + # Convert im back to PIL and update draw + self.fromarray(self.im) + + def kpts(self, kpts, shape=(640, 640), radius=5, kpt_line=True): + """Plot keypoints on the image. + + Args: + kpts (tensor): Predicted keypoints with shape [17, 3]. Each keypoint has (x, y, confidence). + shape (tuple): Image shape as a tuple (h, w), where h is the height and w is the width. + radius (int, optional): Radius of the drawn keypoints. Default is 5. + kpt_line (bool, optional): If True, the function will draw lines connecting keypoints + for human pose. Default is True. + + Note: `kpt_line=True` currently only supports human pose plotting. 
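+
+        Example:
+            Illustrative only (the image and keypoints below are synthetic):
+
+                >>> im = np.zeros((640, 640, 3), dtype=np.uint8)
+                >>> ann = Annotator(im)
+                >>> ann.kpts(torch.rand(17, 3) * torch.tensor([640., 640., 1.]))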
+ """ + if self.pil: + # Convert to numpy first + self.im = np.asarray(self.im).copy() + nkpt, ndim = kpts.shape + is_pose = nkpt == 17 and ndim == 3 + kpt_line &= is_pose # `kpt_line=True` for now only supports human pose plotting + for i, k in enumerate(kpts): + color_k = [int(x) for x in self.kpt_color[i]] if is_pose else colors(i) + x_coord, y_coord = k[0], k[1] + if x_coord % shape[1] != 0 and y_coord % shape[0] != 0: + if len(k) == 3: + conf = k[2] + if conf < 0.5: + continue + cv2.circle(self.im, (int(x_coord), int(y_coord)), radius, color_k, -1, lineType=cv2.LINE_AA) + + if kpt_line: + ndim = kpts.shape[-1] + for i, sk in enumerate(self.skeleton): + pos1 = (int(kpts[(sk[0] - 1), 0]), int(kpts[(sk[0] - 1), 1])) + pos2 = (int(kpts[(sk[1] - 1), 0]), int(kpts[(sk[1] - 1), 1])) + if ndim == 3: + conf1 = kpts[(sk[0] - 1), 2] + conf2 = kpts[(sk[1] - 1), 2] + if conf1 < 0.5 or conf2 < 0.5: + continue + if pos1[0] % shape[1] == 0 or pos1[1] % shape[0] == 0 or pos1[0] < 0 or pos1[1] < 0: + continue + if pos2[0] % shape[1] == 0 or pos2[1] % shape[0] == 0 or pos2[0] < 0 or pos2[1] < 0: + continue + cv2.line(self.im, pos1, pos2, [int(x) for x in self.limb_color[i]], thickness=2, lineType=cv2.LINE_AA) + if self.pil: + # Convert im back to PIL and update draw + self.fromarray(self.im) + + def rectangle(self, xy, fill=None, outline=None, width=1): + """Add rectangle to image (PIL-only).""" + self.draw.rectangle(xy, fill, outline, width) + + def text(self, xy, text, txt_color=(255, 255, 255), anchor='top', box_style=False): + """Adds text to an image using PIL or cv2.""" + if anchor == 'bottom': # start y from font bottom + w, h = self.font.getsize(text) # text width, height + xy[1] += 1 - h + if self.pil: + if box_style: + w, h = self.font.getsize(text) + self.draw.rectangle((xy[0], xy[1], xy[0] + w + 1, xy[1] + h + 1), fill=txt_color) + # Using `txt_color` for background and draw fg with white color + txt_color = (255, 255, 255) + if '\n' in text: + lines = text.split('\n') + _, h = self.font.getsize(text) + for line in lines: + self.draw.text(xy, line, fill=txt_color, font=self.font) + xy[1] += h + else: + self.draw.text(xy, text, fill=txt_color, font=self.font) + else: + if box_style: + tf = max(self.lw - 1, 1) # font thickness + w, h = cv2.getTextSize(text, 0, fontScale=self.lw / 3, thickness=tf)[0] # text width, height + outside = xy[1] - h >= 3 + p2 = xy[0] + w, xy[1] - h - 3 if outside else xy[1] + h + 3 + cv2.rectangle(self.im, xy, p2, txt_color, -1, cv2.LINE_AA) # filled + # Using `txt_color` for background and draw fg with white color + txt_color = (255, 255, 255) + tf = max(self.lw - 1, 1) # font thickness + cv2.putText(self.im, text, xy, 0, self.lw / 3, txt_color, thickness=tf, lineType=cv2.LINE_AA) + + def fromarray(self, im): + """Update self.im from a numpy array.""" + self.im = im if isinstance(im, Image.Image) else Image.fromarray(im) + self.draw = ImageDraw.Draw(self.im) + + def result(self): + """Return annotated image as array.""" + return np.asarray(self.im) + + +@TryExcept() # known issue https://github.com/ultralytics/yolov5/issues/5395 +@plt_settings() +def plot_labels(boxes, cls, names=(), save_dir=Path(''), on_plot=None): + """Save and plot image with no axis or spines.""" + import pandas as pd + import seaborn as sn + + # Filter matplotlib>=3.7.2 warning + warnings.filterwarnings('ignore', category=UserWarning, message='The figure layout has changed to tight') + + # Plot dataset labels + LOGGER.info(f"Plotting labels to {save_dir / 'labels.jpg'}... 
") + b = boxes.transpose() # classes, boxes + nc = int(cls.max() + 1) # number of classes + x = pd.DataFrame(b.transpose(), columns=['x', 'y', 'width', 'height']) + + # Seaborn correlogram + sn.pairplot(x, corner=True, diag_kind='auto', kind='hist', diag_kws=dict(bins=50), plot_kws=dict(pmax=0.9)) + plt.savefig(save_dir / 'labels_correlogram.jpg', dpi=200) + plt.close() + + # Matplotlib labels + ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)[1].ravel() + y = ax[0].hist(cls, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8) + with contextlib.suppress(Exception): # color histogram bars by class + [y[2].patches[i].set_color([x / 255 for x in colors(i)]) for i in range(nc)] # known issue #3195 + ax[0].set_ylabel('instances') + if 0 < len(names) < 30: + ax[0].set_xticks(range(len(names))) + ax[0].set_xticklabels(list(names.values()), rotation=90, fontsize=10) + else: + ax[0].set_xlabel('classes') + sn.histplot(x, x='x', y='y', ax=ax[2], bins=50, pmax=0.9) + sn.histplot(x, x='width', y='height', ax=ax[3], bins=50, pmax=0.9) + + # Rectangles + boxes[:, 0:2] = 0.5 # center + boxes = xywh2xyxy(boxes) * 1000 + img = Image.fromarray(np.ones((1000, 1000, 3), dtype=np.uint8) * 255) + for cls, box in zip(cls[:500], boxes[:500]): + ImageDraw.Draw(img).rectangle(box, width=1, outline=colors(cls)) # plot + ax[1].imshow(img) + ax[1].axis('off') + + for a in [0, 1, 2, 3]: + for s in ['top', 'right', 'left', 'bottom']: + ax[a].spines[s].set_visible(False) + + fname = save_dir / 'labels.jpg' + plt.savefig(fname, dpi=200) + plt.close() + if on_plot: + on_plot(fname) + + +def save_one_box(xyxy, im, file=Path('im.jpg'), gain=1.02, pad=10, square=False, BGR=False, save=True): + """Save image crop as {file} with crop size multiple {gain} and {pad} pixels. Save and/or return crop.""" + b = xyxy2xywh(xyxy.view(-1, 4)) # boxes + if square: + b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # attempt rectangle to square + b[:, 2:] = b[:, 2:] * gain + pad # box wh * gain + pad + xyxy = xywh2xyxy(b).long() + clip_boxes(xyxy, im.shape) + crop = im[int(xyxy[0, 1]):int(xyxy[0, 3]), int(xyxy[0, 0]):int(xyxy[0, 2]), ::(1 if BGR else -1)] + if save: + file.parent.mkdir(parents=True, exist_ok=True) # make directory + f = str(increment_path(file).with_suffix('.jpg')) + # cv2.imwrite(f, crop) # save BGR, https://github.com/ultralytics/yolov5/issues/7007 chroma subsampling issue + Image.fromarray(crop[..., ::-1]).save(f, quality=95, subsampling=0) # save RGB + return crop + + +@threaded +def plot_images(images, + batch_idx, + cls, + bboxes=np.zeros(0, dtype=np.float32), + masks=np.zeros(0, dtype=np.uint8), + kpts=np.zeros((0, 51), dtype=np.float32), + paths=None, + fname='images.jpg', + names=None, + on_plot=None): + """Plot image grid with labels.""" + if isinstance(images, torch.Tensor): + images = images.cpu().float().numpy() + if isinstance(cls, torch.Tensor): + cls = cls.cpu().numpy() + if isinstance(bboxes, torch.Tensor): + bboxes = bboxes.cpu().numpy() + if isinstance(masks, torch.Tensor): + masks = masks.cpu().numpy().astype(int) + if isinstance(kpts, torch.Tensor): + kpts = kpts.cpu().numpy() + if isinstance(batch_idx, torch.Tensor): + batch_idx = batch_idx.cpu().numpy() + + max_size = 1920 # max image size + max_subplots = 16 # max image subplots, i.e. 
4x4 + bs, _, h, w = images.shape # batch size, _, height, width + bs = min(bs, max_subplots) # limit plot images + ns = np.ceil(bs ** 0.5) # number of subplots (square) + if np.max(images[0]) <= 1: + images *= 255 # de-normalise (optional) + + # Build Image + mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8) # init + for i, im in enumerate(images): + if i == max_subplots: # if last batch has fewer images than we expect + break + x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin + im = im.transpose(1, 2, 0) + mosaic[y:y + h, x:x + w, :] = im + + # Resize (optional) + scale = max_size / ns / max(h, w) + if scale < 1: + h = math.ceil(scale * h) + w = math.ceil(scale * w) + mosaic = cv2.resize(mosaic, tuple(int(x * ns) for x in (w, h))) + + # Annotate + fs = int((h + w) * ns * 0.01) # font size + annotator = Annotator(mosaic, line_width=round(fs / 10), font_size=fs, pil=True, example=names) + for i in range(i + 1): + x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin + annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2) # borders + if paths: + annotator.text((x + 5, y + 5), text=Path(paths[i]).name[:40], txt_color=(220, 220, 220)) # filenames + if len(cls) > 0: + idx = batch_idx == i + classes = cls[idx].astype('int') + + if len(bboxes): + boxes = xywh2xyxy(bboxes[idx, :4]).T + labels = bboxes.shape[1] == 4 # labels if no conf column + conf = None if labels else bboxes[idx, 4] # check for confidence presence (label vs pred) + + if boxes.shape[1]: + if boxes.max() <= 1.01: # if normalized with tolerance 0.01 + boxes[[0, 2]] *= w # scale to pixels + boxes[[1, 3]] *= h + elif scale < 1: # absolute coords need scale if image scales + boxes *= scale + boxes[[0, 2]] += x + boxes[[1, 3]] += y + for j, box in enumerate(boxes.T.tolist()): + c = classes[j] + color = colors(c) + c = names.get(c, c) if names else c + if labels or conf[j] > 0.25: # 0.25 conf thresh + label = f'{c}' if labels else f'{c} {conf[j]:.1f}' + annotator.box_label(box, label, color=color) + elif len(classes): + for c in classes: + color = colors(c) + c = names.get(c, c) if names else c + annotator.text((x, y), f'{c}', txt_color=color, box_style=True) + + # Plot keypoints + if len(kpts): + kpts_ = kpts[idx].copy() + if len(kpts_): + if kpts_[..., 0].max() <= 1.01 or kpts_[..., 1].max() <= 1.01: # if normalized with tolerance .01 + kpts_[..., 0] *= w # scale to pixels + kpts_[..., 1] *= h + elif scale < 1: # absolute coords need scale if image scales + kpts_ *= scale + kpts_[..., 0] += x + kpts_[..., 1] += y + for j in range(len(kpts_)): + if labels or conf[j] > 0.25: # 0.25 conf thresh + annotator.kpts(kpts_[j]) + + # Plot masks + if len(masks): + if idx.shape[0] == masks.shape[0]: # overlap_masks=False + image_masks = masks[idx] + else: # overlap_masks=True + image_masks = masks[[i]] # (1, 640, 640) + nl = idx.sum() + index = np.arange(nl).reshape((nl, 1, 1)) + 1 + image_masks = np.repeat(image_masks, nl, axis=0) + image_masks = np.where(image_masks == index, 1.0, 0.0) + + im = np.asarray(annotator.im).copy() + for j, box in enumerate(boxes.T.tolist()): + if labels or conf[j] > 0.25: # 0.25 conf thresh + color = colors(classes[j]) + mh, mw = image_masks[j].shape + if mh != h or mw != w: + mask = image_masks[j].astype(np.uint8) + mask = cv2.resize(mask, (w, h)) + mask = mask.astype(bool) + else: + mask = image_masks[j].astype(bool) + with contextlib.suppress(Exception): + im[y:y + h, x:x + w, :][mask] = im[y:y + h, x:x + w, :][mask] * 0.4 + np.array(color) * 0.6 + 
annotator.fromarray(im) + annotator.im.save(fname) # save + if on_plot: + on_plot(fname) + + +@plt_settings() +def plot_results(file='path/to/results.csv', dir='', segment=False, pose=False, classify=False, on_plot=None): + """Plot training results.csv. Usage: from utils.plots import *; plot_results('path/to/results.csv').""" + import pandas as pd + save_dir = Path(file).parent if file else Path(dir) + if classify: + fig, ax = plt.subplots(2, 2, figsize=(6, 6), tight_layout=True) + index = [1, 4, 2, 3] + elif segment: + fig, ax = plt.subplots(2, 8, figsize=(18, 6), tight_layout=True) + index = [1, 2, 3, 4, 5, 6, 9, 10, 13, 14, 15, 16, 7, 8, 11, 12] + elif pose: + fig, ax = plt.subplots(2, 9, figsize=(21, 6), tight_layout=True) + index = [1, 2, 3, 4, 5, 6, 7, 10, 11, 14, 15, 16, 17, 18, 8, 9, 12, 13] + else: + fig, ax = plt.subplots(2, 5, figsize=(12, 6), tight_layout=True) + index = [1, 2, 3, 4, 5, 8, 9, 10, 6, 7] + ax = ax.ravel() + files = list(save_dir.glob('results*.csv')) + assert len(files), f'No results.csv files found in {save_dir.resolve()}, nothing to plot.' + for f in files: + try: + data = pd.read_csv(f) + s = [x.strip() for x in data.columns] + x = data.values[:, 0] + for i, j in enumerate(index): + y = data.values[:, j].astype('float') + # y[y == 0] = np.nan # don't show zero values + ax[i].plot(x, y, marker='.', label=f.stem, linewidth=2, markersize=8) # actual results + ax[i].plot(x, gaussian_filter1d(y, sigma=3), ':', label='smooth', linewidth=2) # smoothing line + ax[i].set_title(s[j], fontsize=12) + # if j in [8, 9, 10]: # share train and val loss y axes + # ax[i].get_shared_y_axes().join(ax[i], ax[i - 5]) + except Exception as e: + LOGGER.warning(f'WARNING: Plotting error for {f}: {e}') + ax[1].legend() + fname = save_dir / 'results.png' + fig.savefig(fname, dpi=200) + plt.close() + if on_plot: + on_plot(fname) + + +def output_to_target(output, max_det=300): + """Convert model output to target format [batch_id, class_id, x, y, w, h, conf] for plotting.""" + targets = [] + for i, o in enumerate(output): + box, conf, cls = o[:max_det, :6].cpu().split((4, 1, 1), 1) + j = torch.full((conf.shape[0], 1), i) + targets.append(torch.cat((j, cls, xyxy2xywh(box), conf), 1)) + targets = torch.cat(targets, 0).numpy() + return targets[:, 0], targets[:, 1], targets[:, 2:] + + +def feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detect/exp')): + """ + Visualize feature maps of a given model module during inference. + + Args: + x (torch.Tensor): Features to be visualized. + module_type (str): Module type. + stage (int): Module stage within the model. + n (int, optional): Maximum number of feature maps to plot. Defaults to 32. + save_dir (Path, optional): Directory to save results. Defaults to Path('runs/detect/exp'). + """ + for m in ['Detect', 'Pose', 'Segment']: + if m in module_type: + return + batch, channels, height, width = x.shape # batch, channels, height, width + if height > 1 and width > 1: + f = save_dir / f"stage{stage}_{module_type.split('.')[-1]}_features.png" # filename + + blocks = torch.chunk(x[0].cpu(), channels, dim=0) # select batch index 0, block by channels + n = min(n, channels) # number of plots + fig, ax = plt.subplots(math.ceil(n / 8), 8, tight_layout=True) # 8 rows x n/8 cols + ax = ax.ravel() + plt.subplots_adjust(wspace=0.05, hspace=0.05) + for i in range(n): + ax[i].imshow(blocks[i].squeeze()) # cmap='gray' + ax[i].axis('off') + + LOGGER.info(f'Saving {f}... 
({n}/{channels})') + plt.savefig(f, dpi=300, bbox_inches='tight') + plt.close() + np.save(str(f.with_suffix('.npy')), x[0].cpu().numpy()) # npy save diff --git a/ultralytics/yolo/utils/tal.py b/ultralytics/yolo/utils/tal.py new file mode 100644 index 0000000..aea8918 --- /dev/null +++ b/ultralytics/yolo/utils/tal.py @@ -0,0 +1,276 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +import torch +import torch.nn as nn + +from .checks import check_version +from .metrics import bbox_iou + +TORCH_1_10 = check_version(torch.__version__, '1.10.0') + + +def select_candidates_in_gts(xy_centers, gt_bboxes, eps=1e-9): + """select the positive anchor center in gt + + Args: + xy_centers (Tensor): shape(h*w, 4) + gt_bboxes (Tensor): shape(b, n_boxes, 4) + Return: + (Tensor): shape(b, n_boxes, h*w) + """ + n_anchors = xy_centers.shape[0] + bs, n_boxes, _ = gt_bboxes.shape + lt, rb = gt_bboxes.view(-1, 1, 4).chunk(2, 2) # left-top, right-bottom + bbox_deltas = torch.cat((xy_centers[None] - lt, rb - xy_centers[None]), dim=2).view(bs, n_boxes, n_anchors, -1) + # return (bbox_deltas.min(3)[0] > eps).to(gt_bboxes.dtype) + return bbox_deltas.amin(3).gt_(eps) + + +def select_highest_overlaps(mask_pos, overlaps, n_max_boxes): + """if an anchor box is assigned to multiple gts, + the one with the highest iou will be selected. + + Args: + mask_pos (Tensor): shape(b, n_max_boxes, h*w) + overlaps (Tensor): shape(b, n_max_boxes, h*w) + Return: + target_gt_idx (Tensor): shape(b, h*w) + fg_mask (Tensor): shape(b, h*w) + mask_pos (Tensor): shape(b, n_max_boxes, h*w) + """ + # (b, n_max_boxes, h*w) -> (b, h*w) + fg_mask = mask_pos.sum(-2) + if fg_mask.max() > 1: # one anchor is assigned to multiple gt_bboxes + mask_multi_gts = (fg_mask.unsqueeze(1) > 1).expand(-1, n_max_boxes, -1) # (b, n_max_boxes, h*w) + max_overlaps_idx = overlaps.argmax(1) # (b, h*w) + + is_max_overlaps = torch.zeros(mask_pos.shape, dtype=mask_pos.dtype, device=mask_pos.device) + is_max_overlaps.scatter_(1, max_overlaps_idx.unsqueeze(1), 1) + + mask_pos = torch.where(mask_multi_gts, is_max_overlaps, mask_pos).float() # (b, n_max_boxes, h*w) + fg_mask = mask_pos.sum(-2) + # Find each grid serve which gt(index) + target_gt_idx = mask_pos.argmax(-2) # (b, h*w) + return target_gt_idx, fg_mask, mask_pos + + +class TaskAlignedAssigner(nn.Module): + """ + A task-aligned assigner for object detection. + + This class assigns ground-truth (gt) objects to anchors based on the task-aligned metric, + which combines both classification and localization information. + + Attributes: + topk (int): The number of top candidates to consider. + num_classes (int): The number of object classes. + alpha (float): The alpha parameter for the classification component of the task-aligned metric. + beta (float): The beta parameter for the localization component of the task-aligned metric. + eps (float): A small value to prevent division by zero. + """ + + def __init__(self, topk=13, num_classes=80, alpha=1.0, beta=6.0, eps=1e-9): + """Initialize a TaskAlignedAssigner object with customizable hyperparameters.""" + super().__init__() + self.topk = topk + self.num_classes = num_classes + self.bg_idx = num_classes + self.alpha = alpha + self.beta = beta + self.eps = eps + + @torch.no_grad() + def forward(self, pd_scores, pd_bboxes, anc_points, gt_labels, gt_bboxes, mask_gt): + """ + Compute the task-aligned assignment. 
+ Reference https://github.com/Nioolek/PPYOLOE_pytorch/blob/master/ppyoloe/assigner/tal_assigner.py + + Args: + pd_scores (Tensor): shape(bs, num_total_anchors, num_classes) + pd_bboxes (Tensor): shape(bs, num_total_anchors, 4) + anc_points (Tensor): shape(num_total_anchors, 2) + gt_labels (Tensor): shape(bs, n_max_boxes, 1) + gt_bboxes (Tensor): shape(bs, n_max_boxes, 4) + mask_gt (Tensor): shape(bs, n_max_boxes, 1) + + Returns: + target_labels (Tensor): shape(bs, num_total_anchors) + target_bboxes (Tensor): shape(bs, num_total_anchors, 4) + target_scores (Tensor): shape(bs, num_total_anchors, num_classes) + fg_mask (Tensor): shape(bs, num_total_anchors) + target_gt_idx (Tensor): shape(bs, num_total_anchors) + """ + self.bs = pd_scores.size(0) + self.n_max_boxes = gt_bboxes.size(1) + + if self.n_max_boxes == 0: + device = gt_bboxes.device + return (torch.full_like(pd_scores[..., 0], self.bg_idx).to(device), torch.zeros_like(pd_bboxes).to(device), + torch.zeros_like(pd_scores).to(device), torch.zeros_like(pd_scores[..., 0]).to(device), + torch.zeros_like(pd_scores[..., 0]).to(device)) + + mask_pos, align_metric, overlaps = self.get_pos_mask(pd_scores, pd_bboxes, gt_labels, gt_bboxes, anc_points, + mask_gt) + + target_gt_idx, fg_mask, mask_pos = select_highest_overlaps(mask_pos, overlaps, self.n_max_boxes) + + # Assigned target + target_labels, target_bboxes, target_scores = self.get_targets(gt_labels, gt_bboxes, target_gt_idx, fg_mask) + + # Normalize + align_metric *= mask_pos + pos_align_metrics = align_metric.amax(axis=-1, keepdim=True) # b, max_num_obj + pos_overlaps = (overlaps * mask_pos).amax(axis=-1, keepdim=True) # b, max_num_obj + norm_align_metric = (align_metric * pos_overlaps / (pos_align_metrics + self.eps)).amax(-2).unsqueeze(-1) + target_scores = target_scores * norm_align_metric + + return target_labels, target_bboxes, target_scores, fg_mask.bool(), target_gt_idx + + def get_pos_mask(self, pd_scores, pd_bboxes, gt_labels, gt_bboxes, anc_points, mask_gt): + """Get in_gts mask, (b, max_num_obj, h*w).""" + mask_in_gts = select_candidates_in_gts(anc_points, gt_bboxes) + # Get anchor_align metric, (b, max_num_obj, h*w) + align_metric, overlaps = self.get_box_metrics(pd_scores, pd_bboxes, gt_labels, gt_bboxes, mask_in_gts * mask_gt) + # Get topk_metric mask, (b, max_num_obj, h*w) + mask_topk = self.select_topk_candidates(align_metric, topk_mask=mask_gt.expand(-1, -1, self.topk).bool()) + # Merge all mask to a final mask, (b, max_num_obj, h*w) + mask_pos = mask_topk * mask_in_gts * mask_gt + + return mask_pos, align_metric, overlaps + + def get_box_metrics(self, pd_scores, pd_bboxes, gt_labels, gt_bboxes, mask_gt): + """Compute alignment metric given predicted and ground truth bounding boxes.""" + na = pd_bboxes.shape[-2] + mask_gt = mask_gt.bool() # b, max_num_obj, h*w + overlaps = torch.zeros([self.bs, self.n_max_boxes, na], dtype=pd_bboxes.dtype, device=pd_bboxes.device) + bbox_scores = torch.zeros([self.bs, self.n_max_boxes, na], dtype=pd_scores.dtype, device=pd_scores.device) + + ind = torch.zeros([2, self.bs, self.n_max_boxes], dtype=torch.long) # 2, b, max_num_obj + ind[0] = torch.arange(end=self.bs).view(-1, 1).expand(-1, self.n_max_boxes) # b, max_num_obj + ind[1] = gt_labels.squeeze(-1) # b, max_num_obj + # Get the scores of each grid for each gt cls + bbox_scores[mask_gt] = pd_scores[ind[0], :, ind[1]][mask_gt] # b, max_num_obj, h*w + + # (b, max_num_obj, 1, 4), (b, 1, h*w, 4) + pd_boxes = pd_bboxes.unsqueeze(1).expand(-1, self.n_max_boxes, -1, -1)[mask_gt] + gt_boxes 
= gt_bboxes.unsqueeze(2).expand(-1, -1, na, -1)[mask_gt] + overlaps[mask_gt] = bbox_iou(gt_boxes, pd_boxes, xywh=False, CIoU=True).squeeze(-1).clamp_(0) + + align_metric = bbox_scores.pow(self.alpha) * overlaps.pow(self.beta) + return align_metric, overlaps + + def select_topk_candidates(self, metrics, largest=True, topk_mask=None): + """ + Select the top-k candidates based on the given metrics. + + Args: + metrics (Tensor): A tensor of shape (b, max_num_obj, h*w), where b is the batch size, + max_num_obj is the maximum number of objects, and h*w represents the + total number of anchor points. + largest (bool): If True, select the largest values; otherwise, select the smallest values. + topk_mask (Tensor): An optional boolean tensor of shape (b, max_num_obj, topk), where + topk is the number of top candidates to consider. If not provided, + the top-k values are automatically computed based on the given metrics. + + Returns: + (Tensor): A tensor of shape (b, max_num_obj, h*w) containing the selected top-k candidates. + """ + + # (b, max_num_obj, topk) + topk_metrics, topk_idxs = torch.topk(metrics, self.topk, dim=-1, largest=largest) + if topk_mask is None: + topk_mask = (topk_metrics.max(-1, keepdim=True)[0] > self.eps).expand_as(topk_idxs) + # (b, max_num_obj, topk) + topk_idxs.masked_fill_(~topk_mask, 0) + + # (b, max_num_obj, topk, h*w) -> (b, max_num_obj, h*w) + count_tensor = torch.zeros(metrics.shape, dtype=torch.int8, device=topk_idxs.device) + ones = torch.ones_like(topk_idxs[:, :, :1], dtype=torch.int8, device=topk_idxs.device) + for k in range(self.topk): + # Expand topk_idxs for each value of k and add 1 at the specified positions + count_tensor.scatter_add_(-1, topk_idxs[:, :, k:k + 1], ones) + # count_tensor.scatter_add_(-1, topk_idxs, torch.ones_like(topk_idxs, dtype=torch.int8, device=topk_idxs.device)) + # filter invalid bboxes + count_tensor.masked_fill_(count_tensor > 1, 0) + + return count_tensor.to(metrics.dtype) + + def get_targets(self, gt_labels, gt_bboxes, target_gt_idx, fg_mask): + """ + Compute target labels, target bounding boxes, and target scores for the positive anchor points. + + Args: + gt_labels (Tensor): Ground truth labels of shape (b, max_num_obj, 1), where b is the + batch size and max_num_obj is the maximum number of objects. + gt_bboxes (Tensor): Ground truth bounding boxes of shape (b, max_num_obj, 4). + target_gt_idx (Tensor): Indices of the assigned ground truth objects for positive + anchor points, with shape (b, h*w), where h*w is the total + number of anchor points. + fg_mask (Tensor): A boolean tensor of shape (b, h*w) indicating the positive + (foreground) anchor points. + + Returns: + (Tuple[Tensor, Tensor, Tensor]): A tuple containing the following tensors: + - target_labels (Tensor): Shape (b, h*w), containing the target labels for + positive anchor points. + - target_bboxes (Tensor): Shape (b, h*w, 4), containing the target bounding boxes + for positive anchor points. + - target_scores (Tensor): Shape (b, h*w, num_classes), containing the target scores + for positive anchor points, where num_classes is the number + of object classes. 
+ """ + + # Assigned target labels, (b, 1) + batch_ind = torch.arange(end=self.bs, dtype=torch.int64, device=gt_labels.device)[..., None] + target_gt_idx = target_gt_idx + batch_ind * self.n_max_boxes # (b, h*w) + target_labels = gt_labels.long().flatten()[target_gt_idx] # (b, h*w) + + # Assigned target boxes, (b, max_num_obj, 4) -> (b, h*w) + target_bboxes = gt_bboxes.view(-1, 4)[target_gt_idx] + + # Assigned target scores + target_labels.clamp_(0) + + # 10x faster than F.one_hot() + target_scores = torch.zeros((target_labels.shape[0], target_labels.shape[1], self.num_classes), + dtype=torch.int64, + device=target_labels.device) # (b, h*w, 80) + target_scores.scatter_(2, target_labels.unsqueeze(-1), 1) + + fg_scores_mask = fg_mask[:, :, None].repeat(1, 1, self.num_classes) # (b, h*w, 80) + target_scores = torch.where(fg_scores_mask > 0, target_scores, 0) + + return target_labels, target_bboxes, target_scores + + +def make_anchors(feats, strides, grid_cell_offset=0.5): + """Generate anchors from features.""" + anchor_points, stride_tensor = [], [] + assert feats is not None + dtype, device = feats[0].dtype, feats[0].device + for i, stride in enumerate(strides): + _, _, h, w = feats[i].shape + sx = torch.arange(end=w, device=device, dtype=dtype) + grid_cell_offset # shift x + sy = torch.arange(end=h, device=device, dtype=dtype) + grid_cell_offset # shift y + sy, sx = torch.meshgrid(sy, sx, indexing='ij') if TORCH_1_10 else torch.meshgrid(sy, sx) + anchor_points.append(torch.stack((sx, sy), -1).view(-1, 2)) + stride_tensor.append(torch.full((h * w, 1), stride, dtype=dtype, device=device)) + return torch.cat(anchor_points), torch.cat(stride_tensor) + + +def dist2bbox(distance, anchor_points, xywh=True, dim=-1): + """Transform distance(ltrb) to box(xywh or xyxy).""" + lt, rb = distance.chunk(2, dim) + x1y1 = anchor_points - lt + x2y2 = anchor_points + rb + if xywh: + c_xy = (x1y1 + x2y2) / 2 + wh = x2y2 - x1y1 + return torch.cat((c_xy, wh), dim) # xywh bbox + return torch.cat((x1y1, x2y2), dim) # xyxy bbox + + +def bbox2dist(anchor_points, bbox, reg_max): + """Transform bbox(xyxy) to dist(ltrb).""" + x1y1, x2y2 = bbox.chunk(2, -1) + return torch.cat((anchor_points - x1y1, x2y2 - anchor_points), -1).clamp_(0, reg_max - 0.01) # dist (lt, rb) diff --git a/ultralytics/yolo/utils/torch_utils.py b/ultralytics/yolo/utils/torch_utils.py new file mode 100644 index 0000000..0ab1f4e --- /dev/null +++ b/ultralytics/yolo/utils/torch_utils.py @@ -0,0 +1,512 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +import math +import os +import platform +import random +import time +from contextlib import contextmanager +from copy import deepcopy +from pathlib import Path +from typing import Union + +import numpy as np +import torch +import torch.distributed as dist +import torch.nn as nn +import torch.nn.functional as F +import torchvision + +from ultralytics.yolo.utils import DEFAULT_CFG_DICT, DEFAULT_CFG_KEYS, LOGGER, RANK, __version__ +from ultralytics.yolo.utils.checks import check_requirements, check_version + +try: + import thop +except ImportError: + thop = None + +TORCHVISION_0_10 = check_version(torchvision.__version__, '0.10.0') +TORCH_1_9 = check_version(torch.__version__, '1.9.0') +TORCH_1_11 = check_version(torch.__version__, '1.11.0') +TORCH_1_12 = check_version(torch.__version__, '1.12.0') +TORCH_2_0 = check_version(torch.__version__, minimum='2.0') + + +@contextmanager +def torch_distributed_zero_first(local_rank: int): + """Decorator to make all processes in distributed training wait for each 
local_master to do something.""" + initialized = torch.distributed.is_available() and torch.distributed.is_initialized() + if initialized and local_rank not in (-1, 0): + dist.barrier(device_ids=[local_rank]) + yield + if initialized and local_rank == 0: + dist.barrier(device_ids=[0]) + + +def smart_inference_mode(): + """Applies torch.inference_mode() decorator if torch>=1.9.0 else torch.no_grad() decorator.""" + + def decorate(fn): + """Applies appropriate torch decorator for inference mode based on torch version.""" + return (torch.inference_mode if TORCH_1_9 else torch.no_grad)()(fn) + + return decorate + + +def get_cpu_info(): + """Return a string with system CPU information, i.e. 'Apple M2'.""" + check_requirements('py-cpuinfo') + import cpuinfo # noqa + return cpuinfo.get_cpu_info()['brand_raw'].replace('(R)', '').replace('CPU ', '').replace('@ ', '') + + +def select_device(device='', batch=0, newline=False, verbose=True): + """Selects PyTorch Device. Options are device = None or 'cpu' or 0 or '0' or '0,1,2,3'.""" + s = f'Ultralytics YOLOv{__version__} 🚀 Python-{platform.python_version()} torch-{torch.__version__} ' + device = str(device).lower() + for remove in 'cuda:', 'none', '(', ')', '[', ']', "'", ' ': + device = device.replace(remove, '') # to string, 'cuda:0' -> '0' and '(0, 1)' -> '0,1' + cpu = device == 'cpu' + mps = device == 'mps' # Apple Metal Performance Shaders (MPS) + if cpu or mps: + os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False + elif device: # non-cpu device requested + if device == 'cuda': + device = '0' + visible = os.environ.get('CUDA_VISIBLE_DEVICES', None) + os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable - must be before assert is_available() + if not (torch.cuda.is_available() and torch.cuda.device_count() >= len(device.replace(',', ''))): + LOGGER.info(s) + install = 'See https://pytorch.org/get-started/locally/ for up-to-date torch install instructions if no ' \ + 'CUDA devices are seen by torch.\n' if torch.cuda.device_count() == 0 else '' + raise ValueError(f"Invalid CUDA 'device={device}' requested." + f" Use 'device=cpu' or pass valid CUDA device(s) if available," + f" i.e. 'device=0' or 'device=0,1,2,3' for Multi-GPU.\n" + f'\ntorch.cuda.is_available(): {torch.cuda.is_available()}' + f'\ntorch.cuda.device_count(): {torch.cuda.device_count()}' + f"\nos.environ['CUDA_VISIBLE_DEVICES']: {visible}\n" + f'{install}') + + if not cpu and not mps and torch.cuda.is_available(): # prefer GPU if available + devices = device.split(',') if device else '0' # range(torch.cuda.device_count()) # i.e. 0,1,6,7 + n = len(devices) # device count + if n > 1 and batch > 0 and batch % n != 0: # check batch_size is divisible by device_count + raise ValueError(f"'batch={batch}' must be a multiple of GPU count {n}. 
Try 'batch={batch // n * n}' or " + f"'batch={batch // n * n + n}', the nearest batch sizes evenly divisible by {n}.") + space = ' ' * (len(s) + 1) + for i, d in enumerate(devices): + p = torch.cuda.get_device_properties(i) + s += f"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / (1 << 20):.0f}MiB)\n" # bytes to MB + arg = 'cuda:0' + elif mps and getattr(torch, 'has_mps', False) and torch.backends.mps.is_available() and TORCH_2_0: + # Prefer MPS if available + s += f'MPS ({get_cpu_info()})\n' + arg = 'mps' + else: # revert to CPU + s += f'CPU ({get_cpu_info()})\n' + arg = 'cpu' + + if verbose and RANK == -1: + LOGGER.info(s if newline else s.rstrip()) + return torch.device(arg) + + +def time_sync(): + """PyTorch-accurate time.""" + if torch.cuda.is_available(): + torch.cuda.synchronize() + return time.time() + + +def fuse_conv_and_bn(conv, bn): + """Fuse Conv2d() and BatchNorm2d() layers https://tehnokv.com/posts/fusing-batchnorm-and-conv/.""" + fusedconv = nn.Conv2d(conv.in_channels, + conv.out_channels, + kernel_size=conv.kernel_size, + stride=conv.stride, + padding=conv.padding, + dilation=conv.dilation, + groups=conv.groups, + bias=True).requires_grad_(False).to(conv.weight.device) + + # Prepare filters + w_conv = conv.weight.clone().view(conv.out_channels, -1) + w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var))) + fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.shape)) + + # Prepare spatial bias + b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias + b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps)) + fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn) + + return fusedconv + + +def fuse_deconv_and_bn(deconv, bn): + """Fuse ConvTranspose2d() and BatchNorm2d() layers.""" + fuseddconv = nn.ConvTranspose2d(deconv.in_channels, + deconv.out_channels, + kernel_size=deconv.kernel_size, + stride=deconv.stride, + padding=deconv.padding, + output_padding=deconv.output_padding, + dilation=deconv.dilation, + groups=deconv.groups, + bias=True).requires_grad_(False).to(deconv.weight.device) + + # Prepare filters + w_deconv = deconv.weight.clone().view(deconv.out_channels, -1) + w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var))) + fuseddconv.weight.copy_(torch.mm(w_bn, w_deconv).view(fuseddconv.weight.shape)) + + # Prepare spatial bias + b_conv = torch.zeros(deconv.weight.size(1), device=deconv.weight.device) if deconv.bias is None else deconv.bias + b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps)) + fuseddconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn) + + return fuseddconv + + +def model_info(model, detailed=False, verbose=True, imgsz=640): + """Model information. imgsz may be int or list, i.e. 
imgsz=640 or imgsz=[640, 320].""" + if not verbose: + return + n_p = get_num_params(model) # number of parameters + n_g = get_num_gradients(model) # number of gradients + n_l = len(list(model.modules())) # number of layers + if detailed: + LOGGER.info( + f"{'layer':>5} {'name':>40} {'gradient':>9} {'parameters':>12} {'shape':>20} {'mu':>10} {'sigma':>10}") + for i, (name, p) in enumerate(model.named_parameters()): + name = name.replace('module_list.', '') + LOGGER.info('%5g %40s %9s %12g %20s %10.3g %10.3g %10s' % + (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std(), p.dtype)) + + flops = get_flops(model, imgsz) + fused = ' (fused)' if getattr(model, 'is_fused', lambda: False)() else '' + fs = f', {flops:.1f} GFLOPs' if flops else '' + yaml_file = getattr(model, 'yaml_file', '') or getattr(model, 'yaml', {}).get('yaml_file', '') + model_name = Path(yaml_file).stem.replace('yolo', 'YOLO') or 'Model' + LOGGER.info(f'{model_name} summary{fused}: {n_l} layers, {n_p} parameters, {n_g} gradients{fs}') + return n_l, n_p, n_g, flops + + +def get_num_params(model): + """Return the total number of parameters in a YOLO model.""" + return sum(x.numel() for x in model.parameters()) + + +def get_num_gradients(model): + """Return the total number of parameters with gradients in a YOLO model.""" + return sum(x.numel() for x in model.parameters() if x.requires_grad) + + +def model_info_for_loggers(trainer): + """ + Return model info dict with useful model information. + + Example for YOLOv8n: + {'model/parameters': 3151904, + 'model/GFLOPs': 8.746, + 'model/speed_ONNX(ms)': 41.244, + 'model/speed_TensorRT(ms)': 3.211, + 'model/speed_PyTorch(ms)': 18.755} + """ + if trainer.args.profile: # profile ONNX and TensorRT times + from ultralytics.yolo.utils.benchmarks import ProfileModels + results = ProfileModels([trainer.last], device=trainer.device).profile()[0] + results.pop('model/name') + else: # only return PyTorch times from most recent validation + results = { + 'model/parameters': get_num_params(trainer.model), + 'model/GFLOPs': round(get_flops(trainer.model), 3)} + results['model/speed_PyTorch(ms)'] = round(trainer.validator.speed['inference'], 3) + return results + + +def get_flops(model, imgsz=640): + """Return a YOLO model's FLOPs.""" + try: + model = de_parallel(model) + p = next(model.parameters()) + stride = max(int(model.stride.max()), 32) if hasattr(model, 'stride') else 32 # max stride + im = torch.empty((1, p.shape[1], stride, stride), device=p.device) # input image in BCHW format + flops = thop.profile(deepcopy(model), inputs=[im], verbose=False)[0] / 1E9 * 2 if thop else 0 # stride GFLOPs + imgsz = imgsz if isinstance(imgsz, list) else [imgsz, imgsz] # expand if int/float + return flops * imgsz[0] / stride * imgsz[1] / stride # 640x640 GFLOPs + except Exception: + return 0 + + +def get_flops_with_torch_profiler(model, imgsz=640): + """Compute model FLOPs (thop alternative).""" + model = de_parallel(model) + p = next(model.parameters()) + stride = (max(int(model.stride.max()), 32) if hasattr(model, 'stride') else 32) * 2 # max stride + im = torch.zeros((1, p.shape[1], stride, stride), device=p.device) # input image in BCHW format + with torch.profiler.profile(with_flops=True) as prof: + model(im) + flops = sum(x.flops for x in prof.key_averages()) / 1E9 + imgsz = imgsz if isinstance(imgsz, list) else [imgsz, imgsz] # expand if int/float + flops = flops * imgsz[0] / stride * imgsz[1] / stride # 640x640 GFLOPs + return flops + + +def initialize_weights(model): + 
"""Initialize model weights to random values.""" + for m in model.modules(): + t = type(m) + if t is nn.Conv2d: + pass # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif t is nn.BatchNorm2d: + m.eps = 1e-3 + m.momentum = 0.03 + elif t in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU]: + m.inplace = True + + +def scale_img(img, ratio=1.0, same_shape=False, gs=32): # img(16,3,256,416) + # Scales img(bs,3,y,x) by ratio constrained to gs-multiple + if ratio == 1.0: + return img + h, w = img.shape[2:] + s = (int(h * ratio), int(w * ratio)) # new size + img = F.interpolate(img, size=s, mode='bilinear', align_corners=False) # resize + if not same_shape: # pad/crop img + h, w = (math.ceil(x * ratio / gs) * gs for x in (h, w)) + return F.pad(img, [0, w - s[1], 0, h - s[0]], value=0.447) # value = imagenet mean + + +def make_divisible(x, divisor): + """Returns nearest x divisible by divisor.""" + if isinstance(divisor, torch.Tensor): + divisor = int(divisor.max()) # to int + return math.ceil(x / divisor) * divisor + + +def copy_attr(a, b, include=(), exclude=()): + """Copies attributes from object 'b' to object 'a', with options to include/exclude certain attributes.""" + for k, v in b.__dict__.items(): + if (len(include) and k not in include) or k.startswith('_') or k in exclude: + continue + else: + setattr(a, k, v) + + +def get_latest_opset(): + """Return second-most (for maturity) recently supported ONNX opset by this version of torch.""" + return max(int(k[14:]) for k in vars(torch.onnx) if 'symbolic_opset' in k) - 1 # opset + + +def intersect_dicts(da, db, exclude=()): + """Returns a dictionary of intersecting keys with matching shapes, excluding 'exclude' keys, using da values.""" + return {k: v for k, v in da.items() if k in db and all(x not in k for x in exclude) and v.shape == db[k].shape} + + +def is_parallel(model): + """Returns True if model is of type DP or DDP.""" + return isinstance(model, (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel)) + + +def de_parallel(model): + """De-parallelize a model: returns single-GPU model if model is of type DP or DDP.""" + return model.module if is_parallel(model) else model + + +def one_cycle(y1=0.0, y2=1.0, steps=100): + """Returns a lambda function for sinusoidal ramp from y1 to y2 https://arxiv.org/pdf/1812.01187.pdf.""" + return lambda x: ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1 + + +def init_seeds(seed=0, deterministic=False): + """Initialize random number generator (RNG) seeds https://pytorch.org/docs/stable/notes/randomness.html.""" + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + torch.cuda.manual_seed(seed) + torch.cuda.manual_seed_all(seed) # for Multi-GPU, exception safe + # torch.backends.cudnn.benchmark = True # AutoBatch problem https://github.com/ultralytics/yolov5/issues/9287 + if deterministic: + if TORCH_2_0: + torch.use_deterministic_algorithms(True, warn_only=True) # warn if deterministic is not possible + torch.backends.cudnn.deterministic = True + os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8' + os.environ['PYTHONHASHSEED'] = str(seed) + else: + LOGGER.warning('WARNING ⚠️ Upgrade to torch>=2.0.0 for deterministic training.') + else: + torch.use_deterministic_algorithms(False) + torch.backends.cudnn.deterministic = False + + +class ModelEMA: + """Updated Exponential Moving Average (EMA) from https://github.com/rwightman/pytorch-image-models + Keeps a moving average of everything in the model state_dict (parameters and buffers) + For 
EMA details see https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage + To disable EMA set the `enabled` attribute to `False`. + """ + + def __init__(self, model, decay=0.9999, tau=2000, updates=0): + """Create EMA.""" + self.ema = deepcopy(de_parallel(model)).eval() # FP32 EMA + self.updates = updates # number of EMA updates + self.decay = lambda x: decay * (1 - math.exp(-x / tau)) # decay exponential ramp (to help early epochs) + for p in self.ema.parameters(): + p.requires_grad_(False) + self.enabled = True + + def update(self, model): + """Update EMA parameters.""" + if self.enabled: + self.updates += 1 + d = self.decay(self.updates) + + msd = de_parallel(model).state_dict() # model state_dict + for k, v in self.ema.state_dict().items(): + if v.dtype.is_floating_point: # true for FP16 and FP32 + v *= d + v += (1 - d) * msd[k].detach() + # assert v.dtype == msd[k].dtype == torch.float32, f'{k}: EMA {v.dtype}, model {msd[k].dtype}' + + def update_attr(self, model, include=(), exclude=('process_group', 'reducer')): + """Updates attributes and saves stripped model with optimizer removed.""" + if self.enabled: + copy_attr(self.ema, model, include, exclude) + + +def strip_optimizer(f: Union[str, Path] = 'best.pt', s: str = '') -> None: + """ + Strip optimizer from 'f' to finalize training, optionally save as 's'. + + Args: + f (str): file path to model to strip the optimizer from. Default is 'best.pt'. + s (str): file path to save the model with stripped optimizer to. If not provided, 'f' will be overwritten. + + Returns: + None + + Usage: + from pathlib import Path + from ultralytics.yolo.utils.torch_utils import strip_optimizer + for f in Path('/Users/glennjocher/Downloads/weights').rglob('*.pt'): + strip_optimizer(f) + """ + # Use dill (if exists) to serialize the lambda functions where pickle does not do this + try: + import dill as pickle + except ImportError: + import pickle + + x = torch.load(f, map_location=torch.device('cpu')) + args = {**DEFAULT_CFG_DICT, **x['train_args']} if 'train_args' in x else None # combine args + if x.get('ema'): + x['model'] = x['ema'] # replace model with ema + for k in 'optimizer', 'best_fitness', 'ema', 'updates': # keys + x[k] = None + x['epoch'] = -1 + x['model'].half() # to FP16 + for p in x['model'].parameters(): + p.requires_grad = False + x['train_args'] = {k: v for k, v in args.items() if k in DEFAULT_CFG_KEYS} # strip non-default keys + # x['model'].args = x['train_args'] + torch.save(x, s or f, pickle_module=pickle) + mb = os.path.getsize(s or f) / 1E6 # filesize + LOGGER.info(f"Optimizer stripped from {f},{f' saved as {s},' if s else ''} {mb:.1f}MB") + + +def profile(input, ops, n=10, device=None): + """ + YOLOv8 speed/memory/FLOPs profiler + + Usage: + input = torch.randn(16, 3, 640, 640) + m1 = lambda x: x * torch.sigmoid(x) + m2 = nn.SiLU() + profile(input, [m1, m2], n=100) # profile over 100 iterations + """ + results = [] + if not isinstance(device, torch.device): + device = select_device(device) + LOGGER.info(f"{'Params':>12s}{'GFLOPs':>12s}{'GPU_mem (GB)':>14s}{'forward (ms)':>14s}{'backward (ms)':>14s}" + f"{'input':>24s}{'output':>24s}") + + for x in input if isinstance(input, list) else [input]: + x = x.to(device) + x.requires_grad = True + for m in ops if isinstance(ops, list) else [ops]: + m = m.to(device) if hasattr(m, 'to') else m # device + m = m.half() if hasattr(m, 'half') and isinstance(x, torch.Tensor) and x.dtype is torch.float16 else m + tf, tb, t = 0, 0, [0, 0, 0] # dt forward, backward + try: + 
flops = thop.profile(m, inputs=[x], verbose=False)[0] / 1E9 * 2 if thop else 0 # GFLOPs + except Exception: + flops = 0 + + try: + for _ in range(n): + t[0] = time_sync() + y = m(x) + t[1] = time_sync() + try: + _ = (sum(yi.sum() for yi in y) if isinstance(y, list) else y).sum().backward() + t[2] = time_sync() + except Exception: # no backward method + # print(e) # for debug + t[2] = float('nan') + tf += (t[1] - t[0]) * 1000 / n # ms per op forward + tb += (t[2] - t[1]) * 1000 / n # ms per op backward + mem = torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0 # (GB) + s_in, s_out = (tuple(x.shape) if isinstance(x, torch.Tensor) else 'list' for x in (x, y)) # shapes + p = sum(x.numel() for x in m.parameters()) if isinstance(m, nn.Module) else 0 # parameters + LOGGER.info(f'{p:12}{flops:12.4g}{mem:>14.3f}{tf:14.4g}{tb:14.4g}{str(s_in):>24s}{str(s_out):>24s}') + results.append([p, flops, mem, tf, tb, s_in, s_out]) + except Exception as e: + LOGGER.info(e) + results.append(None) + torch.cuda.empty_cache() + return results + + +class EarlyStopping: + """ + Early stopping class that stops training when a specified number of epochs have passed without improvement. + """ + + def __init__(self, patience=50): + """ + Initialize early stopping object + + Args: + patience (int, optional): Number of epochs to wait after fitness stops improving before stopping. + """ + self.best_fitness = 0.0 # i.e. mAP + self.best_epoch = 0 + self.patience = patience or float('inf') # epochs to wait after fitness stops improving to stop + self.possible_stop = False # possible stop may occur next epoch + + def __call__(self, epoch, fitness): + """ + Check whether to stop training + + Args: + epoch (int): Current epoch of training + fitness (float): Fitness value of current epoch + + Returns: + (bool): True if training should stop, False otherwise + """ + if fitness is None: # check if fitness=None (happens when val=False) + return False + + if fitness >= self.best_fitness: # >= 0 to allow for early zero-fitness stage of training + self.best_epoch = epoch + self.best_fitness = fitness + delta = epoch - self.best_epoch # epochs without improvement + self.possible_stop = delta >= (self.patience - 1) # possible stop may occur next epoch + stop = delta >= self.patience # stop training if patience exceeded + if stop: + LOGGER.info(f'Stopping training early as no improvement observed in last {self.patience} epochs. ' + f'Best results observed at epoch {self.best_epoch}, best model saved as best.pt.\n' + f'To update EarlyStopping(patience={self.patience}) pass a new patience value, ' + f'i.e. `patience=300` or use `patience=0` to disable EarlyStopping.') + return stop diff --git a/ultralytics/yolo/utils/tuner.py b/ultralytics/yolo/utils/tuner.py new file mode 100644 index 0000000..54f10b0 --- /dev/null +++ b/ultralytics/yolo/utils/tuner.py @@ -0,0 +1,120 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +from ultralytics.yolo.cfg import TASK2DATA, TASK2METRIC +from ultralytics.yolo.utils import DEFAULT_CFG_DICT, LOGGER, NUM_THREADS + + +def run_ray_tune(model, + space: dict = None, + grace_period: int = 10, + gpu_per_trial: int = None, + max_samples: int = 10, + **train_args): + """ + Runs hyperparameter tuning using Ray Tune. + + Args: + model (YOLO): Model to run the tuner on. + space (dict, optional): The hyperparameter search space. Defaults to None. + grace_period (int, optional): The grace period in epochs of the ASHA scheduler. Defaults to 10. 
+ gpu_per_trial (int, optional): The number of GPUs to allocate per trial. Defaults to None. + max_samples (int, optional): The maximum number of trials to run. Defaults to 10. + train_args (dict, optional): Additional arguments to pass to the `train()` method. Defaults to {}. + + Returns: + (dict): A dictionary containing the results of the hyperparameter search. + + Raises: + ModuleNotFoundError: If Ray Tune is not installed. + """ + if train_args is None: + train_args = {} + + try: + from ray import tune + from ray.air import RunConfig + from ray.air.integrations.wandb import WandbLoggerCallback + from ray.tune.schedulers import ASHAScheduler + except ImportError: + raise ModuleNotFoundError("Tuning hyperparameters requires Ray Tune. Install with: pip install 'ray[tune]'") + + try: + import wandb + + assert hasattr(wandb, '__version__') + except (ImportError, AssertionError): + wandb = False + + default_space = { + # 'optimizer': tune.choice(['SGD', 'Adam', 'AdamW', 'NAdam', 'RAdam', 'RMSProp']), + 'lr0': tune.uniform(1e-5, 1e-1), + 'lrf': tune.uniform(0.01, 1.0), # final OneCycleLR learning rate (lr0 * lrf) + 'momentum': tune.uniform(0.6, 0.98), # SGD momentum/Adam beta1 + 'weight_decay': tune.uniform(0.0, 0.001), # optimizer weight decay 5e-4 + 'warmup_epochs': tune.uniform(0.0, 5.0), # warmup epochs (fractions ok) + 'warmup_momentum': tune.uniform(0.0, 0.95), # warmup initial momentum + 'box': tune.uniform(0.02, 0.2), # box loss gain + 'cls': tune.uniform(0.2, 4.0), # cls loss gain (scale with pixels) + 'hsv_h': tune.uniform(0.0, 0.1), # image HSV-Hue augmentation (fraction) + 'hsv_s': tune.uniform(0.0, 0.9), # image HSV-Saturation augmentation (fraction) + 'hsv_v': tune.uniform(0.0, 0.9), # image HSV-Value augmentation (fraction) + 'degrees': tune.uniform(0.0, 45.0), # image rotation (+/- deg) + 'translate': tune.uniform(0.0, 0.9), # image translation (+/- fraction) + 'scale': tune.uniform(0.0, 0.9), # image scale (+/- gain) + 'shear': tune.uniform(0.0, 10.0), # image shear (+/- deg) + 'perspective': tune.uniform(0.0, 0.001), # image perspective (+/- fraction), range 0-0.001 + 'flipud': tune.uniform(0.0, 1.0), # image flip up-down (probability) + 'fliplr': tune.uniform(0.0, 1.0), # image flip left-right (probability) + 'mosaic': tune.uniform(0.0, 1.0), # image mixup (probability) + 'mixup': tune.uniform(0.0, 1.0), # image mixup (probability) + 'copy_paste': tune.uniform(0.0, 1.0)} # segment copy-paste (probability) + + def _tune(config): + """ + Trains the YOLO model with the specified hyperparameters and additional arguments. + + Args: + config (dict): A dictionary of hyperparameters to use for training. + + Returns: + None. 
+ """ + model._reset_callbacks() + config.update(train_args) + model.train(**config) + + # Get search space + if not space: + space = default_space + LOGGER.warning('WARNING ⚠️ search space not provided, using default search space.') + + # Get dataset + data = train_args.get('data', TASK2DATA[model.task]) + space['data'] = data + if 'data' not in train_args: + LOGGER.warning(f'WARNING ⚠️ data not provided, using default "data={data}".') + + # Define the trainable function with allocated resources + trainable_with_resources = tune.with_resources(_tune, {'cpu': NUM_THREADS, 'gpu': gpu_per_trial or 0}) + + # Define the ASHA scheduler for hyperparameter search + asha_scheduler = ASHAScheduler(time_attr='epoch', + metric=TASK2METRIC[model.task], + mode='max', + max_t=train_args.get('epochs') or DEFAULT_CFG_DICT['epochs'] or 100, + grace_period=grace_period, + reduction_factor=3) + + # Define the callbacks for the hyperparameter search + tuner_callbacks = [WandbLoggerCallback(project='YOLOv8-tune')] if wandb else [] + + # Create the Ray Tune hyperparameter search tuner + tuner = tune.Tuner(trainable_with_resources, + param_space=space, + tune_config=tune.TuneConfig(scheduler=asha_scheduler, num_samples=max_samples), + run_config=RunConfig(callbacks=tuner_callbacks, storage_path='./runs/tune')) + + # Run the hyperparameter search + tuner.fit() + + # Return the results of the hyperparameter search + return tuner.get_results() diff --git a/ultralytics/yolo/v8/__init__.py b/ultralytics/yolo/v8/__init__.py new file mode 100644 index 0000000..adc0351 --- /dev/null +++ b/ultralytics/yolo/v8/__init__.py @@ -0,0 +1,5 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +from ultralytics.yolo.v8 import classify, detect, pose, segment + +__all__ = 'classify', 'segment', 'detect', 'pose' diff --git a/ultralytics/yolo/v8/classify/__init__.py b/ultralytics/yolo/v8/classify/__init__.py new file mode 100644 index 0000000..2f049ed --- /dev/null +++ b/ultralytics/yolo/v8/classify/__init__.py @@ -0,0 +1,7 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +from ultralytics.yolo.v8.classify.predict import ClassificationPredictor, predict +from ultralytics.yolo.v8.classify.train import ClassificationTrainer, train +from ultralytics.yolo.v8.classify.val import ClassificationValidator, val + +__all__ = 'ClassificationPredictor', 'predict', 'ClassificationTrainer', 'train', 'ClassificationValidator', 'val' diff --git a/ultralytics/yolo/v8/classify/predict.py b/ultralytics/yolo/v8/classify/predict.py new file mode 100644 index 0000000..fb486e2 --- /dev/null +++ b/ultralytics/yolo/v8/classify/predict.py @@ -0,0 +1,51 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +import torch + +from ultralytics.yolo.engine.predictor import BasePredictor +from ultralytics.yolo.engine.results import Results +from ultralytics.yolo.utils import DEFAULT_CFG, ROOT + + +class ClassificationPredictor(BasePredictor): + + def __init__(self, cfg=DEFAULT_CFG, overrides=None, _callbacks=None): + super().__init__(cfg, overrides, _callbacks) + self.args.task = 'classify' + + def preprocess(self, img): + """Converts input image to model-compatible data type.""" + if not isinstance(img, torch.Tensor): + img = torch.stack([self.transforms(im) for im in img], dim=0) + img = (img if isinstance(img, torch.Tensor) else torch.from_numpy(img)).to(self.model.device) + return img.half() if self.model.fp16 else img.float() # uint8 to fp16/32 + + def postprocess(self, preds, img, orig_imgs): + """Postprocesses predictions to return Results objects.""" + results = [] + for i, 
pred in enumerate(preds): + orig_img = orig_imgs[i] if isinstance(orig_imgs, list) else orig_imgs + path = self.batch[0] + img_path = path[i] if isinstance(path, list) else path + results.append(Results(orig_img=orig_img, path=img_path, names=self.model.names, probs=pred)) + + return results + + +def predict(cfg=DEFAULT_CFG, use_python=False): + """Run YOLO model predictions on input images/videos.""" + model = cfg.model or 'yolov8n-cls.pt' # or "resnet18" + source = cfg.source if cfg.source is not None else ROOT / 'assets' if (ROOT / 'assets').exists() \ + else 'https://ultralytics.com/images/bus.jpg' + + args = dict(model=model, source=source) + if use_python: + from ultralytics import YOLO + YOLO(model)(**args) + else: + predictor = ClassificationPredictor(overrides=args) + predictor.predict_cli() + + +if __name__ == '__main__': + predict() diff --git a/ultralytics/yolo/v8/classify/train.py b/ultralytics/yolo/v8/classify/train.py new file mode 100644 index 0000000..72feb55 --- /dev/null +++ b/ultralytics/yolo/v8/classify/train.py @@ -0,0 +1,161 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +import torch +import torchvision + +from ultralytics.nn.tasks import ClassificationModel, attempt_load_one_weight +from ultralytics.yolo import v8 +from ultralytics.yolo.data import ClassificationDataset, build_dataloader +from ultralytics.yolo.engine.trainer import BaseTrainer +from ultralytics.yolo.utils import DEFAULT_CFG, LOGGER, RANK, colorstr +from ultralytics.yolo.utils.plotting import plot_images, plot_results +from ultralytics.yolo.utils.torch_utils import is_parallel, strip_optimizer, torch_distributed_zero_first + + +class ClassificationTrainer(BaseTrainer): + + def __init__(self, cfg=DEFAULT_CFG, overrides=None, _callbacks=None): + """Initialize a ClassificationTrainer object with optional configuration overrides and callbacks.""" + if overrides is None: + overrides = {} + overrides['task'] = 'classify' + if overrides.get('imgsz') is None: + overrides['imgsz'] = 224 + super().__init__(cfg, overrides, _callbacks) + + def set_model_attributes(self): + """Set the YOLO model's class names from the loaded dataset.""" + self.model.names = self.data['names'] + + def get_model(self, cfg=None, weights=None, verbose=True): + """Returns a modified PyTorch model configured for training YOLO.""" + model = ClassificationModel(cfg, nc=self.data['nc'], verbose=verbose and RANK == -1) + if weights: + model.load(weights) + + for m in model.modules(): + if not self.args.pretrained and hasattr(m, 'reset_parameters'): + m.reset_parameters() + if isinstance(m, torch.nn.Dropout) and self.args.dropout: + m.p = self.args.dropout # set dropout + for p in model.parameters(): + p.requires_grad = True # for training + return model + + def setup_model(self): + """ + load/create/download model for any task + """ + # Classification models require special handling + + if isinstance(self.model, torch.nn.Module): # if model is loaded beforehand. 
No setup needed
+            return
+
+        model = str(self.model)
+        # Load a YOLO model locally, from torchvision, or from Ultralytics assets
+        if model.endswith('.pt'):
+            self.model, _ = attempt_load_one_weight(model, device='cpu')
+            for p in self.model.parameters():
+                p.requires_grad = True  # for training
+        elif model.endswith('.yaml'):
+            self.model = self.get_model(cfg=model)
+        elif model in torchvision.models.__dict__:
+            self.model = torchvision.models.__dict__[model](weights='IMAGENET1K_V1' if self.args.pretrained else None)
+        else:
+            raise FileNotFoundError(f'ERROR: model={model} not found locally or online. Please check model name.')
+        ClassificationModel.reshape_outputs(self.model, self.data['nc'])
+
+        return  # don't return ckpt. Classification doesn't support resume
+
+    def build_dataset(self, img_path, mode='train', batch=None):
+        return ClassificationDataset(root=img_path, args=self.args, augment=mode == 'train')
+
+    def get_dataloader(self, dataset_path, batch_size=16, rank=0, mode='train'):
+        """Returns PyTorch DataLoader with transforms to preprocess images for inference."""
+        with torch_distributed_zero_first(rank):  # init dataset *.cache only once if DDP
+            dataset = self.build_dataset(dataset_path, mode)
+
+        loader = build_dataloader(dataset, batch_size, self.args.workers, rank=rank)
+        # Attach inference transforms
+        if mode != 'train':
+            if is_parallel(self.model):
+                self.model.module.transforms = loader.dataset.torch_transforms
+            else:
+                self.model.transforms = loader.dataset.torch_transforms
+        return loader
+
+    def preprocess_batch(self, batch):
+        """Preprocesses a batch of images and classes."""
+        batch['img'] = batch['img'].to(self.device)
+        batch['cls'] = batch['cls'].to(self.device)
+        return batch
+
+    def progress_string(self):
+        """Returns a formatted string showing training progress."""
+        return ('\n' + '%11s' * (4 + len(self.loss_names))) % \
+            ('Epoch', 'GPU_mem', *self.loss_names, 'Instances', 'Size')
+
+    def get_validator(self):
+        """Returns an instance of ClassificationValidator for validation."""
+        self.loss_names = ['loss']
+        return v8.classify.ClassificationValidator(self.test_loader, self.save_dir)
+
+    def label_loss_items(self, loss_items=None, prefix='train'):
+        """
+        Returns a loss dict with labelled training loss items tensor
+        """
+        # Not needed for classification but necessary for segmentation & detection
+        keys = [f'{prefix}/{x}' for x in self.loss_names]
+        if loss_items is None:
+            return keys
+        loss_items = [round(float(loss_items), 5)]
+        return dict(zip(keys, loss_items))
+
+    def resume_training(self, ckpt):
+        """Resumes training from a given checkpoint."""
+        pass
+
+    def plot_metrics(self):
+        """Plots metrics from a CSV file."""
+        plot_results(file=self.csv, classify=True, on_plot=self.on_plot)  # save results.png
+
+    def final_eval(self):
+        """Evaluate trained model and save validation results."""
+        for f in self.last, self.best:
+            if f.exists():
+                strip_optimizer(f)  # strip optimizers
+                # TODO: validate best.pt after training completes
+                # if f is self.best:
+                #     LOGGER.info(f'\nValidating {f}...')
+                #     self.validator.args.save_json = True
+                #     self.metrics = self.validator(model=f)
+                #     self.metrics.pop('fitness', None)
+                #     self.run_callbacks('on_fit_epoch_end')
+        LOGGER.info(f"Results saved to {colorstr('bold', self.save_dir)}")
+
+    def plot_training_samples(self, batch, ni):
+        """Plots training samples with their annotations."""
+        plot_images(images=batch['img'],
+                    batch_idx=torch.arange(len(batch['img'])),
+                    cls=batch['cls'].squeeze(-1),
+                    fname=self.save_dir / 
f'train_batch{ni}.jpg', + on_plot=self.on_plot) + + +def train(cfg=DEFAULT_CFG, use_python=False): + """Train the YOLO classification model.""" + model = cfg.model or 'yolov8n-cls.pt' # or "resnet18" + data = cfg.data or 'mnist160' # or yolo.ClassificationDataset("mnist") + device = cfg.device if cfg.device is not None else '' + + args = dict(model=model, data=data, device=device) + if use_python: + from ultralytics import YOLO + YOLO(model).train(**args) + else: + trainer = ClassificationTrainer(overrides=args) + trainer.train() + + +if __name__ == '__main__': + train() diff --git a/ultralytics/yolo/v8/classify/val.py b/ultralytics/yolo/v8/classify/val.py new file mode 100644 index 0000000..f56dea0 --- /dev/null +++ b/ultralytics/yolo/v8/classify/val.py @@ -0,0 +1,109 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +import torch + +from ultralytics.yolo.data import ClassificationDataset, build_dataloader +from ultralytics.yolo.engine.validator import BaseValidator +from ultralytics.yolo.utils import DEFAULT_CFG, LOGGER +from ultralytics.yolo.utils.metrics import ClassifyMetrics, ConfusionMatrix +from ultralytics.yolo.utils.plotting import plot_images + + +class ClassificationValidator(BaseValidator): + + def __init__(self, dataloader=None, save_dir=None, pbar=None, args=None, _callbacks=None): + """Initializes ClassificationValidator instance with args, dataloader, save_dir, and progress bar.""" + super().__init__(dataloader, save_dir, pbar, args, _callbacks) + self.args.task = 'classify' + self.metrics = ClassifyMetrics() + + def get_desc(self): + """Returns a formatted string summarizing classification metrics.""" + return ('%22s' + '%11s' * 2) % ('classes', 'top1_acc', 'top5_acc') + + def init_metrics(self, model): + """Initialize confusion matrix, class names, and top-1 and top-5 accuracy.""" + self.names = model.names + self.nc = len(model.names) + self.confusion_matrix = ConfusionMatrix(nc=self.nc, task='classify') + self.pred = [] + self.targets = [] + + def preprocess(self, batch): + """Preprocesses input batch and returns it.""" + batch['img'] = batch['img'].to(self.device, non_blocking=True) + batch['img'] = batch['img'].half() if self.args.half else batch['img'].float() + batch['cls'] = batch['cls'].to(self.device) + return batch + + def update_metrics(self, preds, batch): + """Updates running metrics with model predictions and batch targets.""" + n5 = min(len(self.model.names), 5) + self.pred.append(preds.argsort(1, descending=True)[:, :n5]) + self.targets.append(batch['cls']) + + def finalize_metrics(self, *args, **kwargs): + """Finalizes metrics of the model such as confusion_matrix and speed.""" + self.confusion_matrix.process_cls_preds(self.pred, self.targets) + if self.args.plots: + for normalize in True, False: + self.confusion_matrix.plot(save_dir=self.save_dir, + names=self.names.values(), + normalize=normalize, + on_plot=self.on_plot) + self.metrics.speed = self.speed + self.metrics.confusion_matrix = self.confusion_matrix + + def get_stats(self): + """Returns a dictionary of metrics obtained by processing targets and predictions.""" + self.metrics.process(self.targets, self.pred) + return self.metrics.results_dict + + def build_dataset(self, img_path): + return ClassificationDataset(root=img_path, args=self.args, augment=False) + + def get_dataloader(self, dataset_path, batch_size): + """Builds and returns a data loader for classification tasks with given parameters.""" + dataset = self.build_dataset(dataset_path) + return build_dataloader(dataset, batch_size, 
self.args.workers, rank=-1) + + def print_results(self): + """Prints evaluation metrics for YOLO object detection model.""" + pf = '%22s' + '%11.3g' * len(self.metrics.keys) # print format + LOGGER.info(pf % ('all', self.metrics.top1, self.metrics.top5)) + + def plot_val_samples(self, batch, ni): + """Plot validation image samples.""" + plot_images(images=batch['img'], + batch_idx=torch.arange(len(batch['img'])), + cls=batch['cls'].squeeze(-1), + fname=self.save_dir / f'val_batch{ni}_labels.jpg', + names=self.names, + on_plot=self.on_plot) + + def plot_predictions(self, batch, preds, ni): + """Plots predicted bounding boxes on input images and saves the result.""" + plot_images(batch['img'], + batch_idx=torch.arange(len(batch['img'])), + cls=torch.argmax(preds, dim=1), + fname=self.save_dir / f'val_batch{ni}_pred.jpg', + names=self.names, + on_plot=self.on_plot) # pred + + +def val(cfg=DEFAULT_CFG, use_python=False): + """Validate YOLO model using custom data.""" + model = cfg.model or 'yolov8n-cls.pt' # or "resnet18" + data = cfg.data or 'mnist160' + + args = dict(model=model, data=data) + if use_python: + from ultralytics import YOLO + YOLO(model).val(**args) + else: + validator = ClassificationValidator(args=args) + validator(model=args['model']) + + +if __name__ == '__main__': + val() diff --git a/ultralytics/yolo/v8/detect/__init__.py b/ultralytics/yolo/v8/detect/__init__.py new file mode 100644 index 0000000..481951a --- /dev/null +++ b/ultralytics/yolo/v8/detect/__init__.py @@ -0,0 +1,7 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +from .predict import DetectionPredictor, predict +from .train import DetectionTrainer, train +from .val import DetectionValidator, val + +__all__ = 'DetectionPredictor', 'predict', 'DetectionTrainer', 'train', 'DetectionValidator', 'val' diff --git a/ultralytics/yolo/v8/detect/predict.py b/ultralytics/yolo/v8/detect/predict.py new file mode 100644 index 0000000..31e8a9f --- /dev/null +++ b/ultralytics/yolo/v8/detect/predict.py @@ -0,0 +1,48 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +import torch + +from ultralytics.yolo.engine.predictor import BasePredictor +from ultralytics.yolo.engine.results import Results +from ultralytics.yolo.utils import DEFAULT_CFG, ROOT, ops + + +class DetectionPredictor(BasePredictor): + + def postprocess(self, preds, img, orig_imgs): + """Postprocesses predictions and returns a list of Results objects.""" + preds = ops.non_max_suppression(preds, + self.args.conf, + self.args.iou, + agnostic=self.args.agnostic_nms, + max_det=self.args.max_det, + classes=self.args.classes) + + results = [] + for i, pred in enumerate(preds): + orig_img = orig_imgs[i] if isinstance(orig_imgs, list) else orig_imgs + if not isinstance(orig_imgs, torch.Tensor): + pred[:, :4] = ops.scale_boxes(img.shape[2:], pred[:, :4], orig_img.shape) + path = self.batch[0] + img_path = path[i] if isinstance(path, list) else path + results.append(Results(orig_img=orig_img, path=img_path, names=self.model.names, boxes=pred)) + return results + + +def predict(cfg=DEFAULT_CFG, use_python=False): + """Runs YOLO model inference on input image(s).""" + model = cfg.model or 'yolov8n.pt' + source = cfg.source if cfg.source is not None else ROOT / 'assets' if (ROOT / 'assets').exists() \ + else 'https://ultralytics.com/images/bus.jpg' + + args = dict(model=model, source=source) + if use_python: + from ultralytics import YOLO + YOLO(model)(**args) + else: + predictor = DetectionPredictor(overrides=args) + predictor.predict_cli() + + +if __name__ == '__main__': + predict() 
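The classify and detect predictors added above share the same entry-point pattern: build an `args` dict, then either drive the high-level `YOLO` API or instantiate the predictor class directly and call `predict_cli()`. A minimal usage sketch for the detection predictor just defined (the weight file and image path below are placeholders, not part of this commit):

```python
# Illustrative sketch only; 'yolov8n.pt' and 'bus.jpg' are assumed to exist locally.
from ultralytics.yolo.v8.detect.predict import DetectionPredictor

args = dict(model='yolov8n.pt', source='bus.jpg')  # hypothetical model/source paths
predictor = DetectionPredictor(overrides=args)     # task-specific BasePredictor subclass
predictor.predict_cli()  # preprocess -> inference -> NMS in postprocess() -> Results
```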
diff --git a/ultralytics/yolo/v8/detect/train.py b/ultralytics/yolo/v8/detect/train.py new file mode 100644 index 0000000..1b475ed --- /dev/null +++ b/ultralytics/yolo/v8/detect/train.py @@ -0,0 +1,143 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +from copy import copy + +import numpy as np + +from ultralytics.nn.tasks import DetectionModel +from ultralytics.yolo import v8 +from ultralytics.yolo.data import build_dataloader, build_yolo_dataset +from ultralytics.yolo.data.dataloaders.v5loader import create_dataloader +from ultralytics.yolo.engine.trainer import BaseTrainer +from ultralytics.yolo.utils import DEFAULT_CFG, LOGGER, RANK, colorstr +from ultralytics.yolo.utils.plotting import plot_images, plot_labels, plot_results +from ultralytics.yolo.utils.torch_utils import de_parallel, torch_distributed_zero_first + + +# BaseTrainer python usage +class DetectionTrainer(BaseTrainer): + + def build_dataset(self, img_path, mode='train', batch=None): + """Build YOLO Dataset + + Args: + img_path (str): Path to the folder containing images. + mode (str): `train` mode or `val` mode, users are able to customize different augmentations for each mode. + batch (int, optional): Size of batches, this is for `rect`. Defaults to None. + """ + gs = max(int(de_parallel(self.model).stride.max() if self.model else 0), 32) + return build_yolo_dataset(self.args, img_path, batch, self.data, mode=mode, rect=mode == 'val', stride=gs) + + def get_dataloader(self, dataset_path, batch_size=16, rank=0, mode='train'): + """TODO: manage splits differently.""" + # Calculate stride - check if model is initialized + if self.args.v5loader: + LOGGER.warning("WARNING ⚠️ 'v5loader' feature is deprecated and will be removed soon. You can train using " + 'the default YOLOv8 dataloader instead, no argument is needed.') + gs = max(int(de_parallel(self.model).stride.max() if self.model else 0), 32) + return create_dataloader(path=dataset_path, + imgsz=self.args.imgsz, + batch_size=batch_size, + stride=gs, + hyp=vars(self.args), + augment=mode == 'train', + cache=self.args.cache, + pad=0 if mode == 'train' else 0.5, + rect=self.args.rect or mode == 'val', + rank=rank, + workers=self.args.workers, + close_mosaic=self.args.close_mosaic != 0, + prefix=colorstr(f'{mode}: '), + shuffle=mode == 'train', + seed=self.args.seed)[0] + assert mode in ['train', 'val'] + with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP + dataset = self.build_dataset(dataset_path, mode, batch_size) + shuffle = mode == 'train' + if getattr(dataset, 'rect', False) and shuffle: + LOGGER.warning("WARNING ⚠️ 'rect=True' is incompatible with DataLoader shuffle, setting shuffle=False") + shuffle = False + workers = self.args.workers if mode == 'train' else self.args.workers * 2 + return build_dataloader(dataset, batch_size, workers, shuffle, rank) # return dataloader + + def preprocess_batch(self, batch): + """Preprocesses a batch of images by scaling and converting to float.""" + batch['img'] = batch['img'].to(self.device, non_blocking=True).float() / 255 + return batch + + def set_model_attributes(self): + """nl = de_parallel(self.model).model[-1].nl # number of detection layers (to scale hyps).""" + # self.args.box *= 3 / nl # scale to layers + # self.args.cls *= self.data["nc"] / 80 * 3 / nl # scale to classes and layers + # self.args.cls *= (self.args.imgsz / 640) ** 2 * 3 / nl # scale to image size and layers + self.model.nc = self.data['nc'] # attach number of classes to model + self.model.names = self.data['names'] # attach class 
names to model + self.model.args = self.args # attach hyperparameters to model + # TODO: self.model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc + + def get_model(self, cfg=None, weights=None, verbose=True): + """Return a YOLO detection model.""" + model = DetectionModel(cfg, nc=self.data['nc'], verbose=verbose and RANK == -1) + if weights: + model.load(weights) + return model + + def get_validator(self): + """Returns a DetectionValidator for YOLO model validation.""" + self.loss_names = 'box_loss', 'cls_loss', 'dfl_loss' + return v8.detect.DetectionValidator(self.test_loader, save_dir=self.save_dir, args=copy(self.args)) + + def label_loss_items(self, loss_items=None, prefix='train'): + """ + Returns a loss dict with labelled training loss items tensor + """ + # Not needed for classification but necessary for segmentation & detection + keys = [f'{prefix}/{x}' for x in self.loss_names] + if loss_items is not None: + loss_items = [round(float(x), 5) for x in loss_items] # convert tensors to 5 decimal place floats + return dict(zip(keys, loss_items)) + else: + return keys + + def progress_string(self): + """Returns a formatted string of training progress with epoch, GPU memory, loss, instances and size.""" + return ('\n' + '%11s' * + (4 + len(self.loss_names))) % ('Epoch', 'GPU_mem', *self.loss_names, 'Instances', 'Size') + + def plot_training_samples(self, batch, ni): + """Plots training samples with their annotations.""" + plot_images(images=batch['img'], + batch_idx=batch['batch_idx'], + cls=batch['cls'].squeeze(-1), + bboxes=batch['bboxes'], + paths=batch['im_file'], + fname=self.save_dir / f'train_batch{ni}.jpg', + on_plot=self.on_plot) + + def plot_metrics(self): + """Plots metrics from a CSV file.""" + plot_results(file=self.csv, on_plot=self.on_plot) # save results.png + + def plot_training_labels(self): + """Create a labeled training plot of the YOLO model.""" + boxes = np.concatenate([lb['bboxes'] for lb in self.train_loader.dataset.labels], 0) + cls = np.concatenate([lb['cls'] for lb in self.train_loader.dataset.labels], 0) + plot_labels(boxes, cls.squeeze(), names=self.data['names'], save_dir=self.save_dir, on_plot=self.on_plot) + + +def train(cfg=DEFAULT_CFG, use_python=False): + """Train and optimize YOLO model given training data and device.""" + model = cfg.model or 'yolov8n.pt' + data = cfg.data or 'coco128.yaml' # or yolo.ClassificationDataset("mnist") + device = cfg.device if cfg.device is not None else '' + + args = dict(model=model, data=data, device=device) + if use_python: + from ultralytics import YOLO + YOLO(model).train(**args) + else: + trainer = DetectionTrainer(overrides=args) + trainer.train() + + +if __name__ == '__main__': + train() diff --git a/ultralytics/yolo/v8/detect/val.py b/ultralytics/yolo/v8/detect/val.py new file mode 100644 index 0000000..77d346c --- /dev/null +++ b/ultralytics/yolo/v8/detect/val.py @@ -0,0 +1,296 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +import os +from pathlib import Path + +import numpy as np +import torch + +from ultralytics.yolo.data import build_dataloader, build_yolo_dataset +from ultralytics.yolo.data.dataloaders.v5loader import create_dataloader +from ultralytics.yolo.engine.validator import BaseValidator +from ultralytics.yolo.utils import DEFAULT_CFG, LOGGER, colorstr, ops +from ultralytics.yolo.utils.checks import check_requirements +from ultralytics.yolo.utils.metrics import ConfusionMatrix, DetMetrics, box_iou +from ultralytics.yolo.utils.plotting import output_to_target, plot_images 
+from ultralytics.yolo.utils.torch_utils import de_parallel + + +class DetectionValidator(BaseValidator): + + def __init__(self, dataloader=None, save_dir=None, pbar=None, args=None, _callbacks=None): + """Initialize detection model with necessary variables and settings.""" + super().__init__(dataloader, save_dir, pbar, args, _callbacks) + self.args.task = 'detect' + self.is_coco = False + self.class_map = None + self.metrics = DetMetrics(save_dir=self.save_dir, on_plot=self.on_plot) + self.iouv = torch.linspace(0.5, 0.95, 10) # iou vector for mAP@0.5:0.95 + self.niou = self.iouv.numel() + + def preprocess(self, batch): + """Preprocesses batch of images for YOLO training.""" + batch['img'] = batch['img'].to(self.device, non_blocking=True) + batch['img'] = (batch['img'].half() if self.args.half else batch['img'].float()) / 255 + for k in ['batch_idx', 'cls', 'bboxes']: + batch[k] = batch[k].to(self.device) + + nb = len(batch['img']) + self.lb = [torch.cat([batch['cls'], batch['bboxes']], dim=-1)[batch['batch_idx'] == i] + for i in range(nb)] if self.args.save_hybrid else [] # for autolabelling + + return batch + + def init_metrics(self, model): + """Initialize evaluation metrics for YOLO.""" + val = self.data.get(self.args.split, '') # validation path + self.is_coco = isinstance(val, str) and 'coco' in val and val.endswith(f'{os.sep}val2017.txt') # is COCO + self.class_map = ops.coco80_to_coco91_class() if self.is_coco else list(range(1000)) + self.args.save_json |= self.is_coco and not self.training # run on final val if training COCO + self.names = model.names + self.nc = len(model.names) + self.metrics.names = self.names + self.metrics.plot = self.args.plots + self.confusion_matrix = ConfusionMatrix(nc=self.nc) + self.seen = 0 + self.jdict = [] + self.stats = [] + + def get_desc(self): + """Return a formatted string summarizing class metrics of YOLO model.""" + return ('%22s' + '%11s' * 6) % ('Class', 'Images', 'Instances', 'Box(P', 'R', 'mAP50', 'mAP50-95)') + + def postprocess(self, preds): + """Apply Non-maximum suppression to prediction outputs.""" + return ops.non_max_suppression(preds, + self.args.conf, + self.args.iou, + labels=self.lb, + multi_label=True, + agnostic=self.args.single_cls, + max_det=self.args.max_det) + + def update_metrics(self, preds, batch): + """Metrics.""" + for si, pred in enumerate(preds): + idx = batch['batch_idx'] == si + cls = batch['cls'][idx] + bbox = batch['bboxes'][idx] + nl, npr = cls.shape[0], pred.shape[0] # number of labels, predictions + shape = batch['ori_shape'][si] + correct_bboxes = torch.zeros(npr, self.niou, dtype=torch.bool, device=self.device) # init + self.seen += 1 + + if npr == 0: + if nl: + self.stats.append((correct_bboxes, *torch.zeros((2, 0), device=self.device), cls.squeeze(-1))) + if self.args.plots: + self.confusion_matrix.process_batch(detections=None, labels=cls.squeeze(-1)) + continue + + # Predictions + if self.args.single_cls: + pred[:, 5] = 0 + predn = pred.clone() + ops.scale_boxes(batch['img'][si].shape[1:], predn[:, :4], shape, + ratio_pad=batch['ratio_pad'][si]) # native-space pred + + # Evaluate + if nl: + height, width = batch['img'].shape[2:] + tbox = ops.xywh2xyxy(bbox) * torch.tensor( + (width, height, width, height), device=self.device) # target boxes + ops.scale_boxes(batch['img'][si].shape[1:], tbox, shape, + ratio_pad=batch['ratio_pad'][si]) # native-space labels + labelsn = torch.cat((cls, tbox), 1) # native-space labels + correct_bboxes = self._process_batch(predn, labelsn) + # TODO: maybe remove these 
`self.` arguments as they already are member variable + if self.args.plots: + self.confusion_matrix.process_batch(predn, labelsn) + self.stats.append((correct_bboxes, pred[:, 4], pred[:, 5], cls.squeeze(-1))) # (conf, pcls, tcls) + + # Save + if self.args.save_json: + self.pred_to_json(predn, batch['im_file'][si]) + if self.args.save_txt: + file = self.save_dir / 'labels' / f'{Path(batch["im_file"][si]).stem}.txt' + self.save_one_txt(predn, self.args.save_conf, shape, file) + + def finalize_metrics(self, *args, **kwargs): + """Set final values for metrics speed and confusion matrix.""" + self.metrics.speed = self.speed + self.metrics.confusion_matrix = self.confusion_matrix + + def get_stats(self): + """Returns metrics statistics and results dictionary.""" + stats = [torch.cat(x, 0).cpu().numpy() for x in zip(*self.stats)] # to numpy + if len(stats) and stats[0].any(): + self.metrics.process(*stats) + self.nt_per_class = np.bincount(stats[-1].astype(int), minlength=self.nc) # number of targets per class + return self.metrics.results_dict + + def print_results(self): + """Prints training/validation set metrics per class.""" + pf = '%22s' + '%11i' * 2 + '%11.3g' * len(self.metrics.keys) # print format + LOGGER.info(pf % ('all', self.seen, self.nt_per_class.sum(), *self.metrics.mean_results())) + if self.nt_per_class.sum() == 0: + LOGGER.warning( + f'WARNING ⚠️ no labels found in {self.args.task} set, can not compute metrics without labels') + + # Print results per class + if self.args.verbose and not self.training and self.nc > 1 and len(self.stats): + for i, c in enumerate(self.metrics.ap_class_index): + LOGGER.info(pf % (self.names[c], self.seen, self.nt_per_class[c], *self.metrics.class_result(i))) + + if self.args.plots: + for normalize in True, False: + self.confusion_matrix.plot(save_dir=self.save_dir, + names=self.names.values(), + normalize=normalize, + on_plot=self.on_plot) + + def _process_batch(self, detections, labels): + """ + Return correct prediction matrix + Arguments: + detections (array[N, 6]), x1, y1, x2, y2, conf, class + labels (array[M, 5]), class, x1, y1, x2, y2 + Returns: + correct (array[N, 10]), for 10 IoU levels + """ + iou = box_iou(labels[:, 1:], detections[:, :4]) + correct = np.zeros((detections.shape[0], self.iouv.shape[0])).astype(bool) + correct_class = labels[:, 0:1] == detections[:, 5] + for i in range(len(self.iouv)): + x = torch.where((iou >= self.iouv[i]) & correct_class) # IoU > threshold and classes match + if x[0].shape[0]: + matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), + 1).cpu().numpy() # [label, detect, iou] + if x[0].shape[0] > 1: + matches = matches[matches[:, 2].argsort()[::-1]] + matches = matches[np.unique(matches[:, 1], return_index=True)[1]] + # matches = matches[matches[:, 2].argsort()[::-1]] + matches = matches[np.unique(matches[:, 0], return_index=True)[1]] + correct[matches[:, 1].astype(int), i] = True + return torch.tensor(correct, dtype=torch.bool, device=detections.device) + + def build_dataset(self, img_path, mode='val', batch=None): + """Build YOLO Dataset + + Args: + img_path (str): Path to the folder containing images. + mode (str): `train` mode or `val` mode, users are able to customize different augmentations for each mode. + batch (int, optional): Size of batches, this is for `rect`. Defaults to None. 
+ """ + gs = max(int(de_parallel(self.model).stride if self.model else 0), 32) + return build_yolo_dataset(self.args, img_path, batch, self.data, mode=mode, stride=gs) + + def get_dataloader(self, dataset_path, batch_size): + """TODO: manage splits differently.""" + # Calculate stride - check if model is initialized + if self.args.v5loader: + LOGGER.warning("WARNING ⚠️ 'v5loader' feature is deprecated and will be removed soon. You can train using " + 'the default YOLOv8 dataloader instead, no argument is needed.') + gs = max(int(de_parallel(self.model).stride if self.model else 0), 32) + return create_dataloader(path=dataset_path, + imgsz=self.args.imgsz, + batch_size=batch_size, + stride=gs, + hyp=vars(self.args), + cache=False, + pad=0.5, + rect=self.args.rect, + workers=self.args.workers, + prefix=colorstr(f'{self.args.mode}: '), + shuffle=False, + seed=self.args.seed)[0] + + dataset = self.build_dataset(dataset_path, batch=batch_size, mode='val') + dataloader = build_dataloader(dataset, batch_size, self.args.workers, shuffle=False, rank=-1) + return dataloader + + def plot_val_samples(self, batch, ni): + """Plot validation image samples.""" + plot_images(batch['img'], + batch['batch_idx'], + batch['cls'].squeeze(-1), + batch['bboxes'], + paths=batch['im_file'], + fname=self.save_dir / f'val_batch{ni}_labels.jpg', + names=self.names, + on_plot=self.on_plot) + + def plot_predictions(self, batch, preds, ni): + """Plots predicted bounding boxes on input images and saves the result.""" + plot_images(batch['img'], + *output_to_target(preds, max_det=self.args.max_det), + paths=batch['im_file'], + fname=self.save_dir / f'val_batch{ni}_pred.jpg', + names=self.names, + on_plot=self.on_plot) # pred + + def save_one_txt(self, predn, save_conf, shape, file): + """Save YOLO detections to a txt file in normalized coordinates in a specific format.""" + gn = torch.tensor(shape)[[1, 0, 1, 0]] # normalization gain whwh + for *xyxy, conf, cls in predn.tolist(): + xywh = (ops.xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh + line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format + with open(file, 'a') as f: + f.write(('%g ' * len(line)).rstrip() % line + '\n') + + def pred_to_json(self, predn, filename): + """Serialize YOLO predictions to COCO json format.""" + stem = Path(filename).stem + image_id = int(stem) if stem.isnumeric() else stem + box = ops.xyxy2xywh(predn[:, :4]) # xywh + box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner + for p, b in zip(predn.tolist(), box.tolist()): + self.jdict.append({ + 'image_id': image_id, + 'category_id': self.class_map[int(p[5])], + 'bbox': [round(x, 3) for x in b], + 'score': round(p[4], 5)}) + + def eval_json(self, stats): + """Evaluates YOLO output in JSON format and returns performance statistics.""" + if self.args.save_json and self.is_coco and len(self.jdict): + anno_json = self.data['path'] / 'annotations/instances_val2017.json' # annotations + pred_json = self.save_dir / 'predictions.json' # predictions + LOGGER.info(f'\nEvaluating pycocotools mAP using {pred_json} and {anno_json}...') + try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb + check_requirements('pycocotools>=2.0.6') + from pycocotools.coco import COCO # noqa + from pycocotools.cocoeval import COCOeval # noqa + + for x in anno_json, pred_json: + assert x.is_file(), f'{x} file not found' + anno = COCO(str(anno_json)) # init annotations api + pred = anno.loadRes(str(pred_json)) # init predictions api 
(must pass string, not Path) + eval = COCOeval(anno, pred, 'bbox') + if self.is_coco: + eval.params.imgIds = [int(Path(x).stem) for x in self.dataloader.dataset.im_files] # images to eval + eval.evaluate() + eval.accumulate() + eval.summarize() + stats[self.metrics.keys[-1]], stats[self.metrics.keys[-2]] = eval.stats[:2] # update mAP50-95 and mAP50 + except Exception as e: + LOGGER.warning(f'pycocotools unable to run: {e}') + return stats + + +def val(cfg=DEFAULT_CFG, use_python=False): + """Validate trained YOLO model on validation dataset.""" + model = cfg.model or 'yolov8n.pt' + data = cfg.data or 'coco128.yaml' + + args = dict(model=model, data=data) + if use_python: + from ultralytics import YOLO + YOLO(model).val(**args) + else: + validator = DetectionValidator(args=args) + validator(model=args['model']) + + +if __name__ == '__main__': + val() diff --git a/ultralytics/yolo/v8/pose/__init__.py b/ultralytics/yolo/v8/pose/__init__.py new file mode 100644 index 0000000..8ec6d58 --- /dev/null +++ b/ultralytics/yolo/v8/pose/__init__.py @@ -0,0 +1,7 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +from .predict import PosePredictor, predict +from .train import PoseTrainer, train +from .val import PoseValidator, val + +__all__ = 'PoseTrainer', 'train', 'PoseValidator', 'val', 'PosePredictor', 'predict' diff --git a/ultralytics/yolo/v8/pose/predict.py b/ultralytics/yolo/v8/pose/predict.py new file mode 100644 index 0000000..ad3246e --- /dev/null +++ b/ultralytics/yolo/v8/pose/predict.py @@ -0,0 +1,58 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +from ultralytics.yolo.engine.results import Results +from ultralytics.yolo.utils import DEFAULT_CFG, ROOT, ops +from ultralytics.yolo.v8.detect.predict import DetectionPredictor + + +class PosePredictor(DetectionPredictor): + + def __init__(self, cfg=DEFAULT_CFG, overrides=None, _callbacks=None): + super().__init__(cfg, overrides, _callbacks) + self.args.task = 'pose' + + def postprocess(self, preds, img, orig_imgs): + """Return detection results for a given input image or list of images.""" + preds = ops.non_max_suppression(preds, + self.args.conf, + self.args.iou, + agnostic=self.args.agnostic_nms, + max_det=self.args.max_det, + classes=self.args.classes, + nc=len(self.model.names)) + + results = [] + for i, pred in enumerate(preds): + orig_img = orig_imgs[i] if isinstance(orig_imgs, list) else orig_imgs + shape = orig_img.shape + pred[:, :4] = ops.scale_boxes(img.shape[2:], pred[:, :4], shape).round() + pred_kpts = pred[:, 6:].view(len(pred), *self.model.kpt_shape) if len(pred) else pred[:, 6:] + pred_kpts = ops.scale_coords(img.shape[2:], pred_kpts, shape) + path = self.batch[0] + img_path = path[i] if isinstance(path, list) else path + results.append( + Results(orig_img=orig_img, + path=img_path, + names=self.model.names, + boxes=pred[:, :6], + keypoints=pred_kpts)) + return results + + +def predict(cfg=DEFAULT_CFG, use_python=False): + """Runs YOLO to predict objects in an image or video.""" + model = cfg.model or 'yolov8n-pose.pt' + source = cfg.source if cfg.source is not None else ROOT / 'assets' if (ROOT / 'assets').exists() \ + else 'https://ultralytics.com/images/bus.jpg' + + args = dict(model=model, source=source) + if use_python: + from ultralytics import YOLO + YOLO(model)(**args) + else: + predictor = PosePredictor(overrides=args) + predictor.predict_cli() + + +if __name__ == '__main__': + predict() diff --git a/ultralytics/yolo/v8/pose/train.py b/ultralytics/yolo/v8/pose/train.py new file mode 100644 index 0000000..af3043c --- /dev/null 
+++ b/ultralytics/yolo/v8/pose/train.py @@ -0,0 +1,77 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +from copy import copy + +from ultralytics.nn.tasks import PoseModel +from ultralytics.yolo import v8 +from ultralytics.yolo.utils import DEFAULT_CFG +from ultralytics.yolo.utils.plotting import plot_images, plot_results + + +# BaseTrainer python usage +class PoseTrainer(v8.detect.DetectionTrainer): + + def __init__(self, cfg=DEFAULT_CFG, overrides=None, _callbacks=None): + """Initialize a PoseTrainer object with specified configurations and overrides.""" + if overrides is None: + overrides = {} + overrides['task'] = 'pose' + super().__init__(cfg, overrides, _callbacks) + + def get_model(self, cfg=None, weights=None, verbose=True): + """Get pose estimation model with specified configuration and weights.""" + model = PoseModel(cfg, ch=3, nc=self.data['nc'], data_kpt_shape=self.data['kpt_shape'], verbose=verbose) + if weights: + model.load(weights) + + return model + + def set_model_attributes(self): + """Sets keypoints shape attribute of PoseModel.""" + super().set_model_attributes() + self.model.kpt_shape = self.data['kpt_shape'] + + def get_validator(self): + """Returns an instance of the PoseValidator class for validation.""" + self.loss_names = 'box_loss', 'pose_loss', 'kobj_loss', 'cls_loss', 'dfl_loss' + return v8.pose.PoseValidator(self.test_loader, save_dir=self.save_dir, args=copy(self.args)) + + def plot_training_samples(self, batch, ni): + """Plot a batch of training samples with annotated class labels, bounding boxes, and keypoints.""" + images = batch['img'] + kpts = batch['keypoints'] + cls = batch['cls'].squeeze(-1) + bboxes = batch['bboxes'] + paths = batch['im_file'] + batch_idx = batch['batch_idx'] + plot_images(images, + batch_idx, + cls, + bboxes, + kpts=kpts, + paths=paths, + fname=self.save_dir / f'train_batch{ni}.jpg', + on_plot=self.on_plot) + + def plot_metrics(self): + """Plots training/val metrics.""" + plot_results(file=self.csv, pose=True, on_plot=self.on_plot) # save results.png + + +def train(cfg=DEFAULT_CFG, use_python=False): + """Train the YOLO model on the given data and device.""" + model = cfg.model or 'yolov8n-pose.yaml' + data = cfg.data or 'coco8-pose.yaml' + device = cfg.device if cfg.device is not None else '' + + args = dict(model=model, data=data, device=device) + if use_python: + from ultralytics import YOLO + YOLO(model).train(**args) + else: + trainer = PoseTrainer(overrides=args) + trainer.train() + + +if __name__ == '__main__': + train() diff --git a/ultralytics/yolo/v8/pose/val.py b/ultralytics/yolo/v8/pose/val.py new file mode 100644 index 0000000..f3fc1ac --- /dev/null +++ b/ultralytics/yolo/v8/pose/val.py @@ -0,0 +1,224 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +from pathlib import Path + +import numpy as np +import torch + +from ultralytics.yolo.utils import DEFAULT_CFG, LOGGER, ops +from ultralytics.yolo.utils.checks import check_requirements +from ultralytics.yolo.utils.metrics import OKS_SIGMA, PoseMetrics, box_iou, kpt_iou +from ultralytics.yolo.utils.plotting import output_to_target, plot_images +from ultralytics.yolo.v8.detect import DetectionValidator + + +class PoseValidator(DetectionValidator): + + def __init__(self, dataloader=None, save_dir=None, pbar=None, args=None, _callbacks=None): + """Initialize a 'PoseValidator' object with custom parameters and assigned attributes.""" + super().__init__(dataloader, save_dir, pbar, args, _callbacks) + self.args.task = 'pose' + self.metrics = PoseMetrics(save_dir=self.save_dir, 
on_plot=self.on_plot) + + def preprocess(self, batch): + """Preprocesses the batch by converting the 'keypoints' data into a float and moving it to the device.""" + batch = super().preprocess(batch) + batch['keypoints'] = batch['keypoints'].to(self.device).float() + return batch + + def get_desc(self): + """Returns description of evaluation metrics in string format.""" + return ('%22s' + '%11s' * 10) % ('Class', 'Images', 'Instances', 'Box(P', 'R', 'mAP50', 'mAP50-95)', 'Pose(P', + 'R', 'mAP50', 'mAP50-95)') + + def postprocess(self, preds): + """Apply non-maximum suppression and return detections with high confidence scores.""" + return ops.non_max_suppression(preds, + self.args.conf, + self.args.iou, + labels=self.lb, + multi_label=True, + agnostic=self.args.single_cls, + max_det=self.args.max_det, + nc=self.nc) + + def init_metrics(self, model): + """Initiate pose estimation metrics for YOLO model.""" + super().init_metrics(model) + self.kpt_shape = self.data['kpt_shape'] + is_pose = self.kpt_shape == [17, 3] + nkpt = self.kpt_shape[0] + self.sigma = OKS_SIGMA if is_pose else np.ones(nkpt) / nkpt + + def update_metrics(self, preds, batch): + """Metrics.""" + for si, pred in enumerate(preds): + idx = batch['batch_idx'] == si + cls = batch['cls'][idx] + bbox = batch['bboxes'][idx] + kpts = batch['keypoints'][idx] + nl, npr = cls.shape[0], pred.shape[0] # number of labels, predictions + nk = kpts.shape[1] # number of keypoints + shape = batch['ori_shape'][si] + correct_kpts = torch.zeros(npr, self.niou, dtype=torch.bool, device=self.device) # init + correct_bboxes = torch.zeros(npr, self.niou, dtype=torch.bool, device=self.device) # init + self.seen += 1 + + if npr == 0: + if nl: + self.stats.append((correct_bboxes, correct_kpts, *torch.zeros( + (2, 0), device=self.device), cls.squeeze(-1))) + if self.args.plots: + self.confusion_matrix.process_batch(detections=None, labels=cls.squeeze(-1)) + continue + + # Predictions + if self.args.single_cls: + pred[:, 5] = 0 + predn = pred.clone() + ops.scale_boxes(batch['img'][si].shape[1:], predn[:, :4], shape, + ratio_pad=batch['ratio_pad'][si]) # native-space pred + pred_kpts = predn[:, 6:].view(npr, nk, -1) + ops.scale_coords(batch['img'][si].shape[1:], pred_kpts, shape, ratio_pad=batch['ratio_pad'][si]) + + # Evaluate + if nl: + height, width = batch['img'].shape[2:] + tbox = ops.xywh2xyxy(bbox) * torch.tensor( + (width, height, width, height), device=self.device) # target boxes + ops.scale_boxes(batch['img'][si].shape[1:], tbox, shape, + ratio_pad=batch['ratio_pad'][si]) # native-space labels + tkpts = kpts.clone() + tkpts[..., 0] *= width + tkpts[..., 1] *= height + tkpts = ops.scale_coords(batch['img'][si].shape[1:], tkpts, shape, ratio_pad=batch['ratio_pad'][si]) + labelsn = torch.cat((cls, tbox), 1) # native-space labels + correct_bboxes = self._process_batch(predn[:, :6], labelsn) + correct_kpts = self._process_batch(predn[:, :6], labelsn, pred_kpts, tkpts) + if self.args.plots: + self.confusion_matrix.process_batch(predn, labelsn) + + # Append correct_masks, correct_boxes, pconf, pcls, tcls + self.stats.append((correct_bboxes, correct_kpts, pred[:, 4], pred[:, 5], cls.squeeze(-1))) + + # Save + if self.args.save_json: + self.pred_to_json(predn, batch['im_file'][si]) + # if self.args.save_txt: + # save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / f'{path.stem}.txt') + + def _process_batch(self, detections, labels, pred_kpts=None, gt_kpts=None): + """ + Return correct prediction matrix + Arguments: + detections (array[N, 6]), 
x1, y1, x2, y2, conf, class + labels (array[M, 5]), class, x1, y1, x2, y2 + pred_kpts (array[N, 51]), 51 = 17 * 3 + gt_kpts (array[N, 51]) + Returns: + correct (array[N, 10]), for 10 IoU levels + """ + if pred_kpts is not None and gt_kpts is not None: + # `0.53` is from https://github.com/jin-s13/xtcocoapi/blob/master/xtcocotools/cocoeval.py#L384 + area = ops.xyxy2xywh(labels[:, 1:])[:, 2:].prod(1) * 0.53 + iou = kpt_iou(gt_kpts, pred_kpts, sigma=self.sigma, area=area) + else: # boxes + iou = box_iou(labels[:, 1:], detections[:, :4]) + + correct = np.zeros((detections.shape[0], self.iouv.shape[0])).astype(bool) + correct_class = labels[:, 0:1] == detections[:, 5] + for i in range(len(self.iouv)): + x = torch.where((iou >= self.iouv[i]) & correct_class) # IoU > threshold and classes match + if x[0].shape[0]: + matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), + 1).cpu().numpy() # [label, detect, iou] + if x[0].shape[0] > 1: + matches = matches[matches[:, 2].argsort()[::-1]] + matches = matches[np.unique(matches[:, 1], return_index=True)[1]] + # matches = matches[matches[:, 2].argsort()[::-1]] + matches = matches[np.unique(matches[:, 0], return_index=True)[1]] + correct[matches[:, 1].astype(int), i] = True + return torch.tensor(correct, dtype=torch.bool, device=detections.device) + + def plot_val_samples(self, batch, ni): + """Plots and saves validation set samples with predicted bounding boxes and keypoints.""" + plot_images(batch['img'], + batch['batch_idx'], + batch['cls'].squeeze(-1), + batch['bboxes'], + kpts=batch['keypoints'], + paths=batch['im_file'], + fname=self.save_dir / f'val_batch{ni}_labels.jpg', + names=self.names, + on_plot=self.on_plot) + + def plot_predictions(self, batch, preds, ni): + """Plots predictions for YOLO model.""" + pred_kpts = torch.cat([p[:, 6:].view(-1, *self.kpt_shape) for p in preds], 0) + plot_images(batch['img'], + *output_to_target(preds, max_det=self.args.max_det), + kpts=pred_kpts, + paths=batch['im_file'], + fname=self.save_dir / f'val_batch{ni}_pred.jpg', + names=self.names, + on_plot=self.on_plot) # pred + + def pred_to_json(self, predn, filename): + """Converts YOLO predictions to COCO JSON format.""" + stem = Path(filename).stem + image_id = int(stem) if stem.isnumeric() else stem + box = ops.xyxy2xywh(predn[:, :4]) # xywh + box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner + for p, b in zip(predn.tolist(), box.tolist()): + self.jdict.append({ + 'image_id': image_id, + 'category_id': self.class_map[int(p[5])], + 'bbox': [round(x, 3) for x in b], + 'keypoints': p[6:], + 'score': round(p[4], 5)}) + + def eval_json(self, stats): + """Evaluates object detection model using COCO JSON format.""" + if self.args.save_json and self.is_coco and len(self.jdict): + anno_json = self.data['path'] / 'annotations/person_keypoints_val2017.json' # annotations + pred_json = self.save_dir / 'predictions.json' # predictions + LOGGER.info(f'\nEvaluating pycocotools mAP using {pred_json} and {anno_json}...') + try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb + check_requirements('pycocotools>=2.0.6') + from pycocotools.coco import COCO # noqa + from pycocotools.cocoeval import COCOeval # noqa + + for x in anno_json, pred_json: + assert x.is_file(), f'{x} file not found' + anno = COCO(str(anno_json)) # init annotations api + pred = anno.loadRes(str(pred_json)) # init predictions api (must pass string, not Path) + for i, eval in enumerate([COCOeval(anno, pred, 'bbox'), COCOeval(anno, pred, 'keypoints')]): 
+ if self.is_coco: + eval.params.imgIds = [int(Path(x).stem) for x in self.dataloader.dataset.im_files] # im to eval + eval.evaluate() + eval.accumulate() + eval.summarize() + idx = i * 4 + 2 + stats[self.metrics.keys[idx + 1]], stats[ + self.metrics.keys[idx]] = eval.stats[:2] # update mAP50-95 and mAP50 + except Exception as e: + LOGGER.warning(f'pycocotools unable to run: {e}') + return stats + + +def val(cfg=DEFAULT_CFG, use_python=False): + """Performs validation on YOLO model using given data.""" + model = cfg.model or 'yolov8n-pose.pt' + data = cfg.data or 'coco8-pose.yaml' + + args = dict(model=model, data=data) + if use_python: + from ultralytics import YOLO + YOLO(model).val(**args) + else: + validator = PoseValidator(args=args) + validator(model=args['model']) + + +if __name__ == '__main__': + val() diff --git a/ultralytics/yolo/v8/segment/__init__.py b/ultralytics/yolo/v8/segment/__init__.py new file mode 100644 index 0000000..61a9efe --- /dev/null +++ b/ultralytics/yolo/v8/segment/__init__.py @@ -0,0 +1,7 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +from .predict import SegmentationPredictor, predict +from .train import SegmentationTrainer, train +from .val import SegmentationValidator, val + +__all__ = 'SegmentationPredictor', 'predict', 'SegmentationTrainer', 'train', 'SegmentationValidator', 'val' diff --git a/ultralytics/yolo/v8/segment/predict.py b/ultralytics/yolo/v8/segment/predict.py new file mode 100644 index 0000000..0b6ebc4 --- /dev/null +++ b/ultralytics/yolo/v8/segment/predict.py @@ -0,0 +1,63 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +import torch + +from ultralytics.yolo.engine.results import Results +from ultralytics.yolo.utils import DEFAULT_CFG, ROOT, ops +from ultralytics.yolo.v8.detect.predict import DetectionPredictor + + +class SegmentationPredictor(DetectionPredictor): + + def __init__(self, cfg=DEFAULT_CFG, overrides=None, _callbacks=None): + super().__init__(cfg, overrides, _callbacks) + self.args.task = 'segment' + + def postprocess(self, preds, img, orig_imgs): + """TODO: filter by classes.""" + p = ops.non_max_suppression(preds[0], + self.args.conf, + self.args.iou, + agnostic=self.args.agnostic_nms, + max_det=self.args.max_det, + nc=len(self.model.names), + classes=self.args.classes) + results = [] + proto = preds[1][-1] if len(preds[1]) == 3 else preds[1] # second output is len 3 if pt, but only 1 if exported + for i, pred in enumerate(p): + orig_img = orig_imgs[i] if isinstance(orig_imgs, list) else orig_imgs + path = self.batch[0] + img_path = path[i] if isinstance(path, list) else path + if not len(pred): # save empty boxes + results.append(Results(orig_img=orig_img, path=img_path, names=self.model.names, boxes=pred[:, :6])) + continue + if self.args.retina_masks: + if not isinstance(orig_imgs, torch.Tensor): + pred[:, :4] = ops.scale_boxes(img.shape[2:], pred[:, :4], orig_img.shape) + masks = ops.process_mask_native(proto[i], pred[:, 6:], pred[:, :4], orig_img.shape[:2]) # HWC + else: + masks = ops.process_mask(proto[i], pred[:, 6:], pred[:, :4], img.shape[2:], upsample=True) # HWC + if not isinstance(orig_imgs, torch.Tensor): + pred[:, :4] = ops.scale_boxes(img.shape[2:], pred[:, :4], orig_img.shape) + results.append( + Results(orig_img=orig_img, path=img_path, names=self.model.names, boxes=pred[:, :6], masks=masks)) + return results + + +def predict(cfg=DEFAULT_CFG, use_python=False): + """Runs YOLO object detection on an image or video source.""" + model = cfg.model or 'yolov8n-seg.pt' + source = cfg.source if cfg.source is not None 
else ROOT / 'assets' if (ROOT / 'assets').exists() \ + else 'https://ultralytics.com/images/bus.jpg' + + args = dict(model=model, source=source) + if use_python: + from ultralytics import YOLO + YOLO(model)(**args) + else: + predictor = SegmentationPredictor(overrides=args) + predictor.predict_cli() + + +if __name__ == '__main__': + predict() diff --git a/ultralytics/yolo/v8/segment/train.py b/ultralytics/yolo/v8/segment/train.py new file mode 100644 index 0000000..ab66cf0 --- /dev/null +++ b/ultralytics/yolo/v8/segment/train.py @@ -0,0 +1,65 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +from copy import copy + +from ultralytics.nn.tasks import SegmentationModel +from ultralytics.yolo import v8 +from ultralytics.yolo.utils import DEFAULT_CFG, RANK +from ultralytics.yolo.utils.plotting import plot_images, plot_results + + +# BaseTrainer python usage +class SegmentationTrainer(v8.detect.DetectionTrainer): + + def __init__(self, cfg=DEFAULT_CFG, overrides=None, _callbacks=None): + """Initialize a SegmentationTrainer object with given arguments.""" + if overrides is None: + overrides = {} + overrides['task'] = 'segment' + super().__init__(cfg, overrides, _callbacks) + + def get_model(self, cfg=None, weights=None, verbose=True): + """Return SegmentationModel initialized with specified config and weights.""" + model = SegmentationModel(cfg, ch=3, nc=self.data['nc'], verbose=verbose and RANK == -1) + if weights: + model.load(weights) + + return model + + def get_validator(self): + """Return an instance of SegmentationValidator for validation of YOLO model.""" + self.loss_names = 'box_loss', 'seg_loss', 'cls_loss', 'dfl_loss' + return v8.segment.SegmentationValidator(self.test_loader, save_dir=self.save_dir, args=copy(self.args)) + + def plot_training_samples(self, batch, ni): + """Creates a plot of training sample images with labels and box coordinates.""" + plot_images(batch['img'], + batch['batch_idx'], + batch['cls'].squeeze(-1), + batch['bboxes'], + batch['masks'], + paths=batch['im_file'], + fname=self.save_dir / f'train_batch{ni}.jpg', + on_plot=self.on_plot) + + def plot_metrics(self): + """Plots training/val metrics.""" + plot_results(file=self.csv, segment=True, on_plot=self.on_plot) # save results.png + + +def train(cfg=DEFAULT_CFG, use_python=False): + """Train a YOLO segmentation model based on passed arguments.""" + model = cfg.model or 'yolov8n-seg.pt' + data = cfg.data or 'coco128-seg.yaml' # or yolo.ClassificationDataset("mnist") + device = cfg.device if cfg.device is not None else '' + + args = dict(model=model, data=data, device=device) + if use_python: + from ultralytics import YOLO + YOLO(model).train(**args) + else: + trainer = SegmentationTrainer(overrides=args) + trainer.train() + + +if __name__ == '__main__': + train() diff --git a/ultralytics/yolo/v8/segment/val.py b/ultralytics/yolo/v8/segment/val.py new file mode 100644 index 0000000..73c2fe8 --- /dev/null +++ b/ultralytics/yolo/v8/segment/val.py @@ -0,0 +1,262 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +from multiprocessing.pool import ThreadPool +from pathlib import Path + +import numpy as np +import torch +import torch.nn.functional as F + +from ultralytics.yolo.utils import DEFAULT_CFG, LOGGER, NUM_THREADS, ops +from ultralytics.yolo.utils.checks import check_requirements +from ultralytics.yolo.utils.metrics import SegmentMetrics, box_iou, mask_iou +from ultralytics.yolo.utils.plotting import output_to_target, plot_images +from ultralytics.yolo.v8.detect import DetectionValidator + + +class 
SegmentationValidator(DetectionValidator): + + def __init__(self, dataloader=None, save_dir=None, pbar=None, args=None, _callbacks=None): + """Initialize SegmentationValidator and set task to 'segment', metrics to SegmentMetrics.""" + super().__init__(dataloader, save_dir, pbar, args, _callbacks) + self.args.task = 'segment' + self.metrics = SegmentMetrics(save_dir=self.save_dir, on_plot=self.on_plot) + + def preprocess(self, batch): + """Preprocesses batch by converting masks to float and sending to device.""" + batch = super().preprocess(batch) + batch['masks'] = batch['masks'].to(self.device).float() + return batch + + def init_metrics(self, model): + """Initialize metrics and select mask processing function based on save_json flag.""" + super().init_metrics(model) + self.plot_masks = [] + if self.args.save_json: + check_requirements('pycocotools>=2.0.6') + self.process = ops.process_mask_upsample # more accurate + else: + self.process = ops.process_mask # faster + + def get_desc(self): + """Return a formatted description of evaluation metrics.""" + return ('%22s' + '%11s' * 10) % ('Class', 'Images', 'Instances', 'Box(P', 'R', 'mAP50', 'mAP50-95)', 'Mask(P', + 'R', 'mAP50', 'mAP50-95)') + + def postprocess(self, preds): + """Postprocesses YOLO predictions and returns output detections with proto.""" + p = ops.non_max_suppression(preds[0], + self.args.conf, + self.args.iou, + labels=self.lb, + multi_label=True, + agnostic=self.args.single_cls, + max_det=self.args.max_det, + nc=self.nc) + proto = preds[1][-1] if len(preds[1]) == 3 else preds[1] # second output is len 3 if pt, but only 1 if exported + return p, proto + + def update_metrics(self, preds, batch): + """Metrics.""" + for si, (pred, proto) in enumerate(zip(preds[0], preds[1])): + idx = batch['batch_idx'] == si + cls = batch['cls'][idx] + bbox = batch['bboxes'][idx] + nl, npr = cls.shape[0], pred.shape[0] # number of labels, predictions + shape = batch['ori_shape'][si] + correct_masks = torch.zeros(npr, self.niou, dtype=torch.bool, device=self.device) # init + correct_bboxes = torch.zeros(npr, self.niou, dtype=torch.bool, device=self.device) # init + self.seen += 1 + + if npr == 0: + if nl: + self.stats.append((correct_bboxes, correct_masks, *torch.zeros( + (2, 0), device=self.device), cls.squeeze(-1))) + if self.args.plots: + self.confusion_matrix.process_batch(detections=None, labels=cls.squeeze(-1)) + continue + + # Masks + midx = [si] if self.args.overlap_mask else idx + gt_masks = batch['masks'][midx] + pred_masks = self.process(proto, pred[:, 6:], pred[:, :4], shape=batch['img'][si].shape[1:]) + + # Predictions + if self.args.single_cls: + pred[:, 5] = 0 + predn = pred.clone() + ops.scale_boxes(batch['img'][si].shape[1:], predn[:, :4], shape, + ratio_pad=batch['ratio_pad'][si]) # native-space pred + + # Evaluate + if nl: + height, width = batch['img'].shape[2:] + tbox = ops.xywh2xyxy(bbox) * torch.tensor( + (width, height, width, height), device=self.device) # target boxes + ops.scale_boxes(batch['img'][si].shape[1:], tbox, shape, + ratio_pad=batch['ratio_pad'][si]) # native-space labels + labelsn = torch.cat((cls, tbox), 1) # native-space labels + correct_bboxes = self._process_batch(predn, labelsn) + # TODO: maybe remove these `self.` arguments as they already are member variable + correct_masks = self._process_batch(predn, + labelsn, + pred_masks, + gt_masks, + overlap=self.args.overlap_mask, + masks=True) + if self.args.plots: + self.confusion_matrix.process_batch(predn, labelsn) + + # Append correct_masks, 
correct_boxes, pconf, pcls, tcls + self.stats.append((correct_bboxes, correct_masks, pred[:, 4], pred[:, 5], cls.squeeze(-1))) + + pred_masks = torch.as_tensor(pred_masks, dtype=torch.uint8) + if self.args.plots and self.batch_i < 3: + self.plot_masks.append(pred_masks[:15].cpu()) # filter top 15 to plot + + # Save + if self.args.save_json: + pred_masks = ops.scale_image(pred_masks.permute(1, 2, 0).contiguous().cpu().numpy(), + shape, + ratio_pad=batch['ratio_pad'][si]) + self.pred_to_json(predn, batch['im_file'][si], pred_masks) + # if self.args.save_txt: + # save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / f'{path.stem}.txt') + + def finalize_metrics(self, *args, **kwargs): + """Sets speed and confusion matrix for evaluation metrics.""" + self.metrics.speed = self.speed + self.metrics.confusion_matrix = self.confusion_matrix + + def _process_batch(self, detections, labels, pred_masks=None, gt_masks=None, overlap=False, masks=False): + """ + Return correct prediction matrix + Arguments: + detections (array[N, 6]), x1, y1, x2, y2, conf, class + labels (array[M, 5]), class, x1, y1, x2, y2 + Returns: + correct (array[N, 10]), for 10 IoU levels + """ + if masks: + if overlap: + nl = len(labels) + index = torch.arange(nl, device=gt_masks.device).view(nl, 1, 1) + 1 + gt_masks = gt_masks.repeat(nl, 1, 1) # shape(1,640,640) -> (n,640,640) + gt_masks = torch.where(gt_masks == index, 1.0, 0.0) + if gt_masks.shape[1:] != pred_masks.shape[1:]: + gt_masks = F.interpolate(gt_masks[None], pred_masks.shape[1:], mode='bilinear', align_corners=False)[0] + gt_masks = gt_masks.gt_(0.5) + iou = mask_iou(gt_masks.view(gt_masks.shape[0], -1), pred_masks.view(pred_masks.shape[0], -1)) + else: # boxes + iou = box_iou(labels[:, 1:], detections[:, :4]) + + correct = np.zeros((detections.shape[0], self.iouv.shape[0])).astype(bool) + correct_class = labels[:, 0:1] == detections[:, 5] + for i in range(len(self.iouv)): + x = torch.where((iou >= self.iouv[i]) & correct_class) # IoU > threshold and classes match + if x[0].shape[0]: + matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), + 1).cpu().numpy() # [label, detect, iou] + if x[0].shape[0] > 1: + matches = matches[matches[:, 2].argsort()[::-1]] + matches = matches[np.unique(matches[:, 1], return_index=True)[1]] + # matches = matches[matches[:, 2].argsort()[::-1]] + matches = matches[np.unique(matches[:, 0], return_index=True)[1]] + correct[matches[:, 1].astype(int), i] = True + return torch.tensor(correct, dtype=torch.bool, device=detections.device) + + def plot_val_samples(self, batch, ni): + """Plots validation samples with bounding box labels.""" + plot_images(batch['img'], + batch['batch_idx'], + batch['cls'].squeeze(-1), + batch['bboxes'], + batch['masks'], + paths=batch['im_file'], + fname=self.save_dir / f'val_batch{ni}_labels.jpg', + names=self.names, + on_plot=self.on_plot) + + def plot_predictions(self, batch, preds, ni): + """Plots batch predictions with masks and bounding boxes.""" + plot_images( + batch['img'], + *output_to_target(preds[0], max_det=15), # not set to self.args.max_det due to slow plotting speed + torch.cat(self.plot_masks, dim=0) if len(self.plot_masks) else self.plot_masks, + paths=batch['im_file'], + fname=self.save_dir / f'val_batch{ni}_pred.jpg', + names=self.names, + on_plot=self.on_plot) # pred + self.plot_masks.clear() + + def pred_to_json(self, predn, filename, pred_masks): + """Save one JSON result.""" + # Example result = {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], 
"score": 0.236} + from pycocotools.mask import encode # noqa + + def single_encode(x): + """Encode predicted masks as RLE and append results to jdict.""" + rle = encode(np.asarray(x[:, :, None], order='F', dtype='uint8'))[0] + rle['counts'] = rle['counts'].decode('utf-8') + return rle + + stem = Path(filename).stem + image_id = int(stem) if stem.isnumeric() else stem + box = ops.xyxy2xywh(predn[:, :4]) # xywh + box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner + pred_masks = np.transpose(pred_masks, (2, 0, 1)) + with ThreadPool(NUM_THREADS) as pool: + rles = pool.map(single_encode, pred_masks) + for i, (p, b) in enumerate(zip(predn.tolist(), box.tolist())): + self.jdict.append({ + 'image_id': image_id, + 'category_id': self.class_map[int(p[5])], + 'bbox': [round(x, 3) for x in b], + 'score': round(p[4], 5), + 'segmentation': rles[i]}) + + def eval_json(self, stats): + """Return COCO-style object detection evaluation metrics.""" + if self.args.save_json and self.is_coco and len(self.jdict): + anno_json = self.data['path'] / 'annotations/instances_val2017.json' # annotations + pred_json = self.save_dir / 'predictions.json' # predictions + LOGGER.info(f'\nEvaluating pycocotools mAP using {pred_json} and {anno_json}...') + try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb + check_requirements('pycocotools>=2.0.6') + from pycocotools.coco import COCO # noqa + from pycocotools.cocoeval import COCOeval # noqa + + for x in anno_json, pred_json: + assert x.is_file(), f'{x} file not found' + anno = COCO(str(anno_json)) # init annotations api + pred = anno.loadRes(str(pred_json)) # init predictions api (must pass string, not Path) + for i, eval in enumerate([COCOeval(anno, pred, 'bbox'), COCOeval(anno, pred, 'segm')]): + if self.is_coco: + eval.params.imgIds = [int(Path(x).stem) for x in self.dataloader.dataset.im_files] # im to eval + eval.evaluate() + eval.accumulate() + eval.summarize() + idx = i * 4 + 2 + stats[self.metrics.keys[idx + 1]], stats[ + self.metrics.keys[idx]] = eval.stats[:2] # update mAP50-95 and mAP50 + except Exception as e: + LOGGER.warning(f'pycocotools unable to run: {e}') + return stats + + +def val(cfg=DEFAULT_CFG, use_python=False): + """Validate trained YOLO model on validation data.""" + model = cfg.model or 'yolov8n-seg.pt' + data = cfg.data or 'coco128-seg.yaml' + + args = dict(model=model, data=data) + if use_python: + from ultralytics import YOLO + YOLO(model).val(**args) + else: + validator = SegmentationValidator(args=args) + validator(model=args['model']) + + +if __name__ == '__main__': + val() diff --git a/weights/plate_rec_color.pth b/weights/plate_rec_color.pth new file mode 100644 index 0000000..2aa9938 Binary files /dev/null and b/weights/plate_rec_color.pth differ diff --git a/weights/yolov8-lite-t-plate.pt b/weights/yolov8-lite-t-plate.pt new file mode 100644 index 0000000..e77d56b Binary files /dev/null and b/weights/yolov8-lite-t-plate.pt differ diff --git a/widerface_evaluate/README.md b/widerface_evaluate/README.md new file mode 100644 index 0000000..95952b7 --- /dev/null +++ b/widerface_evaluate/README.md @@ -0,0 +1,27 @@ +# WiderFace-Evaluation +Python Evaluation Code for [Wider Face Dataset](http://mmlab.ie.cuhk.edu.hk/projects/WIDERFace/) + + +## Usage + + +##### before evaluating .... 
+ +```` +python3 setup.py build_ext --inplace +```` + +##### evaluating + +**GroungTruth:** `wider_face_val.mat`, `wider_easy_val.mat`, `wider_medium_val.mat`,`wider_hard_val.mat` + +```` +python3 evaluation.py -p -g +```` + +## Bugs & Problems +please issue + +## Acknowledgements + +some code borrowed from Sergey Karayev diff --git a/widerface_evaluate/box_overlaps.c b/widerface_evaluate/box_overlaps.c new file mode 100644 index 0000000..9a890a0 --- /dev/null +++ b/widerface_evaluate/box_overlaps.c @@ -0,0 +1,7813 @@ +/* Generated by Cython 0.29.21 */ + +/* BEGIN: Cython Metadata +{ + "distutils": { + "depends": [ + "/home/deepcam/miniconda3/lib/python3.7/site-packages/numpy/core/include/numpy/arrayobject.h", + "/home/deepcam/miniconda3/lib/python3.7/site-packages/numpy/core/include/numpy/ufuncobject.h" + ], + "include_dirs": [ + "/home/deepcam/miniconda3/lib/python3.7/site-packages/numpy/core/include" + ], + "name": "bbox", + "sources": [ + "box_overlaps.pyx" + ] + }, + "module_name": "bbox" +} +END: Cython Metadata */ + +#define PY_SSIZE_T_CLEAN +#include "Python.h" +#ifndef Py_PYTHON_H + #error Python headers needed to compile C extensions, please install development version of Python. +#elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000) + #error Cython requires Python 2.6+ or Python 3.3+. +#else +#define CYTHON_ABI "0_29_21" +#define CYTHON_HEX_VERSION 0x001D15F0 +#define CYTHON_FUTURE_DIVISION 0 +#include +#ifndef offsetof + #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) +#endif +#if !defined(WIN32) && !defined(MS_WINDOWS) + #ifndef __stdcall + #define __stdcall + #endif + #ifndef __cdecl + #define __cdecl + #endif + #ifndef __fastcall + #define __fastcall + #endif +#endif +#ifndef DL_IMPORT + #define DL_IMPORT(t) t +#endif +#ifndef DL_EXPORT + #define DL_EXPORT(t) t +#endif +#define __PYX_COMMA , +#ifndef HAVE_LONG_LONG + #if PY_VERSION_HEX >= 0x02070000 + #define HAVE_LONG_LONG + #endif +#endif +#ifndef PY_LONG_LONG + #define PY_LONG_LONG LONG_LONG +#endif +#ifndef Py_HUGE_VAL + #define Py_HUGE_VAL HUGE_VAL +#endif +#ifdef PYPY_VERSION + #define CYTHON_COMPILING_IN_PYPY 1 + #define CYTHON_COMPILING_IN_PYSTON 0 + #define CYTHON_COMPILING_IN_CPYTHON 0 + #undef CYTHON_USE_TYPE_SLOTS + #define CYTHON_USE_TYPE_SLOTS 0 + #undef CYTHON_USE_PYTYPE_LOOKUP + #define CYTHON_USE_PYTYPE_LOOKUP 0 + #if PY_VERSION_HEX < 0x03050000 + #undef CYTHON_USE_ASYNC_SLOTS + #define CYTHON_USE_ASYNC_SLOTS 0 + #elif !defined(CYTHON_USE_ASYNC_SLOTS) + #define CYTHON_USE_ASYNC_SLOTS 1 + #endif + #undef CYTHON_USE_PYLIST_INTERNALS + #define CYTHON_USE_PYLIST_INTERNALS 0 + #undef CYTHON_USE_UNICODE_INTERNALS + #define CYTHON_USE_UNICODE_INTERNALS 0 + #undef CYTHON_USE_UNICODE_WRITER + #define CYTHON_USE_UNICODE_WRITER 0 + #undef CYTHON_USE_PYLONG_INTERNALS + #define CYTHON_USE_PYLONG_INTERNALS 0 + #undef CYTHON_AVOID_BORROWED_REFS + #define CYTHON_AVOID_BORROWED_REFS 1 + #undef CYTHON_ASSUME_SAFE_MACROS + #define CYTHON_ASSUME_SAFE_MACROS 0 + #undef CYTHON_UNPACK_METHODS + #define CYTHON_UNPACK_METHODS 0 + #undef CYTHON_FAST_THREAD_STATE + #define CYTHON_FAST_THREAD_STATE 0 + #undef CYTHON_FAST_PYCALL + #define CYTHON_FAST_PYCALL 0 + #undef CYTHON_PEP489_MULTI_PHASE_INIT + #define CYTHON_PEP489_MULTI_PHASE_INIT 0 + #undef CYTHON_USE_TP_FINALIZE + #define CYTHON_USE_TP_FINALIZE 0 + #undef CYTHON_USE_DICT_VERSIONS + #define CYTHON_USE_DICT_VERSIONS 0 + #undef CYTHON_USE_EXC_INFO_STACK + #define CYTHON_USE_EXC_INFO_STACK 0 +#elif 
defined(PYSTON_VERSION) + #define CYTHON_COMPILING_IN_PYPY 0 + #define CYTHON_COMPILING_IN_PYSTON 1 + #define CYTHON_COMPILING_IN_CPYTHON 0 + #ifndef CYTHON_USE_TYPE_SLOTS + #define CYTHON_USE_TYPE_SLOTS 1 + #endif + #undef CYTHON_USE_PYTYPE_LOOKUP + #define CYTHON_USE_PYTYPE_LOOKUP 0 + #undef CYTHON_USE_ASYNC_SLOTS + #define CYTHON_USE_ASYNC_SLOTS 0 + #undef CYTHON_USE_PYLIST_INTERNALS + #define CYTHON_USE_PYLIST_INTERNALS 0 + #ifndef CYTHON_USE_UNICODE_INTERNALS + #define CYTHON_USE_UNICODE_INTERNALS 1 + #endif + #undef CYTHON_USE_UNICODE_WRITER + #define CYTHON_USE_UNICODE_WRITER 0 + #undef CYTHON_USE_PYLONG_INTERNALS + #define CYTHON_USE_PYLONG_INTERNALS 0 + #ifndef CYTHON_AVOID_BORROWED_REFS + #define CYTHON_AVOID_BORROWED_REFS 0 + #endif + #ifndef CYTHON_ASSUME_SAFE_MACROS + #define CYTHON_ASSUME_SAFE_MACROS 1 + #endif + #ifndef CYTHON_UNPACK_METHODS + #define CYTHON_UNPACK_METHODS 1 + #endif + #undef CYTHON_FAST_THREAD_STATE + #define CYTHON_FAST_THREAD_STATE 0 + #undef CYTHON_FAST_PYCALL + #define CYTHON_FAST_PYCALL 0 + #undef CYTHON_PEP489_MULTI_PHASE_INIT + #define CYTHON_PEP489_MULTI_PHASE_INIT 0 + #undef CYTHON_USE_TP_FINALIZE + #define CYTHON_USE_TP_FINALIZE 0 + #undef CYTHON_USE_DICT_VERSIONS + #define CYTHON_USE_DICT_VERSIONS 0 + #undef CYTHON_USE_EXC_INFO_STACK + #define CYTHON_USE_EXC_INFO_STACK 0 +#else + #define CYTHON_COMPILING_IN_PYPY 0 + #define CYTHON_COMPILING_IN_PYSTON 0 + #define CYTHON_COMPILING_IN_CPYTHON 1 + #ifndef CYTHON_USE_TYPE_SLOTS + #define CYTHON_USE_TYPE_SLOTS 1 + #endif + #if PY_VERSION_HEX < 0x02070000 + #undef CYTHON_USE_PYTYPE_LOOKUP + #define CYTHON_USE_PYTYPE_LOOKUP 0 + #elif !defined(CYTHON_USE_PYTYPE_LOOKUP) + #define CYTHON_USE_PYTYPE_LOOKUP 1 + #endif + #if PY_MAJOR_VERSION < 3 + #undef CYTHON_USE_ASYNC_SLOTS + #define CYTHON_USE_ASYNC_SLOTS 0 + #elif !defined(CYTHON_USE_ASYNC_SLOTS) + #define CYTHON_USE_ASYNC_SLOTS 1 + #endif + #if PY_VERSION_HEX < 0x02070000 + #undef CYTHON_USE_PYLONG_INTERNALS + #define CYTHON_USE_PYLONG_INTERNALS 0 + #elif !defined(CYTHON_USE_PYLONG_INTERNALS) + #define CYTHON_USE_PYLONG_INTERNALS 1 + #endif + #ifndef CYTHON_USE_PYLIST_INTERNALS + #define CYTHON_USE_PYLIST_INTERNALS 1 + #endif + #ifndef CYTHON_USE_UNICODE_INTERNALS + #define CYTHON_USE_UNICODE_INTERNALS 1 + #endif + #if PY_VERSION_HEX < 0x030300F0 + #undef CYTHON_USE_UNICODE_WRITER + #define CYTHON_USE_UNICODE_WRITER 0 + #elif !defined(CYTHON_USE_UNICODE_WRITER) + #define CYTHON_USE_UNICODE_WRITER 1 + #endif + #ifndef CYTHON_AVOID_BORROWED_REFS + #define CYTHON_AVOID_BORROWED_REFS 0 + #endif + #ifndef CYTHON_ASSUME_SAFE_MACROS + #define CYTHON_ASSUME_SAFE_MACROS 1 + #endif + #ifndef CYTHON_UNPACK_METHODS + #define CYTHON_UNPACK_METHODS 1 + #endif + #ifndef CYTHON_FAST_THREAD_STATE + #define CYTHON_FAST_THREAD_STATE 1 + #endif + #ifndef CYTHON_FAST_PYCALL + #define CYTHON_FAST_PYCALL 1 + #endif + #ifndef CYTHON_PEP489_MULTI_PHASE_INIT + #define CYTHON_PEP489_MULTI_PHASE_INIT (PY_VERSION_HEX >= 0x03050000) + #endif + #ifndef CYTHON_USE_TP_FINALIZE + #define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1) + #endif + #ifndef CYTHON_USE_DICT_VERSIONS + #define CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX >= 0x030600B1) + #endif + #ifndef CYTHON_USE_EXC_INFO_STACK + #define CYTHON_USE_EXC_INFO_STACK (PY_VERSION_HEX >= 0x030700A3) + #endif +#endif +#if !defined(CYTHON_FAST_PYCCALL) +#define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1) +#endif +#if CYTHON_USE_PYLONG_INTERNALS + #include "longintrepr.h" + #undef SHIFT + #undef BASE + 
#undef MASK + #ifdef SIZEOF_VOID_P + enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) }; + #endif +#endif +#ifndef __has_attribute + #define __has_attribute(x) 0 +#endif +#ifndef __has_cpp_attribute + #define __has_cpp_attribute(x) 0 +#endif +#ifndef CYTHON_RESTRICT + #if defined(__GNUC__) + #define CYTHON_RESTRICT __restrict__ + #elif defined(_MSC_VER) && _MSC_VER >= 1400 + #define CYTHON_RESTRICT __restrict + #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L + #define CYTHON_RESTRICT restrict + #else + #define CYTHON_RESTRICT + #endif +#endif +#ifndef CYTHON_UNUSED +# if defined(__GNUC__) +# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) +# define CYTHON_UNUSED __attribute__ ((__unused__)) +# else +# define CYTHON_UNUSED +# endif +# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) +# define CYTHON_UNUSED __attribute__ ((__unused__)) +# else +# define CYTHON_UNUSED +# endif +#endif +#ifndef CYTHON_MAYBE_UNUSED_VAR +# if defined(__cplusplus) + template void CYTHON_MAYBE_UNUSED_VAR( const T& ) { } +# else +# define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x) +# endif +#endif +#ifndef CYTHON_NCP_UNUSED +# if CYTHON_COMPILING_IN_CPYTHON +# define CYTHON_NCP_UNUSED +# else +# define CYTHON_NCP_UNUSED CYTHON_UNUSED +# endif +#endif +#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None) +#ifdef _MSC_VER + #ifndef _MSC_STDINT_H_ + #if _MSC_VER < 1300 + typedef unsigned char uint8_t; + typedef unsigned int uint32_t; + #else + typedef unsigned __int8 uint8_t; + typedef unsigned __int32 uint32_t; + #endif + #endif +#else + #include +#endif +#ifndef CYTHON_FALLTHROUGH + #if defined(__cplusplus) && __cplusplus >= 201103L + #if __has_cpp_attribute(fallthrough) + #define CYTHON_FALLTHROUGH [[fallthrough]] + #elif __has_cpp_attribute(clang::fallthrough) + #define CYTHON_FALLTHROUGH [[clang::fallthrough]] + #elif __has_cpp_attribute(gnu::fallthrough) + #define CYTHON_FALLTHROUGH [[gnu::fallthrough]] + #endif + #endif + #ifndef CYTHON_FALLTHROUGH + #if __has_attribute(fallthrough) + #define CYTHON_FALLTHROUGH __attribute__((fallthrough)) + #else + #define CYTHON_FALLTHROUGH + #endif + #endif + #if defined(__clang__ ) && defined(__apple_build_version__) + #if __apple_build_version__ < 7000000 + #undef CYTHON_FALLTHROUGH + #define CYTHON_FALLTHROUGH + #endif + #endif +#endif + +#ifndef CYTHON_INLINE + #if defined(__clang__) + #define CYTHON_INLINE __inline__ __attribute__ ((__unused__)) + #elif defined(__GNUC__) + #define CYTHON_INLINE __inline__ + #elif defined(_MSC_VER) + #define CYTHON_INLINE __inline + #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L + #define CYTHON_INLINE inline + #else + #define CYTHON_INLINE + #endif +#endif + +#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag) + #define Py_OptimizeFlag 0 +#endif +#define __PYX_BUILD_PY_SSIZE_T "n" +#define CYTHON_FORMAT_SSIZE_T "z" +#if PY_MAJOR_VERSION < 3 + #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" + #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ + PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) + #define __Pyx_DefaultClassType PyClass_Type +#else + #define __Pyx_BUILTIN_MODULE_NAME "builtins" +#if PY_VERSION_HEX >= 0x030800A4 && PY_VERSION_HEX < 0x030800B2 + #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ + PyCode_New(a, 0, k, l, s, f, code, c, n, v, 
fv, cell, fn, name, fline, lnos) +#else + #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ + PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) +#endif + #define __Pyx_DefaultClassType PyType_Type +#endif +#ifndef Py_TPFLAGS_CHECKTYPES + #define Py_TPFLAGS_CHECKTYPES 0 +#endif +#ifndef Py_TPFLAGS_HAVE_INDEX + #define Py_TPFLAGS_HAVE_INDEX 0 +#endif +#ifndef Py_TPFLAGS_HAVE_NEWBUFFER + #define Py_TPFLAGS_HAVE_NEWBUFFER 0 +#endif +#ifndef Py_TPFLAGS_HAVE_FINALIZE + #define Py_TPFLAGS_HAVE_FINALIZE 0 +#endif +#ifndef METH_STACKLESS + #define METH_STACKLESS 0 +#endif +#if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL) + #ifndef METH_FASTCALL + #define METH_FASTCALL 0x80 + #endif + typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs); + typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args, + Py_ssize_t nargs, PyObject *kwnames); +#else + #define __Pyx_PyCFunctionFast _PyCFunctionFast + #define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords +#endif +#if CYTHON_FAST_PYCCALL +#define __Pyx_PyFastCFunction_Check(func)\ + ((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))))) +#else +#define __Pyx_PyFastCFunction_Check(func) 0 +#endif +#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc) + #define PyObject_Malloc(s) PyMem_Malloc(s) + #define PyObject_Free(p) PyMem_Free(p) + #define PyObject_Realloc(p) PyMem_Realloc(p) +#endif +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030400A1 + #define PyMem_RawMalloc(n) PyMem_Malloc(n) + #define PyMem_RawRealloc(p, n) PyMem_Realloc(p, n) + #define PyMem_RawFree(p) PyMem_Free(p) +#endif +#if CYTHON_COMPILING_IN_PYSTON + #define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co) + #define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno) +#else + #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0) + #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno) +#endif +#if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000 + #define __Pyx_PyThreadState_Current PyThreadState_GET() +#elif PY_VERSION_HEX >= 0x03060000 + #define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet() +#elif PY_VERSION_HEX >= 0x03000000 + #define __Pyx_PyThreadState_Current PyThreadState_GET() +#else + #define __Pyx_PyThreadState_Current _PyThreadState_Current +#endif +#if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT) +#include "pythread.h" +#define Py_tss_NEEDS_INIT 0 +typedef int Py_tss_t; +static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) { + *key = PyThread_create_key(); + return 0; +} +static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) { + Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t)); + *key = Py_tss_NEEDS_INIT; + return key; +} +static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) { + PyObject_Free(key); +} +static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) { + return *key != Py_tss_NEEDS_INIT; +} +static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) { + PyThread_delete_key(*key); + *key = Py_tss_NEEDS_INIT; +} +static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) { + return PyThread_set_key_value(*key, value); +} +static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) { + return 
PyThread_get_key_value(*key); +} +#endif +#if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized) +#define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? PyDict_New() : _PyDict_NewPresized(n)) +#else +#define __Pyx_PyDict_NewPresized(n) PyDict_New() +#endif +#if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION + #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) + #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) +#else + #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) + #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) +#endif +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && CYTHON_USE_UNICODE_INTERNALS +#define __Pyx_PyDict_GetItemStr(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash) +#else +#define __Pyx_PyDict_GetItemStr(dict, name) PyDict_GetItem(dict, name) +#endif +#if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) + #define CYTHON_PEP393_ENABLED 1 + #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\ + 0 : _PyUnicode_Ready((PyObject *)(op))) + #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) + #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) + #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u) + #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u) + #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) + #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) + #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch) + #if defined(PyUnicode_IS_READY) && defined(PyUnicode_GET_SIZE) + #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u))) + #else + #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_LENGTH(u)) + #endif +#else + #define CYTHON_PEP393_ENABLED 0 + #define PyUnicode_1BYTE_KIND 1 + #define PyUnicode_2BYTE_KIND 2 + #define PyUnicode_4BYTE_KIND 4 + #define __Pyx_PyUnicode_READY(op) (0) + #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) + #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) + #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 65535 : 1114111) + #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE)) + #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) + #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) + #define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch) + #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u)) +#endif +#if CYTHON_COMPILING_IN_PYPY + #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) + #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) +#else + #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) + #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\ + PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) +#endif +#if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains) + #define PyUnicode_Contains(u, s) PySequence_Contains(u, s) +#endif +#if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check) + #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type) +#endif +#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format) + #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt) +#endif +#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? 
PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) +#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) +#if PY_MAJOR_VERSION >= 3 + #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) +#else + #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) +#endif +#if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII) + #define PyObject_ASCII(o) PyObject_Repr(o) +#endif +#if PY_MAJOR_VERSION >= 3 + #define PyBaseString_Type PyUnicode_Type + #define PyStringObject PyUnicodeObject + #define PyString_Type PyUnicode_Type + #define PyString_Check PyUnicode_Check + #define PyString_CheckExact PyUnicode_CheckExact +#ifndef PyObject_Unicode + #define PyObject_Unicode PyObject_Str +#endif +#endif +#if PY_MAJOR_VERSION >= 3 + #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) + #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) +#else + #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj)) + #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) +#endif +#ifndef PySet_CheckExact + #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) +#endif +#if PY_VERSION_HEX >= 0x030900A4 + #define __Pyx_SET_REFCNT(obj, refcnt) Py_SET_REFCNT(obj, refcnt) + #define __Pyx_SET_SIZE(obj, size) Py_SET_SIZE(obj, size) +#else + #define __Pyx_SET_REFCNT(obj, refcnt) Py_REFCNT(obj) = (refcnt) + #define __Pyx_SET_SIZE(obj, size) Py_SIZE(obj) = (size) +#endif +#if CYTHON_ASSUME_SAFE_MACROS + #define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq) +#else + #define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq) +#endif +#if PY_MAJOR_VERSION >= 3 + #define PyIntObject PyLongObject + #define PyInt_Type PyLong_Type + #define PyInt_Check(op) PyLong_Check(op) + #define PyInt_CheckExact(op) PyLong_CheckExact(op) + #define PyInt_FromString PyLong_FromString + #define PyInt_FromUnicode PyLong_FromUnicode + #define PyInt_FromLong PyLong_FromLong + #define PyInt_FromSize_t PyLong_FromSize_t + #define PyInt_FromSsize_t PyLong_FromSsize_t + #define PyInt_AsLong PyLong_AsLong + #define PyInt_AS_LONG PyLong_AS_LONG + #define PyInt_AsSsize_t PyLong_AsSsize_t + #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask + #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask + #define PyNumber_Int PyNumber_Long +#endif +#if PY_MAJOR_VERSION >= 3 + #define PyBoolObject PyLongObject +#endif +#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY + #ifndef PyUnicode_InternFromString + #define PyUnicode_InternFromString(s) PyUnicode_FromString(s) + #endif +#endif +#if PY_VERSION_HEX < 0x030200A4 + typedef long Py_hash_t; + #define __Pyx_PyInt_FromHash_t PyInt_FromLong + #define __Pyx_PyInt_AsHash_t PyInt_AsLong +#else + #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t + #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t +#endif +#if PY_MAJOR_VERSION >= 3 + #define __Pyx_PyMethod_New(func, self, klass) ((self) ? 
((void)(klass), PyMethod_New(func, self)) : __Pyx_NewRef(func)) +#else + #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass) +#endif +#if CYTHON_USE_ASYNC_SLOTS + #if PY_VERSION_HEX >= 0x030500B1 + #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods + #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async) + #else + #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved)) + #endif +#else + #define __Pyx_PyType_AsAsync(obj) NULL +#endif +#ifndef __Pyx_PyAsyncMethodsStruct + typedef struct { + unaryfunc am_await; + unaryfunc am_aiter; + unaryfunc am_anext; + } __Pyx_PyAsyncMethodsStruct; +#endif + +#if defined(WIN32) || defined(MS_WINDOWS) + #define _USE_MATH_DEFINES +#endif +#include +#ifdef NAN +#define __PYX_NAN() ((float) NAN) +#else +static CYTHON_INLINE float __PYX_NAN() { + float value; + memset(&value, 0xFF, sizeof(value)); + return value; +} +#endif +#if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL) +#define __Pyx_truncl trunc +#else +#define __Pyx_truncl truncl +#endif + +#define __PYX_MARK_ERR_POS(f_index, lineno) \ + { __pyx_filename = __pyx_f[f_index]; (void)__pyx_filename; __pyx_lineno = lineno; (void)__pyx_lineno; __pyx_clineno = __LINE__; (void)__pyx_clineno; } +#define __PYX_ERR(f_index, lineno, Ln_error) \ + { __PYX_MARK_ERR_POS(f_index, lineno) goto Ln_error; } + +#ifndef __PYX_EXTERN_C + #ifdef __cplusplus + #define __PYX_EXTERN_C extern "C" + #else + #define __PYX_EXTERN_C extern + #endif +#endif + +#define __PYX_HAVE__bbox +#define __PYX_HAVE_API__bbox +/* Early includes */ +#include +#include +#include "numpy/arrayobject.h" +#include "numpy/ufuncobject.h" + + /* NumPy API declarations from "numpy/__init__.pxd" */ + +#ifdef _OPENMP +#include +#endif /* _OPENMP */ + +#if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS) +#define CYTHON_WITHOUT_ASSERTIONS +#endif + +typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding; + const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; + +#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0 +#define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0 +#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT (PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8) +#define __PYX_DEFAULT_STRING_ENCODING "" +#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString +#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize +#define __Pyx_uchar_cast(c) ((unsigned char)c) +#define __Pyx_long_cast(x) ((long)x) +#define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\ + (sizeof(type) < sizeof(Py_ssize_t)) ||\ + (sizeof(type) > sizeof(Py_ssize_t) &&\ + likely(v < (type)PY_SSIZE_T_MAX ||\ + v == (type)PY_SSIZE_T_MAX) &&\ + (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\ + v == (type)PY_SSIZE_T_MIN))) ||\ + (sizeof(type) == sizeof(Py_ssize_t) &&\ + (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\ + v == (type)PY_SSIZE_T_MAX))) ) +static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) { + return (size_t) i < (size_t) limit; +} +#if defined (__cplusplus) && __cplusplus >= 201103L + #include + #define __Pyx_sst_abs(value) std::abs(value) +#elif SIZEOF_INT >= SIZEOF_SIZE_T + #define __Pyx_sst_abs(value) abs(value) +#elif SIZEOF_LONG >= SIZEOF_SIZE_T + #define __Pyx_sst_abs(value) labs(value) +#elif defined (_MSC_VER) + #define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value)) +#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L + #define __Pyx_sst_abs(value) 
llabs(value) +#elif defined (__GNUC__) + #define __Pyx_sst_abs(value) __builtin_llabs(value) +#else + #define __Pyx_sst_abs(value) ((value<0) ? -value : value) +#endif +static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*); +static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); +#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) +#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) +#define __Pyx_PyBytes_FromString PyBytes_FromString +#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize +static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*); +#if PY_MAJOR_VERSION < 3 + #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString + #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize +#else + #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString + #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize +#endif +#define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s) +#define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s) +#define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s) +#define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s) +#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s) +static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) { + const Py_UNICODE *u_end = u; + while (*u_end++) ; + return (size_t)(u_end - u - 1); +} +#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) +#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode +#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode +#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj) +#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None) +static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b); +static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); +static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*); +static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x); +#define __Pyx_PySequence_Tuple(obj)\ + (likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj)) +static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); +static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); +#if CYTHON_ASSUME_SAFE_MACROS +#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? 
PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) +#else +#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) +#endif +#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) +#if PY_MAJOR_VERSION >= 3 +#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x)) +#else +#define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x)) +#endif +#define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x)) +#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII +static int __Pyx_sys_getdefaultencoding_not_ascii; +static int __Pyx_init_sys_getdefaultencoding_params(void) { + PyObject* sys; + PyObject* default_encoding = NULL; + PyObject* ascii_chars_u = NULL; + PyObject* ascii_chars_b = NULL; + const char* default_encoding_c; + sys = PyImport_ImportModule("sys"); + if (!sys) goto bad; + default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL); + Py_DECREF(sys); + if (!default_encoding) goto bad; + default_encoding_c = PyBytes_AsString(default_encoding); + if (!default_encoding_c) goto bad; + if (strcmp(default_encoding_c, "ascii") == 0) { + __Pyx_sys_getdefaultencoding_not_ascii = 0; + } else { + char ascii_chars[128]; + int c; + for (c = 0; c < 128; c++) { + ascii_chars[c] = c; + } + __Pyx_sys_getdefaultencoding_not_ascii = 1; + ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL); + if (!ascii_chars_u) goto bad; + ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL); + if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) { + PyErr_Format( + PyExc_ValueError, + "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.", + default_encoding_c); + goto bad; + } + Py_DECREF(ascii_chars_u); + Py_DECREF(ascii_chars_b); + } + Py_DECREF(default_encoding); + return 0; +bad: + Py_XDECREF(default_encoding); + Py_XDECREF(ascii_chars_u); + Py_XDECREF(ascii_chars_b); + return -1; +} +#endif +#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3 +#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL) +#else +#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL) +#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT +static char* __PYX_DEFAULT_STRING_ENCODING; +static int __Pyx_init_sys_getdefaultencoding_params(void) { + PyObject* sys; + PyObject* default_encoding = NULL; + char* default_encoding_c; + sys = PyImport_ImportModule("sys"); + if (!sys) goto bad; + default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); + Py_DECREF(sys); + if (!default_encoding) goto bad; + default_encoding_c = PyBytes_AsString(default_encoding); + if (!default_encoding_c) goto bad; + __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1); + if (!__PYX_DEFAULT_STRING_ENCODING) goto bad; + strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c); + Py_DECREF(default_encoding); + return 0; +bad: + Py_XDECREF(default_encoding); + return -1; +} +#endif +#endif + + +/* Test for GCC > 2.95 */ +#if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))) + #define likely(x) __builtin_expect(!!(x), 1) + #define unlikely(x) __builtin_expect(!!(x), 0) +#else /* !__GNUC__ or GCC < 2.95 */ + #define likely(x) (x) + #define unlikely(x) (x) +#endif /* __GNUC__ */ 
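
The `likely()`/`unlikely()` macros defined just above wrap GCC's `__builtin_expect` so the compiler can lay out code in favour of the predicted branch; on compilers without that builtin they reduce to the bare condition. Below is a minimal, self-contained sketch of how such hints are typically used — it assumes a GCC-compatible compiler, and the `parse_positive` helper is purely illustrative, not part of the generated module.

```c
/* Illustrative only: the __builtin_expect-based hint pattern mirrored from
   the likely()/unlikely() definitions above. Not part of the generated bbox
   module. */
#include <stdio.h>

#if defined(__GNUC__)
  #define likely(x)   __builtin_expect(!!(x), 1)
  #define unlikely(x) __builtin_expect(!!(x), 0)
#else
  #define likely(x)   (x)
  #define unlikely(x) (x)
#endif

/* Hypothetical helper: the error path is marked as the cold branch. */
static int parse_positive(int value) {
    if (unlikely(value < 0)) {          /* predicted not taken */
        fprintf(stderr, "negative input\n");
        return -1;
    }
    return value * 2;                    /* hot path falls through */
}

int main(void) {
    printf("%d\n", parse_positive(21));  /* prints 42 */
    return 0;
}
```

The same pattern appears throughout the generated code via `unlikely(...)` guards around error checks, which keeps the common, successful path branch-free.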
+static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; } + +static PyObject *__pyx_m = NULL; +static PyObject *__pyx_d; +static PyObject *__pyx_b; +static PyObject *__pyx_cython_runtime = NULL; +static PyObject *__pyx_empty_tuple; +static PyObject *__pyx_empty_bytes; +static PyObject *__pyx_empty_unicode; +static int __pyx_lineno; +static int __pyx_clineno = 0; +static const char * __pyx_cfilenm= __FILE__; +static const char *__pyx_filename; + +/* Header.proto */ +#if !defined(CYTHON_CCOMPLEX) + #if defined(__cplusplus) + #define CYTHON_CCOMPLEX 1 + #elif defined(_Complex_I) + #define CYTHON_CCOMPLEX 1 + #else + #define CYTHON_CCOMPLEX 0 + #endif +#endif +#if CYTHON_CCOMPLEX + #ifdef __cplusplus + #include + #else + #include + #endif +#endif +#if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__) + #undef _Complex_I + #define _Complex_I 1.0fj +#endif + + +static const char *__pyx_f[] = { + "box_overlaps.pyx", + "__init__.pxd", + "type.pxd", +}; +/* BufferFormatStructs.proto */ +#define IS_UNSIGNED(type) (((type) -1) > 0) +struct __Pyx_StructField_; +#define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0) +typedef struct { + const char* name; + struct __Pyx_StructField_* fields; + size_t size; + size_t arraysize[8]; + int ndim; + char typegroup; + char is_unsigned; + int flags; +} __Pyx_TypeInfo; +typedef struct __Pyx_StructField_ { + __Pyx_TypeInfo* type; + const char* name; + size_t offset; +} __Pyx_StructField; +typedef struct { + __Pyx_StructField* field; + size_t parent_offset; +} __Pyx_BufFmt_StackElem; +typedef struct { + __Pyx_StructField root; + __Pyx_BufFmt_StackElem* head; + size_t fmt_offset; + size_t new_count, enc_count; + size_t struct_alignment; + int is_complex; + char enc_type; + char new_packmode; + char enc_packmode; + char is_valid_array; +} __Pyx_BufFmt_Context; + + +/* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":697 + * # in Cython to enable them only on the right systems. 
+ * + * ctypedef npy_int8 int8_t # <<<<<<<<<<<<<< + * ctypedef npy_int16 int16_t + * ctypedef npy_int32 int32_t + */ +typedef npy_int8 __pyx_t_5numpy_int8_t; + +/* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":698 + * + * ctypedef npy_int8 int8_t + * ctypedef npy_int16 int16_t # <<<<<<<<<<<<<< + * ctypedef npy_int32 int32_t + * ctypedef npy_int64 int64_t + */ +typedef npy_int16 __pyx_t_5numpy_int16_t; + +/* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":699 + * ctypedef npy_int8 int8_t + * ctypedef npy_int16 int16_t + * ctypedef npy_int32 int32_t # <<<<<<<<<<<<<< + * ctypedef npy_int64 int64_t + * #ctypedef npy_int96 int96_t + */ +typedef npy_int32 __pyx_t_5numpy_int32_t; + +/* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":700 + * ctypedef npy_int16 int16_t + * ctypedef npy_int32 int32_t + * ctypedef npy_int64 int64_t # <<<<<<<<<<<<<< + * #ctypedef npy_int96 int96_t + * #ctypedef npy_int128 int128_t + */ +typedef npy_int64 __pyx_t_5numpy_int64_t; + +/* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":704 + * #ctypedef npy_int128 int128_t + * + * ctypedef npy_uint8 uint8_t # <<<<<<<<<<<<<< + * ctypedef npy_uint16 uint16_t + * ctypedef npy_uint32 uint32_t + */ +typedef npy_uint8 __pyx_t_5numpy_uint8_t; + +/* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":705 + * + * ctypedef npy_uint8 uint8_t + * ctypedef npy_uint16 uint16_t # <<<<<<<<<<<<<< + * ctypedef npy_uint32 uint32_t + * ctypedef npy_uint64 uint64_t + */ +typedef npy_uint16 __pyx_t_5numpy_uint16_t; + +/* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":706 + * ctypedef npy_uint8 uint8_t + * ctypedef npy_uint16 uint16_t + * ctypedef npy_uint32 uint32_t # <<<<<<<<<<<<<< + * ctypedef npy_uint64 uint64_t + * #ctypedef npy_uint96 uint96_t + */ +typedef npy_uint32 __pyx_t_5numpy_uint32_t; + +/* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":707 + * ctypedef npy_uint16 uint16_t + * ctypedef npy_uint32 uint32_t + * ctypedef npy_uint64 uint64_t # <<<<<<<<<<<<<< + * #ctypedef npy_uint96 uint96_t + * #ctypedef npy_uint128 uint128_t + */ +typedef npy_uint64 __pyx_t_5numpy_uint64_t; + +/* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":711 + * #ctypedef npy_uint128 uint128_t + * + * ctypedef npy_float32 float32_t # <<<<<<<<<<<<<< + * ctypedef npy_float64 float64_t + * #ctypedef npy_float80 float80_t + */ +typedef npy_float32 __pyx_t_5numpy_float32_t; + +/* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":712 + * + * ctypedef npy_float32 float32_t + * ctypedef npy_float64 float64_t # <<<<<<<<<<<<<< + * #ctypedef npy_float80 float80_t + * #ctypedef npy_float128 float128_t + */ +typedef npy_float64 __pyx_t_5numpy_float64_t; + +/* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":721 + * # The int types are mapped a bit surprising -- + * # numpy.int corresponds to 'l' and numpy.long to 'q' + * ctypedef npy_long int_t # <<<<<<<<<<<<<< + * ctypedef npy_longlong long_t + * ctypedef npy_longlong longlong_t + */ +typedef npy_long __pyx_t_5numpy_int_t; + +/* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":722 + * # numpy.int corresponds to 'l' and numpy.long to 'q' + * ctypedef npy_long int_t + * ctypedef npy_longlong long_t # <<<<<<<<<<<<<< + * ctypedef npy_longlong longlong_t + * + */ +typedef npy_longlong __pyx_t_5numpy_long_t; + +/* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":723 + * ctypedef 
npy_long int_t + * ctypedef npy_longlong long_t + * ctypedef npy_longlong longlong_t # <<<<<<<<<<<<<< + * + * ctypedef npy_ulong uint_t + */ +typedef npy_longlong __pyx_t_5numpy_longlong_t; + +/* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":725 + * ctypedef npy_longlong longlong_t + * + * ctypedef npy_ulong uint_t # <<<<<<<<<<<<<< + * ctypedef npy_ulonglong ulong_t + * ctypedef npy_ulonglong ulonglong_t + */ +typedef npy_ulong __pyx_t_5numpy_uint_t; + +/* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":726 + * + * ctypedef npy_ulong uint_t + * ctypedef npy_ulonglong ulong_t # <<<<<<<<<<<<<< + * ctypedef npy_ulonglong ulonglong_t + * + */ +typedef npy_ulonglong __pyx_t_5numpy_ulong_t; + +/* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":727 + * ctypedef npy_ulong uint_t + * ctypedef npy_ulonglong ulong_t + * ctypedef npy_ulonglong ulonglong_t # <<<<<<<<<<<<<< + * + * ctypedef npy_intp intp_t + */ +typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t; + +/* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":729 + * ctypedef npy_ulonglong ulonglong_t + * + * ctypedef npy_intp intp_t # <<<<<<<<<<<<<< + * ctypedef npy_uintp uintp_t + * + */ +typedef npy_intp __pyx_t_5numpy_intp_t; + +/* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":730 + * + * ctypedef npy_intp intp_t + * ctypedef npy_uintp uintp_t # <<<<<<<<<<<<<< + * + * ctypedef npy_double float_t + */ +typedef npy_uintp __pyx_t_5numpy_uintp_t; + +/* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":732 + * ctypedef npy_uintp uintp_t + * + * ctypedef npy_double float_t # <<<<<<<<<<<<<< + * ctypedef npy_double double_t + * ctypedef npy_longdouble longdouble_t + */ +typedef npy_double __pyx_t_5numpy_float_t; + +/* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":733 + * + * ctypedef npy_double float_t + * ctypedef npy_double double_t # <<<<<<<<<<<<<< + * ctypedef npy_longdouble longdouble_t + * + */ +typedef npy_double __pyx_t_5numpy_double_t; + +/* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":734 + * ctypedef npy_double float_t + * ctypedef npy_double double_t + * ctypedef npy_longdouble longdouble_t # <<<<<<<<<<<<<< + * + * ctypedef npy_cfloat cfloat_t + */ +typedef npy_longdouble __pyx_t_5numpy_longdouble_t; + +/* "box_overlaps.pyx":13 + * + * DTYPE = np.float + * ctypedef np.float_t DTYPE_t # <<<<<<<<<<<<<< + * + * def bbox_overlaps( + */ +typedef __pyx_t_5numpy_float_t __pyx_t_4bbox_DTYPE_t; +/* Declarations.proto */ +#if CYTHON_CCOMPLEX + #ifdef __cplusplus + typedef ::std::complex< float > __pyx_t_float_complex; + #else + typedef float _Complex __pyx_t_float_complex; + #endif +#else + typedef struct { float real, imag; } __pyx_t_float_complex; +#endif +static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float); + +/* Declarations.proto */ +#if CYTHON_CCOMPLEX + #ifdef __cplusplus + typedef ::std::complex< double > __pyx_t_double_complex; + #else + typedef double _Complex __pyx_t_double_complex; + #endif +#else + typedef struct { double real, imag; } __pyx_t_double_complex; +#endif +static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double); + + +/*--- Type declarations ---*/ + +/* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":736 + * ctypedef npy_longdouble longdouble_t + * + * ctypedef npy_cfloat cfloat_t # <<<<<<<<<<<<<< + * ctypedef npy_cdouble cdouble_t + * ctypedef npy_clongdouble 
clongdouble_t + */ +typedef npy_cfloat __pyx_t_5numpy_cfloat_t; + +/* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":737 + * + * ctypedef npy_cfloat cfloat_t + * ctypedef npy_cdouble cdouble_t # <<<<<<<<<<<<<< + * ctypedef npy_clongdouble clongdouble_t + * + */ +typedef npy_cdouble __pyx_t_5numpy_cdouble_t; + +/* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":738 + * ctypedef npy_cfloat cfloat_t + * ctypedef npy_cdouble cdouble_t + * ctypedef npy_clongdouble clongdouble_t # <<<<<<<<<<<<<< + * + * ctypedef npy_cdouble complex_t + */ +typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t; + +/* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":740 + * ctypedef npy_clongdouble clongdouble_t + * + * ctypedef npy_cdouble complex_t # <<<<<<<<<<<<<< + * + * cdef inline object PyArray_MultiIterNew1(a): + */ +typedef npy_cdouble __pyx_t_5numpy_complex_t; + +/* --- Runtime support code (head) --- */ +/* Refnanny.proto */ +#ifndef CYTHON_REFNANNY + #define CYTHON_REFNANNY 0 +#endif +#if CYTHON_REFNANNY + typedef struct { + void (*INCREF)(void*, PyObject*, int); + void (*DECREF)(void*, PyObject*, int); + void (*GOTREF)(void*, PyObject*, int); + void (*GIVEREF)(void*, PyObject*, int); + void* (*SetupContext)(const char*, int, const char*); + void (*FinishContext)(void**); + } __Pyx_RefNannyAPIStruct; + static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; + static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); + #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; +#ifdef WITH_THREAD + #define __Pyx_RefNannySetupContext(name, acquire_gil)\ + if (acquire_gil) {\ + PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\ + __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ + PyGILState_Release(__pyx_gilstate_save);\ + } else {\ + __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ + } +#else + #define __Pyx_RefNannySetupContext(name, acquire_gil)\ + __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) +#endif + #define __Pyx_RefNannyFinishContext()\ + __Pyx_RefNanny->FinishContext(&__pyx_refnanny) + #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) + #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) + #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) + #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) + #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) + #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) + #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) + #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) +#else + #define __Pyx_RefNannyDeclarations + #define __Pyx_RefNannySetupContext(name, acquire_gil) + #define __Pyx_RefNannyFinishContext() + #define __Pyx_INCREF(r) Py_INCREF(r) + #define __Pyx_DECREF(r) Py_DECREF(r) + #define __Pyx_GOTREF(r) + #define __Pyx_GIVEREF(r) + #define __Pyx_XINCREF(r) Py_XINCREF(r) + #define __Pyx_XDECREF(r) Py_XDECREF(r) + #define __Pyx_XGOTREF(r) + #define __Pyx_XGIVEREF(r) +#endif +#define __Pyx_XDECREF_SET(r, v) do {\ + PyObject *tmp = (PyObject *) r;\ + r = v; __Pyx_XDECREF(tmp);\ + } while (0) +#define __Pyx_DECREF_SET(r, v) do {\ + PyObject *tmp = (PyObject *) r;\ + r = v; __Pyx_DECREF(tmp);\ + } while (0) +#define __Pyx_CLEAR(r) do { 
PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) +#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) + +/* PyObjectGetAttrStr.proto */ +#if CYTHON_USE_TYPE_SLOTS +static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name); +#else +#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) +#endif + +/* GetBuiltinName.proto */ +static PyObject *__Pyx_GetBuiltinName(PyObject *name); + +/* RaiseArgTupleInvalid.proto */ +static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, + Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); + +/* RaiseDoubleKeywords.proto */ +static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); + +/* ParseKeywords.proto */ +static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\ + PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\ + const char* function_name); + +/* ArgTypeTest.proto */ +#define __Pyx_ArgTypeTest(obj, type, none_allowed, name, exact)\ + ((likely((Py_TYPE(obj) == type) | (none_allowed && (obj == Py_None)))) ? 1 :\ + __Pyx__ArgTypeTest(obj, type, name, exact)) +static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact); + +/* IsLittleEndian.proto */ +static CYTHON_INLINE int __Pyx_Is_Little_Endian(void); + +/* BufferFormatCheck.proto */ +static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts); +static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, + __Pyx_BufFmt_StackElem* stack, + __Pyx_TypeInfo* type); + +/* BufferGetAndValidate.proto */ +#define __Pyx_GetBufferAndValidate(buf, obj, dtype, flags, nd, cast, stack)\ + ((obj == Py_None || obj == NULL) ?\ + (__Pyx_ZeroBuffer(buf), 0) :\ + __Pyx__GetBufferAndValidate(buf, obj, dtype, flags, nd, cast, stack)) +static int __Pyx__GetBufferAndValidate(Py_buffer* buf, PyObject* obj, + __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack); +static void __Pyx_ZeroBuffer(Py_buffer* buf); +static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info); +static Py_ssize_t __Pyx_minusones[] = { -1, -1, -1, -1, -1, -1, -1, -1 }; +static Py_ssize_t __Pyx_zeros[] = { 0, 0, 0, 0, 0, 0, 0, 0 }; + +/* PyDictVersioning.proto */ +#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS +#define __PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1) +#define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag) +#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\ + (version_var) = __PYX_GET_DICT_VERSION(dict);\ + (cache_var) = (value); +#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\ + static PY_UINT64_T __pyx_dict_version = 0;\ + static PyObject *__pyx_dict_cached_value = NULL;\ + if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\ + (VAR) = __pyx_dict_cached_value;\ + } else {\ + (VAR) = __pyx_dict_cached_value = (LOOKUP);\ + __pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\ + }\ +} +static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj); +static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj); +static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version); +#else +#define __PYX_GET_DICT_VERSION(dict) (0) +#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var) +#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = 
(LOOKUP); +#endif + +/* GetModuleGlobalName.proto */ +#if CYTHON_USE_DICT_VERSIONS +#define __Pyx_GetModuleGlobalName(var, name) {\ + static PY_UINT64_T __pyx_dict_version = 0;\ + static PyObject *__pyx_dict_cached_value = NULL;\ + (var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION(__pyx_d))) ?\ + (likely(__pyx_dict_cached_value) ? __Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) :\ + __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ +} +#define __Pyx_GetModuleGlobalNameUncached(var, name) {\ + PY_UINT64_T __pyx_dict_version;\ + PyObject *__pyx_dict_cached_value;\ + (var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ +} +static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value); +#else +#define __Pyx_GetModuleGlobalName(var, name) (var) = __Pyx__GetModuleGlobalName(name) +#define __Pyx_GetModuleGlobalNameUncached(var, name) (var) = __Pyx__GetModuleGlobalName(name) +static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name); +#endif + +/* PyObjectCall.proto */ +#if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); +#else +#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw) +#endif + +/* ExtTypeTest.proto */ +static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); + +/* BufferIndexError.proto */ +static void __Pyx_RaiseBufferIndexError(int axis); + +#define __Pyx_BufPtrStrided2d(type, buf, i0, s0, i1, s1) (type)((char*)buf + i0 * s0 + i1 * s1) +/* PyThreadStateGet.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate; +#define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current; +#define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type +#else +#define __Pyx_PyThreadState_declare +#define __Pyx_PyThreadState_assign +#define __Pyx_PyErr_Occurred() PyErr_Occurred() +#endif + +/* PyErrFetchRestore.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL) +#define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb) +#define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb) +#define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb) +#define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb) +static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); +static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); +#if CYTHON_COMPILING_IN_CPYTHON +#define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL)) +#else +#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) +#endif +#else +#define __Pyx_PyErr_Clear() PyErr_Clear() +#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) +#define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb) +#define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb) +#define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb) +#define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb) +#define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb) +#define 
__Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb) +#endif + +/* DictGetItem.proto */ +#if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY +static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key); +#define __Pyx_PyObject_Dict_GetItem(obj, name)\ + (likely(PyDict_CheckExact(obj)) ?\ + __Pyx_PyDict_GetItem(obj, name) : PyObject_GetItem(obj, name)) +#else +#define __Pyx_PyDict_GetItem(d, key) PyObject_GetItem(d, key) +#define __Pyx_PyObject_Dict_GetItem(obj, name) PyObject_GetItem(obj, name) +#endif + +/* RaiseTooManyValuesToUnpack.proto */ +static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); + +/* RaiseNeedMoreValuesToUnpack.proto */ +static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); + +/* RaiseNoneIterError.proto */ +static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); + +/* RaiseException.proto */ +static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); + +/* PyCFunctionFastCall.proto */ +#if CYTHON_FAST_PYCCALL +static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs); +#else +#define __Pyx_PyCFunction_FastCall(func, args, nargs) (assert(0), NULL) +#endif + +/* PyFunctionFastCall.proto */ +#if CYTHON_FAST_PYCALL +#define __Pyx_PyFunction_FastCall(func, args, nargs)\ + __Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL) +#if 1 || PY_VERSION_HEX < 0x030600B1 +static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs); +#else +#define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs) +#endif +#define __Pyx_BUILD_ASSERT_EXPR(cond)\ + (sizeof(char [1 - 2*!(cond)]) - 1) +#ifndef Py_MEMBER_SIZE +#define Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member) +#endif + static size_t __pyx_pyframe_localsplus_offset = 0; + #include "frameobject.h" + #define __Pxy_PyFrame_Initialize_Offsets()\ + ((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + Py_MEMBER_SIZE(PyFrameObject, f_localsplus)),\ + (void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - Py_MEMBER_SIZE(PyFrameObject, f_localsplus))) + #define __Pyx_PyFrame_GetLocalsplus(frame)\ + (assert(__pyx_pyframe_localsplus_offset), (PyObject **)(((char *)(frame)) + __pyx_pyframe_localsplus_offset)) +#endif + +/* PyObjectCallMethO.proto */ +#if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg); +#endif + +/* PyObjectCallOneArg.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg); + +/* GetTopmostException.proto */ +#if CYTHON_USE_EXC_INFO_STACK +static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate); +#endif + +/* SaveResetException.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb) +static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); +#define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb) +static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); +#else +#define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb) +#define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, 
value, tb) +#endif + +/* PyErrExceptionMatches.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err) +static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err); +#else +#define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err) +#endif + +/* GetException.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb) +static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); +#else +static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb); +#endif + +/* TypeImport.proto */ +#ifndef __PYX_HAVE_RT_ImportType_proto +#define __PYX_HAVE_RT_ImportType_proto +enum __Pyx_ImportType_CheckSize { + __Pyx_ImportType_CheckSize_Error = 0, + __Pyx_ImportType_CheckSize_Warn = 1, + __Pyx_ImportType_CheckSize_Ignore = 2 +}; +static PyTypeObject *__Pyx_ImportType(PyObject* module, const char *module_name, const char *class_name, size_t size, enum __Pyx_ImportType_CheckSize check_size); +#endif + +/* Import.proto */ +static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); + +/* CLineInTraceback.proto */ +#ifdef CYTHON_CLINE_IN_TRACEBACK +#define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? c_line : 0) +#else +static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line); +#endif + +/* CodeObjectCache.proto */ +typedef struct { + PyCodeObject* code_object; + int code_line; +} __Pyx_CodeObjectCacheEntry; +struct __Pyx_CodeObjectCache { + int count; + int max_count; + __Pyx_CodeObjectCacheEntry* entries; +}; +static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; +static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); +static PyCodeObject *__pyx_find_code_object(int code_line); +static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); + +/* AddTraceback.proto */ +static void __Pyx_AddTraceback(const char *funcname, int c_line, + int py_line, const char *filename); + +/* BufferStructDeclare.proto */ +typedef struct { + Py_ssize_t shape, strides, suboffsets; +} __Pyx_Buf_DimInfo; +typedef struct { + size_t refcount; + Py_buffer pybuffer; +} __Pyx_Buffer; +typedef struct { + __Pyx_Buffer *rcbuffer; + char *data; + __Pyx_Buf_DimInfo diminfo[8]; +} __Pyx_LocalBuf_ND; + +#if PY_MAJOR_VERSION < 3 + static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags); + static void __Pyx_ReleaseBuffer(Py_buffer *view); +#else + #define __Pyx_GetBuffer PyObject_GetBuffer + #define __Pyx_ReleaseBuffer PyBuffer_Release +#endif + + +/* CIntToPy.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyInt_From_unsigned_int(unsigned int value); + +/* RealImag.proto */ +#if CYTHON_CCOMPLEX + #ifdef __cplusplus + #define __Pyx_CREAL(z) ((z).real()) + #define __Pyx_CIMAG(z) ((z).imag()) + #else + #define __Pyx_CREAL(z) (__real__(z)) + #define __Pyx_CIMAG(z) (__imag__(z)) + #endif +#else + #define __Pyx_CREAL(z) ((z).real) + #define __Pyx_CIMAG(z) ((z).imag) +#endif +#if defined(__cplusplus) && CYTHON_CCOMPLEX\ + && (defined(_WIN32) || defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 5 || __GNUC__ == 4 && __GNUC_MINOR__ >= 4 )) || __cplusplus >= 201103) + #define __Pyx_SET_CREAL(z,x) ((z).real(x)) + #define __Pyx_SET_CIMAG(z,y) ((z).imag(y)) +#else + #define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x) + #define 
__Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y) +#endif + +/* Arithmetic.proto */ +#if CYTHON_CCOMPLEX + #define __Pyx_c_eq_float(a, b) ((a)==(b)) + #define __Pyx_c_sum_float(a, b) ((a)+(b)) + #define __Pyx_c_diff_float(a, b) ((a)-(b)) + #define __Pyx_c_prod_float(a, b) ((a)*(b)) + #define __Pyx_c_quot_float(a, b) ((a)/(b)) + #define __Pyx_c_neg_float(a) (-(a)) + #ifdef __cplusplus + #define __Pyx_c_is_zero_float(z) ((z)==(float)0) + #define __Pyx_c_conj_float(z) (::std::conj(z)) + #if 1 + #define __Pyx_c_abs_float(z) (::std::abs(z)) + #define __Pyx_c_pow_float(a, b) (::std::pow(a, b)) + #endif + #else + #define __Pyx_c_is_zero_float(z) ((z)==0) + #define __Pyx_c_conj_float(z) (conjf(z)) + #if 1 + #define __Pyx_c_abs_float(z) (cabsf(z)) + #define __Pyx_c_pow_float(a, b) (cpowf(a, b)) + #endif + #endif +#else + static CYTHON_INLINE int __Pyx_c_eq_float(__pyx_t_float_complex, __pyx_t_float_complex); + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sum_float(__pyx_t_float_complex, __pyx_t_float_complex); + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_diff_float(__pyx_t_float_complex, __pyx_t_float_complex); + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prod_float(__pyx_t_float_complex, __pyx_t_float_complex); + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex, __pyx_t_float_complex); + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_neg_float(__pyx_t_float_complex); + static CYTHON_INLINE int __Pyx_c_is_zero_float(__pyx_t_float_complex); + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conj_float(__pyx_t_float_complex); + #if 1 + static CYTHON_INLINE float __Pyx_c_abs_float(__pyx_t_float_complex); + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_pow_float(__pyx_t_float_complex, __pyx_t_float_complex); + #endif +#endif + +/* Arithmetic.proto */ +#if CYTHON_CCOMPLEX + #define __Pyx_c_eq_double(a, b) ((a)==(b)) + #define __Pyx_c_sum_double(a, b) ((a)+(b)) + #define __Pyx_c_diff_double(a, b) ((a)-(b)) + #define __Pyx_c_prod_double(a, b) ((a)*(b)) + #define __Pyx_c_quot_double(a, b) ((a)/(b)) + #define __Pyx_c_neg_double(a) (-(a)) + #ifdef __cplusplus + #define __Pyx_c_is_zero_double(z) ((z)==(double)0) + #define __Pyx_c_conj_double(z) (::std::conj(z)) + #if 1 + #define __Pyx_c_abs_double(z) (::std::abs(z)) + #define __Pyx_c_pow_double(a, b) (::std::pow(a, b)) + #endif + #else + #define __Pyx_c_is_zero_double(z) ((z)==0) + #define __Pyx_c_conj_double(z) (conj(z)) + #if 1 + #define __Pyx_c_abs_double(z) (cabs(z)) + #define __Pyx_c_pow_double(a, b) (cpow(a, b)) + #endif + #endif +#else + static CYTHON_INLINE int __Pyx_c_eq_double(__pyx_t_double_complex, __pyx_t_double_complex); + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum_double(__pyx_t_double_complex, __pyx_t_double_complex); + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff_double(__pyx_t_double_complex, __pyx_t_double_complex); + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod_double(__pyx_t_double_complex, __pyx_t_double_complex); + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex, __pyx_t_double_complex); + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg_double(__pyx_t_double_complex); + static CYTHON_INLINE int __Pyx_c_is_zero_double(__pyx_t_double_complex); + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj_double(__pyx_t_double_complex); + #if 1 + static CYTHON_INLINE double __Pyx_c_abs_double(__pyx_t_double_complex); + static CYTHON_INLINE __pyx_t_double_complex 
__Pyx_c_pow_double(__pyx_t_double_complex, __pyx_t_double_complex); + #endif +#endif + +/* CIntToPy.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value); + +/* CIntToPy.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyInt_From_enum__NPY_TYPES(enum NPY_TYPES value); + +/* CIntFromPy.proto */ +static CYTHON_INLINE unsigned int __Pyx_PyInt_As_unsigned_int(PyObject *); + +/* CIntFromPy.proto */ +static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *); + +/* CIntToPy.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value); + +/* CIntFromPy.proto */ +static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); + +/* FastTypeChecks.proto */ +#if CYTHON_COMPILING_IN_CPYTHON +#define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type) +static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b); +static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type); +static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2); +#else +#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) +#define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type) +#define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2)) +#endif +#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception) + +/* CheckBinaryVersion.proto */ +static int __Pyx_check_binary_version(void); + +/* InitStrings.proto */ +static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); + + +/* Module declarations from 'cython' */ + +/* Module declarations from 'cpython.buffer' */ + +/* Module declarations from 'libc.string' */ + +/* Module declarations from 'libc.stdio' */ + +/* Module declarations from '__builtin__' */ + +/* Module declarations from 'cpython.type' */ +static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0; + +/* Module declarations from 'cpython' */ + +/* Module declarations from 'cpython.object' */ + +/* Module declarations from 'cpython.ref' */ + +/* Module declarations from 'cpython.mem' */ + +/* Module declarations from 'numpy' */ + +/* Module declarations from 'numpy' */ +static PyTypeObject *__pyx_ptype_5numpy_dtype = 0; +static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0; +static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0; +static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0; +static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0; +static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *, char *, char *, int *); /*proto*/ + +/* Module declarations from 'bbox' */ +static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_4bbox_DTYPE_t = { "DTYPE_t", NULL, sizeof(__pyx_t_4bbox_DTYPE_t), { 0 }, 0, 'R', 0, 0 }; +#define __Pyx_MODULE_NAME "bbox" +extern int __pyx_module_is_main_bbox; +int __pyx_module_is_main_bbox = 0; + +/* Implementation of 'bbox' */ +static PyObject *__pyx_builtin_range; +static PyObject *__pyx_builtin_RuntimeError; +static PyObject *__pyx_builtin_ValueError; +static PyObject *__pyx_builtin_ImportError; +static const char __pyx_k_K[] = "K"; +static const char __pyx_k_N[] = "N"; +static const char __pyx_k_k[] = "k"; +static const char __pyx_k_n[] = "n"; +static const char __pyx_k_ih[] = "ih"; +static const char __pyx_k_iw[] = "iw"; +static const char __pyx_k_np[] = "np"; +static const char __pyx_k_ua[] = "ua"; +static const char __pyx_k_bbox[] = "bbox"; +static const char __pyx_k_main[] = 
"__main__"; +static const char __pyx_k_name[] = "__name__"; +static const char __pyx_k_test[] = "__test__"; +static const char __pyx_k_DTYPE[] = "DTYPE"; +static const char __pyx_k_boxes[] = "boxes"; +static const char __pyx_k_dtype[] = "dtype"; +static const char __pyx_k_float[] = "float"; +static const char __pyx_k_numpy[] = "numpy"; +static const char __pyx_k_range[] = "range"; +static const char __pyx_k_zeros[] = "zeros"; +static const char __pyx_k_import[] = "__import__"; +static const char __pyx_k_box_area[] = "box_area"; +static const char __pyx_k_overlaps[] = "overlaps"; +static const char __pyx_k_ValueError[] = "ValueError"; +static const char __pyx_k_ImportError[] = "ImportError"; +static const char __pyx_k_query_boxes[] = "query_boxes"; +static const char __pyx_k_RuntimeError[] = "RuntimeError"; +static const char __pyx_k_bbox_overlaps[] = "bbox_overlaps"; +static const char __pyx_k_box_overlaps_pyx[] = "box_overlaps.pyx"; +static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback"; +static const char __pyx_k_numpy_core_multiarray_failed_to[] = "numpy.core.multiarray failed to import"; +static const char __pyx_k_unknown_dtype_code_in_numpy_pxd[] = "unknown dtype code in numpy.pxd (%d)"; +static const char __pyx_k_Format_string_allocated_too_shor[] = "Format string allocated too short, see comment in numpy.pxd"; +static const char __pyx_k_Non_native_byte_order_not_suppor[] = "Non-native byte order not supported"; +static const char __pyx_k_numpy_core_umath_failed_to_impor[] = "numpy.core.umath failed to import"; +static const char __pyx_k_Format_string_allocated_too_shor_2[] = "Format string allocated too short."; +static PyObject *__pyx_n_s_DTYPE; +static PyObject *__pyx_kp_u_Format_string_allocated_too_shor; +static PyObject *__pyx_kp_u_Format_string_allocated_too_shor_2; +static PyObject *__pyx_n_s_ImportError; +static PyObject *__pyx_n_s_K; +static PyObject *__pyx_n_s_N; +static PyObject *__pyx_kp_u_Non_native_byte_order_not_suppor; +static PyObject *__pyx_n_s_RuntimeError; +static PyObject *__pyx_n_s_ValueError; +static PyObject *__pyx_n_s_bbox; +static PyObject *__pyx_n_s_bbox_overlaps; +static PyObject *__pyx_n_s_box_area; +static PyObject *__pyx_kp_s_box_overlaps_pyx; +static PyObject *__pyx_n_s_boxes; +static PyObject *__pyx_n_s_cline_in_traceback; +static PyObject *__pyx_n_s_dtype; +static PyObject *__pyx_n_s_float; +static PyObject *__pyx_n_s_ih; +static PyObject *__pyx_n_s_import; +static PyObject *__pyx_n_s_iw; +static PyObject *__pyx_n_s_k; +static PyObject *__pyx_n_s_main; +static PyObject *__pyx_n_s_n; +static PyObject *__pyx_n_s_name; +static PyObject *__pyx_n_s_np; +static PyObject *__pyx_n_s_numpy; +static PyObject *__pyx_kp_s_numpy_core_multiarray_failed_to; +static PyObject *__pyx_kp_s_numpy_core_umath_failed_to_impor; +static PyObject *__pyx_n_s_overlaps; +static PyObject *__pyx_n_s_query_boxes; +static PyObject *__pyx_n_s_range; +static PyObject *__pyx_n_s_test; +static PyObject *__pyx_n_s_ua; +static PyObject *__pyx_kp_u_unknown_dtype_code_in_numpy_pxd; +static PyObject *__pyx_n_s_zeros; +static PyObject *__pyx_pf_4bbox_bbox_overlaps(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_boxes, PyArrayObject *__pyx_v_query_boxes); /* proto */ +static PyObject *__pyx_tuple_; +static PyObject *__pyx_tuple__2; +static PyObject *__pyx_tuple__3; +static PyObject *__pyx_tuple__4; +static PyObject *__pyx_tuple__5; +static PyObject *__pyx_tuple__6; +static PyObject *__pyx_codeobj__7; +/* Late includes */ + +/* "box_overlaps.pyx":15 + * ctypedef 
np.float_t DTYPE_t + * + * def bbox_overlaps( # <<<<<<<<<<<<<< + * np.ndarray[DTYPE_t, ndim=2] boxes, + * np.ndarray[DTYPE_t, ndim=2] query_boxes): + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_4bbox_1bbox_overlaps(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static char __pyx_doc_4bbox_bbox_overlaps[] = "\n Parameters\n ----------\n boxes: (N, 4) ndarray of float\n query_boxes: (K, 4) ndarray of float\n Returns\n -------\n overlaps: (N, K) ndarray of overlap between boxes and query_boxes\n "; +static PyMethodDef __pyx_mdef_4bbox_1bbox_overlaps = {"bbox_overlaps", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_4bbox_1bbox_overlaps, METH_VARARGS|METH_KEYWORDS, __pyx_doc_4bbox_bbox_overlaps}; +static PyObject *__pyx_pw_4bbox_1bbox_overlaps(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + PyArrayObject *__pyx_v_boxes = 0; + PyArrayObject *__pyx_v_query_boxes = 0; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("bbox_overlaps (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_boxes,&__pyx_n_s_query_boxes,0}; + PyObject* values[2] = {0,0}; + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_boxes)) != 0)) kw_args--; + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_query_boxes)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("bbox_overlaps", 1, 2, 2, 1); __PYX_ERR(0, 15, __pyx_L3_error) + } + } + if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "bbox_overlaps") < 0)) __PYX_ERR(0, 15, __pyx_L3_error) + } + } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + } + __pyx_v_boxes = ((PyArrayObject *)values[0]); + __pyx_v_query_boxes = ((PyArrayObject *)values[1]); + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("bbox_overlaps", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 15, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("bbox.bbox_overlaps", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_boxes), __pyx_ptype_5numpy_ndarray, 1, "boxes", 0))) __PYX_ERR(0, 16, __pyx_L1_error) + if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_query_boxes), __pyx_ptype_5numpy_ndarray, 1, "query_boxes", 0))) __PYX_ERR(0, 17, __pyx_L1_error) + __pyx_r = __pyx_pf_4bbox_bbox_overlaps(__pyx_self, __pyx_v_boxes, __pyx_v_query_boxes); + + /* function exit code */ + goto __pyx_L0; + __pyx_L1_error:; + __pyx_r = NULL; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_4bbox_bbox_overlaps(CYTHON_UNUSED PyObject *__pyx_self, 
PyArrayObject *__pyx_v_boxes, PyArrayObject *__pyx_v_query_boxes) { + unsigned int __pyx_v_N; + unsigned int __pyx_v_K; + PyArrayObject *__pyx_v_overlaps = 0; + __pyx_t_4bbox_DTYPE_t __pyx_v_iw; + __pyx_t_4bbox_DTYPE_t __pyx_v_ih; + __pyx_t_4bbox_DTYPE_t __pyx_v_box_area; + __pyx_t_4bbox_DTYPE_t __pyx_v_ua; + unsigned int __pyx_v_k; + unsigned int __pyx_v_n; + __Pyx_LocalBuf_ND __pyx_pybuffernd_boxes; + __Pyx_Buffer __pyx_pybuffer_boxes; + __Pyx_LocalBuf_ND __pyx_pybuffernd_overlaps; + __Pyx_Buffer __pyx_pybuffer_overlaps; + __Pyx_LocalBuf_ND __pyx_pybuffernd_query_boxes; + __Pyx_Buffer __pyx_pybuffer_query_boxes; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + PyArrayObject *__pyx_t_5 = NULL; + unsigned int __pyx_t_6; + unsigned int __pyx_t_7; + unsigned int __pyx_t_8; + size_t __pyx_t_9; + Py_ssize_t __pyx_t_10; + int __pyx_t_11; + size_t __pyx_t_12; + Py_ssize_t __pyx_t_13; + size_t __pyx_t_14; + Py_ssize_t __pyx_t_15; + size_t __pyx_t_16; + Py_ssize_t __pyx_t_17; + unsigned int __pyx_t_18; + unsigned int __pyx_t_19; + unsigned int __pyx_t_20; + __pyx_t_4bbox_DTYPE_t __pyx_t_21; + __pyx_t_4bbox_DTYPE_t __pyx_t_22; + __pyx_t_4bbox_DTYPE_t __pyx_t_23; + __pyx_t_4bbox_DTYPE_t __pyx_t_24; + int __pyx_t_25; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("bbox_overlaps", 0); + __pyx_pybuffer_overlaps.pybuffer.buf = NULL; + __pyx_pybuffer_overlaps.refcount = 0; + __pyx_pybuffernd_overlaps.data = NULL; + __pyx_pybuffernd_overlaps.rcbuffer = &__pyx_pybuffer_overlaps; + __pyx_pybuffer_boxes.pybuffer.buf = NULL; + __pyx_pybuffer_boxes.refcount = 0; + __pyx_pybuffernd_boxes.data = NULL; + __pyx_pybuffernd_boxes.rcbuffer = &__pyx_pybuffer_boxes; + __pyx_pybuffer_query_boxes.pybuffer.buf = NULL; + __pyx_pybuffer_query_boxes.refcount = 0; + __pyx_pybuffernd_query_boxes.data = NULL; + __pyx_pybuffernd_query_boxes.rcbuffer = &__pyx_pybuffer_query_boxes; + { + __Pyx_BufFmt_StackElem __pyx_stack[1]; + if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_boxes.rcbuffer->pybuffer, (PyObject*)__pyx_v_boxes, &__Pyx_TypeInfo_nn___pyx_t_4bbox_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) __PYX_ERR(0, 15, __pyx_L1_error) + } + __pyx_pybuffernd_boxes.diminfo[0].strides = __pyx_pybuffernd_boxes.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_boxes.diminfo[0].shape = __pyx_pybuffernd_boxes.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_boxes.diminfo[1].strides = __pyx_pybuffernd_boxes.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_boxes.diminfo[1].shape = __pyx_pybuffernd_boxes.rcbuffer->pybuffer.shape[1]; + { + __Pyx_BufFmt_StackElem __pyx_stack[1]; + if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_query_boxes.rcbuffer->pybuffer, (PyObject*)__pyx_v_query_boxes, &__Pyx_TypeInfo_nn___pyx_t_4bbox_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) __PYX_ERR(0, 15, __pyx_L1_error) + } + __pyx_pybuffernd_query_boxes.diminfo[0].strides = __pyx_pybuffernd_query_boxes.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_query_boxes.diminfo[0].shape = __pyx_pybuffernd_query_boxes.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_query_boxes.diminfo[1].strides = __pyx_pybuffernd_query_boxes.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_query_boxes.diminfo[1].shape = __pyx_pybuffernd_query_boxes.rcbuffer->pybuffer.shape[1]; + + /* "box_overlaps.pyx":27 + * overlaps: (N, K) ndarray of 
overlap between boxes and query_boxes + * """ + * cdef unsigned int N = boxes.shape[0] # <<<<<<<<<<<<<< + * cdef unsigned int K = query_boxes.shape[0] + * cdef np.ndarray[DTYPE_t, ndim=2] overlaps = np.zeros((N, K), dtype=DTYPE) + */ + __pyx_v_N = (__pyx_v_boxes->dimensions[0]); + + /* "box_overlaps.pyx":28 + * """ + * cdef unsigned int N = boxes.shape[0] + * cdef unsigned int K = query_boxes.shape[0] # <<<<<<<<<<<<<< + * cdef np.ndarray[DTYPE_t, ndim=2] overlaps = np.zeros((N, K), dtype=DTYPE) + * cdef DTYPE_t iw, ih, box_area + */ + __pyx_v_K = (__pyx_v_query_boxes->dimensions[0]); + + /* "box_overlaps.pyx":29 + * cdef unsigned int N = boxes.shape[0] + * cdef unsigned int K = query_boxes.shape[0] + * cdef np.ndarray[DTYPE_t, ndim=2] overlaps = np.zeros((N, K), dtype=DTYPE) # <<<<<<<<<<<<<< + * cdef DTYPE_t iw, ih, box_area + * cdef DTYPE_t ua + */ + __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 29, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_zeros); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 29, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_1 = __Pyx_PyInt_From_unsigned_int(__pyx_v_N); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 29, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_3 = __Pyx_PyInt_From_unsigned_int(__pyx_v_K); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 29, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 29, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_GIVEREF(__pyx_t_1); + PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1); + __Pyx_GIVEREF(__pyx_t_3); + PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_3); + __pyx_t_1 = 0; + __pyx_t_3 = 0; + __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 29, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_GIVEREF(__pyx_t_4); + PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4); + __pyx_t_4 = 0; + __pyx_t_4 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 29, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_DTYPE); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 29, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_dtype, __pyx_t_1) < 0) __PYX_ERR(0, 29, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 29, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 29, __pyx_L1_error) + __pyx_t_5 = ((PyArrayObject *)__pyx_t_1); + { + __Pyx_BufFmt_StackElem __pyx_stack[1]; + if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_overlaps.rcbuffer->pybuffer, (PyObject*)__pyx_t_5, &__Pyx_TypeInfo_nn___pyx_t_4bbox_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 2, 0, __pyx_stack) == -1)) { + __pyx_v_overlaps = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_overlaps.rcbuffer->pybuffer.buf = NULL; + __PYX_ERR(0, 29, __pyx_L1_error) + } else {__pyx_pybuffernd_overlaps.diminfo[0].strides = __pyx_pybuffernd_overlaps.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_overlaps.diminfo[0].shape = __pyx_pybuffernd_overlaps.rcbuffer->pybuffer.shape[0]; 
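+  /* Descriptive note (not part of the Cython-generated output): the generated loops that
+   * follow correspond to the bbox_overlaps() body quoted from box_overlaps.pyx in the
+   * surrounding comments. For each query box k,
+   *   box_area = (x2_k - x1_k + 1) * (y2_k - y1_k + 1)
+   * and, for each box n, the intersection extents
+   *   iw = min(x2_n, x2_k) - max(x1_n, x1_k) + 1
+   *   ih = min(y2_n, y2_k) - max(y1_n, y1_k) + 1
+   * are computed; when both are positive, the IoU
+   *   overlaps[n, k] = iw * ih / ((x2_n - x1_n + 1) * (y2_n - y1_n + 1) + box_area - iw * ih)
+   * is written into the (N, K) overlaps array allocated above with np.zeros. */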
__pyx_pybuffernd_overlaps.diminfo[1].strides = __pyx_pybuffernd_overlaps.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_overlaps.diminfo[1].shape = __pyx_pybuffernd_overlaps.rcbuffer->pybuffer.shape[1]; + } + } + __pyx_t_5 = 0; + __pyx_v_overlaps = ((PyArrayObject *)__pyx_t_1); + __pyx_t_1 = 0; + + /* "box_overlaps.pyx":33 + * cdef DTYPE_t ua + * cdef unsigned int k, n + * for k in range(K): # <<<<<<<<<<<<<< + * box_area = ( + * (query_boxes[k, 2] - query_boxes[k, 0] + 1) * + */ + __pyx_t_6 = __pyx_v_K; + __pyx_t_7 = __pyx_t_6; + for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_7; __pyx_t_8+=1) { + __pyx_v_k = __pyx_t_8; + + /* "box_overlaps.pyx":35 + * for k in range(K): + * box_area = ( + * (query_boxes[k, 2] - query_boxes[k, 0] + 1) * # <<<<<<<<<<<<<< + * (query_boxes[k, 3] - query_boxes[k, 1] + 1) + * ) + */ + __pyx_t_9 = __pyx_v_k; + __pyx_t_10 = 2; + __pyx_t_11 = -1; + if (unlikely(__pyx_t_9 >= (size_t)__pyx_pybuffernd_query_boxes.diminfo[0].shape)) __pyx_t_11 = 0; + if (__pyx_t_10 < 0) { + __pyx_t_10 += __pyx_pybuffernd_query_boxes.diminfo[1].shape; + if (unlikely(__pyx_t_10 < 0)) __pyx_t_11 = 1; + } else if (unlikely(__pyx_t_10 >= __pyx_pybuffernd_query_boxes.diminfo[1].shape)) __pyx_t_11 = 1; + if (unlikely(__pyx_t_11 != -1)) { + __Pyx_RaiseBufferIndexError(__pyx_t_11); + __PYX_ERR(0, 35, __pyx_L1_error) + } + __pyx_t_12 = __pyx_v_k; + __pyx_t_13 = 0; + __pyx_t_11 = -1; + if (unlikely(__pyx_t_12 >= (size_t)__pyx_pybuffernd_query_boxes.diminfo[0].shape)) __pyx_t_11 = 0; + if (__pyx_t_13 < 0) { + __pyx_t_13 += __pyx_pybuffernd_query_boxes.diminfo[1].shape; + if (unlikely(__pyx_t_13 < 0)) __pyx_t_11 = 1; + } else if (unlikely(__pyx_t_13 >= __pyx_pybuffernd_query_boxes.diminfo[1].shape)) __pyx_t_11 = 1; + if (unlikely(__pyx_t_11 != -1)) { + __Pyx_RaiseBufferIndexError(__pyx_t_11); + __PYX_ERR(0, 35, __pyx_L1_error) + } + + /* "box_overlaps.pyx":36 + * box_area = ( + * (query_boxes[k, 2] - query_boxes[k, 0] + 1) * + * (query_boxes[k, 3] - query_boxes[k, 1] + 1) # <<<<<<<<<<<<<< + * ) + * for n in range(N): + */ + __pyx_t_14 = __pyx_v_k; + __pyx_t_15 = 3; + __pyx_t_11 = -1; + if (unlikely(__pyx_t_14 >= (size_t)__pyx_pybuffernd_query_boxes.diminfo[0].shape)) __pyx_t_11 = 0; + if (__pyx_t_15 < 0) { + __pyx_t_15 += __pyx_pybuffernd_query_boxes.diminfo[1].shape; + if (unlikely(__pyx_t_15 < 0)) __pyx_t_11 = 1; + } else if (unlikely(__pyx_t_15 >= __pyx_pybuffernd_query_boxes.diminfo[1].shape)) __pyx_t_11 = 1; + if (unlikely(__pyx_t_11 != -1)) { + __Pyx_RaiseBufferIndexError(__pyx_t_11); + __PYX_ERR(0, 36, __pyx_L1_error) + } + __pyx_t_16 = __pyx_v_k; + __pyx_t_17 = 1; + __pyx_t_11 = -1; + if (unlikely(__pyx_t_16 >= (size_t)__pyx_pybuffernd_query_boxes.diminfo[0].shape)) __pyx_t_11 = 0; + if (__pyx_t_17 < 0) { + __pyx_t_17 += __pyx_pybuffernd_query_boxes.diminfo[1].shape; + if (unlikely(__pyx_t_17 < 0)) __pyx_t_11 = 1; + } else if (unlikely(__pyx_t_17 >= __pyx_pybuffernd_query_boxes.diminfo[1].shape)) __pyx_t_11 = 1; + if (unlikely(__pyx_t_11 != -1)) { + __Pyx_RaiseBufferIndexError(__pyx_t_11); + __PYX_ERR(0, 36, __pyx_L1_error) + } + + /* "box_overlaps.pyx":35 + * for k in range(K): + * box_area = ( + * (query_boxes[k, 2] - query_boxes[k, 0] + 1) * # <<<<<<<<<<<<<< + * (query_boxes[k, 3] - query_boxes[k, 1] + 1) + * ) + */ + __pyx_v_box_area = ((((*__Pyx_BufPtrStrided2d(__pyx_t_4bbox_DTYPE_t *, __pyx_pybuffernd_query_boxes.rcbuffer->pybuffer.buf, __pyx_t_9, __pyx_pybuffernd_query_boxes.diminfo[0].strides, __pyx_t_10, __pyx_pybuffernd_query_boxes.diminfo[1].strides)) - 
(*__Pyx_BufPtrStrided2d(__pyx_t_4bbox_DTYPE_t *, __pyx_pybuffernd_query_boxes.rcbuffer->pybuffer.buf, __pyx_t_12, __pyx_pybuffernd_query_boxes.diminfo[0].strides, __pyx_t_13, __pyx_pybuffernd_query_boxes.diminfo[1].strides))) + 1.0) * (((*__Pyx_BufPtrStrided2d(__pyx_t_4bbox_DTYPE_t *, __pyx_pybuffernd_query_boxes.rcbuffer->pybuffer.buf, __pyx_t_14, __pyx_pybuffernd_query_boxes.diminfo[0].strides, __pyx_t_15, __pyx_pybuffernd_query_boxes.diminfo[1].strides)) - (*__Pyx_BufPtrStrided2d(__pyx_t_4bbox_DTYPE_t *, __pyx_pybuffernd_query_boxes.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_query_boxes.diminfo[0].strides, __pyx_t_17, __pyx_pybuffernd_query_boxes.diminfo[1].strides))) + 1.0)); + + /* "box_overlaps.pyx":38 + * (query_boxes[k, 3] - query_boxes[k, 1] + 1) + * ) + * for n in range(N): # <<<<<<<<<<<<<< + * iw = ( + * min(boxes[n, 2], query_boxes[k, 2]) - + */ + __pyx_t_18 = __pyx_v_N; + __pyx_t_19 = __pyx_t_18; + for (__pyx_t_20 = 0; __pyx_t_20 < __pyx_t_19; __pyx_t_20+=1) { + __pyx_v_n = __pyx_t_20; + + /* "box_overlaps.pyx":40 + * for n in range(N): + * iw = ( + * min(boxes[n, 2], query_boxes[k, 2]) - # <<<<<<<<<<<<<< + * max(boxes[n, 0], query_boxes[k, 0]) + 1 + * ) + */ + __pyx_t_16 = __pyx_v_k; + __pyx_t_17 = 2; + __pyx_t_11 = -1; + if (unlikely(__pyx_t_16 >= (size_t)__pyx_pybuffernd_query_boxes.diminfo[0].shape)) __pyx_t_11 = 0; + if (__pyx_t_17 < 0) { + __pyx_t_17 += __pyx_pybuffernd_query_boxes.diminfo[1].shape; + if (unlikely(__pyx_t_17 < 0)) __pyx_t_11 = 1; + } else if (unlikely(__pyx_t_17 >= __pyx_pybuffernd_query_boxes.diminfo[1].shape)) __pyx_t_11 = 1; + if (unlikely(__pyx_t_11 != -1)) { + __Pyx_RaiseBufferIndexError(__pyx_t_11); + __PYX_ERR(0, 40, __pyx_L1_error) + } + __pyx_t_21 = (*__Pyx_BufPtrStrided2d(__pyx_t_4bbox_DTYPE_t *, __pyx_pybuffernd_query_boxes.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_query_boxes.diminfo[0].strides, __pyx_t_17, __pyx_pybuffernd_query_boxes.diminfo[1].strides)); + __pyx_t_16 = __pyx_v_n; + __pyx_t_17 = 2; + __pyx_t_11 = -1; + if (unlikely(__pyx_t_16 >= (size_t)__pyx_pybuffernd_boxes.diminfo[0].shape)) __pyx_t_11 = 0; + if (__pyx_t_17 < 0) { + __pyx_t_17 += __pyx_pybuffernd_boxes.diminfo[1].shape; + if (unlikely(__pyx_t_17 < 0)) __pyx_t_11 = 1; + } else if (unlikely(__pyx_t_17 >= __pyx_pybuffernd_boxes.diminfo[1].shape)) __pyx_t_11 = 1; + if (unlikely(__pyx_t_11 != -1)) { + __Pyx_RaiseBufferIndexError(__pyx_t_11); + __PYX_ERR(0, 40, __pyx_L1_error) + } + __pyx_t_22 = (*__Pyx_BufPtrStrided2d(__pyx_t_4bbox_DTYPE_t *, __pyx_pybuffernd_boxes.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_boxes.diminfo[0].strides, __pyx_t_17, __pyx_pybuffernd_boxes.diminfo[1].strides)); + if (((__pyx_t_21 < __pyx_t_22) != 0)) { + __pyx_t_23 = __pyx_t_21; + } else { + __pyx_t_23 = __pyx_t_22; + } + + /* "box_overlaps.pyx":41 + * iw = ( + * min(boxes[n, 2], query_boxes[k, 2]) - + * max(boxes[n, 0], query_boxes[k, 0]) + 1 # <<<<<<<<<<<<<< + * ) + * if iw > 0: + */ + __pyx_t_16 = __pyx_v_k; + __pyx_t_17 = 0; + __pyx_t_11 = -1; + if (unlikely(__pyx_t_16 >= (size_t)__pyx_pybuffernd_query_boxes.diminfo[0].shape)) __pyx_t_11 = 0; + if (__pyx_t_17 < 0) { + __pyx_t_17 += __pyx_pybuffernd_query_boxes.diminfo[1].shape; + if (unlikely(__pyx_t_17 < 0)) __pyx_t_11 = 1; + } else if (unlikely(__pyx_t_17 >= __pyx_pybuffernd_query_boxes.diminfo[1].shape)) __pyx_t_11 = 1; + if (unlikely(__pyx_t_11 != -1)) { + __Pyx_RaiseBufferIndexError(__pyx_t_11); + __PYX_ERR(0, 41, __pyx_L1_error) + } + __pyx_t_21 = (*__Pyx_BufPtrStrided2d(__pyx_t_4bbox_DTYPE_t *, 
__pyx_pybuffernd_query_boxes.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_query_boxes.diminfo[0].strides, __pyx_t_17, __pyx_pybuffernd_query_boxes.diminfo[1].strides)); + __pyx_t_16 = __pyx_v_n; + __pyx_t_17 = 0; + __pyx_t_11 = -1; + if (unlikely(__pyx_t_16 >= (size_t)__pyx_pybuffernd_boxes.diminfo[0].shape)) __pyx_t_11 = 0; + if (__pyx_t_17 < 0) { + __pyx_t_17 += __pyx_pybuffernd_boxes.diminfo[1].shape; + if (unlikely(__pyx_t_17 < 0)) __pyx_t_11 = 1; + } else if (unlikely(__pyx_t_17 >= __pyx_pybuffernd_boxes.diminfo[1].shape)) __pyx_t_11 = 1; + if (unlikely(__pyx_t_11 != -1)) { + __Pyx_RaiseBufferIndexError(__pyx_t_11); + __PYX_ERR(0, 41, __pyx_L1_error) + } + __pyx_t_22 = (*__Pyx_BufPtrStrided2d(__pyx_t_4bbox_DTYPE_t *, __pyx_pybuffernd_boxes.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_boxes.diminfo[0].strides, __pyx_t_17, __pyx_pybuffernd_boxes.diminfo[1].strides)); + if (((__pyx_t_21 > __pyx_t_22) != 0)) { + __pyx_t_24 = __pyx_t_21; + } else { + __pyx_t_24 = __pyx_t_22; + } + + /* "box_overlaps.pyx":40 + * for n in range(N): + * iw = ( + * min(boxes[n, 2], query_boxes[k, 2]) - # <<<<<<<<<<<<<< + * max(boxes[n, 0], query_boxes[k, 0]) + 1 + * ) + */ + __pyx_v_iw = ((__pyx_t_23 - __pyx_t_24) + 1.0); + + /* "box_overlaps.pyx":43 + * max(boxes[n, 0], query_boxes[k, 0]) + 1 + * ) + * if iw > 0: # <<<<<<<<<<<<<< + * ih = ( + * min(boxes[n, 3], query_boxes[k, 3]) - + */ + __pyx_t_25 = ((__pyx_v_iw > 0.0) != 0); + if (__pyx_t_25) { + + /* "box_overlaps.pyx":45 + * if iw > 0: + * ih = ( + * min(boxes[n, 3], query_boxes[k, 3]) - # <<<<<<<<<<<<<< + * max(boxes[n, 1], query_boxes[k, 1]) + 1 + * ) + */ + __pyx_t_16 = __pyx_v_k; + __pyx_t_17 = 3; + __pyx_t_11 = -1; + if (unlikely(__pyx_t_16 >= (size_t)__pyx_pybuffernd_query_boxes.diminfo[0].shape)) __pyx_t_11 = 0; + if (__pyx_t_17 < 0) { + __pyx_t_17 += __pyx_pybuffernd_query_boxes.diminfo[1].shape; + if (unlikely(__pyx_t_17 < 0)) __pyx_t_11 = 1; + } else if (unlikely(__pyx_t_17 >= __pyx_pybuffernd_query_boxes.diminfo[1].shape)) __pyx_t_11 = 1; + if (unlikely(__pyx_t_11 != -1)) { + __Pyx_RaiseBufferIndexError(__pyx_t_11); + __PYX_ERR(0, 45, __pyx_L1_error) + } + __pyx_t_24 = (*__Pyx_BufPtrStrided2d(__pyx_t_4bbox_DTYPE_t *, __pyx_pybuffernd_query_boxes.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_query_boxes.diminfo[0].strides, __pyx_t_17, __pyx_pybuffernd_query_boxes.diminfo[1].strides)); + __pyx_t_16 = __pyx_v_n; + __pyx_t_17 = 3; + __pyx_t_11 = -1; + if (unlikely(__pyx_t_16 >= (size_t)__pyx_pybuffernd_boxes.diminfo[0].shape)) __pyx_t_11 = 0; + if (__pyx_t_17 < 0) { + __pyx_t_17 += __pyx_pybuffernd_boxes.diminfo[1].shape; + if (unlikely(__pyx_t_17 < 0)) __pyx_t_11 = 1; + } else if (unlikely(__pyx_t_17 >= __pyx_pybuffernd_boxes.diminfo[1].shape)) __pyx_t_11 = 1; + if (unlikely(__pyx_t_11 != -1)) { + __Pyx_RaiseBufferIndexError(__pyx_t_11); + __PYX_ERR(0, 45, __pyx_L1_error) + } + __pyx_t_23 = (*__Pyx_BufPtrStrided2d(__pyx_t_4bbox_DTYPE_t *, __pyx_pybuffernd_boxes.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_boxes.diminfo[0].strides, __pyx_t_17, __pyx_pybuffernd_boxes.diminfo[1].strides)); + if (((__pyx_t_24 < __pyx_t_23) != 0)) { + __pyx_t_21 = __pyx_t_24; + } else { + __pyx_t_21 = __pyx_t_23; + } + + /* "box_overlaps.pyx":46 + * ih = ( + * min(boxes[n, 3], query_boxes[k, 3]) - + * max(boxes[n, 1], query_boxes[k, 1]) + 1 # <<<<<<<<<<<<<< + * ) + * if ih > 0: + */ + __pyx_t_16 = __pyx_v_k; + __pyx_t_17 = 1; + __pyx_t_11 = -1; + if (unlikely(__pyx_t_16 >= (size_t)__pyx_pybuffernd_query_boxes.diminfo[0].shape)) 
__pyx_t_11 = 0; + if (__pyx_t_17 < 0) { + __pyx_t_17 += __pyx_pybuffernd_query_boxes.diminfo[1].shape; + if (unlikely(__pyx_t_17 < 0)) __pyx_t_11 = 1; + } else if (unlikely(__pyx_t_17 >= __pyx_pybuffernd_query_boxes.diminfo[1].shape)) __pyx_t_11 = 1; + if (unlikely(__pyx_t_11 != -1)) { + __Pyx_RaiseBufferIndexError(__pyx_t_11); + __PYX_ERR(0, 46, __pyx_L1_error) + } + __pyx_t_24 = (*__Pyx_BufPtrStrided2d(__pyx_t_4bbox_DTYPE_t *, __pyx_pybuffernd_query_boxes.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_query_boxes.diminfo[0].strides, __pyx_t_17, __pyx_pybuffernd_query_boxes.diminfo[1].strides)); + __pyx_t_16 = __pyx_v_n; + __pyx_t_17 = 1; + __pyx_t_11 = -1; + if (unlikely(__pyx_t_16 >= (size_t)__pyx_pybuffernd_boxes.diminfo[0].shape)) __pyx_t_11 = 0; + if (__pyx_t_17 < 0) { + __pyx_t_17 += __pyx_pybuffernd_boxes.diminfo[1].shape; + if (unlikely(__pyx_t_17 < 0)) __pyx_t_11 = 1; + } else if (unlikely(__pyx_t_17 >= __pyx_pybuffernd_boxes.diminfo[1].shape)) __pyx_t_11 = 1; + if (unlikely(__pyx_t_11 != -1)) { + __Pyx_RaiseBufferIndexError(__pyx_t_11); + __PYX_ERR(0, 46, __pyx_L1_error) + } + __pyx_t_23 = (*__Pyx_BufPtrStrided2d(__pyx_t_4bbox_DTYPE_t *, __pyx_pybuffernd_boxes.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_boxes.diminfo[0].strides, __pyx_t_17, __pyx_pybuffernd_boxes.diminfo[1].strides)); + if (((__pyx_t_24 > __pyx_t_23) != 0)) { + __pyx_t_22 = __pyx_t_24; + } else { + __pyx_t_22 = __pyx_t_23; + } + + /* "box_overlaps.pyx":45 + * if iw > 0: + * ih = ( + * min(boxes[n, 3], query_boxes[k, 3]) - # <<<<<<<<<<<<<< + * max(boxes[n, 1], query_boxes[k, 1]) + 1 + * ) + */ + __pyx_v_ih = ((__pyx_t_21 - __pyx_t_22) + 1.0); + + /* "box_overlaps.pyx":48 + * max(boxes[n, 1], query_boxes[k, 1]) + 1 + * ) + * if ih > 0: # <<<<<<<<<<<<<< + * ua = float( + * (boxes[n, 2] - boxes[n, 0] + 1) * + */ + __pyx_t_25 = ((__pyx_v_ih > 0.0) != 0); + if (__pyx_t_25) { + + /* "box_overlaps.pyx":50 + * if ih > 0: + * ua = float( + * (boxes[n, 2] - boxes[n, 0] + 1) * # <<<<<<<<<<<<<< + * (boxes[n, 3] - boxes[n, 1] + 1) + + * box_area - iw * ih + */ + __pyx_t_16 = __pyx_v_n; + __pyx_t_17 = 2; + __pyx_t_11 = -1; + if (unlikely(__pyx_t_16 >= (size_t)__pyx_pybuffernd_boxes.diminfo[0].shape)) __pyx_t_11 = 0; + if (__pyx_t_17 < 0) { + __pyx_t_17 += __pyx_pybuffernd_boxes.diminfo[1].shape; + if (unlikely(__pyx_t_17 < 0)) __pyx_t_11 = 1; + } else if (unlikely(__pyx_t_17 >= __pyx_pybuffernd_boxes.diminfo[1].shape)) __pyx_t_11 = 1; + if (unlikely(__pyx_t_11 != -1)) { + __Pyx_RaiseBufferIndexError(__pyx_t_11); + __PYX_ERR(0, 50, __pyx_L1_error) + } + __pyx_t_14 = __pyx_v_n; + __pyx_t_15 = 0; + __pyx_t_11 = -1; + if (unlikely(__pyx_t_14 >= (size_t)__pyx_pybuffernd_boxes.diminfo[0].shape)) __pyx_t_11 = 0; + if (__pyx_t_15 < 0) { + __pyx_t_15 += __pyx_pybuffernd_boxes.diminfo[1].shape; + if (unlikely(__pyx_t_15 < 0)) __pyx_t_11 = 1; + } else if (unlikely(__pyx_t_15 >= __pyx_pybuffernd_boxes.diminfo[1].shape)) __pyx_t_11 = 1; + if (unlikely(__pyx_t_11 != -1)) { + __Pyx_RaiseBufferIndexError(__pyx_t_11); + __PYX_ERR(0, 50, __pyx_L1_error) + } + + /* "box_overlaps.pyx":51 + * ua = float( + * (boxes[n, 2] - boxes[n, 0] + 1) * + * (boxes[n, 3] - boxes[n, 1] + 1) + # <<<<<<<<<<<<<< + * box_area - iw * ih + * ) + */ + __pyx_t_12 = __pyx_v_n; + __pyx_t_13 = 3; + __pyx_t_11 = -1; + if (unlikely(__pyx_t_12 >= (size_t)__pyx_pybuffernd_boxes.diminfo[0].shape)) __pyx_t_11 = 0; + if (__pyx_t_13 < 0) { + __pyx_t_13 += __pyx_pybuffernd_boxes.diminfo[1].shape; + if (unlikely(__pyx_t_13 < 0)) __pyx_t_11 = 1; + } else if 
(unlikely(__pyx_t_13 >= __pyx_pybuffernd_boxes.diminfo[1].shape)) __pyx_t_11 = 1; + if (unlikely(__pyx_t_11 != -1)) { + __Pyx_RaiseBufferIndexError(__pyx_t_11); + __PYX_ERR(0, 51, __pyx_L1_error) + } + __pyx_t_9 = __pyx_v_n; + __pyx_t_10 = 1; + __pyx_t_11 = -1; + if (unlikely(__pyx_t_9 >= (size_t)__pyx_pybuffernd_boxes.diminfo[0].shape)) __pyx_t_11 = 0; + if (__pyx_t_10 < 0) { + __pyx_t_10 += __pyx_pybuffernd_boxes.diminfo[1].shape; + if (unlikely(__pyx_t_10 < 0)) __pyx_t_11 = 1; + } else if (unlikely(__pyx_t_10 >= __pyx_pybuffernd_boxes.diminfo[1].shape)) __pyx_t_11 = 1; + if (unlikely(__pyx_t_11 != -1)) { + __Pyx_RaiseBufferIndexError(__pyx_t_11); + __PYX_ERR(0, 51, __pyx_L1_error) + } + + /* "box_overlaps.pyx":49 + * ) + * if ih > 0: + * ua = float( # <<<<<<<<<<<<<< + * (boxes[n, 2] - boxes[n, 0] + 1) * + * (boxes[n, 3] - boxes[n, 1] + 1) + + */ + __pyx_v_ua = ((double)((((((*__Pyx_BufPtrStrided2d(__pyx_t_4bbox_DTYPE_t *, __pyx_pybuffernd_boxes.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_boxes.diminfo[0].strides, __pyx_t_17, __pyx_pybuffernd_boxes.diminfo[1].strides)) - (*__Pyx_BufPtrStrided2d(__pyx_t_4bbox_DTYPE_t *, __pyx_pybuffernd_boxes.rcbuffer->pybuffer.buf, __pyx_t_14, __pyx_pybuffernd_boxes.diminfo[0].strides, __pyx_t_15, __pyx_pybuffernd_boxes.diminfo[1].strides))) + 1.0) * (((*__Pyx_BufPtrStrided2d(__pyx_t_4bbox_DTYPE_t *, __pyx_pybuffernd_boxes.rcbuffer->pybuffer.buf, __pyx_t_12, __pyx_pybuffernd_boxes.diminfo[0].strides, __pyx_t_13, __pyx_pybuffernd_boxes.diminfo[1].strides)) - (*__Pyx_BufPtrStrided2d(__pyx_t_4bbox_DTYPE_t *, __pyx_pybuffernd_boxes.rcbuffer->pybuffer.buf, __pyx_t_9, __pyx_pybuffernd_boxes.diminfo[0].strides, __pyx_t_10, __pyx_pybuffernd_boxes.diminfo[1].strides))) + 1.0)) + __pyx_v_box_area) - (__pyx_v_iw * __pyx_v_ih))); + + /* "box_overlaps.pyx":54 + * box_area - iw * ih + * ) + * overlaps[n, k] = iw * ih / ua # <<<<<<<<<<<<<< + * return overlaps + */ + __pyx_t_22 = (__pyx_v_iw * __pyx_v_ih); + if (unlikely(__pyx_v_ua == 0)) { + PyErr_SetString(PyExc_ZeroDivisionError, "float division"); + __PYX_ERR(0, 54, __pyx_L1_error) + } + __pyx_t_9 = __pyx_v_n; + __pyx_t_12 = __pyx_v_k; + __pyx_t_11 = -1; + if (unlikely(__pyx_t_9 >= (size_t)__pyx_pybuffernd_overlaps.diminfo[0].shape)) __pyx_t_11 = 0; + if (unlikely(__pyx_t_12 >= (size_t)__pyx_pybuffernd_overlaps.diminfo[1].shape)) __pyx_t_11 = 1; + if (unlikely(__pyx_t_11 != -1)) { + __Pyx_RaiseBufferIndexError(__pyx_t_11); + __PYX_ERR(0, 54, __pyx_L1_error) + } + *__Pyx_BufPtrStrided2d(__pyx_t_4bbox_DTYPE_t *, __pyx_pybuffernd_overlaps.rcbuffer->pybuffer.buf, __pyx_t_9, __pyx_pybuffernd_overlaps.diminfo[0].strides, __pyx_t_12, __pyx_pybuffernd_overlaps.diminfo[1].strides) = (__pyx_t_22 / __pyx_v_ua); + + /* "box_overlaps.pyx":48 + * max(boxes[n, 1], query_boxes[k, 1]) + 1 + * ) + * if ih > 0: # <<<<<<<<<<<<<< + * ua = float( + * (boxes[n, 2] - boxes[n, 0] + 1) * + */ + } + + /* "box_overlaps.pyx":43 + * max(boxes[n, 0], query_boxes[k, 0]) + 1 + * ) + * if iw > 0: # <<<<<<<<<<<<<< + * ih = ( + * min(boxes[n, 3], query_boxes[k, 3]) - + */ + } + } + } + + /* "box_overlaps.pyx":55 + * ) + * overlaps[n, k] = iw * ih / ua + * return overlaps # <<<<<<<<<<<<<< + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(((PyObject *)__pyx_v_overlaps)); + __pyx_r = ((PyObject *)__pyx_v_overlaps); + goto __pyx_L0; + + /* "box_overlaps.pyx":15 + * ctypedef np.float_t DTYPE_t + * + * def bbox_overlaps( # <<<<<<<<<<<<<< + * np.ndarray[DTYPE_t, ndim=2] boxes, + * np.ndarray[DTYPE_t, ndim=2] query_boxes): + */ + + /* function exit code 
*/ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_boxes.rcbuffer->pybuffer); + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_overlaps.rcbuffer->pybuffer); + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_query_boxes.rcbuffer->pybuffer); + __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} + __Pyx_AddTraceback("bbox.bbox_overlaps", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + goto __pyx_L2; + __pyx_L0:; + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_boxes.rcbuffer->pybuffer); + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_overlaps.rcbuffer->pybuffer); + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_query_boxes.rcbuffer->pybuffer); + __pyx_L2:; + __Pyx_XDECREF((PyObject *)__pyx_v_overlaps); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":742 + * ctypedef npy_cdouble complex_t + * + * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(1, a) + * + */ + +static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("PyArray_MultiIterNew1", 0); + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":743 + * + * cdef inline object PyArray_MultiIterNew1(a): + * return PyArray_MultiIterNew(1, a) # <<<<<<<<<<<<<< + * + * cdef inline object PyArray_MultiIterNew2(a, b): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 743, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":742 + * ctypedef npy_cdouble complex_t + * + * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(1, a) + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":745 + * return PyArray_MultiIterNew(1, a) + * + * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(2, a, b) + * + */ + +static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("PyArray_MultiIterNew2", 0); + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":746 + * + * cdef inline object PyArray_MultiIterNew2(a, b): + * return PyArray_MultiIterNew(2, a, b) # <<<<<<<<<<<<<< + * + * cdef inline object PyArray_MultiIterNew3(a, b, c): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = PyArray_MultiIterNew(2, ((void 
*)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 746, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":745 + * return PyArray_MultiIterNew(1, a) + * + * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(2, a, b) + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":748 + * return PyArray_MultiIterNew(2, a, b) + * + * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(3, a, b, c) + * + */ + +static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("PyArray_MultiIterNew3", 0); + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":749 + * + * cdef inline object PyArray_MultiIterNew3(a, b, c): + * return PyArray_MultiIterNew(3, a, b, c) # <<<<<<<<<<<<<< + * + * cdef inline object PyArray_MultiIterNew4(a, b, c, d): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 749, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":748 + * return PyArray_MultiIterNew(2, a, b) + * + * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(3, a, b, c) + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":751 + * return PyArray_MultiIterNew(3, a, b, c) + * + * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(4, a, b, c, d) + * + */ + +static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("PyArray_MultiIterNew4", 0); + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":752 + * + * cdef inline object PyArray_MultiIterNew4(a, b, c, d): + * return PyArray_MultiIterNew(4, a, b, c, d) # <<<<<<<<<<<<<< + * + * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 752, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + 
__pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":751 + * return PyArray_MultiIterNew(3, a, b, c) + * + * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(4, a, b, c, d) + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":754 + * return PyArray_MultiIterNew(4, a, b, c, d) + * + * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(5, a, b, c, d, e) + * + */ + +static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject *__pyx_v_e) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("PyArray_MultiIterNew5", 0); + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":755 + * + * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): + * return PyArray_MultiIterNew(5, a, b, c, d, e) # <<<<<<<<<<<<<< + * + * cdef inline tuple PyDataType_SHAPE(dtype d): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 755, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":754 + * return PyArray_MultiIterNew(4, a, b, c, d) + * + * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(5, a, b, c, d, e) + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":757 + * return PyArray_MultiIterNew(5, a, b, c, d, e) + * + * cdef inline tuple PyDataType_SHAPE(dtype d): # <<<<<<<<<<<<<< + * if PyDataType_HASSUBARRAY(d): + * return d.subarray.shape + */ + +static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyDataType_SHAPE(PyArray_Descr *__pyx_v_d) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + __Pyx_RefNannySetupContext("PyDataType_SHAPE", 0); + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":758 + * + * cdef inline tuple PyDataType_SHAPE(dtype d): + * if PyDataType_HASSUBARRAY(d): # <<<<<<<<<<<<<< + * return d.subarray.shape + * else: + */ + __pyx_t_1 = (PyDataType_HASSUBARRAY(__pyx_v_d) != 0); + if (__pyx_t_1) { + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":759 + * cdef inline tuple PyDataType_SHAPE(dtype d): + * if PyDataType_HASSUBARRAY(d): + * return d.subarray.shape # <<<<<<<<<<<<<< + * else: + * return () + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(((PyObject*)__pyx_v_d->subarray->shape)); + __pyx_r = 
((PyObject*)__pyx_v_d->subarray->shape); + goto __pyx_L0; + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":758 + * + * cdef inline tuple PyDataType_SHAPE(dtype d): + * if PyDataType_HASSUBARRAY(d): # <<<<<<<<<<<<<< + * return d.subarray.shape + * else: + */ + } + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":761 + * return d.subarray.shape + * else: + * return () # <<<<<<<<<<<<<< + * + * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: + */ + /*else*/ { + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(__pyx_empty_tuple); + __pyx_r = __pyx_empty_tuple; + goto __pyx_L0; + } + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":757 + * return PyArray_MultiIterNew(5, a, b, c, d, e) + * + * cdef inline tuple PyDataType_SHAPE(dtype d): # <<<<<<<<<<<<<< + * if PyDataType_HASSUBARRAY(d): + * return d.subarray.shape + */ + + /* function exit code */ + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":763 + * return () + * + * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<< + * # Recursive utility function used in __getbuffer__ to get format + * # string. The new location in the format string is returned. + */ + +static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx_v_descr, char *__pyx_v_f, char *__pyx_v_end, int *__pyx_v_offset) { + PyArray_Descr *__pyx_v_child = 0; + int __pyx_v_endian_detector; + int __pyx_v_little_endian; + PyObject *__pyx_v_fields = 0; + PyObject *__pyx_v_childname = NULL; + PyObject *__pyx_v_new_offset = NULL; + PyObject *__pyx_v_t = NULL; + char *__pyx_r; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + Py_ssize_t __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + int __pyx_t_5; + int __pyx_t_6; + int __pyx_t_7; + long __pyx_t_8; + char *__pyx_t_9; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("_util_dtypestring", 0); + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":768 + * + * cdef dtype child + * cdef int endian_detector = 1 # <<<<<<<<<<<<<< + * cdef bint little_endian = ((&endian_detector)[0] != 0) + * cdef tuple fields + */ + __pyx_v_endian_detector = 1; + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":769 + * cdef dtype child + * cdef int endian_detector = 1 + * cdef bint little_endian = ((&endian_detector)[0] != 0) # <<<<<<<<<<<<<< + * cdef tuple fields + * + */ + __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":772 + * cdef tuple fields + * + * for childname in descr.names: # <<<<<<<<<<<<<< + * fields = descr.fields[childname] + * child, new_offset = fields + */ + if (unlikely(__pyx_v_descr->names == Py_None)) { + PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); + __PYX_ERR(1, 772, __pyx_L1_error) + } + __pyx_t_1 = __pyx_v_descr->names; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0; + for (;;) { + if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break; + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) __PYX_ERR(1, 772, __pyx_L1_error) + #else + __pyx_t_3 = 
PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 772, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + #endif + __Pyx_XDECREF_SET(__pyx_v_childname, __pyx_t_3); + __pyx_t_3 = 0; + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":773 + * + * for childname in descr.names: + * fields = descr.fields[childname] # <<<<<<<<<<<<<< + * child, new_offset = fields + * + */ + if (unlikely(__pyx_v_descr->fields == Py_None)) { + PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); + __PYX_ERR(1, 773, __pyx_L1_error) + } + __pyx_t_3 = __Pyx_PyDict_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 773, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_t_3)->tp_name), 0))) __PYX_ERR(1, 773, __pyx_L1_error) + __Pyx_XDECREF_SET(__pyx_v_fields, ((PyObject*)__pyx_t_3)); + __pyx_t_3 = 0; + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":774 + * for childname in descr.names: + * fields = descr.fields[childname] + * child, new_offset = fields # <<<<<<<<<<<<<< + * + * if (end - f) - (new_offset - offset[0]) < 15: + */ + if (likely(__pyx_v_fields != Py_None)) { + PyObject* sequence = __pyx_v_fields; + Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); + if (unlikely(size != 2)) { + if (size > 2) __Pyx_RaiseTooManyValuesError(2); + else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); + __PYX_ERR(1, 774, __pyx_L1_error) + } + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); + __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); + __Pyx_INCREF(__pyx_t_3); + __Pyx_INCREF(__pyx_t_4); + #else + __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 774, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 774, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + #endif + } else { + __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 774, __pyx_L1_error) + } + if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) __PYX_ERR(1, 774, __pyx_L1_error) + __Pyx_XDECREF_SET(__pyx_v_child, ((PyArray_Descr *)__pyx_t_3)); + __pyx_t_3 = 0; + __Pyx_XDECREF_SET(__pyx_v_new_offset, __pyx_t_4); + __pyx_t_4 = 0; + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":776 + * child, new_offset = fields + * + * if (end - f) - (new_offset - offset[0]) < 15: # <<<<<<<<<<<<<< + * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") + * + */ + __pyx_t_4 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 776, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 776, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 776, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = ((((__pyx_v_end - __pyx_v_f) - ((int)__pyx_t_5)) < 15) != 0); + if (unlikely(__pyx_t_6)) { + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":777 + * + * if (end - f) - (new_offset - offset[0]) < 15: + * raise RuntimeError(u"Format 
string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< + * + * if ((child.byteorder == c'>' and little_endian) or + */ + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 777, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(1, 777, __pyx_L1_error) + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":776 + * child, new_offset = fields + * + * if (end - f) - (new_offset - offset[0]) < 15: # <<<<<<<<<<<<<< + * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") + * + */ + } + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":779 + * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") + * + * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< + * (child.byteorder == c'<' and not little_endian)): + * raise ValueError(u"Non-native byte order not supported") + */ + __pyx_t_7 = ((__pyx_v_child->byteorder == '>') != 0); + if (!__pyx_t_7) { + goto __pyx_L8_next_or; + } else { + } + __pyx_t_7 = (__pyx_v_little_endian != 0); + if (!__pyx_t_7) { + } else { + __pyx_t_6 = __pyx_t_7; + goto __pyx_L7_bool_binop_done; + } + __pyx_L8_next_or:; + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":780 + * + * if ((child.byteorder == c'>' and little_endian) or + * (child.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< + * raise ValueError(u"Non-native byte order not supported") + * # One could encode it in the format string and have Cython + */ + __pyx_t_7 = ((__pyx_v_child->byteorder == '<') != 0); + if (__pyx_t_7) { + } else { + __pyx_t_6 = __pyx_t_7; + goto __pyx_L7_bool_binop_done; + } + __pyx_t_7 = ((!(__pyx_v_little_endian != 0)) != 0); + __pyx_t_6 = __pyx_t_7; + __pyx_L7_bool_binop_done:; + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":779 + * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") + * + * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< + * (child.byteorder == c'<' and not little_endian)): + * raise ValueError(u"Non-native byte order not supported") + */ + if (unlikely(__pyx_t_6)) { + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":781 + * if ((child.byteorder == c'>' and little_endian) or + * (child.byteorder == c'<' and not little_endian)): + * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< + * # One could encode it in the format string and have Cython + * # complain instead, BUT: < and > in format strings also imply + */ + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 781, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(1, 781, __pyx_L1_error) + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":779 + * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") + * + * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< + * (child.byteorder == c'<' and not little_endian)): + * raise ValueError(u"Non-native byte order not supported") + */ + } + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":791 + * + * # Output padding bytes + * while offset[0] < new_offset: # <<<<<<<<<<<<<< + * f[0] = 120 # 
"x"; pad byte + * f += 1 + */ + while (1) { + __pyx_t_3 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 791, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyObject_RichCompare(__pyx_t_3, __pyx_v_new_offset, Py_LT); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 791, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 791, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (!__pyx_t_6) break; + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":792 + * # Output padding bytes + * while offset[0] < new_offset: + * f[0] = 120 # "x"; pad byte # <<<<<<<<<<<<<< + * f += 1 + * offset[0] += 1 + */ + (__pyx_v_f[0]) = 0x78; + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":793 + * while offset[0] < new_offset: + * f[0] = 120 # "x"; pad byte + * f += 1 # <<<<<<<<<<<<<< + * offset[0] += 1 + * + */ + __pyx_v_f = (__pyx_v_f + 1); + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":794 + * f[0] = 120 # "x"; pad byte + * f += 1 + * offset[0] += 1 # <<<<<<<<<<<<<< + * + * offset[0] += child.itemsize + */ + __pyx_t_8 = 0; + (__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + 1); + } + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":796 + * offset[0] += 1 + * + * offset[0] += child.itemsize # <<<<<<<<<<<<<< + * + * if not PyDataType_HASFIELDS(child): + */ + __pyx_t_8 = 0; + (__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + __pyx_v_child->elsize); + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":798 + * offset[0] += child.itemsize + * + * if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<< + * t = child.type_num + * if end - f < 5: + */ + __pyx_t_6 = ((!(PyDataType_HASFIELDS(__pyx_v_child) != 0)) != 0); + if (__pyx_t_6) { + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":799 + * + * if not PyDataType_HASFIELDS(child): + * t = child.type_num # <<<<<<<<<<<<<< + * if end - f < 5: + * raise RuntimeError(u"Format string allocated too short.") + */ + __pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_child->type_num); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 799, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_XDECREF_SET(__pyx_v_t, __pyx_t_4); + __pyx_t_4 = 0; + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":800 + * if not PyDataType_HASFIELDS(child): + * t = child.type_num + * if end - f < 5: # <<<<<<<<<<<<<< + * raise RuntimeError(u"Format string allocated too short.") + * + */ + __pyx_t_6 = (((__pyx_v_end - __pyx_v_f) < 5) != 0); + if (unlikely(__pyx_t_6)) { + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":801 + * t = child.type_num + * if end - f < 5: + * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< + * + * # Until ticket #99 is fixed, use integers to avoid warnings + */ + __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 801, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_Raise(__pyx_t_4, 0, 0, 0); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __PYX_ERR(1, 801, __pyx_L1_error) + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":800 + * if not PyDataType_HASFIELDS(child): + * t = child.type_num + * if end - f < 5: # <<<<<<<<<<<<<< + * raise RuntimeError(u"Format string allocated too short.") + * + */ + 
} + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":804 + * + * # Until ticket #99 is fixed, use integers to avoid warnings + * if t == NPY_BYTE: f[0] = 98 #"b" # <<<<<<<<<<<<<< + * elif t == NPY_UBYTE: f[0] = 66 #"B" + * elif t == NPY_SHORT: f[0] = 104 #"h" + */ + __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_BYTE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 804, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 804, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 804, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 98; + goto __pyx_L15; + } + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":805 + * # Until ticket #99 is fixed, use integers to avoid warnings + * if t == NPY_BYTE: f[0] = 98 #"b" + * elif t == NPY_UBYTE: f[0] = 66 #"B" # <<<<<<<<<<<<<< + * elif t == NPY_SHORT: f[0] = 104 #"h" + * elif t == NPY_USHORT: f[0] = 72 #"H" + */ + __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_UBYTE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 805, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 805, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 805, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 66; + goto __pyx_L15; + } + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":806 + * if t == NPY_BYTE: f[0] = 98 #"b" + * elif t == NPY_UBYTE: f[0] = 66 #"B" + * elif t == NPY_SHORT: f[0] = 104 #"h" # <<<<<<<<<<<<<< + * elif t == NPY_USHORT: f[0] = 72 #"H" + * elif t == NPY_INT: f[0] = 105 #"i" + */ + __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_SHORT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 806, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 806, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 806, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 0x68; + goto __pyx_L15; + } + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":807 + * elif t == NPY_UBYTE: f[0] = 66 #"B" + * elif t == NPY_SHORT: f[0] = 104 #"h" + * elif t == NPY_USHORT: f[0] = 72 #"H" # <<<<<<<<<<<<<< + * elif t == NPY_INT: f[0] = 105 #"i" + * elif t == NPY_UINT: f[0] = 73 #"I" + */ + __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_USHORT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 807, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 807, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 807, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 72; + goto __pyx_L15; + } + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":808 + * elif t == NPY_SHORT: f[0] = 
104 #"h" + * elif t == NPY_USHORT: f[0] = 72 #"H" + * elif t == NPY_INT: f[0] = 105 #"i" # <<<<<<<<<<<<<< + * elif t == NPY_UINT: f[0] = 73 #"I" + * elif t == NPY_LONG: f[0] = 108 #"l" + */ + __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_INT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 808, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 808, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 808, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 0x69; + goto __pyx_L15; + } + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":809 + * elif t == NPY_USHORT: f[0] = 72 #"H" + * elif t == NPY_INT: f[0] = 105 #"i" + * elif t == NPY_UINT: f[0] = 73 #"I" # <<<<<<<<<<<<<< + * elif t == NPY_LONG: f[0] = 108 #"l" + * elif t == NPY_ULONG: f[0] = 76 #"L" + */ + __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_UINT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 809, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 809, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 809, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 73; + goto __pyx_L15; + } + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":810 + * elif t == NPY_INT: f[0] = 105 #"i" + * elif t == NPY_UINT: f[0] = 73 #"I" + * elif t == NPY_LONG: f[0] = 108 #"l" # <<<<<<<<<<<<<< + * elif t == NPY_ULONG: f[0] = 76 #"L" + * elif t == NPY_LONGLONG: f[0] = 113 #"q" + */ + __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONG); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 810, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 810, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 810, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 0x6C; + goto __pyx_L15; + } + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":811 + * elif t == NPY_UINT: f[0] = 73 #"I" + * elif t == NPY_LONG: f[0] = 108 #"l" + * elif t == NPY_ULONG: f[0] = 76 #"L" # <<<<<<<<<<<<<< + * elif t == NPY_LONGLONG: f[0] = 113 #"q" + * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" + */ + __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_ULONG); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 811, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 811, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 811, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 76; + goto __pyx_L15; + } + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":812 + * elif t == NPY_LONG: f[0] = 108 #"l" + * elif t == NPY_ULONG: f[0] = 76 #"L" + * elif t == NPY_LONGLONG: f[0] = 113 #"q" # <<<<<<<<<<<<<< + * 
elif t == NPY_ULONGLONG: f[0] = 81 #"Q" + * elif t == NPY_FLOAT: f[0] = 102 #"f" + */ + __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONGLONG); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 812, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 812, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 812, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 0x71; + goto __pyx_L15; + } + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":813 + * elif t == NPY_ULONG: f[0] = 76 #"L" + * elif t == NPY_LONGLONG: f[0] = 113 #"q" + * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" # <<<<<<<<<<<<<< + * elif t == NPY_FLOAT: f[0] = 102 #"f" + * elif t == NPY_DOUBLE: f[0] = 100 #"d" + */ + __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_ULONGLONG); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 813, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 813, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 813, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 81; + goto __pyx_L15; + } + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":814 + * elif t == NPY_LONGLONG: f[0] = 113 #"q" + * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" + * elif t == NPY_FLOAT: f[0] = 102 #"f" # <<<<<<<<<<<<<< + * elif t == NPY_DOUBLE: f[0] = 100 #"d" + * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" + */ + __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_FLOAT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 814, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 814, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 814, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 0x66; + goto __pyx_L15; + } + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":815 + * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" + * elif t == NPY_FLOAT: f[0] = 102 #"f" + * elif t == NPY_DOUBLE: f[0] = 100 #"d" # <<<<<<<<<<<<<< + * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" + * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf + */ + __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_DOUBLE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 815, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 815, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 815, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 0x64; + goto __pyx_L15; + } + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":816 + * elif t == NPY_FLOAT: f[0] = 102 #"f" + * elif t == NPY_DOUBLE: f[0] = 100 #"d" + * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" # <<<<<<<<<<<<<< + * elif t == NPY_CFLOAT: f[0] = 90; 
f[1] = 102; f += 1 # Zf + * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd + */ + __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 816, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 816, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 816, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 0x67; + goto __pyx_L15; + } + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":817 + * elif t == NPY_DOUBLE: f[0] = 100 #"d" + * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" + * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf # <<<<<<<<<<<<<< + * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd + * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg + */ + __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CFLOAT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 817, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 817, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 817, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 90; + (__pyx_v_f[1]) = 0x66; + __pyx_v_f = (__pyx_v_f + 1); + goto __pyx_L15; + } + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":818 + * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" + * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf + * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd # <<<<<<<<<<<<<< + * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg + * elif t == NPY_OBJECT: f[0] = 79 #"O" + */ + __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CDOUBLE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 818, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 818, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 818, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 90; + (__pyx_v_f[1]) = 0x64; + __pyx_v_f = (__pyx_v_f + 1); + goto __pyx_L15; + } + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":819 + * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf + * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd + * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg # <<<<<<<<<<<<<< + * elif t == NPY_OBJECT: f[0] = 79 #"O" + * else: + */ + __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 819, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 819, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 819, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 90; + (__pyx_v_f[1]) = 0x67; + 
__pyx_v_f = (__pyx_v_f + 1); + goto __pyx_L15; + } + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":820 + * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd + * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg + * elif t == NPY_OBJECT: f[0] = 79 #"O" # <<<<<<<<<<<<<< + * else: + * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) + */ + __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_OBJECT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 820, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 820, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 820, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (likely(__pyx_t_6)) { + (__pyx_v_f[0]) = 79; + goto __pyx_L15; + } + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":822 + * elif t == NPY_OBJECT: f[0] = 79 #"O" + * else: + * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< + * f += 1 + * else: + */ + /*else*/ { + __pyx_t_3 = __Pyx_PyUnicode_FormatSafe(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_v_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 822, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 822, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_Raise(__pyx_t_4, 0, 0, 0); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __PYX_ERR(1, 822, __pyx_L1_error) + } + __pyx_L15:; + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":823 + * else: + * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) + * f += 1 # <<<<<<<<<<<<<< + * else: + * # Cython ignores struct boundary information ("T{...}"), + */ + __pyx_v_f = (__pyx_v_f + 1); + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":798 + * offset[0] += child.itemsize + * + * if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<< + * t = child.type_num + * if end - f < 5: + */ + goto __pyx_L13; + } + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":827 + * # Cython ignores struct boundary information ("T{...}"), + * # so don't output it + * f = _util_dtypestring(child, f, end, offset) # <<<<<<<<<<<<<< + * return f + * + */ + /*else*/ { + __pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_9 == ((char *)NULL))) __PYX_ERR(1, 827, __pyx_L1_error) + __pyx_v_f = __pyx_t_9; + } + __pyx_L13:; + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":772 + * cdef tuple fields + * + * for childname in descr.names: # <<<<<<<<<<<<<< + * fields = descr.fields[childname] + * child, new_offset = fields + */ + } + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":828 + * # so don't output it + * f = _util_dtypestring(child, f, end, offset) + * return f # <<<<<<<<<<<<<< + * + * + */ + __pyx_r = __pyx_v_f; + goto __pyx_L0; + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":763 + * return () + * + * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<< + * # Recursive utility function used in __getbuffer__ to get 
format + * # string. The new location in the format string is returned. + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_AddTraceback("numpy._util_dtypestring", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XDECREF((PyObject *)__pyx_v_child); + __Pyx_XDECREF(__pyx_v_fields); + __Pyx_XDECREF(__pyx_v_childname); + __Pyx_XDECREF(__pyx_v_new_offset); + __Pyx_XDECREF(__pyx_v_t); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":943 + * int _import_umath() except -1 + * + * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< + * Py_INCREF(base) # important to do this before stealing the reference below! + * PyArray_SetBaseObject(arr, base) + */ + +static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("set_array_base", 0); + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":944 + * + * cdef inline void set_array_base(ndarray arr, object base): + * Py_INCREF(base) # important to do this before stealing the reference below! # <<<<<<<<<<<<<< + * PyArray_SetBaseObject(arr, base) + * + */ + Py_INCREF(__pyx_v_base); + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":945 + * cdef inline void set_array_base(ndarray arr, object base): + * Py_INCREF(base) # important to do this before stealing the reference below! + * PyArray_SetBaseObject(arr, base) # <<<<<<<<<<<<<< + * + * cdef inline object get_array_base(ndarray arr): + */ + (void)(PyArray_SetBaseObject(__pyx_v_arr, __pyx_v_base)); + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":943 + * int _import_umath() except -1 + * + * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< + * Py_INCREF(base) # important to do this before stealing the reference below! 
+ * PyArray_SetBaseObject(arr, base) + */ + + /* function exit code */ + __Pyx_RefNannyFinishContext(); +} + +/* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":947 + * PyArray_SetBaseObject(arr, base) + * + * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< + * base = PyArray_BASE(arr) + * if base is NULL: + */ + +static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) { + PyObject *__pyx_v_base; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + __Pyx_RefNannySetupContext("get_array_base", 0); + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":948 + * + * cdef inline object get_array_base(ndarray arr): + * base = PyArray_BASE(arr) # <<<<<<<<<<<<<< + * if base is NULL: + * return None + */ + __pyx_v_base = PyArray_BASE(__pyx_v_arr); + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":949 + * cdef inline object get_array_base(ndarray arr): + * base = PyArray_BASE(arr) + * if base is NULL: # <<<<<<<<<<<<<< + * return None + * return base + */ + __pyx_t_1 = ((__pyx_v_base == NULL) != 0); + if (__pyx_t_1) { + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":950 + * base = PyArray_BASE(arr) + * if base is NULL: + * return None # <<<<<<<<<<<<<< + * return base + * + */ + __Pyx_XDECREF(__pyx_r); + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + goto __pyx_L0; + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":949 + * cdef inline object get_array_base(ndarray arr): + * base = PyArray_BASE(arr) + * if base is NULL: # <<<<<<<<<<<<<< + * return None + * return base + */ + } + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":951 + * if base is NULL: + * return None + * return base # <<<<<<<<<<<<<< + * + * # Versions of the import_* functions which are more suitable for + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(((PyObject *)__pyx_v_base)); + __pyx_r = ((PyObject *)__pyx_v_base); + goto __pyx_L0; + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":947 + * PyArray_SetBaseObject(arr, base) + * + * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< + * base = PyArray_BASE(arr) + * if base is NULL: + */ + + /* function exit code */ + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":955 + * # Versions of the import_* functions which are more suitable for + * # Cython code. + * cdef inline int import_array() except -1: # <<<<<<<<<<<<<< + * try: + * __pyx_import_array() + */ + +static CYTHON_INLINE int __pyx_f_5numpy_import_array(void) { + int __pyx_r; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + int __pyx_t_4; + PyObject *__pyx_t_5 = NULL; + PyObject *__pyx_t_6 = NULL; + PyObject *__pyx_t_7 = NULL; + PyObject *__pyx_t_8 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("import_array", 0); + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":956 + * # Cython code. 
+ * cdef inline int import_array() except -1: + * try: # <<<<<<<<<<<<<< + * __pyx_import_array() + * except Exception: + */ + { + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3); + __Pyx_XGOTREF(__pyx_t_1); + __Pyx_XGOTREF(__pyx_t_2); + __Pyx_XGOTREF(__pyx_t_3); + /*try:*/ { + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":957 + * cdef inline int import_array() except -1: + * try: + * __pyx_import_array() # <<<<<<<<<<<<<< + * except Exception: + * raise ImportError("numpy.core.multiarray failed to import") + */ + __pyx_t_4 = _import_array(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 957, __pyx_L3_error) + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":956 + * # Cython code. + * cdef inline int import_array() except -1: + * try: # <<<<<<<<<<<<<< + * __pyx_import_array() + * except Exception: + */ + } + __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + goto __pyx_L8_try_end; + __pyx_L3_error:; + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":958 + * try: + * __pyx_import_array() + * except Exception: # <<<<<<<<<<<<<< + * raise ImportError("numpy.core.multiarray failed to import") + * + */ + __pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0]))); + if (__pyx_t_4) { + __Pyx_AddTraceback("numpy.import_array", __pyx_clineno, __pyx_lineno, __pyx_filename); + if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 958, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_GOTREF(__pyx_t_6); + __Pyx_GOTREF(__pyx_t_7); + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":959 + * __pyx_import_array() + * except Exception: + * raise ImportError("numpy.core.multiarray failed to import") # <<<<<<<<<<<<<< + * + * cdef inline int import_umath() except -1: + */ + __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 959, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_8); + __Pyx_Raise(__pyx_t_8, 0, 0, 0); + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + __PYX_ERR(1, 959, __pyx_L5_except_error) + } + goto __pyx_L5_except_error; + __pyx_L5_except_error:; + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":956 + * # Cython code. + * cdef inline int import_array() except -1: + * try: # <<<<<<<<<<<<<< + * __pyx_import_array() + * except Exception: + */ + __Pyx_XGIVEREF(__pyx_t_1); + __Pyx_XGIVEREF(__pyx_t_2); + __Pyx_XGIVEREF(__pyx_t_3); + __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); + goto __pyx_L1_error; + __pyx_L8_try_end:; + } + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":955 + * # Versions of the import_* functions which are more suitable for + * # Cython code. 
+ * cdef inline int import_array() except -1: # <<<<<<<<<<<<<< + * try: + * __pyx_import_array() + */ + + /* function exit code */ + __pyx_r = 0; + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_XDECREF(__pyx_t_7); + __Pyx_XDECREF(__pyx_t_8); + __Pyx_AddTraceback("numpy.import_array", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":961 + * raise ImportError("numpy.core.multiarray failed to import") + * + * cdef inline int import_umath() except -1: # <<<<<<<<<<<<<< + * try: + * _import_umath() + */ + +static CYTHON_INLINE int __pyx_f_5numpy_import_umath(void) { + int __pyx_r; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + int __pyx_t_4; + PyObject *__pyx_t_5 = NULL; + PyObject *__pyx_t_6 = NULL; + PyObject *__pyx_t_7 = NULL; + PyObject *__pyx_t_8 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("import_umath", 0); + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":962 + * + * cdef inline int import_umath() except -1: + * try: # <<<<<<<<<<<<<< + * _import_umath() + * except Exception: + */ + { + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3); + __Pyx_XGOTREF(__pyx_t_1); + __Pyx_XGOTREF(__pyx_t_2); + __Pyx_XGOTREF(__pyx_t_3); + /*try:*/ { + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":963 + * cdef inline int import_umath() except -1: + * try: + * _import_umath() # <<<<<<<<<<<<<< + * except Exception: + * raise ImportError("numpy.core.umath failed to import") + */ + __pyx_t_4 = _import_umath(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 963, __pyx_L3_error) + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":962 + * + * cdef inline int import_umath() except -1: + * try: # <<<<<<<<<<<<<< + * _import_umath() + * except Exception: + */ + } + __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + goto __pyx_L8_try_end; + __pyx_L3_error:; + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":964 + * try: + * _import_umath() + * except Exception: # <<<<<<<<<<<<<< + * raise ImportError("numpy.core.umath failed to import") + * + */ + __pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0]))); + if (__pyx_t_4) { + __Pyx_AddTraceback("numpy.import_umath", __pyx_clineno, __pyx_lineno, __pyx_filename); + if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 964, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_GOTREF(__pyx_t_6); + __Pyx_GOTREF(__pyx_t_7); + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":965 + * _import_umath() + * except Exception: + * raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<< + * + * cdef inline int import_ufunc() except -1: + */ + __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 965, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_8); + __Pyx_Raise(__pyx_t_8, 0, 0, 0); + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + __PYX_ERR(1, 965, __pyx_L5_except_error) + } + goto __pyx_L5_except_error; + 
__pyx_L5_except_error:; + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":962 + * + * cdef inline int import_umath() except -1: + * try: # <<<<<<<<<<<<<< + * _import_umath() + * except Exception: + */ + __Pyx_XGIVEREF(__pyx_t_1); + __Pyx_XGIVEREF(__pyx_t_2); + __Pyx_XGIVEREF(__pyx_t_3); + __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); + goto __pyx_L1_error; + __pyx_L8_try_end:; + } + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":961 + * raise ImportError("numpy.core.multiarray failed to import") + * + * cdef inline int import_umath() except -1: # <<<<<<<<<<<<<< + * try: + * _import_umath() + */ + + /* function exit code */ + __pyx_r = 0; + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_XDECREF(__pyx_t_7); + __Pyx_XDECREF(__pyx_t_8); + __Pyx_AddTraceback("numpy.import_umath", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":967 + * raise ImportError("numpy.core.umath failed to import") + * + * cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<< + * try: + * _import_umath() + */ + +static CYTHON_INLINE int __pyx_f_5numpy_import_ufunc(void) { + int __pyx_r; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + int __pyx_t_4; + PyObject *__pyx_t_5 = NULL; + PyObject *__pyx_t_6 = NULL; + PyObject *__pyx_t_7 = NULL; + PyObject *__pyx_t_8 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("import_ufunc", 0); + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":968 + * + * cdef inline int import_ufunc() except -1: + * try: # <<<<<<<<<<<<<< + * _import_umath() + * except Exception: + */ + { + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3); + __Pyx_XGOTREF(__pyx_t_1); + __Pyx_XGOTREF(__pyx_t_2); + __Pyx_XGOTREF(__pyx_t_3); + /*try:*/ { + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":969 + * cdef inline int import_ufunc() except -1: + * try: + * _import_umath() # <<<<<<<<<<<<<< + * except Exception: + * raise ImportError("numpy.core.umath failed to import") + */ + __pyx_t_4 = _import_umath(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 969, __pyx_L3_error) + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":968 + * + * cdef inline int import_ufunc() except -1: + * try: # <<<<<<<<<<<<<< + * _import_umath() + * except Exception: + */ + } + __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + goto __pyx_L8_try_end; + __pyx_L3_error:; + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":970 + * try: + * _import_umath() + * except Exception: # <<<<<<<<<<<<<< + * raise ImportError("numpy.core.umath failed to import") + * + */ + __pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0]))); + if (__pyx_t_4) { + __Pyx_AddTraceback("numpy.import_ufunc", __pyx_clineno, __pyx_lineno, __pyx_filename); + if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 970, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_GOTREF(__pyx_t_6); + __Pyx_GOTREF(__pyx_t_7); + + /* 
"../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":971 + * _import_umath() + * except Exception: + * raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<< + * + * cdef extern from *: + */ + __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 971, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_8); + __Pyx_Raise(__pyx_t_8, 0, 0, 0); + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + __PYX_ERR(1, 971, __pyx_L5_except_error) + } + goto __pyx_L5_except_error; + __pyx_L5_except_error:; + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":968 + * + * cdef inline int import_ufunc() except -1: + * try: # <<<<<<<<<<<<<< + * _import_umath() + * except Exception: + */ + __Pyx_XGIVEREF(__pyx_t_1); + __Pyx_XGIVEREF(__pyx_t_2); + __Pyx_XGIVEREF(__pyx_t_3); + __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); + goto __pyx_L1_error; + __pyx_L8_try_end:; + } + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":967 + * raise ImportError("numpy.core.umath failed to import") + * + * cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<< + * try: + * _import_umath() + */ + + /* function exit code */ + __pyx_r = 0; + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_XDECREF(__pyx_t_7); + __Pyx_XDECREF(__pyx_t_8); + __Pyx_AddTraceback("numpy.import_ufunc", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyMethodDef __pyx_methods[] = { + {0, 0, 0, 0} +}; + +#if PY_MAJOR_VERSION >= 3 +#if CYTHON_PEP489_MULTI_PHASE_INIT +static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/ +static int __pyx_pymod_exec_bbox(PyObject* module); /*proto*/ +static PyModuleDef_Slot __pyx_moduledef_slots[] = { + {Py_mod_create, (void*)__pyx_pymod_create}, + {Py_mod_exec, (void*)__pyx_pymod_exec_bbox}, + {0, NULL} +}; +#endif + +static struct PyModuleDef __pyx_moduledef = { + PyModuleDef_HEAD_INIT, + "bbox", + 0, /* m_doc */ + #if CYTHON_PEP489_MULTI_PHASE_INIT + 0, /* m_size */ + #else + -1, /* m_size */ + #endif + __pyx_methods /* m_methods */, + #if CYTHON_PEP489_MULTI_PHASE_INIT + __pyx_moduledef_slots, /* m_slots */ + #else + NULL, /* m_reload */ + #endif + NULL, /* m_traverse */ + NULL, /* m_clear */ + NULL /* m_free */ +}; +#endif +#ifndef CYTHON_SMALL_CODE +#if defined(__clang__) + #define CYTHON_SMALL_CODE +#elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)) + #define CYTHON_SMALL_CODE __attribute__((cold)) +#else + #define CYTHON_SMALL_CODE +#endif +#endif + +static __Pyx_StringTabEntry __pyx_string_tab[] = { + {&__pyx_n_s_DTYPE, __pyx_k_DTYPE, sizeof(__pyx_k_DTYPE), 0, 0, 1, 1}, + {&__pyx_kp_u_Format_string_allocated_too_shor, __pyx_k_Format_string_allocated_too_shor, sizeof(__pyx_k_Format_string_allocated_too_shor), 0, 1, 0, 0}, + {&__pyx_kp_u_Format_string_allocated_too_shor_2, __pyx_k_Format_string_allocated_too_shor_2, sizeof(__pyx_k_Format_string_allocated_too_shor_2), 0, 1, 0, 0}, + {&__pyx_n_s_ImportError, __pyx_k_ImportError, sizeof(__pyx_k_ImportError), 0, 0, 1, 1}, + {&__pyx_n_s_K, __pyx_k_K, sizeof(__pyx_k_K), 0, 0, 1, 1}, + {&__pyx_n_s_N, __pyx_k_N, sizeof(__pyx_k_N), 0, 0, 1, 1}, + {&__pyx_kp_u_Non_native_byte_order_not_suppor, __pyx_k_Non_native_byte_order_not_suppor, sizeof(__pyx_k_Non_native_byte_order_not_suppor), 0, 1, 0, 0}, + {&__pyx_n_s_RuntimeError, 
__pyx_k_RuntimeError, sizeof(__pyx_k_RuntimeError), 0, 0, 1, 1}, + {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1}, + {&__pyx_n_s_bbox, __pyx_k_bbox, sizeof(__pyx_k_bbox), 0, 0, 1, 1}, + {&__pyx_n_s_bbox_overlaps, __pyx_k_bbox_overlaps, sizeof(__pyx_k_bbox_overlaps), 0, 0, 1, 1}, + {&__pyx_n_s_box_area, __pyx_k_box_area, sizeof(__pyx_k_box_area), 0, 0, 1, 1}, + {&__pyx_kp_s_box_overlaps_pyx, __pyx_k_box_overlaps_pyx, sizeof(__pyx_k_box_overlaps_pyx), 0, 0, 1, 0}, + {&__pyx_n_s_boxes, __pyx_k_boxes, sizeof(__pyx_k_boxes), 0, 0, 1, 1}, + {&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1}, + {&__pyx_n_s_dtype, __pyx_k_dtype, sizeof(__pyx_k_dtype), 0, 0, 1, 1}, + {&__pyx_n_s_float, __pyx_k_float, sizeof(__pyx_k_float), 0, 0, 1, 1}, + {&__pyx_n_s_ih, __pyx_k_ih, sizeof(__pyx_k_ih), 0, 0, 1, 1}, + {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, + {&__pyx_n_s_iw, __pyx_k_iw, sizeof(__pyx_k_iw), 0, 0, 1, 1}, + {&__pyx_n_s_k, __pyx_k_k, sizeof(__pyx_k_k), 0, 0, 1, 1}, + {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, + {&__pyx_n_s_n, __pyx_k_n, sizeof(__pyx_k_n), 0, 0, 1, 1}, + {&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1}, + {&__pyx_n_s_np, __pyx_k_np, sizeof(__pyx_k_np), 0, 0, 1, 1}, + {&__pyx_n_s_numpy, __pyx_k_numpy, sizeof(__pyx_k_numpy), 0, 0, 1, 1}, + {&__pyx_kp_s_numpy_core_multiarray_failed_to, __pyx_k_numpy_core_multiarray_failed_to, sizeof(__pyx_k_numpy_core_multiarray_failed_to), 0, 0, 1, 0}, + {&__pyx_kp_s_numpy_core_umath_failed_to_impor, __pyx_k_numpy_core_umath_failed_to_impor, sizeof(__pyx_k_numpy_core_umath_failed_to_impor), 0, 0, 1, 0}, + {&__pyx_n_s_overlaps, __pyx_k_overlaps, sizeof(__pyx_k_overlaps), 0, 0, 1, 1}, + {&__pyx_n_s_query_boxes, __pyx_k_query_boxes, sizeof(__pyx_k_query_boxes), 0, 0, 1, 1}, + {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1}, + {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, + {&__pyx_n_s_ua, __pyx_k_ua, sizeof(__pyx_k_ua), 0, 0, 1, 1}, + {&__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_k_unknown_dtype_code_in_numpy_pxd, sizeof(__pyx_k_unknown_dtype_code_in_numpy_pxd), 0, 1, 0, 0}, + {&__pyx_n_s_zeros, __pyx_k_zeros, sizeof(__pyx_k_zeros), 0, 0, 1, 1}, + {0, 0, 0, 0, 0, 0, 0} +}; +static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) { + __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 33, __pyx_L1_error) + __pyx_builtin_RuntimeError = __Pyx_GetBuiltinName(__pyx_n_s_RuntimeError); if (!__pyx_builtin_RuntimeError) __PYX_ERR(1, 777, __pyx_L1_error) + __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(1, 781, __pyx_L1_error) + __pyx_builtin_ImportError = __Pyx_GetBuiltinName(__pyx_n_s_ImportError); if (!__pyx_builtin_ImportError) __PYX_ERR(1, 959, __pyx_L1_error) + return 0; + __pyx_L1_error:; + return -1; +} + +static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":777 + * + * if (end - f) - (new_offset - offset[0]) < 15: + * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< + * + * if ((child.byteorder == c'>' and little_endian) or + */ + __pyx_tuple_ = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor); if 
(unlikely(!__pyx_tuple_)) __PYX_ERR(1, 777, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple_); + __Pyx_GIVEREF(__pyx_tuple_); + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":781 + * if ((child.byteorder == c'>' and little_endian) or + * (child.byteorder == c'<' and not little_endian)): + * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< + * # One could encode it in the format string and have Cython + * # complain instead, BUT: < and > in format strings also imply + */ + __pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(1, 781, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__2); + __Pyx_GIVEREF(__pyx_tuple__2); + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":801 + * t = child.type_num + * if end - f < 5: + * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< + * + * # Until ticket #99 is fixed, use integers to avoid warnings + */ + __pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor_2); if (unlikely(!__pyx_tuple__3)) __PYX_ERR(1, 801, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__3); + __Pyx_GIVEREF(__pyx_tuple__3); + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":959 + * __pyx_import_array() + * except Exception: + * raise ImportError("numpy.core.multiarray failed to import") # <<<<<<<<<<<<<< + * + * cdef inline int import_umath() except -1: + */ + __pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_s_numpy_core_multiarray_failed_to); if (unlikely(!__pyx_tuple__4)) __PYX_ERR(1, 959, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__4); + __Pyx_GIVEREF(__pyx_tuple__4); + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":965 + * _import_umath() + * except Exception: + * raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<< + * + * cdef inline int import_ufunc() except -1: + */ + __pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_s_numpy_core_umath_failed_to_impor); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(1, 965, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__5); + __Pyx_GIVEREF(__pyx_tuple__5); + + /* "box_overlaps.pyx":15 + * ctypedef np.float_t DTYPE_t + * + * def bbox_overlaps( # <<<<<<<<<<<<<< + * np.ndarray[DTYPE_t, ndim=2] boxes, + * np.ndarray[DTYPE_t, ndim=2] query_boxes): + */ + __pyx_tuple__6 = PyTuple_Pack(11, __pyx_n_s_boxes, __pyx_n_s_query_boxes, __pyx_n_s_N, __pyx_n_s_K, __pyx_n_s_overlaps, __pyx_n_s_iw, __pyx_n_s_ih, __pyx_n_s_box_area, __pyx_n_s_ua, __pyx_n_s_k, __pyx_n_s_n); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(0, 15, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__6); + __Pyx_GIVEREF(__pyx_tuple__6); + __pyx_codeobj__7 = (PyObject*)__Pyx_PyCode_New(2, 0, 11, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__6, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_box_overlaps_pyx, __pyx_n_s_bbox_overlaps, 15, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__7)) __PYX_ERR(0, 15, __pyx_L1_error) + __Pyx_RefNannyFinishContext(); + return 0; + __pyx_L1_error:; + __Pyx_RefNannyFinishContext(); + return -1; +} + +static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) { + if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error); + return 0; + __pyx_L1_error:; + return -1; +} + +static CYTHON_SMALL_CODE int __Pyx_modinit_global_init_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_variable_export_code(void); /*proto*/ +static CYTHON_SMALL_CODE int 
__Pyx_modinit_function_export_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_type_init_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_type_import_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_variable_import_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_function_import_code(void); /*proto*/ + +static int __Pyx_modinit_global_init_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0); + /*--- Global init code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + +static int __Pyx_modinit_variable_export_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0); + /*--- Variable export code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + +static int __Pyx_modinit_function_export_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0); + /*--- Function export code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + +static int __Pyx_modinit_type_init_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0); + /*--- Type init code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + +static int __Pyx_modinit_type_import_code(void) { + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0); + /*--- Type import code ---*/ + __pyx_t_1 = PyImport_ImportModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 9, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__pyx_t_1, __Pyx_BUILTIN_MODULE_NAME, "type", + #if defined(PYPY_VERSION_NUM) && PYPY_VERSION_NUM < 0x050B0000 + sizeof(PyTypeObject), + #else + sizeof(PyHeapTypeObject), + #endif + __Pyx_ImportType_CheckSize_Warn); + if (!__pyx_ptype_7cpython_4type_type) __PYX_ERR(2, 9, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_1 = PyImport_ImportModule("numpy"); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 207, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_ptype_5numpy_dtype = __Pyx_ImportType(__pyx_t_1, "numpy", "dtype", sizeof(PyArray_Descr), __Pyx_ImportType_CheckSize_Ignore); + if (!__pyx_ptype_5numpy_dtype) __PYX_ERR(1, 207, __pyx_L1_error) + __pyx_ptype_5numpy_flatiter = __Pyx_ImportType(__pyx_t_1, "numpy", "flatiter", sizeof(PyArrayIterObject), __Pyx_ImportType_CheckSize_Ignore); + if (!__pyx_ptype_5numpy_flatiter) __PYX_ERR(1, 230, __pyx_L1_error) + __pyx_ptype_5numpy_broadcast = __Pyx_ImportType(__pyx_t_1, "numpy", "broadcast", sizeof(PyArrayMultiIterObject), __Pyx_ImportType_CheckSize_Ignore); + if (!__pyx_ptype_5numpy_broadcast) __PYX_ERR(1, 234, __pyx_L1_error) + __pyx_ptype_5numpy_ndarray = __Pyx_ImportType(__pyx_t_1, "numpy", "ndarray", sizeof(PyArrayObject), __Pyx_ImportType_CheckSize_Ignore); + if (!__pyx_ptype_5numpy_ndarray) __PYX_ERR(1, 246, __pyx_L1_error) + __pyx_ptype_5numpy_ufunc = __Pyx_ImportType(__pyx_t_1, "numpy", "ufunc", sizeof(PyUFuncObject), __Pyx_ImportType_CheckSize_Ignore); + if (!__pyx_ptype_5numpy_ufunc) __PYX_ERR(1, 839, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_RefNannyFinishContext(); + return 0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_RefNannyFinishContext(); + return -1; +} + +static int __Pyx_modinit_variable_import_code(void) { + 
__Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0); + /*--- Variable import code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + +static int __Pyx_modinit_function_import_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0); + /*--- Function import code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + + +#ifndef CYTHON_NO_PYINIT_EXPORT +#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC +#elif PY_MAJOR_VERSION < 3 +#ifdef __cplusplus +#define __Pyx_PyMODINIT_FUNC extern "C" void +#else +#define __Pyx_PyMODINIT_FUNC void +#endif +#else +#ifdef __cplusplus +#define __Pyx_PyMODINIT_FUNC extern "C" PyObject * +#else +#define __Pyx_PyMODINIT_FUNC PyObject * +#endif +#endif + + +#if PY_MAJOR_VERSION < 3 +__Pyx_PyMODINIT_FUNC initbbox(void) CYTHON_SMALL_CODE; /*proto*/ +__Pyx_PyMODINIT_FUNC initbbox(void) +#else +__Pyx_PyMODINIT_FUNC PyInit_bbox(void) CYTHON_SMALL_CODE; /*proto*/ +__Pyx_PyMODINIT_FUNC PyInit_bbox(void) +#if CYTHON_PEP489_MULTI_PHASE_INIT +{ + return PyModuleDef_Init(&__pyx_moduledef); +} +static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) { + #if PY_VERSION_HEX >= 0x030700A1 + static PY_INT64_T main_interpreter_id = -1; + PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp); + if (main_interpreter_id == -1) { + main_interpreter_id = current_id; + return (unlikely(current_id == -1)) ? -1 : 0; + } else if (unlikely(main_interpreter_id != current_id)) + #else + static PyInterpreterState *main_interpreter = NULL; + PyInterpreterState *current_interpreter = PyThreadState_Get()->interp; + if (!main_interpreter) { + main_interpreter = current_interpreter; + } else if (unlikely(main_interpreter != current_interpreter)) + #endif + { + PyErr_SetString( + PyExc_ImportError, + "Interpreter change detected - this module can only be loaded into one interpreter per process."); + return -1; + } + return 0; +} +static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) { + PyObject *value = PyObject_GetAttrString(spec, from_name); + int result = 0; + if (likely(value)) { + if (allow_none || value != Py_None) { + result = PyDict_SetItemString(moddict, to_name, value); + } + Py_DECREF(value); + } else if (PyErr_ExceptionMatches(PyExc_AttributeError)) { + PyErr_Clear(); + } else { + result = -1; + } + return result; +} +static CYTHON_SMALL_CODE PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) { + PyObject *module = NULL, *moddict, *modname; + if (__Pyx_check_single_interpreter()) + return NULL; + if (__pyx_m) + return __Pyx_NewRef(__pyx_m); + modname = PyObject_GetAttrString(spec, "name"); + if (unlikely(!modname)) goto bad; + module = PyModule_NewObject(modname); + Py_DECREF(modname); + if (unlikely(!module)) goto bad; + moddict = PyModule_GetDict(module); + if (unlikely(!moddict)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad; + return module; +bad: + Py_XDECREF(module); + return NULL; +} + + +static CYTHON_SMALL_CODE int __pyx_pymod_exec_bbox(PyObject 
*__pyx_pyinit_module) +#endif +#endif +{ + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannyDeclarations + #if CYTHON_PEP489_MULTI_PHASE_INIT + if (__pyx_m) { + if (__pyx_m == __pyx_pyinit_module) return 0; + PyErr_SetString(PyExc_RuntimeError, "Module 'bbox' has already been imported. Re-initialisation is not supported."); + return -1; + } + #elif PY_MAJOR_VERSION >= 3 + if (__pyx_m) return __Pyx_NewRef(__pyx_m); + #endif + #if CYTHON_REFNANNY +__Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); +if (!__Pyx_RefNanny) { + PyErr_Clear(); + __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); + if (!__Pyx_RefNanny) + Py_FatalError("failed to import 'refnanny' module"); +} +#endif + __Pyx_RefNannySetupContext("__Pyx_PyMODINIT_FUNC PyInit_bbox(void)", 0); + if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #ifdef __Pxy_PyFrame_Initialize_Offsets + __Pxy_PyFrame_Initialize_Offsets(); + #endif + __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error) + #ifdef __Pyx_CyFunction_USED + if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_FusedFunction_USED + if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_Coroutine_USED + if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_Generator_USED + if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_AsyncGen_USED + if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_StopAsyncIteration_USED + if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + /*--- Library function declarations ---*/ + /*--- Threads initialization code ---*/ + #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS + #ifdef WITH_THREAD /* Python build with threading support? */ + PyEval_InitThreads(); + #endif + #endif + /*--- Module creation code ---*/ + #if CYTHON_PEP489_MULTI_PHASE_INIT + __pyx_m = __pyx_pyinit_module; + Py_INCREF(__pyx_m); + #else + #if PY_MAJOR_VERSION < 3 + __pyx_m = Py_InitModule4("bbox", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); + #else + __pyx_m = PyModule_Create(&__pyx_moduledef); + #endif + if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error) + Py_INCREF(__pyx_d); + __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error) + Py_INCREF(__pyx_b); + __pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error) + Py_INCREF(__pyx_cython_runtime); + if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error); + /*--- Initialize various global constants etc. 
---*/ + if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) + if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + if (__pyx_module_is_main_bbox) { + if (PyObject_SetAttr(__pyx_m, __pyx_n_s_name, __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + } + #if PY_MAJOR_VERSION >= 3 + { + PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error) + if (!PyDict_GetItemString(modules, "bbox")) { + if (unlikely(PyDict_SetItemString(modules, "bbox", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error) + } + } + #endif + /*--- Builtin init code ---*/ + if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + /*--- Constants init code ---*/ + if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + /*--- Global type/function init code ---*/ + (void)__Pyx_modinit_global_init_code(); + (void)__Pyx_modinit_variable_export_code(); + (void)__Pyx_modinit_function_export_code(); + (void)__Pyx_modinit_type_init_code(); + if (unlikely(__Pyx_modinit_type_import_code() < 0)) __PYX_ERR(0, 1, __pyx_L1_error) + (void)__Pyx_modinit_variable_import_code(); + (void)__Pyx_modinit_function_import_code(); + /*--- Execution code ---*/ + #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) + if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + + /* "box_overlaps.pyx":9 + * + * cimport cython + * import numpy as np # <<<<<<<<<<<<<< + * cimport numpy as np + * + */ + __pyx_t_1 = __Pyx_Import(__pyx_n_s_numpy, 0, -1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_np, __pyx_t_1) < 0) __PYX_ERR(0, 9, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "box_overlaps.pyx":12 + * cimport numpy as np + * + * DTYPE = np.float # <<<<<<<<<<<<<< + * ctypedef np.float_t DTYPE_t + * + */ + __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_float); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + if (PyDict_SetItem(__pyx_d, __pyx_n_s_DTYPE, __pyx_t_2) < 0) __PYX_ERR(0, 12, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + + /* "box_overlaps.pyx":15 + * ctypedef np.float_t DTYPE_t + * + * def bbox_overlaps( # <<<<<<<<<<<<<< + * np.ndarray[DTYPE_t, ndim=2] boxes, + * np.ndarray[DTYPE_t, ndim=2] query_boxes): + */ + __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_4bbox_1bbox_overlaps, NULL, __pyx_n_s_bbox); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_bbox_overlaps, __pyx_t_2) < 0) __PYX_ERR(0, 15, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + + /* "box_overlaps.pyx":1 + * # -------------------------------------------------------- # <<<<<<<<<<<<<< + * # Fast R-CNN + * # Copyright (c) 2015 Microsoft + */ + __pyx_t_2 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_2) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + + /* "../../../miniconda3/lib/python3.7/site-packages/numpy/__init__.pxd":967 + * raise 
ImportError("numpy.core.umath failed to import") + * + * cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<< + * try: + * _import_umath() + */ + + /*--- Wrapped vars code ---*/ + + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + if (__pyx_m) { + if (__pyx_d) { + __Pyx_AddTraceback("init bbox", __pyx_clineno, __pyx_lineno, __pyx_filename); + } + Py_CLEAR(__pyx_m); + } else if (!PyErr_Occurred()) { + PyErr_SetString(PyExc_ImportError, "init bbox"); + } + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + #if CYTHON_PEP489_MULTI_PHASE_INIT + return (__pyx_m != NULL) ? 0 : -1; + #elif PY_MAJOR_VERSION >= 3 + return __pyx_m; + #else + return; + #endif +} + +/* --- Runtime support code --- */ +/* Refnanny */ +#if CYTHON_REFNANNY +static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { + PyObject *m = NULL, *p = NULL; + void *r = NULL; + m = PyImport_ImportModule(modname); + if (!m) goto end; + p = PyObject_GetAttrString(m, "RefNannyAPI"); + if (!p) goto end; + r = PyLong_AsVoidPtr(p); +end: + Py_XDECREF(p); + Py_XDECREF(m); + return (__Pyx_RefNannyAPIStruct *)r; +} +#endif + +/* PyObjectGetAttrStr */ +#if CYTHON_USE_TYPE_SLOTS +static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { + PyTypeObject* tp = Py_TYPE(obj); + if (likely(tp->tp_getattro)) + return tp->tp_getattro(obj, attr_name); +#if PY_MAJOR_VERSION < 3 + if (likely(tp->tp_getattr)) + return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); +#endif + return PyObject_GetAttr(obj, attr_name); +} +#endif + +/* GetBuiltinName */ +static PyObject *__Pyx_GetBuiltinName(PyObject *name) { + PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name); + if (unlikely(!result)) { + PyErr_Format(PyExc_NameError, +#if PY_MAJOR_VERSION >= 3 + "name '%U' is not defined", name); +#else + "name '%.200s' is not defined", PyString_AS_STRING(name)); +#endif + } + return result; +} + +/* RaiseArgTupleInvalid */ +static void __Pyx_RaiseArgtupleInvalid( + const char* func_name, + int exact, + Py_ssize_t num_min, + Py_ssize_t num_max, + Py_ssize_t num_found) +{ + Py_ssize_t num_expected; + const char *more_or_less; + if (num_found < num_min) { + num_expected = num_min; + more_or_less = "at least"; + } else { + num_expected = num_max; + more_or_less = "at most"; + } + if (exact) { + more_or_less = "exactly"; + } + PyErr_Format(PyExc_TypeError, + "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)", + func_name, more_or_less, num_expected, + (num_expected == 1) ? 
"" : "s", num_found); +} + +/* RaiseDoubleKeywords */ +static void __Pyx_RaiseDoubleKeywordsError( + const char* func_name, + PyObject* kw_name) +{ + PyErr_Format(PyExc_TypeError, + #if PY_MAJOR_VERSION >= 3 + "%s() got multiple values for keyword argument '%U'", func_name, kw_name); + #else + "%s() got multiple values for keyword argument '%s'", func_name, + PyString_AsString(kw_name)); + #endif +} + +/* ParseKeywords */ +static int __Pyx_ParseOptionalKeywords( + PyObject *kwds, + PyObject **argnames[], + PyObject *kwds2, + PyObject *values[], + Py_ssize_t num_pos_args, + const char* function_name) +{ + PyObject *key = 0, *value = 0; + Py_ssize_t pos = 0; + PyObject*** name; + PyObject*** first_kw_arg = argnames + num_pos_args; + while (PyDict_Next(kwds, &pos, &key, &value)) { + name = first_kw_arg; + while (*name && (**name != key)) name++; + if (*name) { + values[name-argnames] = value; + continue; + } + name = first_kw_arg; + #if PY_MAJOR_VERSION < 3 + if (likely(PyString_Check(key))) { + while (*name) { + if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) + && _PyString_Eq(**name, key)) { + values[name-argnames] = value; + break; + } + name++; + } + if (*name) continue; + else { + PyObject*** argname = argnames; + while (argname != first_kw_arg) { + if ((**argname == key) || ( + (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) + && _PyString_Eq(**argname, key))) { + goto arg_passed_twice; + } + argname++; + } + } + } else + #endif + if (likely(PyUnicode_Check(key))) { + while (*name) { + int cmp = (**name == key) ? 0 : + #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 + (__Pyx_PyUnicode_GET_LENGTH(**name) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 : + #endif + PyUnicode_Compare(**name, key); + if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; + if (cmp == 0) { + values[name-argnames] = value; + break; + } + name++; + } + if (*name) continue; + else { + PyObject*** argname = argnames; + while (argname != first_kw_arg) { + int cmp = (**argname == key) ? 0 : + #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 + (__Pyx_PyUnicode_GET_LENGTH(**argname) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 
1 : + #endif + PyUnicode_Compare(**argname, key); + if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; + if (cmp == 0) goto arg_passed_twice; + argname++; + } + } + } else + goto invalid_keyword_type; + if (kwds2) { + if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; + } else { + goto invalid_keyword; + } + } + return 0; +arg_passed_twice: + __Pyx_RaiseDoubleKeywordsError(function_name, key); + goto bad; +invalid_keyword_type: + PyErr_Format(PyExc_TypeError, + "%.200s() keywords must be strings", function_name); + goto bad; +invalid_keyword: + PyErr_Format(PyExc_TypeError, + #if PY_MAJOR_VERSION < 3 + "%.200s() got an unexpected keyword argument '%.200s'", + function_name, PyString_AsString(key)); + #else + "%s() got an unexpected keyword argument '%U'", + function_name, key); + #endif +bad: + return -1; +} + +/* ArgTypeTest */ +static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact) +{ + if (unlikely(!type)) { + PyErr_SetString(PyExc_SystemError, "Missing type object"); + return 0; + } + else if (exact) { + #if PY_MAJOR_VERSION == 2 + if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1; + #endif + } + else { + if (likely(__Pyx_TypeCheck(obj, type))) return 1; + } + PyErr_Format(PyExc_TypeError, + "Argument '%.200s' has incorrect type (expected %.200s, got %.200s)", + name, type->tp_name, Py_TYPE(obj)->tp_name); + return 0; +} + +/* IsLittleEndian */ +static CYTHON_INLINE int __Pyx_Is_Little_Endian(void) +{ + union { + uint32_t u32; + uint8_t u8[4]; + } S; + S.u32 = 0x01020304; + return S.u8[0] == 4; +} + +/* BufferFormatCheck */ +static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, + __Pyx_BufFmt_StackElem* stack, + __Pyx_TypeInfo* type) { + stack[0].field = &ctx->root; + stack[0].parent_offset = 0; + ctx->root.type = type; + ctx->root.name = "buffer dtype"; + ctx->root.offset = 0; + ctx->head = stack; + ctx->head->field = &ctx->root; + ctx->fmt_offset = 0; + ctx->head->parent_offset = 0; + ctx->new_packmode = '@'; + ctx->enc_packmode = '@'; + ctx->new_count = 1; + ctx->enc_count = 0; + ctx->enc_type = 0; + ctx->is_complex = 0; + ctx->is_valid_array = 0; + ctx->struct_alignment = 0; + while (type->typegroup == 'S') { + ++ctx->head; + ctx->head->field = type->fields; + ctx->head->parent_offset = 0; + type = type->fields->type; + } +} +static int __Pyx_BufFmt_ParseNumber(const char** ts) { + int count; + const char* t = *ts; + if (*t < '0' || *t > '9') { + return -1; + } else { + count = *t++ - '0'; + while (*t >= '0' && *t <= '9') { + count *= 10; + count += *t++ - '0'; + } + } + *ts = t; + return count; +} +static int __Pyx_BufFmt_ExpectNumber(const char **ts) { + int number = __Pyx_BufFmt_ParseNumber(ts); + if (number == -1) + PyErr_Format(PyExc_ValueError,\ + "Does not understand character buffer dtype format string ('%c')", **ts); + return number; +} +static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) { + PyErr_Format(PyExc_ValueError, + "Unexpected format string character: '%c'", ch); +} +static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) { + switch (ch) { + case '?': return "'bool'"; + case 'c': return "'char'"; + case 'b': return "'signed char'"; + case 'B': return "'unsigned char'"; + case 'h': return "'short'"; + case 'H': return "'unsigned short'"; + case 'i': return "'int'"; + case 'I': return "'unsigned int'"; + case 'l': return "'long'"; + case 'L': return "'unsigned long'"; + case 'q': return "'long long'"; + case 'Q': return "'unsigned long long'"; + case 'f': 
return (is_complex ? "'complex float'" : "'float'"); + case 'd': return (is_complex ? "'complex double'" : "'double'"); + case 'g': return (is_complex ? "'complex long double'" : "'long double'"); + case 'T': return "a struct"; + case 'O': return "Python object"; + case 'P': return "a pointer"; + case 's': case 'p': return "a string"; + case 0: return "end"; + default: return "unparseable format string"; + } +} +static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) { + switch (ch) { + case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; + case 'h': case 'H': return 2; + case 'i': case 'I': case 'l': case 'L': return 4; + case 'q': case 'Q': return 8; + case 'f': return (is_complex ? 8 : 4); + case 'd': return (is_complex ? 16 : 8); + case 'g': { + PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g').."); + return 0; + } + case 'O': case 'P': return sizeof(void*); + default: + __Pyx_BufFmt_RaiseUnexpectedChar(ch); + return 0; + } +} +static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) { + switch (ch) { + case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; + case 'h': case 'H': return sizeof(short); + case 'i': case 'I': return sizeof(int); + case 'l': case 'L': return sizeof(long); + #ifdef HAVE_LONG_LONG + case 'q': case 'Q': return sizeof(PY_LONG_LONG); + #endif + case 'f': return sizeof(float) * (is_complex ? 2 : 1); + case 'd': return sizeof(double) * (is_complex ? 2 : 1); + case 'g': return sizeof(long double) * (is_complex ? 2 : 1); + case 'O': case 'P': return sizeof(void*); + default: { + __Pyx_BufFmt_RaiseUnexpectedChar(ch); + return 0; + } + } +} +typedef struct { char c; short x; } __Pyx_st_short; +typedef struct { char c; int x; } __Pyx_st_int; +typedef struct { char c; long x; } __Pyx_st_long; +typedef struct { char c; float x; } __Pyx_st_float; +typedef struct { char c; double x; } __Pyx_st_double; +typedef struct { char c; long double x; } __Pyx_st_longdouble; +typedef struct { char c; void *x; } __Pyx_st_void_p; +#ifdef HAVE_LONG_LONG +typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong; +#endif +static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) { + switch (ch) { + case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; + case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short); + case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int); + case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long); +#ifdef HAVE_LONG_LONG + case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG); +#endif + case 'f': return sizeof(__Pyx_st_float) - sizeof(float); + case 'd': return sizeof(__Pyx_st_double) - sizeof(double); + case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double); + case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*); + default: + __Pyx_BufFmt_RaiseUnexpectedChar(ch); + return 0; + } +} +/* These are for computing the padding at the end of the struct to align + on the first member of the struct. This will probably the same as above, + but we don't have any guarantees. 
+ */ +typedef struct { short x; char c; } __Pyx_pad_short; +typedef struct { int x; char c; } __Pyx_pad_int; +typedef struct { long x; char c; } __Pyx_pad_long; +typedef struct { float x; char c; } __Pyx_pad_float; +typedef struct { double x; char c; } __Pyx_pad_double; +typedef struct { long double x; char c; } __Pyx_pad_longdouble; +typedef struct { void *x; char c; } __Pyx_pad_void_p; +#ifdef HAVE_LONG_LONG +typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong; +#endif +static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) { + switch (ch) { + case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; + case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short); + case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int); + case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long); +#ifdef HAVE_LONG_LONG + case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG); +#endif + case 'f': return sizeof(__Pyx_pad_float) - sizeof(float); + case 'd': return sizeof(__Pyx_pad_double) - sizeof(double); + case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double); + case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*); + default: + __Pyx_BufFmt_RaiseUnexpectedChar(ch); + return 0; + } +} +static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) { + switch (ch) { + case 'c': + return 'H'; + case 'b': case 'h': case 'i': + case 'l': case 'q': case 's': case 'p': + return 'I'; + case '?': case 'B': case 'H': case 'I': case 'L': case 'Q': + return 'U'; + case 'f': case 'd': case 'g': + return (is_complex ? 'C' : 'R'); + case 'O': + return 'O'; + case 'P': + return 'P'; + default: { + __Pyx_BufFmt_RaiseUnexpectedChar(ch); + return 0; + } + } +} +static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) { + if (ctx->head == NULL || ctx->head->field == &ctx->root) { + const char* expected; + const char* quote; + if (ctx->head == NULL) { + expected = "end"; + quote = ""; + } else { + expected = ctx->head->field->type->name; + quote = "'"; + } + PyErr_Format(PyExc_ValueError, + "Buffer dtype mismatch, expected %s%s%s but got %s", + quote, expected, quote, + __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex)); + } else { + __Pyx_StructField* field = ctx->head->field; + __Pyx_StructField* parent = (ctx->head - 1)->field; + PyErr_Format(PyExc_ValueError, + "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'", + field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex), + parent->type->name, field->name); + } +} +static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) { + char group; + size_t size, offset, arraysize = 1; + if (ctx->enc_type == 0) return 0; + if (ctx->head->field->type->arraysize[0]) { + int i, ndim = 0; + if (ctx->enc_type == 's' || ctx->enc_type == 'p') { + ctx->is_valid_array = ctx->head->field->type->ndim == 1; + ndim = 1; + if (ctx->enc_count != ctx->head->field->type->arraysize[0]) { + PyErr_Format(PyExc_ValueError, + "Expected a dimension of size %zu, got %zu", + ctx->head->field->type->arraysize[0], ctx->enc_count); + return -1; + } + } + if (!ctx->is_valid_array) { + PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d", + ctx->head->field->type->ndim, ndim); + return -1; + } + for (i = 0; i < ctx->head->field->type->ndim; i++) { + arraysize *= ctx->head->field->type->arraysize[i]; + } + ctx->is_valid_array = 0; + ctx->enc_count = 1; + } + group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, 
ctx->is_complex); + do { + __Pyx_StructField* field = ctx->head->field; + __Pyx_TypeInfo* type = field->type; + if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') { + size = __Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex); + } else { + size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex); + } + if (ctx->enc_packmode == '@') { + size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex); + size_t align_mod_offset; + if (align_at == 0) return -1; + align_mod_offset = ctx->fmt_offset % align_at; + if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset; + if (ctx->struct_alignment == 0) + ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type, + ctx->is_complex); + } + if (type->size != size || type->typegroup != group) { + if (type->typegroup == 'C' && type->fields != NULL) { + size_t parent_offset = ctx->head->parent_offset + field->offset; + ++ctx->head; + ctx->head->field = type->fields; + ctx->head->parent_offset = parent_offset; + continue; + } + if ((type->typegroup == 'H' || group == 'H') && type->size == size) { + } else { + __Pyx_BufFmt_RaiseExpected(ctx); + return -1; + } + } + offset = ctx->head->parent_offset + field->offset; + if (ctx->fmt_offset != offset) { + PyErr_Format(PyExc_ValueError, + "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected", + (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset); + return -1; + } + ctx->fmt_offset += size; + if (arraysize) + ctx->fmt_offset += (arraysize - 1) * size; + --ctx->enc_count; + while (1) { + if (field == &ctx->root) { + ctx->head = NULL; + if (ctx->enc_count != 0) { + __Pyx_BufFmt_RaiseExpected(ctx); + return -1; + } + break; + } + ctx->head->field = ++field; + if (field->type == NULL) { + --ctx->head; + field = ctx->head->field; + continue; + } else if (field->type->typegroup == 'S') { + size_t parent_offset = ctx->head->parent_offset + field->offset; + if (field->type->fields->type == NULL) continue; + field = field->type->fields; + ++ctx->head; + ctx->head->field = field; + ctx->head->parent_offset = parent_offset; + break; + } else { + break; + } + } + } while (ctx->enc_count); + ctx->enc_type = 0; + ctx->is_complex = 0; + return 0; +} +static PyObject * +__pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp) +{ + const char *ts = *tsp; + int i = 0, number, ndim; + ++ts; + if (ctx->new_count != 1) { + PyErr_SetString(PyExc_ValueError, + "Cannot handle repeated arrays in format string"); + return NULL; + } + if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; + ndim = ctx->head->field->type->ndim; + while (*ts && *ts != ')') { + switch (*ts) { + case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': continue; + default: break; + } + number = __Pyx_BufFmt_ExpectNumber(&ts); + if (number == -1) return NULL; + if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i]) + return PyErr_Format(PyExc_ValueError, + "Expected a dimension of size %zu, got %d", + ctx->head->field->type->arraysize[i], number); + if (*ts != ',' && *ts != ')') + return PyErr_Format(PyExc_ValueError, + "Expected a comma in format string, got '%c'", *ts); + if (*ts == ',') ts++; + i++; + } + if (i != ndim) + return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d", + ctx->head->field->type->ndim, i); + if (!*ts) { + PyErr_SetString(PyExc_ValueError, + "Unexpected end of format string, expected ')'"); + return NULL; + } + 
ctx->is_valid_array = 1; + ctx->new_count = 1; + *tsp = ++ts; + return Py_None; +} +static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) { + int got_Z = 0; + while (1) { + switch(*ts) { + case 0: + if (ctx->enc_type != 0 && ctx->head == NULL) { + __Pyx_BufFmt_RaiseExpected(ctx); + return NULL; + } + if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; + if (ctx->head != NULL) { + __Pyx_BufFmt_RaiseExpected(ctx); + return NULL; + } + return ts; + case ' ': + case '\r': + case '\n': + ++ts; + break; + case '<': + if (!__Pyx_Is_Little_Endian()) { + PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler"); + return NULL; + } + ctx->new_packmode = '='; + ++ts; + break; + case '>': + case '!': + if (__Pyx_Is_Little_Endian()) { + PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler"); + return NULL; + } + ctx->new_packmode = '='; + ++ts; + break; + case '=': + case '@': + case '^': + ctx->new_packmode = *ts++; + break; + case 'T': + { + const char* ts_after_sub; + size_t i, struct_count = ctx->new_count; + size_t struct_alignment = ctx->struct_alignment; + ctx->new_count = 1; + ++ts; + if (*ts != '{') { + PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'"); + return NULL; + } + if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; + ctx->enc_type = 0; + ctx->enc_count = 0; + ctx->struct_alignment = 0; + ++ts; + ts_after_sub = ts; + for (i = 0; i != struct_count; ++i) { + ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts); + if (!ts_after_sub) return NULL; + } + ts = ts_after_sub; + if (struct_alignment) ctx->struct_alignment = struct_alignment; + } + break; + case '}': + { + size_t alignment = ctx->struct_alignment; + ++ts; + if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; + ctx->enc_type = 0; + if (alignment && ctx->fmt_offset % alignment) { + ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment); + } + } + return ts; + case 'x': + if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; + ctx->fmt_offset += ctx->new_count; + ctx->new_count = 1; + ctx->enc_count = 0; + ctx->enc_type = 0; + ctx->enc_packmode = ctx->new_packmode; + ++ts; + break; + case 'Z': + got_Z = 1; + ++ts; + if (*ts != 'f' && *ts != 'd' && *ts != 'g') { + __Pyx_BufFmt_RaiseUnexpectedChar('Z'); + return NULL; + } + CYTHON_FALLTHROUGH; + case '?': case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I': + case 'l': case 'L': case 'q': case 'Q': + case 'f': case 'd': case 'g': + case 'O': case 'p': + if ((ctx->enc_type == *ts) && (got_Z == ctx->is_complex) && + (ctx->enc_packmode == ctx->new_packmode) && (!ctx->is_valid_array)) { + ctx->enc_count += ctx->new_count; + ctx->new_count = 1; + got_Z = 0; + ++ts; + break; + } + CYTHON_FALLTHROUGH; + case 's': + if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; + ctx->enc_count = ctx->new_count; + ctx->enc_packmode = ctx->new_packmode; + ctx->enc_type = *ts; + ctx->is_complex = got_Z; + ++ts; + ctx->new_count = 1; + got_Z = 0; + break; + case ':': + ++ts; + while(*ts != ':') ++ts; + ++ts; + break; + case '(': + if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL; + break; + default: + { + int number = __Pyx_BufFmt_ExpectNumber(&ts); + if (number == -1) return NULL; + ctx->new_count = (size_t)number; + } + } + } +} + +/* BufferGetAndValidate */ + static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info) { + if (unlikely(info->buf == NULL)) return; + if (info->suboffsets == 
__Pyx_minusones) info->suboffsets = NULL; + __Pyx_ReleaseBuffer(info); +} +static void __Pyx_ZeroBuffer(Py_buffer* buf) { + buf->buf = NULL; + buf->obj = NULL; + buf->strides = __Pyx_zeros; + buf->shape = __Pyx_zeros; + buf->suboffsets = __Pyx_minusones; +} +static int __Pyx__GetBufferAndValidate( + Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, + int nd, int cast, __Pyx_BufFmt_StackElem* stack) +{ + buf->buf = NULL; + if (unlikely(__Pyx_GetBuffer(obj, buf, flags) == -1)) { + __Pyx_ZeroBuffer(buf); + return -1; + } + if (unlikely(buf->ndim != nd)) { + PyErr_Format(PyExc_ValueError, + "Buffer has wrong number of dimensions (expected %d, got %d)", + nd, buf->ndim); + goto fail; + } + if (!cast) { + __Pyx_BufFmt_Context ctx; + __Pyx_BufFmt_Init(&ctx, stack, dtype); + if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail; + } + if (unlikely((size_t)buf->itemsize != dtype->size)) { + PyErr_Format(PyExc_ValueError, + "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "d byte%s) does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "d byte%s)", + buf->itemsize, (buf->itemsize > 1) ? "s" : "", + dtype->name, (Py_ssize_t)dtype->size, (dtype->size > 1) ? "s" : ""); + goto fail; + } + if (buf->suboffsets == NULL) buf->suboffsets = __Pyx_minusones; + return 0; +fail:; + __Pyx_SafeReleaseBuffer(buf); + return -1; +} + +/* PyDictVersioning */ + #if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS +static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) { + PyObject *dict = Py_TYPE(obj)->tp_dict; + return likely(dict) ? __PYX_GET_DICT_VERSION(dict) : 0; +} +static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) { + PyObject **dictptr = NULL; + Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset; + if (offset) { +#if CYTHON_COMPILING_IN_CPYTHON + dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj); +#else + dictptr = _PyObject_GetDictPtr(obj); +#endif + } + return (dictptr && *dictptr) ? 
__PYX_GET_DICT_VERSION(*dictptr) : 0; +} +static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) { + PyObject *dict = Py_TYPE(obj)->tp_dict; + if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict))) + return 0; + return obj_dict_version == __Pyx_get_object_dict_version(obj); +} +#endif + +/* GetModuleGlobalName */ + #if CYTHON_USE_DICT_VERSIONS +static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value) +#else +static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name) +#endif +{ + PyObject *result; +#if !CYTHON_AVOID_BORROWED_REFS +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 + result = _PyDict_GetItem_KnownHash(__pyx_d, name, ((PyASCIIObject *) name)->hash); + __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) + if (likely(result)) { + return __Pyx_NewRef(result); + } else if (unlikely(PyErr_Occurred())) { + return NULL; + } +#else + result = PyDict_GetItem(__pyx_d, name); + __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) + if (likely(result)) { + return __Pyx_NewRef(result); + } +#endif +#else + result = PyObject_GetItem(__pyx_d, name); + __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) + if (likely(result)) { + return __Pyx_NewRef(result); + } + PyErr_Clear(); +#endif + return __Pyx_GetBuiltinName(name); +} + +/* PyObjectCall */ + #if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { + PyObject *result; + ternaryfunc call = func->ob_type->tp_call; + if (unlikely(!call)) + return PyObject_Call(func, arg, kw); + if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) + return NULL; + result = (*call)(func, arg, kw); + Py_LeaveRecursiveCall(); + if (unlikely(!result) && unlikely(!PyErr_Occurred())) { + PyErr_SetString( + PyExc_SystemError, + "NULL result without error in PyObject_Call"); + } + return result; +} +#endif + +/* ExtTypeTest */ + static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { + if (unlikely(!type)) { + PyErr_SetString(PyExc_SystemError, "Missing type object"); + return 0; + } + if (likely(__Pyx_TypeCheck(obj, type))) + return 1; + PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", + Py_TYPE(obj)->tp_name, type->tp_name); + return 0; +} + +/* BufferIndexError */ + static void __Pyx_RaiseBufferIndexError(int axis) { + PyErr_Format(PyExc_IndexError, + "Out of bounds on buffer access (axis %d)", axis); +} + +/* PyErrFetchRestore */ + #if CYTHON_FAST_THREAD_STATE +static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { + PyObject *tmp_type, *tmp_value, *tmp_tb; + tmp_type = tstate->curexc_type; + tmp_value = tstate->curexc_value; + tmp_tb = tstate->curexc_traceback; + tstate->curexc_type = type; + tstate->curexc_value = value; + tstate->curexc_traceback = tb; + Py_XDECREF(tmp_type); + Py_XDECREF(tmp_value); + Py_XDECREF(tmp_tb); +} +static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { + *type = tstate->curexc_type; + *value = tstate->curexc_value; + *tb = tstate->curexc_traceback; + tstate->curexc_type = 0; + tstate->curexc_value = 0; + tstate->curexc_traceback = 0; +} +#endif + +/* DictGetItem */ + #if PY_MAJOR_VERSION >= 3 && 
!CYTHON_COMPILING_IN_PYPY +static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key) { + PyObject *value; + value = PyDict_GetItemWithError(d, key); + if (unlikely(!value)) { + if (!PyErr_Occurred()) { + if (unlikely(PyTuple_Check(key))) { + PyObject* args = PyTuple_Pack(1, key); + if (likely(args)) { + PyErr_SetObject(PyExc_KeyError, args); + Py_DECREF(args); + } + } else { + PyErr_SetObject(PyExc_KeyError, key); + } + } + return NULL; + } + Py_INCREF(value); + return value; +} +#endif + +/* RaiseTooManyValuesToUnpack */ + static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { + PyErr_Format(PyExc_ValueError, + "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); +} + +/* RaiseNeedMoreValuesToUnpack */ + static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { + PyErr_Format(PyExc_ValueError, + "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack", + index, (index == 1) ? "" : "s"); +} + +/* RaiseNoneIterError */ + static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { + PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); +} + +/* RaiseException */ + #if PY_MAJOR_VERSION < 3 +static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, + CYTHON_UNUSED PyObject *cause) { + __Pyx_PyThreadState_declare + Py_XINCREF(type); + if (!value || value == Py_None) + value = NULL; + else + Py_INCREF(value); + if (!tb || tb == Py_None) + tb = NULL; + else { + Py_INCREF(tb); + if (!PyTraceBack_Check(tb)) { + PyErr_SetString(PyExc_TypeError, + "raise: arg 3 must be a traceback or None"); + goto raise_error; + } + } + if (PyType_Check(type)) { +#if CYTHON_COMPILING_IN_PYPY + if (!value) { + Py_INCREF(Py_None); + value = Py_None; + } +#endif + PyErr_NormalizeException(&type, &value, &tb); + } else { + if (value) { + PyErr_SetString(PyExc_TypeError, + "instance exception may not have a separate value"); + goto raise_error; + } + value = type; + type = (PyObject*) Py_TYPE(type); + Py_INCREF(type); + if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { + PyErr_SetString(PyExc_TypeError, + "raise: exception class must be a subclass of BaseException"); + goto raise_error; + } + } + __Pyx_PyThreadState_assign + __Pyx_ErrRestore(type, value, tb); + return; +raise_error: + Py_XDECREF(value); + Py_XDECREF(type); + Py_XDECREF(tb); + return; +} +#else +static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { + PyObject* owned_instance = NULL; + if (tb == Py_None) { + tb = 0; + } else if (tb && !PyTraceBack_Check(tb)) { + PyErr_SetString(PyExc_TypeError, + "raise: arg 3 must be a traceback or None"); + goto bad; + } + if (value == Py_None) + value = 0; + if (PyExceptionInstance_Check(type)) { + if (value) { + PyErr_SetString(PyExc_TypeError, + "instance exception may not have a separate value"); + goto bad; + } + value = type; + type = (PyObject*) Py_TYPE(value); + } else if (PyExceptionClass_Check(type)) { + PyObject *instance_class = NULL; + if (value && PyExceptionInstance_Check(value)) { + instance_class = (PyObject*) Py_TYPE(value); + if (instance_class != type) { + int is_subclass = PyObject_IsSubclass(instance_class, type); + if (!is_subclass) { + instance_class = NULL; + } else if (unlikely(is_subclass == -1)) { + goto bad; + } else { + type = instance_class; + } + } + } + if (!instance_class) { + PyObject *args; + if (!value) + args = PyTuple_New(0); + else if (PyTuple_Check(value)) { + Py_INCREF(value); + args = value; 
+ } else + args = PyTuple_Pack(1, value); + if (!args) + goto bad; + owned_instance = PyObject_Call(type, args, NULL); + Py_DECREF(args); + if (!owned_instance) + goto bad; + value = owned_instance; + if (!PyExceptionInstance_Check(value)) { + PyErr_Format(PyExc_TypeError, + "calling %R should have returned an instance of " + "BaseException, not %R", + type, Py_TYPE(value)); + goto bad; + } + } + } else { + PyErr_SetString(PyExc_TypeError, + "raise: exception class must be a subclass of BaseException"); + goto bad; + } + if (cause) { + PyObject *fixed_cause; + if (cause == Py_None) { + fixed_cause = NULL; + } else if (PyExceptionClass_Check(cause)) { + fixed_cause = PyObject_CallObject(cause, NULL); + if (fixed_cause == NULL) + goto bad; + } else if (PyExceptionInstance_Check(cause)) { + fixed_cause = cause; + Py_INCREF(fixed_cause); + } else { + PyErr_SetString(PyExc_TypeError, + "exception causes must derive from " + "BaseException"); + goto bad; + } + PyException_SetCause(value, fixed_cause); + } + PyErr_SetObject(type, value); + if (tb) { +#if CYTHON_COMPILING_IN_PYPY + PyObject *tmp_type, *tmp_value, *tmp_tb; + PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb); + Py_INCREF(tb); + PyErr_Restore(tmp_type, tmp_value, tb); + Py_XDECREF(tmp_tb); +#else + PyThreadState *tstate = __Pyx_PyThreadState_Current; + PyObject* tmp_tb = tstate->curexc_traceback; + if (tb != tmp_tb) { + Py_INCREF(tb); + tstate->curexc_traceback = tb; + Py_XDECREF(tmp_tb); + } +#endif + } +bad: + Py_XDECREF(owned_instance); + return; +} +#endif + +/* PyCFunctionFastCall */ + #if CYTHON_FAST_PYCCALL +static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) { + PyCFunctionObject *func = (PyCFunctionObject*)func_obj; + PyCFunction meth = PyCFunction_GET_FUNCTION(func); + PyObject *self = PyCFunction_GET_SELF(func); + int flags = PyCFunction_GET_FLAGS(func); + assert(PyCFunction_Check(func)); + assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))); + assert(nargs >= 0); + assert(nargs == 0 || args != NULL); + /* _PyCFunction_FastCallDict() must not be called with an exception set, + because it may clear it (directly or indirectly) and so the + caller loses its exception */ + assert(!PyErr_Occurred()); + if ((PY_VERSION_HEX < 0x030700A0) || unlikely(flags & METH_KEYWORDS)) { + return (*((__Pyx_PyCFunctionFastWithKeywords)(void*)meth)) (self, args, nargs, NULL); + } else { + return (*((__Pyx_PyCFunctionFast)(void*)meth)) (self, args, nargs); + } +} +#endif + +/* PyFunctionFastCall */ + #if CYTHON_FAST_PYCALL +static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na, + PyObject *globals) { + PyFrameObject *f; + PyThreadState *tstate = __Pyx_PyThreadState_Current; + PyObject **fastlocals; + Py_ssize_t i; + PyObject *result; + assert(globals != NULL); + /* XXX Perhaps we should create a specialized + PyFrame_New() that doesn't take locals, but does + take builtins without sanity checking them. 
+ */ + assert(tstate != NULL); + f = PyFrame_New(tstate, co, globals, NULL); + if (f == NULL) { + return NULL; + } + fastlocals = __Pyx_PyFrame_GetLocalsplus(f); + for (i = 0; i < na; i++) { + Py_INCREF(*args); + fastlocals[i] = *args++; + } + result = PyEval_EvalFrameEx(f,0); + ++tstate->recursion_depth; + Py_DECREF(f); + --tstate->recursion_depth; + return result; +} +#if 1 || PY_VERSION_HEX < 0x030600B1 +static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs) { + PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func); + PyObject *globals = PyFunction_GET_GLOBALS(func); + PyObject *argdefs = PyFunction_GET_DEFAULTS(func); + PyObject *closure; +#if PY_MAJOR_VERSION >= 3 + PyObject *kwdefs; +#endif + PyObject *kwtuple, **k; + PyObject **d; + Py_ssize_t nd; + Py_ssize_t nk; + PyObject *result; + assert(kwargs == NULL || PyDict_Check(kwargs)); + nk = kwargs ? PyDict_Size(kwargs) : 0; + if (Py_EnterRecursiveCall((char*)" while calling a Python object")) { + return NULL; + } + if ( +#if PY_MAJOR_VERSION >= 3 + co->co_kwonlyargcount == 0 && +#endif + likely(kwargs == NULL || nk == 0) && + co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) { + if (argdefs == NULL && co->co_argcount == nargs) { + result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals); + goto done; + } + else if (nargs == 0 && argdefs != NULL + && co->co_argcount == Py_SIZE(argdefs)) { + /* function called with no arguments, but all parameters have + a default value: use default values as arguments .*/ + args = &PyTuple_GET_ITEM(argdefs, 0); + result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals); + goto done; + } + } + if (kwargs != NULL) { + Py_ssize_t pos, i; + kwtuple = PyTuple_New(2 * nk); + if (kwtuple == NULL) { + result = NULL; + goto done; + } + k = &PyTuple_GET_ITEM(kwtuple, 0); + pos = i = 0; + while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) { + Py_INCREF(k[i]); + Py_INCREF(k[i+1]); + i += 2; + } + nk = i / 2; + } + else { + kwtuple = NULL; + k = NULL; + } + closure = PyFunction_GET_CLOSURE(func); +#if PY_MAJOR_VERSION >= 3 + kwdefs = PyFunction_GET_KW_DEFAULTS(func); +#endif + if (argdefs != NULL) { + d = &PyTuple_GET_ITEM(argdefs, 0); + nd = Py_SIZE(argdefs); + } + else { + d = NULL; + nd = 0; + } +#if PY_MAJOR_VERSION >= 3 + result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL, + args, (int)nargs, + k, (int)nk, + d, (int)nd, kwdefs, closure); +#else + result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL, + args, (int)nargs, + k, (int)nk, + d, (int)nd, closure); +#endif + Py_XDECREF(kwtuple); +done: + Py_LeaveRecursiveCall(); + return result; +} +#endif +#endif + +/* PyObjectCallMethO */ + #if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) { + PyObject *self, *result; + PyCFunction cfunc; + cfunc = PyCFunction_GET_FUNCTION(func); + self = PyCFunction_GET_SELF(func); + if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) + return NULL; + result = cfunc(self, arg); + Py_LeaveRecursiveCall(); + if (unlikely(!result) && unlikely(!PyErr_Occurred())) { + PyErr_SetString( + PyExc_SystemError, + "NULL result without error in PyObject_Call"); + } + return result; +} +#endif + +/* PyObjectCallOneArg */ + #if CYTHON_COMPILING_IN_CPYTHON +static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) { + PyObject *result; + PyObject *args = PyTuple_New(1); + if (unlikely(!args)) return NULL; + 
Py_INCREF(arg); + PyTuple_SET_ITEM(args, 0, arg); + result = __Pyx_PyObject_Call(func, args, NULL); + Py_DECREF(args); + return result; +} +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { +#if CYTHON_FAST_PYCALL + if (PyFunction_Check(func)) { + return __Pyx_PyFunction_FastCall(func, &arg, 1); + } +#endif + if (likely(PyCFunction_Check(func))) { + if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) { + return __Pyx_PyObject_CallMethO(func, arg); +#if CYTHON_FAST_PYCCALL + } else if (PyCFunction_GET_FLAGS(func) & METH_FASTCALL) { + return __Pyx_PyCFunction_FastCall(func, &arg, 1); +#endif + } + } + return __Pyx__PyObject_CallOneArg(func, arg); +} +#else +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { + PyObject *result; + PyObject *args = PyTuple_Pack(1, arg); + if (unlikely(!args)) return NULL; + result = __Pyx_PyObject_Call(func, args, NULL); + Py_DECREF(args); + return result; +} +#endif + +/* GetTopmostException */ + #if CYTHON_USE_EXC_INFO_STACK +static _PyErr_StackItem * +__Pyx_PyErr_GetTopmostException(PyThreadState *tstate) +{ + _PyErr_StackItem *exc_info = tstate->exc_info; + while ((exc_info->exc_type == NULL || exc_info->exc_type == Py_None) && + exc_info->previous_item != NULL) + { + exc_info = exc_info->previous_item; + } + return exc_info; +} +#endif + +/* SaveResetException */ + #if CYTHON_FAST_THREAD_STATE +static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { + #if CYTHON_USE_EXC_INFO_STACK + _PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate); + *type = exc_info->exc_type; + *value = exc_info->exc_value; + *tb = exc_info->exc_traceback; + #else + *type = tstate->exc_type; + *value = tstate->exc_value; + *tb = tstate->exc_traceback; + #endif + Py_XINCREF(*type); + Py_XINCREF(*value); + Py_XINCREF(*tb); +} +static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { + PyObject *tmp_type, *tmp_value, *tmp_tb; + #if CYTHON_USE_EXC_INFO_STACK + _PyErr_StackItem *exc_info = tstate->exc_info; + tmp_type = exc_info->exc_type; + tmp_value = exc_info->exc_value; + tmp_tb = exc_info->exc_traceback; + exc_info->exc_type = type; + exc_info->exc_value = value; + exc_info->exc_traceback = tb; + #else + tmp_type = tstate->exc_type; + tmp_value = tstate->exc_value; + tmp_tb = tstate->exc_traceback; + tstate->exc_type = type; + tstate->exc_value = value; + tstate->exc_traceback = tb; + #endif + Py_XDECREF(tmp_type); + Py_XDECREF(tmp_value); + Py_XDECREF(tmp_tb); +} +#endif + +/* PyErrExceptionMatches */ + #if CYTHON_FAST_THREAD_STATE +static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { + Py_ssize_t i, n; + n = PyTuple_GET_SIZE(tuple); +#if PY_MAJOR_VERSION >= 3 + for (i=0; icurexc_type; + if (exc_type == err) return 1; + if (unlikely(!exc_type)) return 0; + if (unlikely(PyTuple_Check(err))) + return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err); + return __Pyx_PyErr_GivenExceptionMatches(exc_type, err); +} +#endif + +/* GetException */ + #if CYTHON_FAST_THREAD_STATE +static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) +#else +static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) +#endif +{ + PyObject *local_type, *local_value, *local_tb; +#if CYTHON_FAST_THREAD_STATE + PyObject *tmp_type, *tmp_value, *tmp_tb; + local_type = tstate->curexc_type; + 
local_value = tstate->curexc_value; + local_tb = tstate->curexc_traceback; + tstate->curexc_type = 0; + tstate->curexc_value = 0; + tstate->curexc_traceback = 0; +#else + PyErr_Fetch(&local_type, &local_value, &local_tb); +#endif + PyErr_NormalizeException(&local_type, &local_value, &local_tb); +#if CYTHON_FAST_THREAD_STATE + if (unlikely(tstate->curexc_type)) +#else + if (unlikely(PyErr_Occurred())) +#endif + goto bad; + #if PY_MAJOR_VERSION >= 3 + if (local_tb) { + if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0)) + goto bad; + } + #endif + Py_XINCREF(local_tb); + Py_XINCREF(local_type); + Py_XINCREF(local_value); + *type = local_type; + *value = local_value; + *tb = local_tb; +#if CYTHON_FAST_THREAD_STATE + #if CYTHON_USE_EXC_INFO_STACK + { + _PyErr_StackItem *exc_info = tstate->exc_info; + tmp_type = exc_info->exc_type; + tmp_value = exc_info->exc_value; + tmp_tb = exc_info->exc_traceback; + exc_info->exc_type = local_type; + exc_info->exc_value = local_value; + exc_info->exc_traceback = local_tb; + } + #else + tmp_type = tstate->exc_type; + tmp_value = tstate->exc_value; + tmp_tb = tstate->exc_traceback; + tstate->exc_type = local_type; + tstate->exc_value = local_value; + tstate->exc_traceback = local_tb; + #endif + Py_XDECREF(tmp_type); + Py_XDECREF(tmp_value); + Py_XDECREF(tmp_tb); +#else + PyErr_SetExcInfo(local_type, local_value, local_tb); +#endif + return 0; +bad: + *type = 0; + *value = 0; + *tb = 0; + Py_XDECREF(local_type); + Py_XDECREF(local_value); + Py_XDECREF(local_tb); + return -1; +} + +/* TypeImport */ + #ifndef __PYX_HAVE_RT_ImportType +#define __PYX_HAVE_RT_ImportType +static PyTypeObject *__Pyx_ImportType(PyObject *module, const char *module_name, const char *class_name, + size_t size, enum __Pyx_ImportType_CheckSize check_size) +{ + PyObject *result = 0; + char warning[200]; + Py_ssize_t basicsize; +#ifdef Py_LIMITED_API + PyObject *py_basicsize; +#endif + result = PyObject_GetAttrString(module, class_name); + if (!result) + goto bad; + if (!PyType_Check(result)) { + PyErr_Format(PyExc_TypeError, + "%.200s.%.200s is not a type object", + module_name, class_name); + goto bad; + } +#ifndef Py_LIMITED_API + basicsize = ((PyTypeObject *)result)->tp_basicsize; +#else + py_basicsize = PyObject_GetAttrString(result, "__basicsize__"); + if (!py_basicsize) + goto bad; + basicsize = PyLong_AsSsize_t(py_basicsize); + Py_DECREF(py_basicsize); + py_basicsize = 0; + if (basicsize == (Py_ssize_t)-1 && PyErr_Occurred()) + goto bad; +#endif + if ((size_t)basicsize < size) { + PyErr_Format(PyExc_ValueError, + "%.200s.%.200s size changed, may indicate binary incompatibility. " + "Expected %zd from C header, got %zd from PyObject", + module_name, class_name, size, basicsize); + goto bad; + } + if (check_size == __Pyx_ImportType_CheckSize_Error && (size_t)basicsize != size) { + PyErr_Format(PyExc_ValueError, + "%.200s.%.200s size changed, may indicate binary incompatibility. " + "Expected %zd from C header, got %zd from PyObject", + module_name, class_name, size, basicsize); + goto bad; + } + else if (check_size == __Pyx_ImportType_CheckSize_Warn && (size_t)basicsize > size) { + PyOS_snprintf(warning, sizeof(warning), + "%s.%s size changed, may indicate binary incompatibility. 
" + "Expected %zd from C header, got %zd from PyObject", + module_name, class_name, size, basicsize); + if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad; + } + return (PyTypeObject *)result; +bad: + Py_XDECREF(result); + return NULL; +} +#endif + +/* Import */ + static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { + PyObject *empty_list = 0; + PyObject *module = 0; + PyObject *global_dict = 0; + PyObject *empty_dict = 0; + PyObject *list; + #if PY_MAJOR_VERSION < 3 + PyObject *py_import; + py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); + if (!py_import) + goto bad; + #endif + if (from_list) + list = from_list; + else { + empty_list = PyList_New(0); + if (!empty_list) + goto bad; + list = empty_list; + } + global_dict = PyModule_GetDict(__pyx_m); + if (!global_dict) + goto bad; + empty_dict = PyDict_New(); + if (!empty_dict) + goto bad; + { + #if PY_MAJOR_VERSION >= 3 + if (level == -1) { + if ((1) && (strchr(__Pyx_MODULE_NAME, '.'))) { + module = PyImport_ImportModuleLevelObject( + name, global_dict, empty_dict, list, 1); + if (!module) { + if (!PyErr_ExceptionMatches(PyExc_ImportError)) + goto bad; + PyErr_Clear(); + } + } + level = 0; + } + #endif + if (!module) { + #if PY_MAJOR_VERSION < 3 + PyObject *py_level = PyInt_FromLong(level); + if (!py_level) + goto bad; + module = PyObject_CallFunctionObjArgs(py_import, + name, global_dict, empty_dict, list, py_level, (PyObject *)NULL); + Py_DECREF(py_level); + #else + module = PyImport_ImportModuleLevelObject( + name, global_dict, empty_dict, list, level); + #endif + } + } +bad: + #if PY_MAJOR_VERSION < 3 + Py_XDECREF(py_import); + #endif + Py_XDECREF(empty_list); + Py_XDECREF(empty_dict); + return module; +} + +/* CLineInTraceback */ + #ifndef CYTHON_CLINE_IN_TRACEBACK +static int __Pyx_CLineForTraceback(CYTHON_NCP_UNUSED PyThreadState *tstate, int c_line) { + PyObject *use_cline; + PyObject *ptype, *pvalue, *ptraceback; +#if CYTHON_COMPILING_IN_CPYTHON + PyObject **cython_runtime_dict; +#endif + if (unlikely(!__pyx_cython_runtime)) { + return c_line; + } + __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback); +#if CYTHON_COMPILING_IN_CPYTHON + cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime); + if (likely(cython_runtime_dict)) { + __PYX_PY_DICT_LOOKUP_IF_MODIFIED( + use_cline, *cython_runtime_dict, + __Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback)) + } else +#endif + { + PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback); + if (use_cline_obj) { + use_cline = PyObject_Not(use_cline_obj) ? 
Py_False : Py_True; + Py_DECREF(use_cline_obj); + } else { + PyErr_Clear(); + use_cline = NULL; + } + } + if (!use_cline) { + c_line = 0; + PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False); + } + else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) { + c_line = 0; + } + __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback); + return c_line; +} +#endif + +/* CodeObjectCache */ + static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { + int start = 0, mid = 0, end = count - 1; + if (end >= 0 && code_line > entries[end].code_line) { + return count; + } + while (start < end) { + mid = start + (end - start) / 2; + if (code_line < entries[mid].code_line) { + end = mid; + } else if (code_line > entries[mid].code_line) { + start = mid + 1; + } else { + return mid; + } + } + if (code_line <= entries[mid].code_line) { + return mid; + } else { + return mid + 1; + } +} +static PyCodeObject *__pyx_find_code_object(int code_line) { + PyCodeObject* code_object; + int pos; + if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { + return NULL; + } + pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); + if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { + return NULL; + } + code_object = __pyx_code_cache.entries[pos].code_object; + Py_INCREF(code_object); + return code_object; +} +static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { + int pos, i; + __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; + if (unlikely(!code_line)) { + return; + } + if (unlikely(!entries)) { + entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); + if (likely(entries)) { + __pyx_code_cache.entries = entries; + __pyx_code_cache.max_count = 64; + __pyx_code_cache.count = 1; + entries[0].code_line = code_line; + entries[0].code_object = code_object; + Py_INCREF(code_object); + } + return; + } + pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); + if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { + PyCodeObject* tmp = entries[pos].code_object; + entries[pos].code_object = code_object; + Py_DECREF(tmp); + return; + } + if (__pyx_code_cache.count == __pyx_code_cache.max_count) { + int new_max = __pyx_code_cache.max_count + 64; + entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( + __pyx_code_cache.entries, ((size_t)new_max) * sizeof(__Pyx_CodeObjectCacheEntry)); + if (unlikely(!entries)) { + return; + } + __pyx_code_cache.entries = entries; + __pyx_code_cache.max_count = new_max; + } + for (i=__pyx_code_cache.count; i>pos; i--) { + entries[i] = entries[i-1]; + } + entries[pos].code_line = code_line; + entries[pos].code_object = code_object; + __pyx_code_cache.count++; + Py_INCREF(code_object); +} + +/* AddTraceback */ + #include "compile.h" +#include "frameobject.h" +#include "traceback.h" +static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( + const char *funcname, int c_line, + int py_line, const char *filename) { + PyCodeObject *py_code = 0; + PyObject *py_srcfile = 0; + PyObject *py_funcname = 0; + #if PY_MAJOR_VERSION < 3 + py_srcfile = PyString_FromString(filename); + #else + py_srcfile = PyUnicode_FromString(filename); + #endif + if (!py_srcfile) goto bad; + if (c_line) { + #if PY_MAJOR_VERSION < 3 + py_funcname = 
PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); + #else + py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); + #endif + } + else { + #if PY_MAJOR_VERSION < 3 + py_funcname = PyString_FromString(funcname); + #else + py_funcname = PyUnicode_FromString(funcname); + #endif + } + if (!py_funcname) goto bad; + py_code = __Pyx_PyCode_New( + 0, + 0, + 0, + 0, + 0, + __pyx_empty_bytes, /*PyObject *code,*/ + __pyx_empty_tuple, /*PyObject *consts,*/ + __pyx_empty_tuple, /*PyObject *names,*/ + __pyx_empty_tuple, /*PyObject *varnames,*/ + __pyx_empty_tuple, /*PyObject *freevars,*/ + __pyx_empty_tuple, /*PyObject *cellvars,*/ + py_srcfile, /*PyObject *filename,*/ + py_funcname, /*PyObject *name,*/ + py_line, + __pyx_empty_bytes /*PyObject *lnotab*/ + ); + Py_DECREF(py_srcfile); + Py_DECREF(py_funcname); + return py_code; +bad: + Py_XDECREF(py_srcfile); + Py_XDECREF(py_funcname); + return NULL; +} +static void __Pyx_AddTraceback(const char *funcname, int c_line, + int py_line, const char *filename) { + PyCodeObject *py_code = 0; + PyFrameObject *py_frame = 0; + PyThreadState *tstate = __Pyx_PyThreadState_Current; + if (c_line) { + c_line = __Pyx_CLineForTraceback(tstate, c_line); + } + py_code = __pyx_find_code_object(c_line ? -c_line : py_line); + if (!py_code) { + py_code = __Pyx_CreateCodeObjectForTraceback( + funcname, c_line, py_line, filename); + if (!py_code) goto bad; + __pyx_insert_code_object(c_line ? -c_line : py_line, py_code); + } + py_frame = PyFrame_New( + tstate, /*PyThreadState *tstate,*/ + py_code, /*PyCodeObject *code,*/ + __pyx_d, /*PyObject *globals,*/ + 0 /*PyObject *locals*/ + ); + if (!py_frame) goto bad; + __Pyx_PyFrame_SetLineNumber(py_frame, py_line); + PyTraceBack_Here(py_frame); +bad: + Py_XDECREF(py_code); + Py_XDECREF(py_frame); +} + +#if PY_MAJOR_VERSION < 3 +static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) { + if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags); + PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name); + return -1; +} +static void __Pyx_ReleaseBuffer(Py_buffer *view) { + PyObject *obj = view->obj; + if (!obj) return; + if (PyObject_CheckBuffer(obj)) { + PyBuffer_Release(view); + return; + } + if ((0)) {} + view->obj = NULL; + Py_DECREF(obj); +} +#endif + + + /* CIntToPy */ + static CYTHON_INLINE PyObject* __Pyx_PyInt_From_unsigned_int(unsigned int value) { + const unsigned int neg_one = (unsigned int) ((unsigned int) 0 - (unsigned int) 1), const_zero = (unsigned int) 0; + const int is_unsigned = neg_one > const_zero; + if (is_unsigned) { + if (sizeof(unsigned int) < sizeof(long)) { + return PyInt_FromLong((long) value); + } else if (sizeof(unsigned int) <= sizeof(unsigned long)) { + return PyLong_FromUnsignedLong((unsigned long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(unsigned int) <= sizeof(unsigned PY_LONG_LONG)) { + return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); +#endif + } + } else { + if (sizeof(unsigned int) <= sizeof(long)) { + return PyInt_FromLong((long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(unsigned int) <= sizeof(PY_LONG_LONG)) { + return PyLong_FromLongLong((PY_LONG_LONG) value); +#endif + } + } + { + int one = 1; int little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&value; + return _PyLong_FromByteArray(bytes, sizeof(unsigned int), + little, !is_unsigned); + } +} + +/* CIntFromPyVerify */ + #define 
__PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\ + __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) +#define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\ + __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) +#define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\ + {\ + func_type value = func_value;\ + if (sizeof(target_type) < sizeof(func_type)) {\ + if (unlikely(value != (func_type) (target_type) value)) {\ + func_type zero = 0;\ + if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\ + return (target_type) -1;\ + if (is_unsigned && unlikely(value < zero))\ + goto raise_neg_overflow;\ + else\ + goto raise_overflow;\ + }\ + }\ + return (target_type) value;\ + } + +/* Declarations */ + #if CYTHON_CCOMPLEX + #ifdef __cplusplus + static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { + return ::std::complex< float >(x, y); + } + #else + static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { + return x + y*(__pyx_t_float_complex)_Complex_I; + } + #endif +#else + static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { + __pyx_t_float_complex z; + z.real = x; + z.imag = y; + return z; + } +#endif + +/* Arithmetic */ + #if CYTHON_CCOMPLEX +#else + static CYTHON_INLINE int __Pyx_c_eq_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { + return (a.real == b.real) && (a.imag == b.imag); + } + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sum_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { + __pyx_t_float_complex z; + z.real = a.real + b.real; + z.imag = a.imag + b.imag; + return z; + } + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_diff_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { + __pyx_t_float_complex z; + z.real = a.real - b.real; + z.imag = a.imag - b.imag; + return z; + } + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prod_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { + __pyx_t_float_complex z; + z.real = a.real * b.real - a.imag * b.imag; + z.imag = a.real * b.imag + a.imag * b.real; + return z; + } + #if 1 + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { + if (b.imag == 0) { + return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.real); + } else if (fabsf(b.real) >= fabsf(b.imag)) { + if (b.real == 0 && b.imag == 0) { + return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.imag); + } else { + float r = b.imag / b.real; + float s = (float)(1.0) / (b.real + b.imag * r); + return __pyx_t_float_complex_from_parts( + (a.real + a.imag * r) * s, (a.imag - a.real * r) * s); + } + } else { + float r = b.real / b.imag; + float s = (float)(1.0) / (b.imag + b.real * r); + return __pyx_t_float_complex_from_parts( + (a.real * r + a.imag) * s, (a.imag * r - a.real) * s); + } + } + #else + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { + if (b.imag == 0) { + return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.real); + } else { + float denom = b.real * b.real + b.imag * b.imag; + return __pyx_t_float_complex_from_parts( + (a.real * b.real + a.imag * b.imag) / denom, + (a.imag * b.real - a.real * b.imag) / denom); + } + } + #endif + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_neg_float(__pyx_t_float_complex a) { + __pyx_t_float_complex z; + z.real = -a.real; + 
z.imag = -a.imag; + return z; + } + static CYTHON_INLINE int __Pyx_c_is_zero_float(__pyx_t_float_complex a) { + return (a.real == 0) && (a.imag == 0); + } + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conj_float(__pyx_t_float_complex a) { + __pyx_t_float_complex z; + z.real = a.real; + z.imag = -a.imag; + return z; + } + #if 1 + static CYTHON_INLINE float __Pyx_c_abs_float(__pyx_t_float_complex z) { + #if !defined(HAVE_HYPOT) || defined(_MSC_VER) + return sqrtf(z.real*z.real + z.imag*z.imag); + #else + return hypotf(z.real, z.imag); + #endif + } + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_pow_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { + __pyx_t_float_complex z; + float r, lnr, theta, z_r, z_theta; + if (b.imag == 0 && b.real == (int)b.real) { + if (b.real < 0) { + float denom = a.real * a.real + a.imag * a.imag; + a.real = a.real / denom; + a.imag = -a.imag / denom; + b.real = -b.real; + } + switch ((int)b.real) { + case 0: + z.real = 1; + z.imag = 0; + return z; + case 1: + return a; + case 2: + return __Pyx_c_prod_float(a, a); + case 3: + z = __Pyx_c_prod_float(a, a); + return __Pyx_c_prod_float(z, a); + case 4: + z = __Pyx_c_prod_float(a, a); + return __Pyx_c_prod_float(z, z); + } + } + if (a.imag == 0) { + if (a.real == 0) { + return a; + } else if (b.imag == 0) { + z.real = powf(a.real, b.real); + z.imag = 0; + return z; + } else if (a.real > 0) { + r = a.real; + theta = 0; + } else { + r = -a.real; + theta = atan2f(0.0, -1.0); + } + } else { + r = __Pyx_c_abs_float(a); + theta = atan2f(a.imag, a.real); + } + lnr = logf(r); + z_r = expf(lnr * b.real - theta * b.imag); + z_theta = theta * b.real + lnr * b.imag; + z.real = z_r * cosf(z_theta); + z.imag = z_r * sinf(z_theta); + return z; + } + #endif +#endif + +/* Declarations */ + #if CYTHON_CCOMPLEX + #ifdef __cplusplus + static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { + return ::std::complex< double >(x, y); + } + #else + static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { + return x + y*(__pyx_t_double_complex)_Complex_I; + } + #endif +#else + static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { + __pyx_t_double_complex z; + z.real = x; + z.imag = y; + return z; + } +#endif + +/* Arithmetic */ + #if CYTHON_CCOMPLEX +#else + static CYTHON_INLINE int __Pyx_c_eq_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { + return (a.real == b.real) && (a.imag == b.imag); + } + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { + __pyx_t_double_complex z; + z.real = a.real + b.real; + z.imag = a.imag + b.imag; + return z; + } + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { + __pyx_t_double_complex z; + z.real = a.real - b.real; + z.imag = a.imag - b.imag; + return z; + } + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { + __pyx_t_double_complex z; + z.real = a.real * b.real - a.imag * b.imag; + z.imag = a.real * b.imag + a.imag * b.real; + return z; + } + #if 1 + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { + if (b.imag == 0) { + return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.real); + } else if (fabs(b.real) >= fabs(b.imag)) { + if (b.real == 0 && b.imag == 0) { + 
return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.imag); + } else { + double r = b.imag / b.real; + double s = (double)(1.0) / (b.real + b.imag * r); + return __pyx_t_double_complex_from_parts( + (a.real + a.imag * r) * s, (a.imag - a.real * r) * s); + } + } else { + double r = b.real / b.imag; + double s = (double)(1.0) / (b.imag + b.real * r); + return __pyx_t_double_complex_from_parts( + (a.real * r + a.imag) * s, (a.imag * r - a.real) * s); + } + } + #else + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { + if (b.imag == 0) { + return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.real); + } else { + double denom = b.real * b.real + b.imag * b.imag; + return __pyx_t_double_complex_from_parts( + (a.real * b.real + a.imag * b.imag) / denom, + (a.imag * b.real - a.real * b.imag) / denom); + } + } + #endif + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg_double(__pyx_t_double_complex a) { + __pyx_t_double_complex z; + z.real = -a.real; + z.imag = -a.imag; + return z; + } + static CYTHON_INLINE int __Pyx_c_is_zero_double(__pyx_t_double_complex a) { + return (a.real == 0) && (a.imag == 0); + } + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj_double(__pyx_t_double_complex a) { + __pyx_t_double_complex z; + z.real = a.real; + z.imag = -a.imag; + return z; + } + #if 1 + static CYTHON_INLINE double __Pyx_c_abs_double(__pyx_t_double_complex z) { + #if !defined(HAVE_HYPOT) || defined(_MSC_VER) + return sqrt(z.real*z.real + z.imag*z.imag); + #else + return hypot(z.real, z.imag); + #endif + } + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { + __pyx_t_double_complex z; + double r, lnr, theta, z_r, z_theta; + if (b.imag == 0 && b.real == (int)b.real) { + if (b.real < 0) { + double denom = a.real * a.real + a.imag * a.imag; + a.real = a.real / denom; + a.imag = -a.imag / denom; + b.real = -b.real; + } + switch ((int)b.real) { + case 0: + z.real = 1; + z.imag = 0; + return z; + case 1: + return a; + case 2: + return __Pyx_c_prod_double(a, a); + case 3: + z = __Pyx_c_prod_double(a, a); + return __Pyx_c_prod_double(z, a); + case 4: + z = __Pyx_c_prod_double(a, a); + return __Pyx_c_prod_double(z, z); + } + } + if (a.imag == 0) { + if (a.real == 0) { + return a; + } else if (b.imag == 0) { + z.real = pow(a.real, b.real); + z.imag = 0; + return z; + } else if (a.real > 0) { + r = a.real; + theta = 0; + } else { + r = -a.real; + theta = atan2(0.0, -1.0); + } + } else { + r = __Pyx_c_abs_double(a); + theta = atan2(a.imag, a.real); + } + lnr = log(r); + z_r = exp(lnr * b.real - theta * b.imag); + z_theta = theta * b.real + lnr * b.imag; + z.real = z_r * cos(z_theta); + z.imag = z_r * sin(z_theta); + return z; + } + #endif +#endif + +/* CIntToPy */ + static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) { + const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0; + const int is_unsigned = neg_one > const_zero; + if (is_unsigned) { + if (sizeof(int) < sizeof(long)) { + return PyInt_FromLong((long) value); + } else if (sizeof(int) <= sizeof(unsigned long)) { + return PyLong_FromUnsignedLong((unsigned long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { + return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); +#endif + } + } else { + if (sizeof(int) <= sizeof(long)) { + return PyInt_FromLong((long) value); +#ifdef HAVE_LONG_LONG + } else if 
(sizeof(int) <= sizeof(PY_LONG_LONG)) { + return PyLong_FromLongLong((PY_LONG_LONG) value); +#endif + } + } + { + int one = 1; int little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&value; + return _PyLong_FromByteArray(bytes, sizeof(int), + little, !is_unsigned); + } +} + +/* CIntToPy */ + static CYTHON_INLINE PyObject* __Pyx_PyInt_From_enum__NPY_TYPES(enum NPY_TYPES value) { + const enum NPY_TYPES neg_one = (enum NPY_TYPES) ((enum NPY_TYPES) 0 - (enum NPY_TYPES) 1), const_zero = (enum NPY_TYPES) 0; + const int is_unsigned = neg_one > const_zero; + if (is_unsigned) { + if (sizeof(enum NPY_TYPES) < sizeof(long)) { + return PyInt_FromLong((long) value); + } else if (sizeof(enum NPY_TYPES) <= sizeof(unsigned long)) { + return PyLong_FromUnsignedLong((unsigned long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(enum NPY_TYPES) <= sizeof(unsigned PY_LONG_LONG)) { + return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); +#endif + } + } else { + if (sizeof(enum NPY_TYPES) <= sizeof(long)) { + return PyInt_FromLong((long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(enum NPY_TYPES) <= sizeof(PY_LONG_LONG)) { + return PyLong_FromLongLong((PY_LONG_LONG) value); +#endif + } + } + { + int one = 1; int little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&value; + return _PyLong_FromByteArray(bytes, sizeof(enum NPY_TYPES), + little, !is_unsigned); + } +} + +/* CIntFromPy */ + static CYTHON_INLINE unsigned int __Pyx_PyInt_As_unsigned_int(PyObject *x) { + const unsigned int neg_one = (unsigned int) ((unsigned int) 0 - (unsigned int) 1), const_zero = (unsigned int) 0; + const int is_unsigned = neg_one > const_zero; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x))) { + if (sizeof(unsigned int) < sizeof(long)) { + __PYX_VERIFY_RETURN_INT(unsigned int, long, PyInt_AS_LONG(x)) + } else { + long val = PyInt_AS_LONG(x); + if (is_unsigned && unlikely(val < 0)) { + goto raise_neg_overflow; + } + return (unsigned int) val; + } + } else +#endif + if (likely(PyLong_Check(x))) { + if (is_unsigned) { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (unsigned int) 0; + case 1: __PYX_VERIFY_RETURN_INT(unsigned int, digit, digits[0]) + case 2: + if (8 * sizeof(unsigned int) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(unsigned int) >= 2 * PyLong_SHIFT) { + return (unsigned int) (((((unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0])); + } + } + break; + case 3: + if (8 * sizeof(unsigned int) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(unsigned int) >= 3 * PyLong_SHIFT) { + return (unsigned int) (((((((unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0])); + } + } + break; + case 4: + if (8 * sizeof(unsigned int) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << 
PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(unsigned int) >= 4 * PyLong_SHIFT) { + return (unsigned int) (((((((((unsigned int)digits[3]) << PyLong_SHIFT) | (unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0])); + } + } + break; + } +#endif +#if CYTHON_COMPILING_IN_CPYTHON + if (unlikely(Py_SIZE(x) < 0)) { + goto raise_neg_overflow; + } +#else + { + int result = PyObject_RichCompareBool(x, Py_False, Py_LT); + if (unlikely(result < 0)) + return (unsigned int) -1; + if (unlikely(result == 1)) + goto raise_neg_overflow; + } +#endif + if (sizeof(unsigned int) <= sizeof(unsigned long)) { + __PYX_VERIFY_RETURN_INT_EXC(unsigned int, unsigned long, PyLong_AsUnsignedLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(unsigned int) <= sizeof(unsigned PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(unsigned int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) +#endif + } + } else { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (unsigned int) 0; + case -1: __PYX_VERIFY_RETURN_INT(unsigned int, sdigit, (sdigit) (-(sdigit)digits[0])) + case 1: __PYX_VERIFY_RETURN_INT(unsigned int, digit, +digits[0]) + case -2: + if (8 * sizeof(unsigned int) - 1 > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(unsigned int) - 1 > 2 * PyLong_SHIFT) { + return (unsigned int) (((unsigned int)-1)*(((((unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]))); + } + } + break; + case 2: + if (8 * sizeof(unsigned int) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(unsigned int) - 1 > 2 * PyLong_SHIFT) { + return (unsigned int) ((((((unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]))); + } + } + break; + case -3: + if (8 * sizeof(unsigned int) - 1 > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(unsigned int) - 1 > 3 * PyLong_SHIFT) { + return (unsigned int) (((unsigned int)-1)*(((((((unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]))); + } + } + break; + case 3: + if (8 * sizeof(unsigned int) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(unsigned int) - 1 > 3 * PyLong_SHIFT) { + return (unsigned int) ((((((((unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]))); + } + } + break; + case -4: + if (8 * sizeof(unsigned int) - 1 > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | 
(unsigned long)digits[0]))) + } else if (8 * sizeof(unsigned int) - 1 > 4 * PyLong_SHIFT) { + return (unsigned int) (((unsigned int)-1)*(((((((((unsigned int)digits[3]) << PyLong_SHIFT) | (unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]))); + } + } + break; + case 4: + if (8 * sizeof(unsigned int) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(unsigned int) - 1 > 4 * PyLong_SHIFT) { + return (unsigned int) ((((((((((unsigned int)digits[3]) << PyLong_SHIFT) | (unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]))); + } + } + break; + } +#endif + if (sizeof(unsigned int) <= sizeof(long)) { + __PYX_VERIFY_RETURN_INT_EXC(unsigned int, long, PyLong_AsLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(unsigned int) <= sizeof(PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(unsigned int, PY_LONG_LONG, PyLong_AsLongLong(x)) +#endif + } + } + { +#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) + PyErr_SetString(PyExc_RuntimeError, + "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); +#else + unsigned int val; + PyObject *v = __Pyx_PyNumber_IntOrLong(x); + #if PY_MAJOR_VERSION < 3 + if (likely(v) && !PyLong_Check(v)) { + PyObject *tmp = v; + v = PyNumber_Long(tmp); + Py_DECREF(tmp); + } + #endif + if (likely(v)) { + int one = 1; int is_little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&val; + int ret = _PyLong_AsByteArray((PyLongObject *)v, + bytes, sizeof(val), + is_little, !is_unsigned); + Py_DECREF(v); + if (likely(!ret)) + return val; + } +#endif + return (unsigned int) -1; + } + } else { + unsigned int val; + PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); + if (!tmp) return (unsigned int) -1; + val = __Pyx_PyInt_As_unsigned_int(tmp); + Py_DECREF(tmp); + return val; + } +raise_overflow: + PyErr_SetString(PyExc_OverflowError, + "value too large to convert to unsigned int"); + return (unsigned int) -1; +raise_neg_overflow: + PyErr_SetString(PyExc_OverflowError, + "can't convert negative value to unsigned int"); + return (unsigned int) -1; +} + +/* CIntFromPy */ + static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { + const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0; + const int is_unsigned = neg_one > const_zero; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x))) { + if (sizeof(int) < sizeof(long)) { + __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x)) + } else { + long val = PyInt_AS_LONG(x); + if (is_unsigned && unlikely(val < 0)) { + goto raise_neg_overflow; + } + return (int) val; + } + } else +#endif + if (likely(PyLong_Check(x))) { + if (is_unsigned) { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (int) 0; + case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0]) + case 2: + if (8 * sizeof(int) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) { + return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); + } + } + 
break; + case 3: + if (8 * sizeof(int) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) { + return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); + } + } + break; + case 4: + if (8 * sizeof(int) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) { + return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); + } + } + break; + } +#endif +#if CYTHON_COMPILING_IN_CPYTHON + if (unlikely(Py_SIZE(x) < 0)) { + goto raise_neg_overflow; + } +#else + { + int result = PyObject_RichCompareBool(x, Py_False, Py_LT); + if (unlikely(result < 0)) + return (int) -1; + if (unlikely(result == 1)) + goto raise_neg_overflow; + } +#endif + if (sizeof(int) <= sizeof(unsigned long)) { + __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) +#endif + } + } else { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (int) 0; + case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0])) + case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0]) + case -2: + if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { + return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case 2: + if (8 * sizeof(int) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { + return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case -3: + if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { + return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case 3: + if (8 * sizeof(int) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { + return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << 
PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case -4: + if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { + return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case 4: + if (8 * sizeof(int) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { + return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + } +#endif + if (sizeof(int) <= sizeof(long)) { + __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x)) +#endif + } + } + { +#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) + PyErr_SetString(PyExc_RuntimeError, + "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); +#else + int val; + PyObject *v = __Pyx_PyNumber_IntOrLong(x); + #if PY_MAJOR_VERSION < 3 + if (likely(v) && !PyLong_Check(v)) { + PyObject *tmp = v; + v = PyNumber_Long(tmp); + Py_DECREF(tmp); + } + #endif + if (likely(v)) { + int one = 1; int is_little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&val; + int ret = _PyLong_AsByteArray((PyLongObject *)v, + bytes, sizeof(val), + is_little, !is_unsigned); + Py_DECREF(v); + if (likely(!ret)) + return val; + } +#endif + return (int) -1; + } + } else { + int val; + PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); + if (!tmp) return (int) -1; + val = __Pyx_PyInt_As_int(tmp); + Py_DECREF(tmp); + return val; + } +raise_overflow: + PyErr_SetString(PyExc_OverflowError, + "value too large to convert to int"); + return (int) -1; +raise_neg_overflow: + PyErr_SetString(PyExc_OverflowError, + "can't convert negative value to int"); + return (int) -1; +} + +/* CIntToPy */ + static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { + const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0; + const int is_unsigned = neg_one > const_zero; + if (is_unsigned) { + if (sizeof(long) < sizeof(long)) { + return PyInt_FromLong((long) value); + } else if (sizeof(long) <= sizeof(unsigned long)) { + return PyLong_FromUnsignedLong((unsigned long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { + return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); +#endif + } + } else { + if (sizeof(long) <= sizeof(long)) { + return PyInt_FromLong((long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { + return PyLong_FromLongLong((PY_LONG_LONG) value); +#endif + } + } + { + int one = 1; int little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&value; + return _PyLong_FromByteArray(bytes, sizeof(long), + little, !is_unsigned); + } +} + +/* 
CIntFromPy */ + static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { + const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0; + const int is_unsigned = neg_one > const_zero; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x))) { + if (sizeof(long) < sizeof(long)) { + __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x)) + } else { + long val = PyInt_AS_LONG(x); + if (is_unsigned && unlikely(val < 0)) { + goto raise_neg_overflow; + } + return (long) val; + } + } else +#endif + if (likely(PyLong_Check(x))) { + if (is_unsigned) { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (long) 0; + case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0]) + case 2: + if (8 * sizeof(long) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) { + return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); + } + } + break; + case 3: + if (8 * sizeof(long) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) { + return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); + } + } + break; + case 4: + if (8 * sizeof(long) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) { + return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); + } + } + break; + } +#endif +#if CYTHON_COMPILING_IN_CPYTHON + if (unlikely(Py_SIZE(x) < 0)) { + goto raise_neg_overflow; + } +#else + { + int result = PyObject_RichCompareBool(x, Py_False, Py_LT); + if (unlikely(result < 0)) + return (long) -1; + if (unlikely(result == 1)) + goto raise_neg_overflow; + } +#endif + if (sizeof(long) <= sizeof(unsigned long)) { + __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) +#endif + } + } else { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (long) 0; + case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0])) + case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0]) + case -2: + if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { + return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case 2: + if (8 * sizeof(long) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + 
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { + return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case -3: + if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { + return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case 3: + if (8 * sizeof(long) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { + return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case -4: + if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { + return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case 4: + if (8 * sizeof(long) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { + return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + } +#endif + if (sizeof(long) <= sizeof(long)) { + __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x)) +#endif + } + } + { +#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) + PyErr_SetString(PyExc_RuntimeError, + "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); +#else + long val; + PyObject *v = __Pyx_PyNumber_IntOrLong(x); + #if PY_MAJOR_VERSION < 3 + if (likely(v) && !PyLong_Check(v)) { + PyObject *tmp = v; + v = PyNumber_Long(tmp); + Py_DECREF(tmp); + } + #endif + if (likely(v)) { + int one = 1; int is_little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&val; + int ret = _PyLong_AsByteArray((PyLongObject *)v, + bytes, sizeof(val), + is_little, !is_unsigned); + Py_DECREF(v); + if (likely(!ret)) + return val; + } +#endif + return (long) -1; + } + } else { + long val; + PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); + if (!tmp) return (long) -1; + val = __Pyx_PyInt_As_long(tmp); + Py_DECREF(tmp); + return val; + } +raise_overflow: + PyErr_SetString(PyExc_OverflowError, + 
"value too large to convert to long"); + return (long) -1; +raise_neg_overflow: + PyErr_SetString(PyExc_OverflowError, + "can't convert negative value to long"); + return (long) -1; +} + +/* FastTypeChecks */ + #if CYTHON_COMPILING_IN_CPYTHON +static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) { + while (a) { + a = a->tp_base; + if (a == b) + return 1; + } + return b == &PyBaseObject_Type; +} +static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) { + PyObject *mro; + if (a == b) return 1; + mro = a->tp_mro; + if (likely(mro)) { + Py_ssize_t i, n; + n = PyTuple_GET_SIZE(mro); + for (i = 0; i < n; i++) { + if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b) + return 1; + } + return 0; + } + return __Pyx_InBases(a, b); +} +#if PY_MAJOR_VERSION == 2 +static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) { + PyObject *exception, *value, *tb; + int res; + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ErrFetch(&exception, &value, &tb); + res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0; + if (unlikely(res == -1)) { + PyErr_WriteUnraisable(err); + res = 0; + } + if (!res) { + res = PyObject_IsSubclass(err, exc_type2); + if (unlikely(res == -1)) { + PyErr_WriteUnraisable(err); + res = 0; + } + } + __Pyx_ErrRestore(exception, value, tb); + return res; +} +#else +static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) { + int res = exc_type1 ? __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0; + if (!res) { + res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2); + } + return res; +} +#endif +static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { + Py_ssize_t i, n; + assert(PyExceptionClass_Check(exc_type)); + n = PyTuple_GET_SIZE(tuple); +#if PY_MAJOR_VERSION >= 3 + for (i=0; ip) { + #if PY_MAJOR_VERSION < 3 + if (t->is_unicode) { + *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); + } else if (t->intern) { + *t->p = PyString_InternFromString(t->s); + } else { + *t->p = PyString_FromStringAndSize(t->s, t->n - 1); + } + #else + if (t->is_unicode | t->is_str) { + if (t->intern) { + *t->p = PyUnicode_InternFromString(t->s); + } else if (t->encoding) { + *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); + } else { + *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); + } + } else { + *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); + } + #endif + if (!*t->p) + return -1; + if (PyObject_Hash(*t->p) == -1) + return -1; + ++t; + } + return 0; +} + +static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) { + return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str)); +} +static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) { + Py_ssize_t ignore; + return __Pyx_PyObject_AsStringAndSize(o, &ignore); +} +#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT +#if !CYTHON_PEP393_ENABLED +static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { + char* defenc_c; + PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); + if (!defenc) return NULL; + defenc_c = PyBytes_AS_STRING(defenc); +#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII + { + char* end = defenc_c + PyBytes_GET_SIZE(defenc); + char* c; + for (c = defenc_c; c < end; c++) { + if ((unsigned char) (*c) >= 128) { + PyUnicode_AsASCIIString(o); + return NULL; + } + } + } +#endif + *length = 
PyBytes_GET_SIZE(defenc); + return defenc_c; +} +#else +static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { + if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL; +#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII + if (likely(PyUnicode_IS_ASCII(o))) { + *length = PyUnicode_GET_LENGTH(o); + return PyUnicode_AsUTF8(o); + } else { + PyUnicode_AsASCIIString(o); + return NULL; + } +#else + return PyUnicode_AsUTF8AndSize(o, length); +#endif +} +#endif +#endif +static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { +#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT + if ( +#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII + __Pyx_sys_getdefaultencoding_not_ascii && +#endif + PyUnicode_Check(o)) { + return __Pyx_PyUnicode_AsStringAndSize(o, length); + } else +#endif +#if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) + if (PyByteArray_Check(o)) { + *length = PyByteArray_GET_SIZE(o); + return PyByteArray_AS_STRING(o); + } else +#endif + { + char* result; + int r = PyBytes_AsStringAndSize(o, &result, length); + if (unlikely(r < 0)) { + return NULL; + } else { + return result; + } + } +} +static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { + int is_true = x == Py_True; + if (is_true | (x == Py_False) | (x == Py_None)) return is_true; + else return PyObject_IsTrue(x); +} +static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) { + int retval; + if (unlikely(!x)) return -1; + retval = __Pyx_PyObject_IsTrue(x); + Py_DECREF(x); + return retval; +} +static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) { +#if PY_MAJOR_VERSION >= 3 + if (PyLong_Check(result)) { + if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, + "__int__ returned non-int (type %.200s). 
" + "The ability to return an instance of a strict subclass of int " + "is deprecated, and may be removed in a future version of Python.", + Py_TYPE(result)->tp_name)) { + Py_DECREF(result); + return NULL; + } + return result; + } +#endif + PyErr_Format(PyExc_TypeError, + "__%.4s__ returned non-%.4s (type %.200s)", + type_name, type_name, Py_TYPE(result)->tp_name); + Py_DECREF(result); + return NULL; +} +static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { +#if CYTHON_USE_TYPE_SLOTS + PyNumberMethods *m; +#endif + const char *name = NULL; + PyObject *res = NULL; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x) || PyLong_Check(x))) +#else + if (likely(PyLong_Check(x))) +#endif + return __Pyx_NewRef(x); +#if CYTHON_USE_TYPE_SLOTS + m = Py_TYPE(x)->tp_as_number; + #if PY_MAJOR_VERSION < 3 + if (m && m->nb_int) { + name = "int"; + res = m->nb_int(x); + } + else if (m && m->nb_long) { + name = "long"; + res = m->nb_long(x); + } + #else + if (likely(m && m->nb_int)) { + name = "int"; + res = m->nb_int(x); + } + #endif +#else + if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) { + res = PyNumber_Int(x); + } +#endif + if (likely(res)) { +#if PY_MAJOR_VERSION < 3 + if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) { +#else + if (unlikely(!PyLong_CheckExact(res))) { +#endif + return __Pyx_PyNumber_IntOrLongWrongResultType(res, name); + } + } + else if (!PyErr_Occurred()) { + PyErr_SetString(PyExc_TypeError, + "an integer is required"); + } + return res; +} +static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { + Py_ssize_t ival; + PyObject *x; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_CheckExact(b))) { + if (sizeof(Py_ssize_t) >= sizeof(long)) + return PyInt_AS_LONG(b); + else + return PyInt_AsSsize_t(b); + } +#endif + if (likely(PyLong_CheckExact(b))) { + #if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)b)->ob_digit; + const Py_ssize_t size = Py_SIZE(b); + if (likely(__Pyx_sst_abs(size) <= 1)) { + ival = likely(size) ? digits[0] : 0; + if (size == -1) ival = -ival; + return ival; + } else { + switch (size) { + case 2: + if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { + return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case -2: + if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { + return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case 3: + if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { + return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case -3: + if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { + return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case 4: + if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { + return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case -4: + if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { + return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + } + } + #endif + return PyLong_AsSsize_t(b); + } + x = PyNumber_Index(b); + if (!x) return -1; + ival = PyInt_AsSsize_t(x); + Py_DECREF(x); + return ival; +} +static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) { + return b ? 
__Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False); +} +static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { + return PyInt_FromSize_t(ival); +} + + +#endif /* Py_PYTHON_H */ diff --git a/widerface_evaluate/evaluation.py b/widerface_evaluate/evaluation.py new file mode 100644 index 0000000..554f278 --- /dev/null +++ b/widerface_evaluate/evaluation.py @@ -0,0 +1,303 @@ +""" +WiderFace evaluation code +author: wondervictor +mail: tianhengcheng@gmail.com +copyright@wondervictor +""" + +import os +import tqdm +import pickle +import argparse +import numpy as np +from scipy.io import loadmat +from bbox import bbox_overlaps +from IPython import embed + + +def get_gt_boxes(gt_dir): + """ gt dir: (wider_face_val.mat, wider_easy_val.mat, wider_medium_val.mat, wider_hard_val.mat)""" + + gt_mat = loadmat(os.path.join(gt_dir, 'wider_face_val.mat')) + hard_mat = loadmat(os.path.join(gt_dir, 'wider_hard_val.mat')) + medium_mat = loadmat(os.path.join(gt_dir, 'wider_medium_val.mat')) + easy_mat = loadmat(os.path.join(gt_dir, 'wider_easy_val.mat')) + + facebox_list = gt_mat['face_bbx_list'] + event_list = gt_mat['event_list'] + file_list = gt_mat['file_list'] + + hard_gt_list = hard_mat['gt_list'] + medium_gt_list = medium_mat['gt_list'] + easy_gt_list = easy_mat['gt_list'] + + return facebox_list, event_list, file_list, hard_gt_list, medium_gt_list, easy_gt_list + + +def get_gt_boxes_from_txt(gt_path, cache_dir): + + cache_file = os.path.join(cache_dir, 'gt_cache.pkl') + if os.path.exists(cache_file): + f = open(cache_file, 'rb') + boxes = pickle.load(f) + f.close() + return boxes + + f = open(gt_path, 'r') + state = 0 + lines = f.readlines() + lines = list(map(lambda x: x.rstrip('\r\n'), lines)) + boxes = {} + print(len(lines)) + f.close() + current_boxes = [] + current_name = None + for line in lines: + if state == 0 and '--' in line: + state = 1 + current_name = line + continue + if state == 1: + state = 2 + continue + + if state == 2 and '--' in line: + state = 1 + boxes[current_name] = np.array(current_boxes).astype('float32') + current_name = line + current_boxes = [] + continue + + if state == 2: + box = [float(x) for x in line.split(' ')[:4]] + current_boxes.append(box) + continue + + f = open(cache_file, 'wb') + pickle.dump(boxes, f) + f.close() + return boxes + + +def read_pred_file(filepath): + + with open(filepath, 'r') as f: + lines = f.readlines() + img_file = lines[0].rstrip('\n\r') + lines = lines[2:] + + # b = lines[0].rstrip('\r\n').split(' ')[:-1] + # c = float(b) + # a = map(lambda x: [[float(a[0]), float(a[1]), float(a[2]), float(a[3]), float(a[4])] for a in x.rstrip('\r\n').split(' ')], lines) + boxes = [] + for line in lines: + line = line.rstrip('\r\n').split(' ') + if line[0] == '': + continue + # a = float(line[4]) + boxes.append([float(line[0]), float(line[1]), float(line[2]), float(line[3]), float(line[4])]) + boxes = np.array(boxes) + # boxes = np.array(list(map(lambda x: [float(a) for a in x.rstrip('\r\n').split(' ')], lines))).astype('float') + return img_file.split('/')[-1], boxes + + +def get_preds(pred_dir): + events = os.listdir(pred_dir) + boxes = dict() + pbar = tqdm.tqdm(events) + + for event in pbar: + pbar.set_description('Reading Predictions ') + event_dir = os.path.join(pred_dir, event) + event_images = os.listdir(event_dir) + current_event = dict() + for imgtxt in event_images: + imgname, _boxes = read_pred_file(os.path.join(event_dir, imgtxt)) + current_event[imgname.rstrip('.jpg')] = _boxes + boxes[event] = current_event + return boxes + + +def 
norm_score(pred): + """ norm score + pred {key: [[x1,y1,x2,y2,s]]} + """ + + max_score = 0 + min_score = 1 + + for _, k in pred.items(): + for _, v in k.items(): + if len(v) == 0: + continue + _min = np.min(v[:, -1]) + _max = np.max(v[:, -1]) + max_score = max(_max, max_score) + min_score = min(_min, min_score) + + diff = max_score - min_score + for _, k in pred.items(): + for _, v in k.items(): + if len(v) == 0: + continue + v[:, -1] = (v[:, -1] - min_score)/diff + + +def image_eval(pred, gt, ignore, iou_thresh): + """ single image evaluation + pred: Nx5 + gt: Nx4 + ignore: + """ + + _pred = pred.copy() + _gt = gt.copy() + pred_recall = np.zeros(_pred.shape[0]) + recall_list = np.zeros(_gt.shape[0]) + proposal_list = np.ones(_pred.shape[0]) + + _pred[:, 2] = _pred[:, 2] + _pred[:, 0] + _pred[:, 3] = _pred[:, 3] + _pred[:, 1] + _gt[:, 2] = _gt[:, 2] + _gt[:, 0] + _gt[:, 3] = _gt[:, 3] + _gt[:, 1] + + overlaps = bbox_overlaps(_pred[:, :4], _gt) + + for h in range(_pred.shape[0]): + + gt_overlap = overlaps[h] + max_overlap, max_idx = gt_overlap.max(), gt_overlap.argmax() + if max_overlap >= iou_thresh: + if ignore[max_idx] == 0: + recall_list[max_idx] = -1 + proposal_list[h] = -1 + elif recall_list[max_idx] == 0: + recall_list[max_idx] = 1 + + r_keep_index = np.where(recall_list == 1)[0] + pred_recall[h] = len(r_keep_index) + return pred_recall, proposal_list + + +def img_pr_info(thresh_num, pred_info, proposal_list, pred_recall): + pr_info = np.zeros((thresh_num, 2)).astype('float') + for t in range(thresh_num): + + thresh = 1 - (t+1)/thresh_num + r_index = np.where(pred_info[:, 4] >= thresh)[0] + if len(r_index) == 0: + pr_info[t, 0] = 0 + pr_info[t, 1] = 0 + else: + r_index = r_index[-1] + p_index = np.where(proposal_list[:r_index+1] == 1)[0] + pr_info[t, 0] = len(p_index) + pr_info[t, 1] = pred_recall[r_index] + return pr_info + + +def dataset_pr_info(thresh_num, pr_curve, count_face): + _pr_curve = np.zeros((thresh_num, 2)) + for i in range(thresh_num): + _pr_curve[i, 0] = pr_curve[i, 1] / pr_curve[i, 0] + _pr_curve[i, 1] = pr_curve[i, 1] / count_face + return _pr_curve + + +def voc_ap(rec, prec): + + # correct AP calculation + # first append sentinel values at the end + mrec = np.concatenate(([0.], rec, [1.])) + mpre = np.concatenate(([0.], prec, [0.])) + + # compute the precision envelope + for i in range(mpre.size - 1, 0, -1): + mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i]) + + # to calculate area under PR curve, look for points + # where X axis (recall) changes value + i = np.where(mrec[1:] != mrec[:-1])[0] + + # and sum (\Delta recall) * prec + ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) + return ap + + +def evaluation(pred, gt_path, iou_thresh=0.5): + pred = get_preds(pred) + norm_score(pred) + facebox_list, event_list, file_list, hard_gt_list, medium_gt_list, easy_gt_list = get_gt_boxes(gt_path) + event_num = len(event_list) + thresh_num = 1000 + settings = ['easy', 'medium', 'hard'] + setting_gts = [easy_gt_list, medium_gt_list, hard_gt_list] + aps = [] + for setting_id in range(3): + # different setting + gt_list = setting_gts[setting_id] + count_face = 0 + pr_curve = np.zeros((thresh_num, 2)).astype('float') + # [hard, medium, easy] + pbar = tqdm.tqdm(range(event_num)) + for i in pbar: + pbar.set_description('Processing {}'.format(settings[setting_id])) + event_name = str(event_list[i][0][0]) + img_list = file_list[i][0] + pred_list = pred[event_name] + sub_gt_list = gt_list[i][0] + # img_pr_info_list = np.zeros((len(img_list), thresh_num, 2)) + gt_bbx_list = 
facebox_list[i][0] + + for j in range(len(img_list)): + pred_info = pred_list[str(img_list[j][0][0])] + + gt_boxes = gt_bbx_list[j][0].astype('float') + keep_index = sub_gt_list[j][0] + count_face += len(keep_index) + + if len(gt_boxes) == 0 or len(pred_info) == 0: + continue + ignore = np.zeros(gt_boxes.shape[0]) + if len(keep_index) != 0: + ignore[keep_index-1] = 1 + pred_recall, proposal_list = image_eval(pred_info, gt_boxes, ignore, iou_thresh) + + _img_pr_info = img_pr_info(thresh_num, pred_info, proposal_list, pred_recall) + + pr_curve += _img_pr_info + pr_curve = dataset_pr_info(thresh_num, pr_curve, count_face) + + propose = pr_curve[:, 0] + recall = pr_curve[:, 1] + + ap = voc_ap(recall, propose) + aps.append(ap) + + print("==================== Results ====================") + print("Easy Val AP: {}".format(aps[0])) + print("Medium Val AP: {}".format(aps[1])) + print("Hard Val AP: {}".format(aps[2])) + print("=================================================") + + +if __name__ == '__main__': + + parser = argparse.ArgumentParser() + parser.add_argument('-p', '--pred', default="./widerface_txt/") + parser.add_argument('-g', '--gt', default='./ground_truth/') + + args = parser.parse_args() + evaluation(args.pred, args.gt) + + + + + + + + + + + + diff --git a/widerface_evaluate/setup.py b/widerface_evaluate/setup.py new file mode 100644 index 0000000..74dba05 --- /dev/null +++ b/widerface_evaluate/setup.py @@ -0,0 +1,13 @@ +""" +WiderFace evaluation code +author: wondervictor +mail: tianhengcheng@gmail.com +copyright@wondervictor +""" + +from distutils.core import setup, Extension +from Cython.Build import cythonize +import numpy + +package = Extension('bbox', ['box_overlaps.pyx'], include_dirs=[numpy.get_include()]) +setup(ext_modules=cythonize([package]))
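
Note on the AP computation in `evaluation.py` above: for each difficulty setting the script accumulates a 1000-threshold precision/recall curve (`img_pr_info` / `dataset_pr_info`) and reduces it to a single number with `voc_ap()`, which takes the monotone precision envelope and integrates it over the recall steps. Below is a minimal standalone illustration of that same interpolation; the recall/precision values are made up purely for demonstration and are not taken from any real run.

```python
import numpy as np

def voc_ap(rec, prec):
    """VOC-style AP: area under the precision envelope of the PR curve
    (same interpolation as voc_ap() in widerface_evaluate/evaluation.py)."""
    mrec = np.concatenate(([0.], rec, [1.]))
    mpre = np.concatenate(([0.], prec, [0.]))
    # make precision monotonically non-increasing from right to left
    for i in range(mpre.size - 1, 0, -1):
        mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
    # sum precision * delta-recall wherever recall changes value
    i = np.where(mrec[1:] != mrec[:-1])[0]
    return np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])

# toy PR curve (illustrative numbers only)
recall = np.array([0.1, 0.4, 0.4, 0.7, 0.9])
precision = np.array([1.0, 0.8, 0.6, 0.5, 0.4])
print(voc_ap(recall, precision))  # ~0.57 for these values
```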
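`evaluation.py` also imports `bbox_overlaps` from the `bbox` extension that `setup.py` compiles from `box_overlaps.pyx`, so the extension has to be built before the script is run (typically `python setup.py build_ext --inplace` inside `widerface_evaluate/`; the generated C shown earlier in this patch is the output of that Cython build). The `.pyx` source itself is not part of this hunk, so the sketch below is only a rough NumPy stand-in for the pairwise IoU matrix it is expected to return; the `+1` pixel convention is an assumption borrowed from the common Faster R-CNN version of `box_overlaps.pyx`, not confirmed by this diff.

```python
import numpy as np

def bbox_overlaps_np(boxes, query_boxes):
    """Pairwise IoU between [x1, y1, x2, y2] boxes (NumPy approximation
    of the compiled bbox.bbox_overlaps; exact convention may differ)."""
    N, K = boxes.shape[0], query_boxes.shape[0]
    overlaps = np.zeros((N, K), dtype=np.float64)
    for k in range(K):
        q_area = ((query_boxes[k, 2] - query_boxes[k, 0] + 1) *
                  (query_boxes[k, 3] - query_boxes[k, 1] + 1))
        for n in range(N):
            # intersection width/height; skip non-overlapping pairs early
            iw = min(boxes[n, 2], query_boxes[k, 2]) - max(boxes[n, 0], query_boxes[k, 0]) + 1
            if iw <= 0:
                continue
            ih = min(boxes[n, 3], query_boxes[k, 3]) - max(boxes[n, 1], query_boxes[k, 1]) + 1
            if ih <= 0:
                continue
            b_area = (boxes[n, 2] - boxes[n, 0] + 1) * (boxes[n, 3] - boxes[n, 1] + 1)
            union = b_area + q_area - iw * ih
            overlaps[n, k] = iw * ih / union
    return overlaps

# e.g. two predictions against one ground-truth box
pred = np.array([[0, 0, 10, 10], [20, 20, 30, 30]], dtype=np.float64)
gt = np.array([[1, 1, 10, 10]], dtype=np.float64)
print(bbox_overlaps_np(pred, gt))  # high IoU for the first box, 0 for the second
```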