From 3bb06d18ab04edcd9179adfc9f29a27d8b9d42e8 Mon Sep 17 00:00:00 2001 From: "github-action[bot]" Date: Thu, 30 May 2024 20:33:31 +0200 Subject: [PATCH] Update On Thu May 30 20:33:30 CEST 2024 --- .github/update.log | 1 + clash-meta/adapter/inbound/listen.go | 14 +- clash-meta/adapter/inbound/listen_unix.go | 23 + clash-meta/adapter/inbound/listen_windows.go | 15 + clash-meta/component/dialer/tfo.go | 17 - clash-meta/component/dialer/tfo_unix.go | 25 + clash-meta/component/dialer/tfo_windows.go | 15 +- clash-nyanpasu/frontend/nyanpasu/package.json | 2 +- clash-nyanpasu/frontend/ui/package.json | 2 +- clash-nyanpasu/package.json | 2 +- clash-nyanpasu/pnpm-lock.yaml | 102 +- clash-verge-rev/package.json | 1 - clash-verge-rev/pnpm-lock.yaml | 11 - .../src/components/base/base-search-box.tsx | 14 +- clash-verge-rev/src/polyfills/RegExp.js | 76 +- clash-verge-rev/src/polyfills/WeakRef.js | 45 +- clash-verge-rev/src/polyfills/matchMedia.js | 36 + clash-verge-rev/vite.config.ts | 3 +- echo/.golangci.yml | 5 +- echo/.goreleaser.yml | 3 + echo/Makefile | 9 +- echo/go.mod | 32 +- echo/go.sum | 97 +- echo/internal/metrics/metrics.go | 154 - echo/internal/metrics/node.go | 32 + echo/internal/metrics/ping.go | 134 + echo/internal/transporter/ws.go | 6 +- echo/internal/transporter/ws_conn.go | 82 + echo/internal/transporter/ws_conn_test.go | 70 + echo/test/cmd/tcp_client/main.go | 23 +- echo/test/echo/echo.go | 3 +- echo/test/echo/ws.json | 16 + .../ramips/dts/mt7621_glinet_gl-mt1300.dts | 4 +- .../ramips/dts/mt7621_jdcloud_re-cp-02.dts | 160 + lede/target/linux/ramips/image/mt7621.mk | 9 + .../mt7621/base-files/etc/board.d/02_network | 1 + .../mt7621/base-files/etc/init.d/bootcount | 3 + mieru/pkg/cli/client.go | 233 +- mieru/pkg/cli/server.go | 2 +- mieru/pkg/stderror/template.go | 2 - mihomo/adapter/inbound/listen.go | 14 +- mihomo/adapter/inbound/listen_unix.go | 23 + mihomo/adapter/inbound/listen_windows.go | 15 + mihomo/component/dialer/tfo.go | 17 - 
mihomo/component/dialer/tfo_unix.go | 25 + mihomo/component/dialer/tfo_windows.go | 15 +- .../root/etc/init.d/unblockneteasemusic | 1 - .../luasrc/model/cbi/passwall/client/acl.lua | 1 - .../model/cbi/passwall/client/acl_config.lua | 1 - .../model/cbi/passwall/client/app_update.lua | 1 - .../model/cbi/passwall/client/global.lua | 1 - .../model/cbi/passwall/client/haproxy.lua | 1 - .../model/cbi/passwall/client/node_config.lua | 1 - .../model/cbi/passwall/client/node_list.lua | 1 - .../cbi/passwall/client/node_subscribe.lua | 1 - .../passwall/client/node_subscribe_config.lua | 1 - .../model/cbi/passwall/client/other.lua | 1 - .../luasrc/model/cbi/passwall/client/rule.lua | 2 - .../model/cbi/passwall/client/rule_list.lua | 1 - .../model/cbi/passwall/client/shunt_rules.lua | 1 - .../cbi/passwall/client/socks_config.lua | 1 - .../model/cbi/passwall/server/index.lua | 1 - .../luasrc/model/cbi/passwall/server/user.lua | 1 - .../luci-app-passwall/luasrc/passwall/api.lua | 16 - sing-box/docs/changelog.md | 8 + sing-box/docs/configuration/inbound/tun.md | 6 +- sing-box/docs/configuration/inbound/tun.zh.md | 2 +- sing-box/go.mod | 4 +- sing-box/go.sum | 4 +- sing-box/inbound/tun_auto_redirect.go | 186 +- .../luasrc/model/cbi/passwall/client/acl.lua | 1 - .../model/cbi/passwall/client/acl_config.lua | 1 - .../model/cbi/passwall/client/app_update.lua | 1 - .../model/cbi/passwall/client/global.lua | 1 - .../model/cbi/passwall/client/haproxy.lua | 1 - .../model/cbi/passwall/client/node_config.lua | 1 - .../model/cbi/passwall/client/node_list.lua | 1 - .../cbi/passwall/client/node_subscribe.lua | 1 - .../passwall/client/node_subscribe_config.lua | 1 - .../model/cbi/passwall/client/other.lua | 1 - .../luasrc/model/cbi/passwall/client/rule.lua | 2 - .../model/cbi/passwall/client/rule_list.lua | 1 - .../model/cbi/passwall/client/shunt_rules.lua | 1 - .../cbi/passwall/client/socks_config.lua | 1 - .../model/cbi/passwall/server/index.lua | 1 - 
.../luasrc/model/cbi/passwall/server/user.lua | 1 - .../luci-app-passwall/luasrc/passwall/api.lua | 16 - small/redsocks2/Makefile | 6 +- small/v2ray-geodata/Makefile | 4 +- .../v2ray/ang/service/SubscriptionUpdater.kt | 2 +- .../kotlin/com/v2ray/ang/ui/MainActivity.kt | 108 +- .../com/v2ray/ang/ui/SubSettingActivity.kt | 43 +- .../com/v2ray/ang/util/AngConfigManager.kt | 641 +--- .../com/v2ray/ang/util/fmt/ShadowsocksFmt.kt | 159 + .../kotlin/com/v2ray/ang/util/fmt/SocksFmt.kt | 69 + .../com/v2ray/ang/util/fmt/TrojanFmt.kt | 170 ++ .../kotlin/com/v2ray/ang/util/fmt/VlessFmt.kt | 170 ++ .../kotlin/com/v2ray/ang/util/fmt/VmessFmt.kt | 193 ++ .../com/v2ray/ang/util/fmt/WireguardFmt.kt | 71 + .../com/v2ray/ang/viewmodel/SubViewModel.kt | 93 + .../src/main/res/layout/layout_progress.xml | 16 + .../src/main/res/menu/action_sub_setting.xml | 11 +- .../app/src/main/res/values-ru/strings.xml | 4 +- yass/.github/workflows/compiler.yml | 2 +- yass/.gitmodules | 3 + yass/CMakeLists.txt | 114 +- yass/LICENSE.cmake.in | 4 + yass/src/cli/cli.cpp | 6 +- yass/src/config/config_version.cpp | 26 + yass/src/core/utils.cpp | 43 +- yass/src/core/utils.hpp | 4 +- yass/src/net/dns_addrinfo_helper.cpp | 15 +- yass/src/net/dns_addrinfo_helper.hpp | 2 +- yass/src/net/dns_addrinfo_helper_test.cpp | 51 + yass/src/net/doh_request.cpp | 2 +- yass/src/net/dot_request.cpp | 2 +- yass/src/server/server.cpp | 6 +- yass/src/ss_test.cpp | 4 +- yass/third_party/benchmark-winxp-fix.patch | 25 +- yass/third_party/benchmark/.clang-tidy | 1 - .../benchmark/.github/install_bazel.sh | 9 +- .../benchmark/.github/libcxx-setup.sh | 2 +- .../benchmark/.github/workflows/bazel.yml | 2 +- .../.github/workflows/build-and-test.yml | 59 +- .../.github/workflows/clang-format-lint.yml | 3 +- .../.github/workflows/pre-commit.yml | 38 + .../benchmark/.github/workflows/pylint.yml | 28 - .../.github/workflows/test_bindings.yml | 15 +- .../benchmark/.github/workflows/wheels.yml | 77 +- yass/third_party/benchmark/.gitignore | 
1 + .../benchmark/.pre-commit-config.yaml | 18 + yass/third_party/benchmark/.ycm_extra_conf.py | 165 +- yass/third_party/benchmark/AUTHORS | 1 + yass/third_party/benchmark/BUILD.bazel | 53 +- yass/third_party/benchmark/CMakeLists.txt | 35 +- yass/third_party/benchmark/CONTRIBUTORS | 2 + yass/third_party/benchmark/MODULE.bazel | 35 +- yass/third_party/benchmark/WORKSPACE | 20 +- .../benchmark/bazel/benchmark_deps.bzl | 35 +- .../benchmark/bindings/python/BUILD | 3 - .../benchmark/bindings/python/build_defs.bzl | 25 - .../bindings/python/google_benchmark/BUILD | 19 +- .../python/google_benchmark/__init__.py | 57 +- .../python/google_benchmark/example.py | 5 +- .../python/google_benchmark/version.py | 7 + .../benchmark/bindings/python/nanobind.BUILD | 17 - .../bindings/python/python_headers.BUILD | 6 - .../benchmark/cmake/GetGitVersion.cmake | 30 +- .../benchmark/cmake/benchmark_main.pc.in | 7 + .../benchmark/docs/python_bindings.md | 8 +- .../benchmark/docs/reducing_variance.md | 6 +- yass/third_party/benchmark/docs/releasing.md | 18 +- yass/third_party/benchmark/docs/user_guide.md | 30 +- .../benchmark/include/benchmark/benchmark.h | 73 +- yass/third_party/benchmark/pyproject.toml | 40 +- yass/third_party/benchmark/setup.py | 157 +- yass/third_party/benchmark/src/CMakeLists.txt | 11 +- yass/third_party/benchmark/src/benchmark.cc | 37 +- .../benchmark/src/benchmark_register.cc | 5 +- .../benchmark/src/benchmark_register.h | 4 +- .../benchmark/src/benchmark_runner.cc | 11 +- yass/third_party/benchmark/src/colorprint.cc | 6 +- yass/third_party/benchmark/src/complexity.cc | 43 +- .../benchmark/src/console_reporter.cc | 14 +- yass/third_party/benchmark/src/counter.cc | 4 +- .../third_party/benchmark/src/csv_reporter.cc | 14 +- yass/third_party/benchmark/src/cycleclock.h | 36 +- .../benchmark/src/internal_macros.h | 6 +- .../benchmark/src/json_reporter.cc | 7 + .../benchmark/src/perf_counters.cc | 5 +- yass/third_party/benchmark/src/statistics.cc | 15 +- 
yass/third_party/benchmark/src/string_util.cc | 2 +- yass/third_party/benchmark/src/sysinfo.cc | 60 +- yass/third_party/benchmark/src/timers.cc | 10 +- yass/third_party/benchmark/test/BUILD | 5 + .../third_party/benchmark/test/CMakeLists.txt | 89 +- yass/third_party/benchmark/test/basic_test.cc | 2 +- .../benchmark/test/benchmark_gtest.cc | 2 +- .../benchmark/test/benchmark_test.cc | 26 + .../benchmark/test/complexity_test.cc | 160 +- .../benchmark/test/diagnostics_test.cc | 4 +- .../benchmark/test/link_main_test.cc | 2 +- .../benchmark/test/memory_manager_test.cc | 2 +- .../benchmark/test/output_test_helper.cc | 1 + .../benchmark/test/perf_counters_gtest.cc | 2 +- .../benchmark/test/perf_counters_test.cc | 2 +- .../benchmark/test/reporter_output_test.cc | 11 +- .../benchmark/test/skip_with_error_test.cc | 2 +- .../benchmark/test/statistics_gtest.cc | 4 +- .../test/user_counters_tabular_test.cc | 9 +- .../benchmark/test/user_counters_test.cc | 14 +- yass/third_party/benchmark/tools/BUILD.bazel | 5 +- yass/third_party/benchmark/tools/compare.py | 447 +-- .../tools/gbench/Inputs/test5_run0.json | 18 + .../tools/gbench/Inputs/test5_run1.json | 18 + .../benchmark/tools/gbench/__init__.py | 8 +- .../benchmark/tools/gbench/report.py | 1720 +++++++---- .../benchmark/tools/gbench/util.py | 114 +- yass/third_party/benchmark/tools/strip_asm.py | 118 +- yass/third_party/libc++/CMakeLists.txt | 32 +- yass/third_party/libc++/freebsd/libgcc.a | 1 + yass/third_party/mimalloc/.gitattributes | 12 + yass/third_party/mimalloc/.gitignore | 11 + yass/third_party/mimalloc/CMakeLists.txt | 609 ++++ yass/third_party/mimalloc/LICENSE | 21 + yass/third_party/mimalloc/SECURITY.md | 45 + yass/third_party/mimalloc/azure-pipelines.yml | 197 ++ .../mimalloc/bin/mimalloc-redirect.dll | Bin 0 -> 68096 bytes .../mimalloc/bin/mimalloc-redirect.lib | Bin 0 -> 2874 bytes .../mimalloc/bin/mimalloc-redirect32.dll | Bin 0 -> 41984 bytes .../mimalloc/bin/mimalloc-redirect32.lib | Bin 0 -> 2928 bytes 
yass/third_party/mimalloc/bin/readme.md | 71 + .../mimalloc/cmake/JoinPaths.cmake | 23 + .../cmake/mimalloc-config-version.cmake | 19 + .../mimalloc/cmake/mimalloc-config.cmake | 14 + .../bench-c5-18xlarge-2020-01-20-a.svg | 887 ++++++ .../bench-c5-18xlarge-2020-01-20-b.svg | 1185 ++++++++ .../bench-c5-18xlarge-2020-01-20-rss-a.svg | 757 +++++ .../bench-c5-18xlarge-2020-01-20-rss-b.svg | 1028 +++++++ .../mimalloc/doc/bench-2020/bench-r5a-1.svg | 769 +++++ .../bench-r5a-12xlarge-2020-01-16-a.svg | 868 ++++++ .../bench-r5a-12xlarge-2020-01-16-b.svg | 1157 +++++++ .../mimalloc/doc/bench-2020/bench-r5a-2.svg | 983 ++++++ .../doc/bench-2020/bench-r5a-rss-1.svg | 683 +++++ .../doc/bench-2020/bench-r5a-rss-2.svg | 854 ++++++ .../doc/bench-2020/bench-spec-rss.svg | 713 +++++ .../mimalloc/doc/bench-2020/bench-spec.svg | 713 +++++ .../mimalloc/doc/bench-2020/bench-z4-1.svg | 890 ++++++ .../mimalloc/doc/bench-2020/bench-z4-2.svg | 1146 +++++++ .../doc/bench-2020/bench-z4-rss-1.svg | 796 +++++ .../doc/bench-2020/bench-z4-rss-2.svg | 974 ++++++ .../bench-amd5950x-2021-01-30-a.svg | 952 ++++++ .../bench-amd5950x-2021-01-30-b.svg | 1255 ++++++++ .../bench-c5-18xlarge-2021-01-30-a.svg | 955 ++++++ .../bench-c5-18xlarge-2021-01-30-b.svg | 1269 ++++++++ .../bench-c5-18xlarge-2021-01-30-rss-a.svg | 836 ++++++ .../bench-c5-18xlarge-2021-01-30-rss-b.svg | 1131 +++++++ .../bench-2021/bench-macmini-2021-01-30.svg | 766 +++++ yass/third_party/mimalloc/doc/doxyfile | 2659 +++++++++++++++++ yass/third_party/mimalloc/doc/ds-logo.jpg | Bin 0 -> 181497 bytes yass/third_party/mimalloc/doc/ds-logo.png | Bin 0 -> 121150 bytes yass/third_party/mimalloc/doc/mimalloc-doc.h | 1281 ++++++++ .../mimalloc/doc/mimalloc-doxygen.css | 49 + .../mimalloc/doc/mimalloc-logo-100.png | Bin 0 -> 3532 bytes .../mimalloc/doc/mimalloc-logo.png | Bin 0 -> 73097 bytes .../mimalloc/doc/mimalloc-logo.svg | 161 + yass/third_party/mimalloc/doc/spades-logo.png | Bin 0 -> 34583 bytes 
yass/third_party/mimalloc/doc/unreal-logo.svg | 43 + .../mimalloc/docker/alpine-arm32v7/Dockerfile | 28 + .../mimalloc/docker/alpine/Dockerfile | 23 + .../mimalloc/docker/manylinux-x64/Dockerfile | 23 + yass/third_party/mimalloc/docker/readme.md | 10 + .../ide/vs2017/mimalloc-override-test.vcxproj | 190 ++ .../ide/vs2017/mimalloc-override.vcxproj | 260 ++ .../ide/vs2017/mimalloc-test-stress.vcxproj | 159 + .../mimalloc/ide/vs2017/mimalloc-test.vcxproj | 158 + .../mimalloc/ide/vs2017/mimalloc.sln | 71 + .../mimalloc/ide/vs2017/mimalloc.vcxproj | 260 ++ .../ide/vs2019/mimalloc-override-test.vcxproj | 190 ++ .../ide/vs2019/mimalloc-override.vcxproj | 260 ++ .../ide/vs2019/mimalloc-test-api.vcxproj | 155 + .../ide/vs2019/mimalloc-test-stress.vcxproj | 159 + .../mimalloc/ide/vs2019/mimalloc-test.vcxproj | 158 + .../mimalloc/ide/vs2019/mimalloc.sln | 81 + .../mimalloc/ide/vs2019/mimalloc.vcxproj | 258 ++ .../ide/vs2022/mimalloc-override-test.vcxproj | 190 ++ .../ide/vs2022/mimalloc-override.vcxproj | 271 ++ .../ide/vs2022/mimalloc-test-api.vcxproj | 162 + .../ide/vs2022/mimalloc-test-stress.vcxproj | 159 + .../mimalloc/ide/vs2022/mimalloc-test.vcxproj | 158 + .../mimalloc/ide/vs2022/mimalloc.sln | 81 + .../mimalloc/ide/vs2022/mimalloc.vcxproj | 264 ++ .../mimalloc/include/mimalloc-new-delete.h | 66 + .../mimalloc/include/mimalloc-override.h | 68 + yass/third_party/mimalloc/include/mimalloc.h | 569 ++++ .../mimalloc/include/mimalloc/atomic.h | 393 +++ .../mimalloc/include/mimalloc/internal.h | 1018 +++++++ .../mimalloc/include/mimalloc/prim.h | 373 +++ .../mimalloc/include/mimalloc/track.h | 149 + .../mimalloc/include/mimalloc/types.h | 705 +++++ yass/third_party/mimalloc/mimalloc.pc.in | 11 + yass/third_party/mimalloc/readme.md | 861 ++++++ yass/third_party/mimalloc/src/alloc-aligned.c | 312 ++ .../third_party/mimalloc/src/alloc-override.c | 314 ++ yass/third_party/mimalloc/src/alloc-posix.c | 185 ++ yass/third_party/mimalloc/src/alloc.c | 598 ++++ 
yass/third_party/mimalloc/src/arena.c | 1108 +++++++ yass/third_party/mimalloc/src/bitmap.c | 436 +++ yass/third_party/mimalloc/src/bitmap.h | 115 + yass/third_party/mimalloc/src/free.c | 530 ++++ yass/third_party/mimalloc/src/heap.c | 653 ++++ yass/third_party/mimalloc/src/init.c | 714 +++++ yass/third_party/mimalloc/src/libc.c | 273 ++ yass/third_party/mimalloc/src/options.c | 526 ++++ yass/third_party/mimalloc/src/os.c | 678 +++++ yass/third_party/mimalloc/src/page-queue.c | 343 +++ yass/third_party/mimalloc/src/page.c | 943 ++++++ .../mimalloc/src/prim/emscripten/prim.c | 244 ++ .../src/prim/osx/alloc-override-zone.c | 461 +++ yass/third_party/mimalloc/src/prim/osx/prim.c | 9 + yass/third_party/mimalloc/src/prim/prim.c | 27 + yass/third_party/mimalloc/src/prim/readme.md | 9 + .../third_party/mimalloc/src/prim/unix/prim.c | 879 ++++++ .../third_party/mimalloc/src/prim/wasi/prim.c | 280 ++ .../src/prim/windows/etw-mimalloc.wprp | 61 + .../mimalloc/src/prim/windows/etw.h | 905 ++++++ .../mimalloc/src/prim/windows/etw.man | Bin 0 -> 3926 bytes .../mimalloc/src/prim/windows/prim.c | 663 ++++ .../mimalloc/src/prim/windows/readme.md | 17 + yass/third_party/mimalloc/src/random.c | 254 ++ yass/third_party/mimalloc/src/segment-map.c | 155 + yass/third_party/mimalloc/src/segment.c | 1524 ++++++++++ yass/third_party/mimalloc/src/static.c | 41 + yass/third_party/mimalloc/src/stats.c | 467 +++ yass/third_party/mimalloc/test/CMakeLists.txt | 54 + .../mimalloc/test/main-override-static.c | 415 +++ .../third_party/mimalloc/test/main-override.c | 36 + .../mimalloc/test/main-override.cpp | 400 +++ yass/third_party/mimalloc/test/main.c | 46 + yass/third_party/mimalloc/test/readme.md | 16 + .../third_party/mimalloc/test/test-api-fill.c | 343 +++ yass/third_party/mimalloc/test/test-api.c | 451 +++ yass/third_party/mimalloc/test/test-stress.c | 364 +++ yass/third_party/mimalloc/test/test-wrong.c | 92 + yass/third_party/mimalloc/test/testhelper.h | 49 + yass/tools/build.go | 36 +- 
youtube-dl/test/test_InfoExtractor.py | 3 + youtube-dl/test/test_traversal.py | 509 ++++ youtube-dl/test/test_utils.py | 362 --- youtube-dl/youtube_dl/compat.py | 219 +- youtube-dl/youtube_dl/extractor/common.py | 63 +- youtube-dl/youtube_dl/traversal.py | 10 + youtube-dl/youtube_dl/utils.py | 102 +- yt-dlp/yt_dlp/extractor/orf.py | 139 +- 334 files changed, 56759 insertions(+), 3551 deletions(-) create mode 100644 clash-meta/adapter/inbound/listen_unix.go create mode 100644 clash-meta/adapter/inbound/listen_windows.go create mode 100644 clash-meta/component/dialer/tfo_unix.go create mode 100644 clash-verge-rev/src/polyfills/matchMedia.js create mode 100644 echo/internal/metrics/node.go create mode 100644 echo/internal/metrics/ping.go create mode 100644 echo/internal/transporter/ws_conn.go create mode 100644 echo/internal/transporter/ws_conn_test.go create mode 100644 echo/test/echo/ws.json create mode 100644 lede/target/linux/ramips/dts/mt7621_jdcloud_re-cp-02.dts mode change 100755 => 100644 lede/target/linux/ramips/mt7621/base-files/etc/board.d/02_network create mode 100644 mihomo/adapter/inbound/listen_unix.go create mode 100644 mihomo/adapter/inbound/listen_windows.go create mode 100644 mihomo/component/dialer/tfo_unix.go create mode 100644 v2rayng/V2rayNG/app/src/main/kotlin/com/v2ray/ang/util/fmt/ShadowsocksFmt.kt create mode 100644 v2rayng/V2rayNG/app/src/main/kotlin/com/v2ray/ang/util/fmt/SocksFmt.kt create mode 100644 v2rayng/V2rayNG/app/src/main/kotlin/com/v2ray/ang/util/fmt/TrojanFmt.kt create mode 100644 v2rayng/V2rayNG/app/src/main/kotlin/com/v2ray/ang/util/fmt/VlessFmt.kt create mode 100644 v2rayng/V2rayNG/app/src/main/kotlin/com/v2ray/ang/util/fmt/VmessFmt.kt create mode 100644 v2rayng/V2rayNG/app/src/main/kotlin/com/v2ray/ang/util/fmt/WireguardFmt.kt create mode 100644 v2rayng/V2rayNG/app/src/main/kotlin/com/v2ray/ang/viewmodel/SubViewModel.kt create mode 100644 v2rayng/V2rayNG/app/src/main/res/layout/layout_progress.xml create mode 100644 
yass/src/net/dns_addrinfo_helper_test.cpp create mode 100644 yass/third_party/benchmark/.github/workflows/pre-commit.yml delete mode 100644 yass/third_party/benchmark/.github/workflows/pylint.yml create mode 100644 yass/third_party/benchmark/.pre-commit-config.yaml delete mode 100644 yass/third_party/benchmark/bindings/python/BUILD delete mode 100644 yass/third_party/benchmark/bindings/python/build_defs.bzl create mode 100644 yass/third_party/benchmark/bindings/python/google_benchmark/version.py delete mode 100644 yass/third_party/benchmark/bindings/python/nanobind.BUILD delete mode 100644 yass/third_party/benchmark/bindings/python/python_headers.BUILD create mode 100644 yass/third_party/benchmark/cmake/benchmark_main.pc.in create mode 100644 yass/third_party/benchmark/tools/gbench/Inputs/test5_run0.json create mode 100644 yass/third_party/benchmark/tools/gbench/Inputs/test5_run1.json create mode 100644 yass/third_party/libc++/freebsd/libgcc.a create mode 100644 yass/third_party/mimalloc/.gitattributes create mode 100644 yass/third_party/mimalloc/.gitignore create mode 100644 yass/third_party/mimalloc/CMakeLists.txt create mode 100644 yass/third_party/mimalloc/LICENSE create mode 100644 yass/third_party/mimalloc/SECURITY.md create mode 100644 yass/third_party/mimalloc/azure-pipelines.yml create mode 100644 yass/third_party/mimalloc/bin/mimalloc-redirect.dll create mode 100644 yass/third_party/mimalloc/bin/mimalloc-redirect.lib create mode 100644 yass/third_party/mimalloc/bin/mimalloc-redirect32.dll create mode 100644 yass/third_party/mimalloc/bin/mimalloc-redirect32.lib create mode 100644 yass/third_party/mimalloc/bin/readme.md create mode 100644 yass/third_party/mimalloc/cmake/JoinPaths.cmake create mode 100644 yass/third_party/mimalloc/cmake/mimalloc-config-version.cmake create mode 100644 yass/third_party/mimalloc/cmake/mimalloc-config.cmake create mode 100644 yass/third_party/mimalloc/doc/bench-2020/bench-c5-18xlarge-2020-01-20-a.svg create mode 100644 
yass/third_party/mimalloc/doc/bench-2020/bench-c5-18xlarge-2020-01-20-b.svg create mode 100644 yass/third_party/mimalloc/doc/bench-2020/bench-c5-18xlarge-2020-01-20-rss-a.svg create mode 100644 yass/third_party/mimalloc/doc/bench-2020/bench-c5-18xlarge-2020-01-20-rss-b.svg create mode 100644 yass/third_party/mimalloc/doc/bench-2020/bench-r5a-1.svg create mode 100644 yass/third_party/mimalloc/doc/bench-2020/bench-r5a-12xlarge-2020-01-16-a.svg create mode 100644 yass/third_party/mimalloc/doc/bench-2020/bench-r5a-12xlarge-2020-01-16-b.svg create mode 100644 yass/third_party/mimalloc/doc/bench-2020/bench-r5a-2.svg create mode 100644 yass/third_party/mimalloc/doc/bench-2020/bench-r5a-rss-1.svg create mode 100644 yass/third_party/mimalloc/doc/bench-2020/bench-r5a-rss-2.svg create mode 100644 yass/third_party/mimalloc/doc/bench-2020/bench-spec-rss.svg create mode 100644 yass/third_party/mimalloc/doc/bench-2020/bench-spec.svg create mode 100644 yass/third_party/mimalloc/doc/bench-2020/bench-z4-1.svg create mode 100644 yass/third_party/mimalloc/doc/bench-2020/bench-z4-2.svg create mode 100644 yass/third_party/mimalloc/doc/bench-2020/bench-z4-rss-1.svg create mode 100644 yass/third_party/mimalloc/doc/bench-2020/bench-z4-rss-2.svg create mode 100644 yass/third_party/mimalloc/doc/bench-2021/bench-amd5950x-2021-01-30-a.svg create mode 100644 yass/third_party/mimalloc/doc/bench-2021/bench-amd5950x-2021-01-30-b.svg create mode 100644 yass/third_party/mimalloc/doc/bench-2021/bench-c5-18xlarge-2021-01-30-a.svg create mode 100644 yass/third_party/mimalloc/doc/bench-2021/bench-c5-18xlarge-2021-01-30-b.svg create mode 100644 yass/third_party/mimalloc/doc/bench-2021/bench-c5-18xlarge-2021-01-30-rss-a.svg create mode 100644 yass/third_party/mimalloc/doc/bench-2021/bench-c5-18xlarge-2021-01-30-rss-b.svg create mode 100644 yass/third_party/mimalloc/doc/bench-2021/bench-macmini-2021-01-30.svg create mode 100644 yass/third_party/mimalloc/doc/doxyfile create mode 100644 
yass/third_party/mimalloc/doc/ds-logo.jpg create mode 100644 yass/third_party/mimalloc/doc/ds-logo.png create mode 100644 yass/third_party/mimalloc/doc/mimalloc-doc.h create mode 100644 yass/third_party/mimalloc/doc/mimalloc-doxygen.css create mode 100644 yass/third_party/mimalloc/doc/mimalloc-logo-100.png create mode 100644 yass/third_party/mimalloc/doc/mimalloc-logo.png create mode 100644 yass/third_party/mimalloc/doc/mimalloc-logo.svg create mode 100644 yass/third_party/mimalloc/doc/spades-logo.png create mode 100644 yass/third_party/mimalloc/doc/unreal-logo.svg create mode 100644 yass/third_party/mimalloc/docker/alpine-arm32v7/Dockerfile create mode 100644 yass/third_party/mimalloc/docker/alpine/Dockerfile create mode 100644 yass/third_party/mimalloc/docker/manylinux-x64/Dockerfile create mode 100644 yass/third_party/mimalloc/docker/readme.md create mode 100644 yass/third_party/mimalloc/ide/vs2017/mimalloc-override-test.vcxproj create mode 100644 yass/third_party/mimalloc/ide/vs2017/mimalloc-override.vcxproj create mode 100644 yass/third_party/mimalloc/ide/vs2017/mimalloc-test-stress.vcxproj create mode 100644 yass/third_party/mimalloc/ide/vs2017/mimalloc-test.vcxproj create mode 100644 yass/third_party/mimalloc/ide/vs2017/mimalloc.sln create mode 100644 yass/third_party/mimalloc/ide/vs2017/mimalloc.vcxproj create mode 100644 yass/third_party/mimalloc/ide/vs2019/mimalloc-override-test.vcxproj create mode 100644 yass/third_party/mimalloc/ide/vs2019/mimalloc-override.vcxproj create mode 100644 yass/third_party/mimalloc/ide/vs2019/mimalloc-test-api.vcxproj create mode 100644 yass/third_party/mimalloc/ide/vs2019/mimalloc-test-stress.vcxproj create mode 100644 yass/third_party/mimalloc/ide/vs2019/mimalloc-test.vcxproj create mode 100644 yass/third_party/mimalloc/ide/vs2019/mimalloc.sln create mode 100644 yass/third_party/mimalloc/ide/vs2019/mimalloc.vcxproj create mode 100644 yass/third_party/mimalloc/ide/vs2022/mimalloc-override-test.vcxproj create mode 100644 
yass/third_party/mimalloc/ide/vs2022/mimalloc-override.vcxproj create mode 100644 yass/third_party/mimalloc/ide/vs2022/mimalloc-test-api.vcxproj create mode 100644 yass/third_party/mimalloc/ide/vs2022/mimalloc-test-stress.vcxproj create mode 100644 yass/third_party/mimalloc/ide/vs2022/mimalloc-test.vcxproj create mode 100644 yass/third_party/mimalloc/ide/vs2022/mimalloc.sln create mode 100644 yass/third_party/mimalloc/ide/vs2022/mimalloc.vcxproj create mode 100644 yass/third_party/mimalloc/include/mimalloc-new-delete.h create mode 100644 yass/third_party/mimalloc/include/mimalloc-override.h create mode 100644 yass/third_party/mimalloc/include/mimalloc.h create mode 100644 yass/third_party/mimalloc/include/mimalloc/atomic.h create mode 100644 yass/third_party/mimalloc/include/mimalloc/internal.h create mode 100644 yass/third_party/mimalloc/include/mimalloc/prim.h create mode 100644 yass/third_party/mimalloc/include/mimalloc/track.h create mode 100644 yass/third_party/mimalloc/include/mimalloc/types.h create mode 100644 yass/third_party/mimalloc/mimalloc.pc.in create mode 100644 yass/third_party/mimalloc/readme.md create mode 100644 yass/third_party/mimalloc/src/alloc-aligned.c create mode 100644 yass/third_party/mimalloc/src/alloc-override.c create mode 100644 yass/third_party/mimalloc/src/alloc-posix.c create mode 100644 yass/third_party/mimalloc/src/alloc.c create mode 100644 yass/third_party/mimalloc/src/arena.c create mode 100644 yass/third_party/mimalloc/src/bitmap.c create mode 100644 yass/third_party/mimalloc/src/bitmap.h create mode 100644 yass/third_party/mimalloc/src/free.c create mode 100644 yass/third_party/mimalloc/src/heap.c create mode 100644 yass/third_party/mimalloc/src/init.c create mode 100644 yass/third_party/mimalloc/src/libc.c create mode 100644 yass/third_party/mimalloc/src/options.c create mode 100644 yass/third_party/mimalloc/src/os.c create mode 100644 yass/third_party/mimalloc/src/page-queue.c create mode 100644 
yass/third_party/mimalloc/src/page.c create mode 100644 yass/third_party/mimalloc/src/prim/emscripten/prim.c create mode 100644 yass/third_party/mimalloc/src/prim/osx/alloc-override-zone.c create mode 100644 yass/third_party/mimalloc/src/prim/osx/prim.c create mode 100644 yass/third_party/mimalloc/src/prim/prim.c create mode 100644 yass/third_party/mimalloc/src/prim/readme.md create mode 100644 yass/third_party/mimalloc/src/prim/unix/prim.c create mode 100644 yass/third_party/mimalloc/src/prim/wasi/prim.c create mode 100644 yass/third_party/mimalloc/src/prim/windows/etw-mimalloc.wprp create mode 100644 yass/third_party/mimalloc/src/prim/windows/etw.h create mode 100644 yass/third_party/mimalloc/src/prim/windows/etw.man create mode 100644 yass/third_party/mimalloc/src/prim/windows/prim.c create mode 100644 yass/third_party/mimalloc/src/prim/windows/readme.md create mode 100644 yass/third_party/mimalloc/src/random.c create mode 100644 yass/third_party/mimalloc/src/segment-map.c create mode 100644 yass/third_party/mimalloc/src/segment.c create mode 100644 yass/third_party/mimalloc/src/static.c create mode 100644 yass/third_party/mimalloc/src/stats.c create mode 100644 yass/third_party/mimalloc/test/CMakeLists.txt create mode 100644 yass/third_party/mimalloc/test/main-override-static.c create mode 100644 yass/third_party/mimalloc/test/main-override.c create mode 100644 yass/third_party/mimalloc/test/main-override.cpp create mode 100644 yass/third_party/mimalloc/test/main.c create mode 100644 yass/third_party/mimalloc/test/readme.md create mode 100644 yass/third_party/mimalloc/test/test-api-fill.c create mode 100644 yass/third_party/mimalloc/test/test-api.c create mode 100644 yass/third_party/mimalloc/test/test-stress.c create mode 100644 yass/third_party/mimalloc/test/test-wrong.c create mode 100644 yass/third_party/mimalloc/test/testhelper.h create mode 100644 youtube-dl/test/test_traversal.py create mode 100644 youtube-dl/youtube_dl/traversal.py diff --git 
a/.github/update.log b/.github/update.log index 3aa919207a..5aa8b5ca3e 100644 --- a/.github/update.log +++ b/.github/update.log @@ -662,3 +662,4 @@ Update On Sun May 26 20:29:29 CEST 2024 Update On Mon May 27 20:31:35 CEST 2024 Update On Tue May 28 20:30:03 CEST 2024 Update On Wed May 29 20:30:48 CEST 2024 +Update On Thu May 30 20:33:19 CEST 2024 diff --git a/clash-meta/adapter/inbound/listen.go b/clash-meta/adapter/inbound/listen.go index 18dc1bc242..edbccea70a 100644 --- a/clash-meta/adapter/inbound/listen.go +++ b/clash-meta/adapter/inbound/listen.go @@ -3,22 +3,10 @@ package inbound import ( "context" "net" - - "github.com/metacubex/tfo-go" ) -var ( - lc = tfo.ListenConfig{ - DisableTFO: true, - } -) - -func SetTfo(open bool) { - lc.DisableTFO = !open -} - func SetMPTCP(open bool) { - setMultiPathTCP(&lc.ListenConfig, open) + setMultiPathTCP(getListenConfig(), open) } func ListenContext(ctx context.Context, network, address string) (net.Listener, error) { diff --git a/clash-meta/adapter/inbound/listen_unix.go b/clash-meta/adapter/inbound/listen_unix.go new file mode 100644 index 0000000000..bb78adb222 --- /dev/null +++ b/clash-meta/adapter/inbound/listen_unix.go @@ -0,0 +1,23 @@ +//go:build unix + +package inbound + +import ( + "net" + + "github.com/metacubex/tfo-go" +) + +var ( + lc = tfo.ListenConfig{ + DisableTFO: true, + } +) + +func SetTfo(open bool) { + lc.DisableTFO = !open +} + +func getListenConfig() *net.ListenConfig { + return &lc.ListenConfig +} diff --git a/clash-meta/adapter/inbound/listen_windows.go b/clash-meta/adapter/inbound/listen_windows.go new file mode 100644 index 0000000000..a4223e2b58 --- /dev/null +++ b/clash-meta/adapter/inbound/listen_windows.go @@ -0,0 +1,15 @@ +package inbound + +import ( + "net" +) + +var ( + lc = net.ListenConfig{} +) + +func SetTfo(open bool) {} + +func getListenConfig() *net.ListenConfig { + return &lc +} diff --git a/clash-meta/component/dialer/tfo.go b/clash-meta/component/dialer/tfo.go index 
76fe94d021..bc32b38a74 100644 --- a/clash-meta/component/dialer/tfo.go +++ b/clash-meta/component/dialer/tfo.go @@ -5,12 +5,8 @@ import ( "io" "net" "time" - - "github.com/metacubex/tfo-go" ) -var DisableTFO = false - type tfoConn struct { net.Conn closed bool @@ -124,16 +120,3 @@ func (c *tfoConn) ReaderReplaceable() bool { func (c *tfoConn) WriterReplaceable() bool { return c.Conn != nil } - -func dialTFO(ctx context.Context, netDialer net.Dialer, network, address string) (net.Conn, error) { - ctx, cancel := context.WithTimeout(context.Background(), DefaultTCPTimeout) - dialer := tfo.Dialer{Dialer: netDialer, DisableTFO: false} - return &tfoConn{ - dialed: make(chan bool, 1), - cancel: cancel, - ctx: ctx, - dialFn: func(ctx context.Context, earlyData []byte) (net.Conn, error) { - return dialer.DialContext(ctx, network, address, earlyData) - }, - }, nil -} diff --git a/clash-meta/component/dialer/tfo_unix.go b/clash-meta/component/dialer/tfo_unix.go new file mode 100644 index 0000000000..b8908849e8 --- /dev/null +++ b/clash-meta/component/dialer/tfo_unix.go @@ -0,0 +1,25 @@ +//go:build unix + +package dialer + +import ( + "context" + "net" + + "github.com/metacubex/tfo-go" +) + +const DisableTFO = false + +func dialTFO(ctx context.Context, netDialer net.Dialer, network, address string) (net.Conn, error) { + ctx, cancel := context.WithTimeout(context.Background(), DefaultTCPTimeout) + dialer := tfo.Dialer{Dialer: netDialer, DisableTFO: false} + return &tfoConn{ + dialed: make(chan bool, 1), + cancel: cancel, + ctx: ctx, + dialFn: func(ctx context.Context, earlyData []byte) (net.Conn, error) { + return dialer.DialContext(ctx, network, address, earlyData) + }, + }, nil +} diff --git a/clash-meta/component/dialer/tfo_windows.go b/clash-meta/component/dialer/tfo_windows.go index 632661186c..f1dddcf44e 100644 --- a/clash-meta/component/dialer/tfo_windows.go +++ b/clash-meta/component/dialer/tfo_windows.go @@ -1,11 +1,12 @@ package dialer -import 
"github.com/metacubex/mihomo/constant/features" +import ( + "context" + "net" +) -func init() { - // According to MSDN, this option is available since Windows 10, 1607 - // https://msdn.microsoft.com/en-us/library/windows/desktop/ms738596(v=vs.85).aspx - if features.WindowsMajorVersion < 10 || (features.WindowsMajorVersion == 10 && features.WindowsBuildNumber < 14393) { - DisableTFO = true - } +const DisableTFO = true + +func dialTFO(ctx context.Context, netDialer net.Dialer, network, address string) (net.Conn, error) { + return netDialer.DialContext(ctx, network, address) } diff --git a/clash-nyanpasu/frontend/nyanpasu/package.json b/clash-nyanpasu/frontend/nyanpasu/package.json index 90eb42f60d..2b0c0c7089 100644 --- a/clash-nyanpasu/frontend/nyanpasu/package.json +++ b/clash-nyanpasu/frontend/nyanpasu/package.json @@ -56,7 +56,7 @@ "@typescript-eslint/eslint-plugin": "7.11.0", "@typescript-eslint/parser": "7.11.0", "@vitejs/plugin-react": "4.3.0", - "sass": "1.77.2", + "sass": "1.77.3", "shiki": "1.6.1", "vite": "5.2.12", "vite-plugin-monaco-editor": "1.1.3", diff --git a/clash-nyanpasu/frontend/ui/package.json b/clash-nyanpasu/frontend/ui/package.json index 94dd9911d4..6c4ce29ba3 100644 --- a/clash-nyanpasu/frontend/ui/package.json +++ b/clash-nyanpasu/frontend/ui/package.json @@ -16,7 +16,7 @@ "react-i18next": "14.1.2" }, "devDependencies": { - "sass": "1.77.2", + "sass": "1.77.3", "typescript-plugin-css-modules": "5.1.0" } } diff --git a/clash-nyanpasu/package.json b/clash-nyanpasu/package.json index 5bb959813a..48653ba790 100644 --- a/clash-nyanpasu/package.json +++ b/clash-nyanpasu/package.json @@ -74,7 +74,7 @@ "@tauri-apps/cli": "1.5.14", "@types/fs-extra": "11.0.4", "@types/lodash-es": "4.17.12", - "@types/node": "20.12.12", + "@types/node": "20.12.13", "autoprefixer": "10.4.19", "conventional-changelog-conventionalcommits": "8.0.0", "cross-env": "7.0.3", diff --git a/clash-nyanpasu/pnpm-lock.yaml b/clash-nyanpasu/pnpm-lock.yaml index 
29b252cdc8..131686839d 100644 --- a/clash-nyanpasu/pnpm-lock.yaml +++ b/clash-nyanpasu/pnpm-lock.yaml @@ -24,7 +24,7 @@ importers: devDependencies: '@commitlint/cli': specifier: 19.3.0 - version: 19.3.0(@types/node@20.12.12)(typescript@5.4.5) + version: 19.3.0(@types/node@20.12.13)(typescript@5.4.5) '@commitlint/config-conventional': specifier: 19.2.2 version: 19.2.2 @@ -38,8 +38,8 @@ importers: specifier: 4.17.12 version: 4.17.12 '@types/node': - specifier: 20.12.12 - version: 20.12.12 + specifier: 20.12.13 + version: 20.12.13 autoprefixer: specifier: 10.4.19 version: 10.4.19(postcss@8.4.38) @@ -175,7 +175,7 @@ importers: version: 11.11.5(@emotion/react@11.11.4(react@19.0.0-rc-935180c7e0-20240524)(types-react@19.0.0-rc.0))(react@19.0.0-rc-935180c7e0-20240524)(types-react@19.0.0-rc.0) '@generouted/react-router': specifier: 1.19.5 - version: 1.19.5(react-router-dom@6.23.1(react-dom@19.0.0-rc-935180c7e0-20240524(react@19.0.0-rc-935180c7e0-20240524))(react@19.0.0-rc-935180c7e0-20240524))(react@19.0.0-rc-935180c7e0-20240524)(vite@5.2.12(@types/node@20.12.12)(less@4.2.0)(sass@1.77.2)(stylus@0.62.0)) + version: 1.19.5(react-router-dom@6.23.1(react-dom@19.0.0-rc-935180c7e0-20240524(react@19.0.0-rc-935180c7e0-20240524))(react@19.0.0-rc-935180c7e0-20240524))(react@19.0.0-rc-935180c7e0-20240524)(vite@5.2.12(@types/node@20.12.13)(less@4.2.0)(sass@1.77.3)(stylus@0.62.0)) '@juggle/resize-observer': specifier: 3.4.0 version: 3.4.0 @@ -296,28 +296,28 @@ importers: version: 7.11.0(eslint@8.57.0)(typescript@5.4.5) '@vitejs/plugin-react': specifier: 4.3.0 - version: 4.3.0(vite@5.2.12(@types/node@20.12.12)(less@4.2.0)(sass@1.77.2)(stylus@0.62.0)) + version: 4.3.0(vite@5.2.12(@types/node@20.12.13)(less@4.2.0)(sass@1.77.3)(stylus@0.62.0)) sass: - specifier: 1.77.2 - version: 1.77.2 + specifier: 1.77.3 + version: 1.77.3 shiki: specifier: 1.6.1 version: 1.6.1 vite: specifier: 5.2.12 - version: 5.2.12(@types/node@20.12.12)(less@4.2.0)(sass@1.77.2)(stylus@0.62.0) + version: 
5.2.12(@types/node@20.12.13)(less@4.2.0)(sass@1.77.3)(stylus@0.62.0) vite-plugin-monaco-editor: specifier: npm:vite-plugin-monaco-editor-new@1.1.3 version: vite-plugin-monaco-editor-new@1.1.3(monaco-editor@0.49.0) vite-plugin-sass-dts: specifier: 1.3.22 - version: 1.3.22(postcss@8.4.38)(prettier@3.2.5)(sass@1.77.2)(vite@5.2.12(@types/node@20.12.12)(less@4.2.0)(sass@1.77.2)(stylus@0.62.0)) + version: 1.3.22(postcss@8.4.38)(prettier@3.2.5)(sass@1.77.3)(vite@5.2.12(@types/node@20.12.13)(less@4.2.0)(sass@1.77.3)(stylus@0.62.0)) vite-plugin-svgr: specifier: 4.2.0 - version: 4.2.0(rollup@4.17.2)(typescript@5.4.5)(vite@5.2.12(@types/node@20.12.12)(less@4.2.0)(sass@1.77.2)(stylus@0.62.0)) + version: 4.2.0(rollup@4.17.2)(typescript@5.4.5)(vite@5.2.12(@types/node@20.12.13)(less@4.2.0)(sass@1.77.3)(stylus@0.62.0)) vite-tsconfig-paths: specifier: 4.3.2 - version: 4.3.2(typescript@5.4.5)(vite@5.2.12(@types/node@20.12.12)(less@4.2.0)(sass@1.77.2)(stylus@0.62.0)) + version: 4.3.2(typescript@5.4.5)(vite@5.2.12(@types/node@20.12.13)(less@4.2.0)(sass@1.77.3)(stylus@0.62.0)) frontend/ui: dependencies: @@ -353,8 +353,8 @@ importers: version: 14.1.2(i18next@23.11.5)(react-dom@19.0.0-rc-935180c7e0-20240524(react@19.0.0-rc-935180c7e0-20240524))(react@19.0.0-rc-935180c7e0-20240524) devDependencies: sass: - specifier: 1.77.2 - version: 1.77.2 + specifier: 1.77.3 + version: 1.77.3 typescript-plugin-css-modules: specifier: 5.1.0 version: 5.1.0(typescript@5.4.5) @@ -1581,8 +1581,8 @@ packages: '@types/node@20.12.10': resolution: {integrity: sha512-Eem5pH9pmWBHoGAT8Dr5fdc5rYA+4NAovdM4EktRPVAAiJhmWWfQrA0cFhAbOsQdSfIHjAud6YdkbL69+zSKjw==} - '@types/node@20.12.12': - resolution: {integrity: sha512-eWLDGF/FOSPtAvEqeRAQ4C8LSA7M1I7i0ky1I8U7kD1J5ITyW3AsRhQrKVoWf5pFKZ2kILsEGJhsI9r93PYnOw==} + '@types/node@20.12.13': + resolution: {integrity: sha512-gBGeanV41c1L171rR7wjbMiEpEI/l5XFQdLLfhr/REwpgDy/4U8y89+i8kRiLzDyZdOkXh+cRaTetUnCYutoXA==} '@types/parse-json@4.0.2': resolution: {integrity: 
sha512-dISoDXWWQwUquiKsyZ4Ng+HX2KsPL7LyHKHQwgGFEA3IaKac4Obd+h2a/a6waisAoepJlBcx9paWqjA8/HVjCw==} @@ -4283,8 +4283,8 @@ packages: safer-buffer@2.1.2: resolution: {integrity: sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==} - sass@1.77.2: - resolution: {integrity: sha512-eb4GZt1C3avsX3heBNlrc7I09nyT00IUuo4eFhAbeXWU2fvA7oXI53SxODVAA+zgZCk9aunAZgO+losjR3fAwA==} + sass@1.77.3: + resolution: {integrity: sha512-WJHo+jmFp0dwRuymPmIovuxHaBntcCyja5hCB0yYY9wWrViEp4kF5Cdai98P72v6FzroPuABqu+ddLMbQWmwzA==} engines: {node: '>=14.0.0'} hasBin: true @@ -5199,11 +5199,11 @@ snapshots: '@babel/helper-validator-identifier': 7.24.5 to-fast-properties: 2.0.0 - '@commitlint/cli@19.3.0(@types/node@20.12.12)(typescript@5.4.5)': + '@commitlint/cli@19.3.0(@types/node@20.12.13)(typescript@5.4.5)': dependencies: '@commitlint/format': 19.3.0 '@commitlint/lint': 19.2.2 - '@commitlint/load': 19.2.0(@types/node@20.12.12)(typescript@5.4.5) + '@commitlint/load': 19.2.0(@types/node@20.12.13)(typescript@5.4.5) '@commitlint/read': 19.2.1 '@commitlint/types': 19.0.3 execa: 8.0.1 @@ -5250,7 +5250,7 @@ snapshots: '@commitlint/rules': 19.0.3 '@commitlint/types': 19.0.3 - '@commitlint/load@19.2.0(@types/node@20.12.12)(typescript@5.4.5)': + '@commitlint/load@19.2.0(@types/node@20.12.13)(typescript@5.4.5)': dependencies: '@commitlint/config-validator': 19.0.3 '@commitlint/execute-rule': 19.0.0 @@ -5258,7 +5258,7 @@ snapshots: '@commitlint/types': 19.0.3 chalk: 5.3.0 cosmiconfig: 9.0.0(typescript@5.4.5) - cosmiconfig-typescript-loader: 5.0.0(@types/node@20.12.12)(cosmiconfig@9.0.0(typescript@5.4.5))(typescript@5.4.5) + cosmiconfig-typescript-loader: 5.0.0(@types/node@20.12.13)(cosmiconfig@9.0.0(typescript@5.4.5))(typescript@5.4.5) lodash.isplainobject: 4.0.6 lodash.merge: 4.6.2 lodash.uniq: 4.5.0 @@ -5627,13 +5627,13 @@ snapshots: '@floating-ui/utils@0.2.2': {} - 
'@generouted/react-router@1.19.5(react-router-dom@6.23.1(react-dom@19.0.0-rc-935180c7e0-20240524(react@19.0.0-rc-935180c7e0-20240524))(react@19.0.0-rc-935180c7e0-20240524))(react@19.0.0-rc-935180c7e0-20240524)(vite@5.2.12(@types/node@20.12.12)(less@4.2.0)(sass@1.77.2)(stylus@0.62.0))': + '@generouted/react-router@1.19.5(react-router-dom@6.23.1(react-dom@19.0.0-rc-935180c7e0-20240524(react@19.0.0-rc-935180c7e0-20240524))(react@19.0.0-rc-935180c7e0-20240524))(react@19.0.0-rc-935180c7e0-20240524)(vite@5.2.12(@types/node@20.12.13)(less@4.2.0)(sass@1.77.3)(stylus@0.62.0))': dependencies: fast-glob: 3.3.2 - generouted: 1.19.5(vite@5.2.12(@types/node@20.12.12)(less@4.2.0)(sass@1.77.2)(stylus@0.62.0)) + generouted: 1.19.5(vite@5.2.12(@types/node@20.12.13)(less@4.2.0)(sass@1.77.3)(stylus@0.62.0)) react: 19.0.0-rc-935180c7e0-20240524 react-router-dom: 6.23.1(react-dom@19.0.0-rc-935180c7e0-20240524(react@19.0.0-rc-935180c7e0-20240524))(react@19.0.0-rc-935180c7e0-20240524) - vite: 5.2.12(@types/node@20.12.12)(less@4.2.0)(sass@1.77.2)(stylus@0.62.0) + vite: 5.2.12(@types/node@20.12.13)(less@4.2.0)(sass@1.77.3)(stylus@0.62.0) '@humanwhocodes/config-array@0.11.14': dependencies: @@ -6098,12 +6098,12 @@ snapshots: dependencies: '@types/http-cache-semantics': 4.0.4 '@types/keyv': 3.1.4 - '@types/node': 20.12.12 + '@types/node': 20.12.13 '@types/responselike': 1.0.3 '@types/conventional-commits-parser@5.0.0': dependencies: - '@types/node': 20.12.12 + '@types/node': 20.12.13 '@types/debug@4.1.12': dependencies: @@ -6120,7 +6120,7 @@ snapshots: '@types/fs-extra@11.0.4': dependencies: '@types/jsonfile': 6.1.4 - '@types/node': 20.12.12 + '@types/node': 20.12.13 '@types/hast@3.0.4': dependencies: @@ -6134,11 +6134,11 @@ snapshots: '@types/jsonfile@6.1.4': dependencies: - '@types/node': 20.12.12 + '@types/node': 20.12.13 '@types/keyv@3.1.4': dependencies: - '@types/node': 20.12.12 + '@types/node': 20.12.13 '@types/lodash-es@4.17.12': dependencies: @@ -6158,7 +6158,7 @@ snapshots: 
dependencies: undici-types: 5.26.5 - '@types/node@20.12.12': + '@types/node@20.12.13': dependencies: undici-types: 5.26.5 @@ -6180,7 +6180,7 @@ snapshots: '@types/responselike@1.0.3': dependencies: - '@types/node': 20.12.12 + '@types/node': 20.12.13 '@types/unist@2.0.10': {} @@ -6188,7 +6188,7 @@ snapshots: '@types/yauzl@2.10.3': dependencies: - '@types/node': 20.12.12 + '@types/node': 20.12.13 optional: true '@typescript-eslint/eslint-plugin@7.11.0(@typescript-eslint/parser@7.11.0(eslint@8.57.0)(typescript@5.4.5))(eslint@8.57.0)(typescript@5.4.5)': @@ -6274,14 +6274,14 @@ snapshots: '@ungap/structured-clone@1.2.0': {} - '@vitejs/plugin-react@4.3.0(vite@5.2.12(@types/node@20.12.12)(less@4.2.0)(sass@1.77.2)(stylus@0.62.0))': + '@vitejs/plugin-react@4.3.0(vite@5.2.12(@types/node@20.12.13)(less@4.2.0)(sass@1.77.3)(stylus@0.62.0))': dependencies: '@babel/core': 7.24.5 '@babel/plugin-transform-react-jsx-self': 7.24.5(@babel/core@7.24.5) '@babel/plugin-transform-react-jsx-source': 7.24.1(@babel/core@7.24.5) '@types/babel__core': 7.20.5 react-refresh: 0.14.2 - vite: 5.2.12(@types/node@20.12.12)(less@4.2.0)(sass@1.77.2)(stylus@0.62.0) + vite: 5.2.12(@types/node@20.12.13)(less@4.2.0)(sass@1.77.3)(stylus@0.62.0) transitivePeerDependencies: - supports-color @@ -6613,7 +6613,7 @@ snapshots: chokidar@3.6.0: dependencies: anymatch: 3.1.3 - braces: 3.0.2 + braces: 3.0.3 glob-parent: 5.1.2 is-binary-path: 2.1.0 is-glob: 4.0.3 @@ -6724,9 +6724,9 @@ snapshots: dependencies: is-what: 3.14.1 - cosmiconfig-typescript-loader@5.0.0(@types/node@20.12.12)(cosmiconfig@9.0.0(typescript@5.4.5))(typescript@5.4.5): + cosmiconfig-typescript-loader@5.0.0(@types/node@20.12.13)(cosmiconfig@9.0.0(typescript@5.4.5))(typescript@5.4.5): dependencies: - '@types/node': 20.12.12 + '@types/node': 20.12.13 cosmiconfig: 9.0.0(typescript@5.4.5) jiti: 1.21.0 typescript: 5.4.5 @@ -7537,9 +7537,9 @@ snapshots: functions-have-names@1.2.3: {} - 
generouted@1.19.5(vite@5.2.12(@types/node@20.12.12)(less@4.2.0)(sass@1.77.2)(stylus@0.62.0)): + generouted@1.19.5(vite@5.2.12(@types/node@20.12.13)(less@4.2.0)(sass@1.77.3)(stylus@0.62.0)): dependencies: - vite: 5.2.12(@types/node@20.12.12)(less@4.2.0)(sass@1.77.2)(stylus@0.62.0) + vite: 5.2.12(@types/node@20.12.13)(less@4.2.0)(sass@1.77.3)(stylus@0.62.0) gensync@1.0.0-beta.2: {} @@ -9199,7 +9199,7 @@ snapshots: safer-buffer@2.1.2: optional: true - sass@1.77.2: + sass@1.77.3: dependencies: chokidar: 3.6.0 immutable: 4.3.5 @@ -9746,7 +9746,7 @@ snapshots: postcss-modules-local-by-default: 4.0.5(postcss@8.4.38) postcss-modules-scope: 3.2.0(postcss@8.4.38) reserved-words: 0.1.2 - sass: 1.77.2 + sass: 1.77.3 source-map-js: 1.2.0 stylus: 0.62.0 tsconfig-paths: 4.2.0 @@ -9888,46 +9888,46 @@ snapshots: esbuild: 0.19.12 monaco-editor: 0.49.0 - vite-plugin-sass-dts@1.3.22(postcss@8.4.38)(prettier@3.2.5)(sass@1.77.2)(vite@5.2.12(@types/node@20.12.12)(less@4.2.0)(sass@1.77.2)(stylus@0.62.0)): + vite-plugin-sass-dts@1.3.22(postcss@8.4.38)(prettier@3.2.5)(sass@1.77.3)(vite@5.2.12(@types/node@20.12.13)(less@4.2.0)(sass@1.77.3)(stylus@0.62.0)): dependencies: postcss: 8.4.38 postcss-js: 4.0.1(postcss@8.4.38) prettier: 3.2.5 - sass: 1.77.2 - vite: 5.2.12(@types/node@20.12.12)(less@4.2.0)(sass@1.77.2)(stylus@0.62.0) + sass: 1.77.3 + vite: 5.2.12(@types/node@20.12.13)(less@4.2.0)(sass@1.77.3)(stylus@0.62.0) - vite-plugin-svgr@4.2.0(rollup@4.17.2)(typescript@5.4.5)(vite@5.2.12(@types/node@20.12.12)(less@4.2.0)(sass@1.77.2)(stylus@0.62.0)): + vite-plugin-svgr@4.2.0(rollup@4.17.2)(typescript@5.4.5)(vite@5.2.12(@types/node@20.12.13)(less@4.2.0)(sass@1.77.3)(stylus@0.62.0)): dependencies: '@rollup/pluginutils': 5.1.0(rollup@4.17.2) '@svgr/core': 8.1.0(typescript@5.4.5) '@svgr/plugin-jsx': 8.1.0(@svgr/core@8.1.0(typescript@5.4.5)) - vite: 5.2.12(@types/node@20.12.12)(less@4.2.0)(sass@1.77.2)(stylus@0.62.0) + vite: 5.2.12(@types/node@20.12.13)(less@4.2.0)(sass@1.77.3)(stylus@0.62.0) 
transitivePeerDependencies: - rollup - supports-color - typescript - vite-tsconfig-paths@4.3.2(typescript@5.4.5)(vite@5.2.12(@types/node@20.12.12)(less@4.2.0)(sass@1.77.2)(stylus@0.62.0)): + vite-tsconfig-paths@4.3.2(typescript@5.4.5)(vite@5.2.12(@types/node@20.12.13)(less@4.2.0)(sass@1.77.3)(stylus@0.62.0)): dependencies: debug: 4.3.4 globrex: 0.1.2 tsconfck: 3.0.3(typescript@5.4.5) optionalDependencies: - vite: 5.2.12(@types/node@20.12.12)(less@4.2.0)(sass@1.77.2)(stylus@0.62.0) + vite: 5.2.12(@types/node@20.12.13)(less@4.2.0)(sass@1.77.3)(stylus@0.62.0) transitivePeerDependencies: - supports-color - typescript - vite@5.2.12(@types/node@20.12.12)(less@4.2.0)(sass@1.77.2)(stylus@0.62.0): + vite@5.2.12(@types/node@20.12.13)(less@4.2.0)(sass@1.77.3)(stylus@0.62.0): dependencies: esbuild: 0.20.2 postcss: 8.4.38 rollup: 4.17.2 optionalDependencies: - '@types/node': 20.12.12 + '@types/node': 20.12.13 fsevents: 2.3.3 less: 4.2.0 - sass: 1.77.2 + sass: 1.77.3 stylus: 0.62.0 void-elements@3.1.0: {} diff --git a/clash-verge-rev/package.json b/clash-verge-rev/package.json index 3751384663..2f4166e635 100644 --- a/clash-verge-rev/package.json +++ b/clash-verge-rev/package.json @@ -35,7 +35,6 @@ "dayjs": "1.11.5", "i18next": "^23.11.3", "lodash-es": "^4.17.21", - "matchmedia-polyfill": "^0.3.2", "meta-json-schema": "1.18.5-alpha", "monaco-editor": "^0.49.0", "monaco-yaml": "^5.1.1", diff --git a/clash-verge-rev/pnpm-lock.yaml b/clash-verge-rev/pnpm-lock.yaml index e0f15839bc..db08d3497a 100644 --- a/clash-verge-rev/pnpm-lock.yaml +++ b/clash-verge-rev/pnpm-lock.yaml @@ -58,9 +58,6 @@ importers: lodash-es: specifier: ^4.17.21 version: 4.17.21 - matchmedia-polyfill: - specifier: ^0.3.2 - version: 0.3.2 meta-json-schema: specifier: 1.18.5-alpha version: 1.18.5-alpha @@ -3224,12 +3221,6 @@ packages: integrity: sha512-iIRwTIf0QKV3UAnYK4PU8uiEc4SRh5jX0mwpIwETPpHdhVM4f53RSwS/vXvN1JhGX+Cs7B8qIq3d6AH49O5fAQ==, } - matchmedia-polyfill@0.3.2: - resolution: - { - integrity: 
sha512-B2zRzjqxZFUusBZrZux59XFFLoTN99SbGranxIHfjZVLGZuy8Iaf/s5iNR3qJwRQZBjBKsU6qBSUCltLV82gdw==, - } - mdast-util-from-markdown@2.0.1: resolution: { @@ -6432,8 +6423,6 @@ snapshots: dependencies: "@jridgewell/sourcemap-codec": 1.4.15 - matchmedia-polyfill@0.3.2: {} - mdast-util-from-markdown@2.0.1: dependencies: "@types/mdast": 4.0.4 diff --git a/clash-verge-rev/src/components/base/base-search-box.tsx b/clash-verge-rev/src/components/base/base-search-box.tsx index bffb5078e9..3a1545867a 100644 --- a/clash-verge-rev/src/components/base/base-search-box.tsx +++ b/clash-verge-rev/src/components/base/base-search-box.tsx @@ -1,6 +1,6 @@ -import { Box, SvgIcon, TextField, Theme, styled } from "@mui/material"; +import { Box, SvgIcon, TextField, styled } from "@mui/material"; import Tooltip from "@mui/material/Tooltip"; -import { ChangeEvent, useState } from "react"; +import { ChangeEvent, useEffect, useRef, useState } from "react"; import { useTranslation } from "react-i18next"; import matchCaseIcon from "@/assets/image/component/match_case.svg?react"; @@ -22,6 +22,7 @@ type SearchProps = { export const BaseSearchBox = styled((props: SearchProps) => { const { t } = useTranslation(); + const inputRef = useRef(null); const [matchCase, setMatchCase] = useState(true); const [matchWholeWord, setMatchWholeWord] = useState(false); const [useRegularExpression, setUseRegularExpression] = useState(false); @@ -36,6 +37,14 @@ export const BaseSearchBox = styled((props: SearchProps) => { inheritViewBox: true, }; + useEffect(() => { + if (!inputRef.current) return; + + onChange({ + target: inputRef.current, + } as ChangeEvent); + }, [matchCase, matchWholeWord, useRegularExpression]); + const onChange = (e: ChangeEvent) => { props.onSearch( (content) => doSearch([content], e.target?.value ?? 
"").length > 0, @@ -80,6 +89,7 @@ export const BaseSearchBox = styled((props: SearchProps) => { return ( 0) { exit 1 } }' - +BUILD_TAG_FOR_NODE_EXPORTER="nofibrechannel,nomountstats" # -w -s 参数的解释:You will get the smallest binaries if you compile with -ldflags '-w -s'. The -w turns off DWARF debugging information # for more information, please refer to https://stackoverflow.com/questions/22267189/what-does-the-w-flag-mean-when-passed-in-via-the-ldflags-option-to-the-go-comman -# we need CGO_ENABLED=1 because we import the node_exporter ,and we need install `glibc-source,libc6` to make it work -# TODO check if node_exporter collector with CGO_ENABLED=0 is enough -GOBUILD=CGO_ENABLED=0 go build -trimpath -ldflags="-w -s -X ${PACKAGE}.GitBranch=${BRANCH} -X ${PACKAGE}.GitRevision=${REVISION} -X ${PACKAGE}.BuildTime=${BUILDTIME}" +GOBUILD=CGO_ENABLED=0 go build -tags ${BUILD_TAG_FOR_NODE_EXPORTER} -trimpath -ldflags="-w -s -X ${PACKAGE}.GitBranch=${BRANCH} -X ${PACKAGE}.GitRevision=${REVISION} -X ${PACKAGE}.BuildTime=${BUILDTIME}" + tools: @echo "run setup tools" @@ -35,7 +34,7 @@ fmt: tools @tools/bin/gofumpt -l -w $(FILES) 2>&1 | $(FAIL_ON_STDOUT) test: - go test -v -count=1 -timeout=1m ./... + go test -tags ${BUILD_TAG_FOR_NODE_EXPORTER} -v -count=1 -timeout=1m ./... 
build: ${GOBUILD} -o $(BINDIR)/$(NAME) cmd/ehco/main.go diff --git a/echo/go.mod b/echo/go.mod index ec1f6048f7..c13bbaf988 100644 --- a/echo/go.mod +++ b/echo/go.mod @@ -5,15 +5,15 @@ go 1.22 toolchain go1.22.1 require ( - github.com/alecthomas/kingpin/v2 v2.4.0 github.com/getsentry/sentry-go v0.27.0 github.com/go-ping/ping v1.1.0 github.com/gobwas/ws v1.3.2 github.com/labstack/echo/v4 v4.11.4 - github.com/prometheus/client_golang v1.19.0 - github.com/prometheus/client_model v0.5.0 - github.com/prometheus/common v0.48.0 - github.com/prometheus/node_exporter v1.7.0 + github.com/prometheus/client_golang v1.19.1 + github.com/prometheus/client_model v0.6.1 + github.com/prometheus/common v0.53.0 + github.com/prometheus/node_exporter v1.8.1 + github.com/sagernet/sing v0.3.8 github.com/sagernet/sing-box v1.8.14 github.com/stretchr/testify v1.9.0 github.com/urfave/cli/v2 v2.27.1 @@ -27,16 +27,17 @@ require ( ) require ( - github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect + github.com/alecthomas/kingpin/v2 v2.4.0 // indirect + github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 // indirect github.com/andybalholm/brotli v1.1.0 // indirect - github.com/beevik/ntp v1.3.0 // indirect + github.com/beevik/ntp v1.4.2 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/cloudflare/circl v1.3.7 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect github.com/davecgh/go-spew v1.1.1 // indirect - github.com/dennwc/btrfs v0.0.0-20230312211831-a1f570bd01a1 // indirect + github.com/dennwc/btrfs v0.0.0-20240418142341-0167142bde7a // indirect github.com/dennwc/ioctl v1.0.0 // indirect github.com/dgryski/go-metro v0.0.0-20211217172704-adc40b04c140 // indirect github.com/ema/qdisc v1.0.0 // indirect @@ -60,7 +61,7 @@ require ( github.com/hodgesds/perf-utils v0.7.0 // indirect 
github.com/illumos/go-kstat v0.0.0-20210513183136-173c9b0a9973 // indirect github.com/josharian/native v1.1.0 // indirect - github.com/jsimonetti/rtnetlink v1.3.5 // indirect + github.com/jsimonetti/rtnetlink v1.4.2 // indirect github.com/klauspost/compress v1.17.7 // indirect github.com/klauspost/cpuid/v2 v2.2.7 // indirect github.com/labstack/gommon v0.4.2 // indirect @@ -72,8 +73,8 @@ require ( github.com/mdlayher/ethtool v0.1.0 // indirect github.com/mdlayher/genetlink v1.3.2 // indirect github.com/mdlayher/netlink v1.7.2 // indirect - github.com/mdlayher/socket v0.4.1 // indirect - github.com/mdlayher/wifi v0.1.0 // indirect + github.com/mdlayher/socket v0.5.1 // indirect + github.com/mdlayher/wifi v0.2.0 // indirect github.com/miekg/dns v1.1.59 // indirect github.com/onsi/ginkgo/v2 v2.16.0 // indirect github.com/opencontainers/selinux v1.11.0 // indirect @@ -82,7 +83,7 @@ require ( github.com/pires/go-proxyproto v0.7.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus-community/go-runit v0.1.0 // indirect - github.com/prometheus/procfs v0.12.0 // indirect + github.com/prometheus/procfs v0.15.0 // indirect github.com/quic-go/quic-go v0.41.0 // indirect github.com/refraction-networking/utls v1.6.3 // indirect github.com/riobard/go-bloom v0.0.0-20200614022211-cdc8013cb5b3 // indirect @@ -90,7 +91,6 @@ require ( github.com/safchain/ethtool v0.3.0 // indirect github.com/sagernet/gvisor v0.0.0-20240428053021-e691de28565f // indirect github.com/sagernet/netlink v0.0.0-20240523065131-45e60152f9ba // indirect - github.com/sagernet/sing v0.3.8 // indirect github.com/sagernet/sing-dns v0.1.14 // indirect github.com/sagernet/sing-shadowsocks v0.2.6 // indirect github.com/sagernet/sing-tun v0.2.7 // indirect @@ -107,7 +107,7 @@ require ( go.uber.org/multierr v1.11.0 // indirect go4.org/netipx v0.0.0-20231129151722-fdeea329fbba // indirect golang.org/x/crypto v0.23.0 // indirect - golang.org/x/exp v0.0.0-20240416160154-fe59bbe5cc7f // 
indirect + golang.org/x/exp v0.0.0-20240529005216-23cca8864a10 // indirect golang.org/x/mod v0.17.0 // indirect golang.org/x/net v0.25.0 // indirect golang.org/x/sync v0.7.0 // indirect @@ -117,7 +117,7 @@ require ( golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2 // indirect golang.zx2c4.com/wireguard v0.0.0-20231211153847-12269c276173 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240308144416-29370a3891b7 // indirect - google.golang.org/protobuf v1.33.0 // indirect + google.golang.org/protobuf v1.34.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gvisor.dev/gvisor v0.0.0-20231104011432-48a6d7d5bd0b // indirect howett.net/plist v1.0.1 // indirect diff --git a/echo/go.sum b/echo/go.sum index 385a38e24a..ddfab4b747 100644 --- a/echo/go.sum +++ b/echo/go.sum @@ -10,22 +10,22 @@ git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGy github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/alecthomas/kingpin/v2 v2.4.0 h1:f48lwail6p8zpO1bC4TxtqACaGqHYA22qkHjHpqDjYY= github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= -github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc= -github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= +github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 h1:ez/4by2iGztzR4L0zgAOR8lTQK9VlyBVVd7G4omaOQs= +github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/andybalholm/brotli v1.1.0 h1:eLKJA0d02Lf0mVpIDgYnqXcUn0GqVmEFny3VuID1U3M= github.com/andybalholm/brotli v1.1.0/go.mod h1:sms7XGricyQI9K10gOSf56VKKWS4oLer58Q+mhRPtnY= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= -github.com/beevik/ntp v1.3.0 
h1:/w5VhpW5BGKS37vFm1p9oVk/t4HnnkKZAZIubHM6F7Q= -github.com/beevik/ntp v1.3.0/go.mod h1:vD6h1um4kzXpqmLTuu0cCLcC+NfvC0IC+ltmEDA8E78= +github.com/beevik/ntp v1.4.2 h1:cjYhZqczanf6br/ocViahE75ipj7CmKQAh7fSBaCNK4= +github.com/beevik/ntp v1.4.2/go.mod h1:zkATLTt8VUZuOfYX2KgOnir4yvtAxWbnUUA24umXFnc= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cilium/ebpf v0.11.0 h1:V8gS/bTCCjX9uUnkUFUpPsksM8n1lXBAvHcpiFk1X2Y= -github.com/cilium/ebpf v0.11.0/go.mod h1:WE7CZAnqOL2RouJ4f1uyNhqr2P4CCvXFIqdRDUgWsVs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cilium/ebpf v0.12.3 h1:8ht6F9MquybnY97at+VDZb3eQQr8ev79RueWeVaEcG4= +github.com/cilium/ebpf v0.12.3/go.mod h1:TctK1ivibvI3znr66ljgi4hqOT8EYQjz1KWBfb1UVgM= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU= github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA= @@ -37,8 +37,8 @@ github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46t github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 
h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dennwc/btrfs v0.0.0-20230312211831-a1f570bd01a1 h1:ue4Es4Xzz255hWQ7NAWzZxuXG+YOV7URzzusLLSe0zU= -github.com/dennwc/btrfs v0.0.0-20230312211831-a1f570bd01a1/go.mod h1:MYsOV9Dgsec3FFSOjywi0QK5r6TeBbdWxdrMGtiYXHA= +github.com/dennwc/btrfs v0.0.0-20240418142341-0167142bde7a h1:KfFsGLJFVdCXlySUkV2FmxNtmiztpJb6tV+XYBmmv8E= +github.com/dennwc/btrfs v0.0.0-20240418142341-0167142bde7a/go.mod h1:MYsOV9Dgsec3FFSOjywi0QK5r6TeBbdWxdrMGtiYXHA= github.com/dennwc/ioctl v1.0.0 h1:DsWAAjIxRqNcLn9x6mwfuf2pet3iB7aK90K4tF16rLg= github.com/dennwc/ioctl v1.0.0/go.mod h1:ellh2YB5ldny99SBU/VX7Nq0xiZbHphf1DrtHxxjMk0= github.com/dgryski/go-metro v0.0.0-20200812162917-85c65e2d0165/go.mod h1:c9O8+fpSOX1DM8cPNSkX/qsBWdkD4yd2dpciOWQjpBw= @@ -128,8 +128,8 @@ github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0 github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/josharian/native v1.1.0 h1:uuaP0hAbW7Y4l0ZRQ6C9zfb7Mg1mbFKry/xzDAfmtLA= github.com/josharian/native v1.1.0/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w= -github.com/jsimonetti/rtnetlink v1.3.5 h1:hVlNQNRlLDGZz31gBPicsG7Q53rnlsz1l1Ix/9XlpVA= -github.com/jsimonetti/rtnetlink v1.3.5/go.mod h1:0LFedyiTkebnd43tE4YAkWGIq9jQphow4CcwxaT2Y00= +github.com/jsimonetti/rtnetlink v1.4.2 h1:Df9w9TZ3npHTyDn0Ev9e1uzmN2odmXd0QX+J5GTEn90= +github.com/jsimonetti/rtnetlink v1.4.2/go.mod h1:92s6LJdE+1iOrw+F2/RO7LYI2Qd8pPpFNNUYW06gcoM= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= @@ -169,10 +169,10 @@ github.com/mdlayher/genetlink v1.3.2 
h1:KdrNKe+CTu+IbZnm/GVUMXSqBBLqcGpRDa0xkQy5 github.com/mdlayher/genetlink v1.3.2/go.mod h1:tcC3pkCrPUGIKKsCsp0B3AdaaKuHtaxoJRz3cc+528o= github.com/mdlayher/netlink v1.7.2 h1:/UtM3ofJap7Vl4QWCPDGXY8d3GIY2UGSDbK+QWmY8/g= github.com/mdlayher/netlink v1.7.2/go.mod h1:xraEF7uJbxLhc5fpHL4cPe221LI2bdttWlU+ZGLfQSw= -github.com/mdlayher/socket v0.4.1 h1:eM9y2/jlbs1M615oshPQOHZzj6R6wMT7bX5NPiQvn2U= -github.com/mdlayher/socket v0.4.1/go.mod h1:cAqeGjoufqdxWkD7DkpyS+wcefOtmu5OQ8KuoJGIReA= -github.com/mdlayher/wifi v0.1.0 h1:y8wYRUXwok5CtUZOXT3egghYesX0O79E3ALl+SIDm9Q= -github.com/mdlayher/wifi v0.1.0/go.mod h1:+gBYnZAMcUKHSFzMJXwlz7tLsEHgwDJ9DJCefhJM+gI= +github.com/mdlayher/socket v0.5.1 h1:VZaqt6RkGkt2OE9l3GcC6nZkqD3xKeQLyfleW/uBcos= +github.com/mdlayher/socket v0.5.1/go.mod h1:TjPLHI1UgwEv5J1B5q0zTZq12A/6H7nKmtTanQE37IQ= +github.com/mdlayher/wifi v0.2.0 h1:vwbVyu5MWTiFNvOmWdvIx9veBlMVnEasZ90PhUi1DYU= +github.com/mdlayher/wifi v0.2.0/go.mod h1:yOfWhVZ4FFJxeHzAxDzt87Om9EkqqcCiY9Gi5gfSXwI= github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= github.com/miekg/dns v1.1.59 h1:C9EXc/UToRwKLhK5wKU/I4QVsBUc8kE6MkHBkeypWZs= github.com/miekg/dns v1.1.59/go.mod h1:nZpewl5p6IvctfgrckopVx2OlSEHPRO/U4SYkRklrEk= @@ -203,19 +203,19 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN github.com/prometheus-community/go-runit v0.1.0 h1:uTWEj/Fn2RoLdfg/etSqwzgYNOYPrARx1BHUN052tGA= github.com/prometheus-community/go-runit v0.1.0/go.mod h1:AvJ9Jo3gAFu2lbM4+qfjdpq30FfiLDJZKbQ015u08IQ= github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU= -github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k= +github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= +github.com/prometheus/client_golang v1.19.1/go.mod 
h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= -github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE= -github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc= -github.com/prometheus/node_exporter v1.7.0 h1:7MVpSdfWrThNo0SlldhUyAVFZ7LWbC9+QJRzB4QmkE8= -github.com/prometheus/node_exporter v1.7.0/go.mod h1:iPAWQmoxv93c51WymsZMdPOJtL/Q4IkGQrgkUGrrgIc= +github.com/prometheus/common v0.53.0 h1:U2pL9w9nmJwJDa4qqLQ3ZaePJ6ZTwt7cMD3AG3+aLCE= +github.com/prometheus/common v0.53.0/go.mod h1:BrxBKv3FWBIGXw89Mg1AeBq7FSyRzXWI3l3e7W3RN5U= +github.com/prometheus/node_exporter v1.8.1 h1:qYIN+ghn7kEggHe4pcIRp9oXkljU8ARWyEHBr286RPY= +github.com/prometheus/node_exporter v1.8.1/go.mod h1:rJMoAQMglUjAZ7nggHnRuwfJ0hKUVW6+Gv+IaMxh6js= github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= -github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= +github.com/prometheus/procfs v0.15.0 h1:A82kmvXJq2jTu5YUhSGNlYoxh85zLnKgPz4bMZgI5Ek= +github.com/prometheus/procfs v0.15.0/go.mod h1:Y0RJ/Y5g5wJpkTisOtqwDSo4HwhGmLB4VQSw2sQJLHk= github.com/quic-go/quic-go v0.41.0 h1:aD8MmHfgqTURWNJy48IYFg2OnxwHT3JL7ahGs73lb4k= github.com/quic-go/quic-go 
v0.41.0/go.mod h1:qCkNjqczPEvgsOnxZ0eCD14lv+B2LHlFAB++CNOh9hA= github.com/refraction-networking/utls v1.6.3 h1:MFOfRN35sSx6K5AZNIoESsBuBxS2LCgRilRIdHb6fDc= @@ -273,15 +273,10 @@ github.com/siebenmann/go-kstat v0.0.0-20210513183136-173c9b0a9973/go.mod h1:G81a github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= @@ -310,7 +305,6 @@ github.com/xtls/reality v0.0.0-20231112171332-de1173cf2b19 h1:capMfFYRgH9BCLd6A3 github.com/xtls/reality v0.0.0-20231112171332-de1173cf2b19/go.mod h1:dm4y/1QwzjGaK17ofi0Vs6NpKAHegZky8qk6J2JJZAE= github.com/xtls/xray-core v1.8.9 h1:wefcON0behu4DoQvCKJYZKsJlSvNhyq2I7vC2fxLFcY= 
github.com/xtls/xray-core v1.8.9/go.mod h1:XDE4f422qJKAU3hNDSNZyWrOHvn9kF8UHVdyOzU38rc= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= @@ -331,18 +325,14 @@ golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+ golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I= golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI= golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20240416160154-fe59bbe5cc7f h1:99ci1mjWVBWwJiEKYY6jWa4d2nTQVIEhZIptnrVb1XY= -golang.org/x/exp v0.0.0-20240416160154-fe59bbe5cc7f/go.mod h1:/lliqkxwWAhPjf5oSOIJup2XcqJaw8RGS6k3TGEc7GI= +golang.org/x/exp v0.0.0-20240529005216-23cca8864a10 h1:vpzMC/iZhYFAjJzHU0Cfuq+w1vLLsF2vLkDrPjzKYck= +golang.org/x/exp v0.0.0-20240529005216-23cca8864a10/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc= golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint 
v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -353,13 +343,7 @@ golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.11.0/go.mod h1:2L/ixqYpgIVXmeoSA/4Lu7BzTG4KIyPIryS4IsOd1oQ= golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -371,10 +355,7 @@ golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod 
h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -385,32 +366,18 @@ golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211031064116-611d5d643895/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220804214406-8e32c043e418/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys 
v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= -golang.org/x/term v0.9.0/go.mod h1:M6DEAAIenWoTxdKrOltXcmDY3rSplQUkrvaDU5FcQyo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -422,12 +389,8 @@ golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.21.0 h1:qc0xYgIbsSDt9EyWz05J5wfa7LOVW0YTLOXrqdLAWIw= golang.org/x/tools v0.21.0/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2 h1:B82qJJgjvYKsXS9jeunTOisW56dUokqW/FOteYJJ/yg= golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2/go.mod h1:deeaetjYA+DHMHg+sMSMI58GrEteJUUzzw7en6TJQcI= golang.zx2c4.com/wireguard v0.0.0-20231211153847-12269c276173 h1:/jFs0duh4rdb8uIfPMv78iAJGcPKDeqAFnaLBropIC4= @@ -452,8 +415,8 @@ google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM= google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA= -google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= -google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= +google.golang.org/protobuf 
v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/echo/internal/metrics/metrics.go b/echo/internal/metrics/metrics.go index 8639bd80b2..4d155afb1a 100644 --- a/echo/internal/metrics/metrics.go +++ b/echo/internal/metrics/metrics.go @@ -1,22 +1,11 @@ package metrics import ( - "fmt" - "math" - "net/url" "os" - "runtime" - "strings" "time" "github.com/Ehco1996/ehco/internal/config" - "github.com/alecthomas/kingpin/v2" - "github.com/go-ping/ping" "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/common/promlog" - "github.com/prometheus/common/version" - "github.com/prometheus/node_exporter/collector" - "go.uber.org/zap" ) const ( @@ -104,125 +93,6 @@ var ( }, []string{METRIC_LABEL_REMOTE}) ) -func (pg *PingGroup) newPinger(addr string) (*ping.Pinger, error) { - pinger := ping.New(addr) - if err := pinger.Resolve(); err != nil { - pg.logger.Error("failed to resolve pinger", zap.String("addr", addr), zap.Error(err)) - return nil, err - } - pinger.Interval = pingInterval - pinger.Timeout = time.Duration(math.MaxInt64) - pinger.RecordRtts = false - if runtime.GOOS != "darwin" { - pinger.SetPrivileged(true) - } - return pinger, nil -} - -type PingGroup struct { - logger *zap.Logger - - // k: addr - Pingers map[string]*ping.Pinger - - // k: addr v:relay rule label joined by "," - PingerLabels map[string]string -} - -func extractHost(input string) (string, error) { - // Check if the input string has a scheme, if not, add "http://" - if !strings.Contains(input, "://") { - input = "http://" + input - } - // Parse the URL - u, err := url.Parse(input) - if err != nil { - return "", err - } - return u.Hostname(), nil -} 
- -func NewPingGroup(cfg *config.Config) *PingGroup { - logger := zap.L().Named("pinger") - - pg := &PingGroup{ - logger: logger, - Pingers: make(map[string]*ping.Pinger), - PingerLabels: map[string]string{}, - } - - // parse addr from rule - for _, relayCfg := range cfg.RelayConfigs { - // NOTE for (https/ws/wss)://xxx.com -> xxx.com - for _, remote := range relayCfg.TCPRemotes { - addr, err := extractHost(remote) - if err != nil { - pg.logger.Error("try parse host error", zap.Error(err)) - } - if _, ok := pg.Pingers[addr]; ok { - // append rule label when remote host is same - pg.PingerLabels[addr] += fmt.Sprintf(",%s", relayCfg.Label) - continue - } - if pinger, err := pg.newPinger(addr); err != nil { - pg.logger.Error("new pinger meet error", zap.Error(err)) - } else { - pg.Pingers[pinger.Addr()] = pinger - pg.PingerLabels[addr] = relayCfg.Label - } - } - } - - // update metrics - for addr, pinger := range pg.Pingers { - pinger.OnRecv = func(pkt *ping.Packet) { - PingResponseDurationSeconds.WithLabelValues( - pkt.IPAddr.String(), pkt.Addr, pg.PingerLabels[addr]).Observe(pkt.Rtt.Seconds()) - pg.logger.Sugar().Infof("%d bytes from %s icmp_seq=%d time=%v ttl=%v", - pkt.Nbytes, pkt.Addr, pkt.Seq, pkt.Rtt, pkt.Ttl) - } - pinger.OnDuplicateRecv = func(pkt *ping.Packet) { - pg.logger.Sugar().Infof("%d bytes from %s icmp_seq=%d time=%v ttl=%v (DUP!)", - pkt.Nbytes, pkt.IPAddr, pkt.Seq, pkt.Rtt, pkt.Ttl) - } - } - return pg -} - -func (pg *PingGroup) Describe(ch chan<- *prometheus.Desc) { - ch <- PingRequestTotal -} - -func (pg *PingGroup) Collect(ch chan<- prometheus.Metric) { - for addr, pinger := range pg.Pingers { - stats := pinger.Statistics() - ch <- prometheus.MustNewConstMetric( - PingRequestTotal, - prometheus.CounterValue, - float64(stats.PacketsSent), - stats.IPAddr.String(), - stats.Addr, - pg.PingerLabels[addr], - ) - } -} - -func (pg *PingGroup) Run() { - if len(pg.Pingers) <= 0 { - return - } - pg.logger.Sugar().Infof("Start Ping Group now total pinger: 
%d", len(pg.Pingers)) - splay := time.Duration(pingInterval.Nanoseconds() / int64(len(pg.Pingers))) - for addr, pinger := range pg.Pingers { - go func() { - if err := pinger.Run(); err != nil { - pg.logger.Error("Starting pinger meet err", zap.String("addr", addr), zap.Error(err)) - } - }() - time.Sleep(splay) - } -} - func RegisterEhcoMetrics(cfg *config.Config) error { // traffic prometheus.MustRegister(EhcoAlive) @@ -241,27 +111,3 @@ func RegisterEhcoMetrics(cfg *config.Config) error { } return nil } - -func RegisterNodeExporterMetrics(cfg *config.Config) error { - level := &promlog.AllowedLevel{} - // mute node_exporter logger - if err := level.Set("error"); err != nil { - return err - } - promlogConfig := &promlog.Config{Level: level} - logger := promlog.New(promlogConfig) - // see this https://github.com/prometheus/node_exporter/pull/2463 - if _, err := kingpin.CommandLine.Parse([]string{}); err != nil { - return err - } - nc, err := collector.NewNodeCollector(logger) - if err != nil { - return fmt.Errorf("couldn't create collector: %s", err) - } - // nc.Collectors = collectors - prometheus.MustRegister( - nc, - version.NewCollector("node_exporter"), - ) - return nil -} diff --git a/echo/internal/metrics/node.go b/echo/internal/metrics/node.go new file mode 100644 index 0000000000..1b8a9c9eab --- /dev/null +++ b/echo/internal/metrics/node.go @@ -0,0 +1,32 @@ +package metrics + +import ( + "fmt" + + "github.com/Ehco1996/ehco/internal/config" + "github.com/alecthomas/kingpin/v2" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/promlog" + "github.com/prometheus/node_exporter/collector" +) + +func RegisterNodeExporterMetrics(cfg *config.Config) error { + level := &promlog.AllowedLevel{} + // mute node_exporter logger + if err := level.Set("error"); err != nil { + return err + } + + logger := promlog.New(&promlog.Config{Level: level}) + // node_exporter relay on `kingpin` to enable default node collector + // see 
https://github.com/prometheus/node_exporter/pull/2463 + if _, err := kingpin.CommandLine.Parse([]string{}); err != nil { + return err + } + nc, err := collector.NewNodeCollector(logger) + if err != nil { + return fmt.Errorf("couldn't create collector: %s", err) + } + prometheus.MustRegister(nc) + return nil +} diff --git a/echo/internal/metrics/ping.go b/echo/internal/metrics/ping.go new file mode 100644 index 0000000000..9c9434742f --- /dev/null +++ b/echo/internal/metrics/ping.go @@ -0,0 +1,134 @@ +package metrics + +import ( + "fmt" + "math" + "net/url" + "runtime" + "strings" + "time" + + "github.com/Ehco1996/ehco/internal/config" + "github.com/go-ping/ping" + "github.com/prometheus/client_golang/prometheus" + "go.uber.org/zap" +) + +func (pg *PingGroup) newPinger(addr string) (*ping.Pinger, error) { + pinger := ping.New(addr) + if err := pinger.Resolve(); err != nil { + pg.logger.Error("failed to resolve pinger", zap.String("addr", addr), zap.Error(err)) + return nil, err + } + pinger.Interval = pingInterval + pinger.Timeout = time.Duration(math.MaxInt64) + pinger.RecordRtts = false + if runtime.GOOS != "darwin" { + pinger.SetPrivileged(true) + } + return pinger, nil +} + +type PingGroup struct { + logger *zap.Logger + + // k: addr + Pingers map[string]*ping.Pinger + + // k: addr v:relay rule label joined by "," + PingerLabels map[string]string +} + +func extractHost(input string) (string, error) { + // Check if the input string has a scheme, if not, add "http://" + if !strings.Contains(input, "://") { + input = "http://" + input + } + // Parse the URL + u, err := url.Parse(input) + if err != nil { + return "", err + } + return u.Hostname(), nil +} + +func NewPingGroup(cfg *config.Config) *PingGroup { + logger := zap.L().Named("pinger") + + pg := &PingGroup{ + logger: logger, + Pingers: make(map[string]*ping.Pinger), + PingerLabels: map[string]string{}, + } + + // parse addr from rule + for _, relayCfg := range cfg.RelayConfigs { + // NOTE for 
(https/ws/wss)://xxx.com -> xxx.com + for _, remote := range relayCfg.TCPRemotes { + addr, err := extractHost(remote) + if err != nil { + pg.logger.Error("try parse host error", zap.Error(err)) + } + if _, ok := pg.Pingers[addr]; ok { + // append rule label when remote host is same + pg.PingerLabels[addr] += fmt.Sprintf(",%s", relayCfg.Label) + continue + } + if pinger, err := pg.newPinger(addr); err != nil { + pg.logger.Error("new pinger meet error", zap.Error(err)) + } else { + pg.Pingers[pinger.Addr()] = pinger + pg.PingerLabels[addr] = relayCfg.Label + } + } + } + + // update metrics + for addr, pinger := range pg.Pingers { + pinger.OnRecv = func(pkt *ping.Packet) { + PingResponseDurationSeconds.WithLabelValues( + pkt.IPAddr.String(), pkt.Addr, pg.PingerLabels[addr]).Observe(pkt.Rtt.Seconds()) + pg.logger.Sugar().Infof("%d bytes from %s icmp_seq=%d time=%v ttl=%v", + pkt.Nbytes, pkt.Addr, pkt.Seq, pkt.Rtt, pkt.Ttl) + } + pinger.OnDuplicateRecv = func(pkt *ping.Packet) { + pg.logger.Sugar().Infof("%d bytes from %s icmp_seq=%d time=%v ttl=%v (DUP!)", + pkt.Nbytes, pkt.IPAddr, pkt.Seq, pkt.Rtt, pkt.Ttl) + } + } + return pg +} + +func (pg *PingGroup) Describe(ch chan<- *prometheus.Desc) { + ch <- PingRequestTotal +} + +func (pg *PingGroup) Collect(ch chan<- prometheus.Metric) { + for addr, pinger := range pg.Pingers { + stats := pinger.Statistics() + ch <- prometheus.MustNewConstMetric( + PingRequestTotal, + prometheus.CounterValue, + float64(stats.PacketsSent), + stats.IPAddr.String(), + stats.Addr, + pg.PingerLabels[addr], + ) + } +} + +func (pg *PingGroup) Run() { + if len(pg.Pingers) <= 0 { + return + } + pg.logger.Sugar().Infof("Start Ping Group now total pinger: %d", len(pg.Pingers)) + splay := time.Duration(pingInterval.Nanoseconds() / int64(len(pg.Pingers))) + for addr, pinger := range pg.Pingers { + go func() { + if err := pinger.Run(); err != nil { + pg.logger.Error("Starting pinger meet err", zap.String("addr", addr), zap.Error(err)) + } + }() + 
time.Sleep(splay) + } +} diff --git a/echo/internal/transporter/ws.go b/echo/internal/transporter/ws.go index 402b11850c..3359b79028 100644 --- a/echo/internal/transporter/ws.go +++ b/echo/internal/transporter/ws.go @@ -43,7 +43,8 @@ func (s *WsClient) TCPHandShake(remote *lb.Node) (net.Conn, error) { latency := time.Since(t1) metrics.HandShakeDuration.WithLabelValues(remote.Label).Observe(float64(latency.Milliseconds())) remote.HandShakeDuration = latency - return wsc, nil + c := newWsConn(wsc, false) + return c, nil } type WsServer struct { @@ -90,7 +91,8 @@ func (s *WsServer) HandleRequest(w http.ResponseWriter, req *http.Request) { if err != nil { return } - if err := s.RelayTCPConn(wsc, s.relayer.TCPHandShake); err != nil { + c := newWsConn(wsc, true) + if err := s.RelayTCPConn(c, s.relayer.TCPHandShake); err != nil { s.l.Errorf("RelayTCPConn error: %s", err.Error()) } } diff --git a/echo/internal/transporter/ws_conn.go b/echo/internal/transporter/ws_conn.go new file mode 100644 index 0000000000..a2bc693373 --- /dev/null +++ b/echo/internal/transporter/ws_conn.go @@ -0,0 +1,82 @@ +package transporter + +import ( + "fmt" + "io" + "net" + "time" + + "github.com/Ehco1996/ehco/pkg/buffer" + "github.com/gobwas/ws" + "github.com/gobwas/ws/wsutil" +) + +type wsConn struct { + conn net.Conn + isServer bool + buf []byte +} + +func newWsConn(conn net.Conn, isServer bool) *wsConn { + return &wsConn{conn: conn, isServer: isServer, buf: buffer.BufferPool.Get()} +} + +func (c *wsConn) Read(b []byte) (n int, err error) { + header, err := ws.ReadHeader(c.conn) + if err != nil { + return 0, err + } + if header.Length > int64(cap(c.buf)) { + c.buf = make([]byte, header.Length) + } + payload := c.buf[:header.Length] + _, err = io.ReadFull(c.conn, payload) + if err != nil { + return 0, err + } + if header.Masked { + ws.Cipher(payload, header.Mask, 0) + } + if len(payload) > len(b) { + return 0, fmt.Errorf("buffer too small to transport ws msg") + } + copy(b, payload) + return 
len(payload), nil +} + +func (c *wsConn) Write(b []byte) (n int, err error) { + if c.isServer { + err = wsutil.WriteServerBinary(c.conn, b) + } else { + err = wsutil.WriteClientBinary(c.conn, b) + } + if err != nil { + return 0, err + } + return len(b), nil +} + +func (c *wsConn) Close() error { + defer buffer.BufferPool.Put(c.buf) + return c.conn.Close() +} + +func (c *wsConn) LocalAddr() net.Addr { + return c.conn.LocalAddr() +} + +func (c *wsConn) RemoteAddr() net.Addr { + return c.conn.RemoteAddr() +} + +func (c *wsConn) SetDeadline(t time.Time) error { + return c.conn.SetDeadline(t) +} + +func (c *wsConn) SetReadDeadline(t time.Time) error { + return c.conn.SetReadDeadline(t) +} + +func (c *wsConn) SetWriteDeadline(t time.Time) error { + return c.conn.SetWriteDeadline(t) +} diff --git a/echo/internal/transporter/ws_conn_test.go b/echo/internal/transporter/ws_conn_test.go new file mode 100644 index 0000000000..0c0dad5590 --- /dev/null +++ b/echo/internal/transporter/ws_conn_test.go @@ -0,0 +1,70 @@ +package transporter + +import ( + "context" + "net/http" + "net/http/httptest" + "net/url" + "testing" + + "github.com/gobwas/ws" + "github.com/stretchr/testify/assert" +) + +func TestClientConn_ReadWrite(t *testing.T) { + data := []byte("hello") + + // Create a WebSocket server + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + conn, _, _, err := ws.UpgradeHTTP(r, w) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + go func() { + defer conn.Close() + wsc := newWsConn(conn, true) + + buf := make([]byte, 1024) + for { + n, err := wsc.Read(buf) + if err != nil { + return + } + assert.Equal(t, len(data), n) + assert.Equal(t, "hello", string(buf[:n])) + _, err = wsc.Write(buf[:n]) + if err != nil { + return + } + } + }() + })) + defer server.Close() + + // Create a WebSocket client + addr, err := url.Parse(server.URL) + if err != nil { + t.Fatal(err) + } + conn, _, _, err := 
ws.DefaultDialer.Dial(context.TODO(), "ws://"+addr.Host) + if err != nil { + t.Fatal(err) + } + defer conn.Close() + + wsClientConn := newWsConn(conn, false) + for i := 0; i < 3; i++ { + // test write + n, err := wsClientConn.Write(data) + assert.NoError(t, err, "test cnt %d", i) + assert.Equal(t, len(data), n, "test cnt %d", i) + + // test read + buf := make([]byte, 100) + n, err = wsClientConn.Read(buf) + assert.NoError(t, err, "test cnt %d", i) + assert.Equal(t, len(data), n, "test cnt %d", i) + assert.Equal(t, "hello", string(buf[:n]), "test cnt %d", i) + } +} diff --git a/echo/test/cmd/tcp_client/main.go b/echo/test/cmd/tcp_client/main.go index 73df5be6ac..b78eae95ae 100644 --- a/echo/test/cmd/tcp_client/main.go +++ b/echo/test/cmd/tcp_client/main.go @@ -1,17 +1,26 @@ package main -import "github.com/Ehco1996/ehco/test/echo" +import ( + "time" + + "github.com/Ehco1996/ehco/test/echo" +) func main() { msg := []byte("hello") echoServerAddr := "127.0.0.1:2333" - println("real echo server at:", echoServerAddr) - - // start ehco real server - // go run cmd/ehco/main.go -l 0.0.0.0:2234 -r 0.0.0.0:2333 - relayAddr := "127.0.0.1:2234" + println("real echo server at:", echoServerAddr, "relay addr:", relayAddr) + ret := echo.SendTcpMsg(msg, relayAddr) - println(string(ret)) + if string(ret) != "hello" { + panic("relay short failed") + } + println("test short conn success, hello sended and received") + + if err := echo.EchoTcpMsgLong(msg, time.Second, relayAddr); err != nil { + panic("relay long failed:" + err.Error()) + } + println("test long conn success") } diff --git a/echo/test/echo/echo.go b/echo/test/echo/echo.go index c7c0eacbe8..1c4cd8c13b 100644 --- a/echo/test/echo/echo.go +++ b/echo/test/echo/echo.go @@ -27,6 +27,7 @@ func echo(conn net.Conn) { logger.Error(err.Error()) return } + println("echo server receive", string(buf[:i])) _, err = conn.Write(buf[:i]) if err != nil { logger.Error(err.Error()) @@ -135,7 +136,7 @@ func EchoTcpMsgLong(msg []byte, sleepTime 
time.Duration, address string) error { return err } if string(buf[:n]) != string(msg) { - return fmt.Errorf("msg not equal") + return fmt.Errorf("msg not equal at %d send:%s receive:%s n:%d", i, msg, buf[:n], n) } // to fake a long connection time.Sleep(sleepTime) diff --git a/echo/test/echo/ws.json b/echo/test/echo/ws.json new file mode 100644 index 0000000000..209bc299bd --- /dev/null +++ b/echo/test/echo/ws.json @@ -0,0 +1,16 @@ +{ + "relay_configs": [ + { + "listen": "127.0.0.1:2234", + "listen_type": "raw", + "transport_type": "ws", + "tcp_remotes": ["ws://0.0.0.0:2443"] + }, + { + "listen": "127.0.0.1:2443", + "listen_type": "ws", + "transport_type": "raw", + "tcp_remotes": ["127.0.0.1:2333"] + } + ] +} diff --git a/lede/target/linux/ramips/dts/mt7621_glinet_gl-mt1300.dts b/lede/target/linux/ramips/dts/mt7621_glinet_gl-mt1300.dts index 20f63902af..a9659a6e88 100644 --- a/lede/target/linux/ramips/dts/mt7621_glinet_gl-mt1300.dts +++ b/lede/target/linux/ramips/dts/mt7621_glinet_gl-mt1300.dts @@ -67,8 +67,8 @@ flash@0 { compatible = "jedec,spi-nor"; reg = <0>; - spi-max-frequency = <80000000>; - m25p,fast-read; + spi-max-frequency = <50000000>; + broken-flash-reset; partitions { compatible = "fixed-partitions"; diff --git a/lede/target/linux/ramips/dts/mt7621_jdcloud_re-cp-02.dts b/lede/target/linux/ramips/dts/mt7621_jdcloud_re-cp-02.dts new file mode 100644 index 0000000000..09330fc1ac --- /dev/null +++ b/lede/target/linux/ramips/dts/mt7621_jdcloud_re-cp-02.dts @@ -0,0 +1,160 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +#include "mt7621.dtsi" + +#include +#include + +/ { + compatible = "jdcloud,re-cp-02", "mediatek,mt7621-soc"; + model = "JDCloud RE-CP-02"; + + aliases { + label-mac-device = &gmac0; + led-boot = &led_status_blue; + led-failsafe = &led_status_red; + led-running = &led_status_green; + led-upgrade = &led_status_blue; + }; + + chosen { + bootargs = "console=ttyS0,115200n8"; + }; + + leds { + compatible = "gpio-leds"; + + led_status_red: red { 
+ label = "red:status"; + gpios = <&gpio 6 GPIO_ACTIVE_LOW>; + }; + + led_status_blue: blue { + label = "blue:status"; + gpios = <&gpio 7 GPIO_ACTIVE_LOW>; + }; + + led_status_green: green { + label = "green:status"; + gpios = <&gpio 8 GPIO_ACTIVE_LOW>; + }; + }; + + keys { + compatible = "gpio-keys"; + + wps { + label = "wps"; + gpios = <&gpio 15 GPIO_ACTIVE_LOW>; + linux,code = ; + }; + + reset { + label = "reset"; + gpios = <&gpio 18 GPIO_ACTIVE_LOW>; + linux,code = ; + }; + }; +}; + +&spi0 { + status = "okay"; + + flash@0 { + compatible = "jedec,spi-nor"; + reg = <0>; + spi-max-frequency = <10000000>; + + partitions { + compatible = "fixed-partitions"; + #address-cells = <1>; + #size-cells = <1>; + + partition@0 { + label = "U-Boot"; + reg = <0x0 0x40000>; + read-only; + }; + + partition@40000 { + compatible = "u-boot,env"; + label = "Config"; + reg = <0x40000 0x10000>; + }; + + factory: partition@50000 { + label = "Factory"; + reg = <0x50000 0x40000>; + read-only; + }; + + partition@90000 { + compatible = "denx,uimage"; + label = "firmware"; + reg = <0x90000 0xf70000>; + }; + }; + }; +}; + +&state_default { + gpio { + groups = "uart3", "jtag", "wdt"; + function = "gpio"; + }; +}; + +&sdhci { + status = "okay"; +}; + +&pcie { + status = "okay"; +}; + +&pcie1 { + wifi@0,0 { + compatible = "mediatek,mt76"; + reg = <0x0000 0 0 0 0>; + mediatek,mtd-eeprom = <&factory 0x0>; + mediatek,disable-radar-background; + }; +}; + +&gmac0 { + mtd-mac-address = <&factory 0x3fff4>; +}; + +&gmac1 { + mtd-mac-address = <&factory 0x3fffa>; + status = "okay"; +}; + +&switch0 { + ports { + port@1 { + status = "okay"; + label = "lan1"; + }; + + port@2 { + status = "okay"; + label = "lan2"; + }; + + port@3 { + status = "okay"; + label = "lan3"; + }; + + port@4 { + status = "okay"; + label = "wan"; + mtd-mac-address = <&factory 0x3fffa>; + }; + }; +}; + +&xhci { + status = "disabled"; +}; diff --git a/lede/target/linux/ramips/image/mt7621.mk b/lede/target/linux/ramips/image/mt7621.mk 
index 8c7d937659..cb3382f7f2 100644 --- a/lede/target/linux/ramips/image/mt7621.mk +++ b/lede/target/linux/ramips/image/mt7621.mk @@ -1030,6 +1030,15 @@ define Device/jcg_y2 endef TARGET_DEVICES += jcg_y2 +define Device/jdcloud_re-cp-02 + $(Device/dsa-migration) + IMAGE_SIZE := 16000k + DEVICE_VENDOR := JD-Cloud + DEVICE_MODEL := RE-CP-02 + DEVICE_PACKAGES := kmod-mt7915-firmware kmod-sdhci-mt7620 +endef +TARGET_DEVICES += jdcloud_re-cp-02 + define Device/jdcloud_re-sp-01b $(Device/dsa-migration) IMAGE_SIZE := 27328k diff --git a/lede/target/linux/ramips/mt7621/base-files/etc/board.d/02_network b/lede/target/linux/ramips/mt7621/base-files/etc/board.d/02_network old mode 100755 new mode 100644 index 165278271c..6d5d8265d5 --- a/lede/target/linux/ramips/mt7621/base-files/etc/board.d/02_network +++ b/lede/target/linux/ramips/mt7621/base-files/etc/board.d/02_network @@ -26,6 +26,7 @@ ramips_setup_interfaces() h3c,tx1801-plus|\ h3c,tx1806|\ hiwifi,hc5962|\ + jdcloud,re-cp-02|\ xiaomi,mi-router-3-pro) ucidef_set_interfaces_lan_wan "lan1 lan2 lan3" "wan" ;; diff --git a/lede/target/linux/ramips/mt7621/base-files/etc/init.d/bootcount b/lede/target/linux/ramips/mt7621/base-files/etc/init.d/bootcount index 276d8bccc4..1db8733877 100755 --- a/lede/target/linux/ramips/mt7621/base-files/etc/init.d/bootcount +++ b/lede/target/linux/ramips/mt7621/base-files/etc/init.d/bootcount @@ -13,6 +13,9 @@ boot() { $((0xFF)) ]] || printf '\xff' | dd of=/dev/mtdblock3 count=1 \ bs=1 seek=$((0x20001)) ;; + jdcloud,re-cp-02) + echo -e "bootcount 0\nbootlimit 5\nupgrade_available 1" | /usr/sbin/fw_setenv -s - + ;; linksys,e5600|\ linksys,ea7300-v1|\ linksys,ea7300-v2|\ diff --git a/mieru/pkg/cli/client.go b/mieru/pkg/cli/client.go index 9e46bb7ad4..a1cd38a255 100644 --- a/mieru/pkg/cli/client.go +++ b/mieru/pkg/cli/client.go @@ -20,8 +20,10 @@ import ( "encoding/hex" "errors" "fmt" + "io" "net" "net/http" + "net/url" "os/exec" "runtime/pprof" "strconv" @@ -39,6 +41,7 @@ import ( 
"github.com/enfein/mieru/pkg/stderror" "github.com/enfein/mieru/pkg/util" "github.com/enfein/mieru/pkg/util/sockopts" + "golang.org/x/net/proxy" "google.golang.org/grpc" "google.golang.org/protobuf/proto" ) @@ -80,6 +83,13 @@ func RegisterClientCommands() { }, clientStatusFunc, ) + RegisterCallback( + []string{"", "test"}, + func(s []string) error { + return unexpectedArgsError(s, 2) + }, + clientTestFunc, + ) RegisterCallback( []string{"", "apply", "config"}, func(s []string) error { @@ -225,6 +235,10 @@ var clientHelpFunc = func(s []string) error { cmd: "status", help: "Check mieru client status.", }, + { + cmd: "test", + help: "Test mieru client connection to the Internet via proxy server.", + }, { cmd: "apply config ", help: "Apply client configuration from JSON file.", @@ -300,7 +314,7 @@ var clientStartFunc = func(s []string) error { if err == stderror.ErrFileNotExist { return fmt.Errorf(stderror.ClientConfigNotExist) } else { - return fmt.Errorf(stderror.LoadClientConfigFailedErr, err) + return fmt.Errorf(stderror.GetClientConfigFailedErr, err) } } if err = appctl.ValidateFullClientConfig(config); err != nil { @@ -309,9 +323,9 @@ var clientStartFunc = func(s []string) error { if err = appctl.IsClientDaemonRunning(context.Background()); err == nil { if config.GetSocks5ListenLAN() { - log.Infof("mieru client is running, listening to 0.0.0.0:%d", config.GetSocks5Port()) + log.Infof("mieru client is running, listening to socks5://0.0.0.0:%d", config.GetSocks5Port()) } else { - log.Infof("mieru client is running, listening to 127.0.0.1:%d", config.GetSocks5Port()) + log.Infof("mieru client is running, listening to socks5://127.0.0.1:%d", config.GetSocks5Port()) } return nil } @@ -331,9 +345,9 @@ var clientStartFunc = func(s []string) error { lastErr = appctl.IsClientDaemonRunning(context.Background()) if lastErr == nil { if config.GetSocks5ListenLAN() { - log.Infof("mieru client is started, listening to 0.0.0.0:%d", config.GetSocks5Port()) + log.Infof("mieru 
client is started, listening to socks5://0.0.0.0:%d", config.GetSocks5Port()) } else { - log.Infof("mieru client is started, listening to 127.0.0.1:%d", config.GetSocks5Port()) + log.Infof("mieru client is started, listening to socks5://127.0.0.1:%d", config.GetSocks5Port()) } return nil } @@ -362,7 +376,7 @@ var clientRunFunc = func(s []string) error { if err == stderror.ErrFileNotExist { return fmt.Errorf(stderror.ClientConfigNotExist) } else { - return fmt.Errorf(stderror.LoadClientConfigFailedErr, err) + return fmt.Errorf(stderror.GetClientConfigFailedErr, err) } } if proto.Equal(config, &appctlpb.ClientConfig{}) { @@ -555,18 +569,18 @@ var clientRunFunc = func(s []string) error { } var clientStopFunc = func(s []string) error { - if err := appctl.IsClientDaemonRunning(context.Background()); err != nil { + ctx, cancelFunc := context.WithTimeout(context.Background(), appctl.RPCTimeout) + defer cancelFunc() + client, running, err := newClientLifecycleRPCClient(ctx) + if !running { log.Infof(stderror.ClientNotRunning) return nil } - - timedctx, cancelFunc := context.WithTimeout(context.Background(), appctl.RPCTimeout) - defer cancelFunc() - client, err := appctl.NewClientLifecycleRPCClient(timedctx) if err != nil { - return fmt.Errorf(stderror.CreateClientLifecycleRPCClientFailedErr, err) + return err } - if _, err = client.Exit(timedctx, &appctlpb.Empty{}); err != nil { + + if _, err = client.Exit(ctx, &appctlpb.Empty{}); err != nil { return fmt.Errorf(stderror.ExitFailedErr, err) } log.Infof("mieru client is stopped") @@ -589,6 +603,50 @@ var clientStatusFunc = func(s []string) error { return nil } +var clientTestFunc = func(s []string) error { + if err := appctl.IsClientDaemonRunning(context.Background()); err != nil { + return fmt.Errorf(stderror.ClientNotRunning) + } + config, err := appctl.LoadClientConfig() + if err != nil { + return fmt.Errorf(stderror.GetClientConfigFailedErr, err) + } + + proxyURL, err := url.Parse(fmt.Sprintf("socks5://127.0.0.1:%d", 
config.GetSocks5Port())) + if err != nil { + return fmt.Errorf("failed to parse proxy URL: %w", err) + } + dialer, err := proxy.FromURL(proxyURL, proxy.Direct) + if err != nil { + return fmt.Errorf("failed to create proxy dialer: %w", err) + } + httpClient := &http.Client{ + Transport: &http.Transport{ + Dial: dialer.Dial, + }, + CheckRedirect: func(req *http.Request, via []*http.Request) error { + return nil + }, + Timeout: appctl.RPCTimeout, + } + + beginTime := time.Now() + resp, err := httpClient.Get("https://google.com/generate_204") + if err != nil { + return err + } + endTime := time.Now() + d := endTime.Sub(beginTime).Round(time.Millisecond) + defer resp.Body.Close() + io.ReadAll(resp.Body) + + if resp.StatusCode != 204 { + return fmt.Errorf("received unexpected status code %d after %v", resp.StatusCode, d) + } + log.Infof("Connected to https://google.com after %v", d) + return nil +} + var clientApplyConfigFunc = func(s []string) error { _, err := appctl.LoadClientConfig() if err == stderror.ErrFileNotExist { @@ -642,24 +700,23 @@ var clientExportConfigFunc = func(s []string) error { var clientDeleteProfileFunc = func(s []string) error { _, err := appctl.LoadClientConfig() if err != nil { - return fmt.Errorf(stderror.LoadClientConfigFailedErr, err) + return fmt.Errorf(stderror.GetClientConfigFailedErr, err) } return appctl.DeleteClientConfigProfile(s[3]) } var clientGetMetricsFunc = func(s []string) error { - if err := appctl.IsClientDaemonRunning(context.Background()); err != nil { - log.Infof(stderror.ClientNotRunning) - return nil + ctx, cancelFunc := context.WithTimeout(context.Background(), appctl.RPCTimeout) + defer cancelFunc() + client, running, err := newClientLifecycleRPCClient(ctx) + if !running { + return fmt.Errorf(stderror.ClientNotRunning) + } + if err != nil { + return err } - timedctx, cancelFunc := context.WithTimeout(context.Background(), appctl.RPCTimeout) - defer cancelFunc() - client, err := 
appctl.NewClientLifecycleRPCClient(timedctx) - if err != nil { - return fmt.Errorf(stderror.CreateClientLifecycleRPCClientFailedErr, err) - } - metrics, err := client.GetMetrics(timedctx, &appctlpb.Empty{}) + metrics, err := client.GetMetrics(ctx, &appctlpb.Empty{}) if err != nil { return fmt.Errorf(stderror.GetMetricsFailedErr, err) } @@ -668,18 +725,17 @@ var clientGetMetricsFunc = func(s []string) error { } var clientGetConnectionsFunc = func(s []string) error { - if err := appctl.IsClientDaemonRunning(context.Background()); err != nil { - log.Infof(stderror.ClientNotRunning) - return nil + ctx, cancelFunc := context.WithTimeout(context.Background(), appctl.RPCTimeout) + defer cancelFunc() + client, running, err := newClientLifecycleRPCClient(ctx) + if !running { + return fmt.Errorf(stderror.ClientNotRunning) + } + if err != nil { + return err } - timedctx, cancelFunc := context.WithTimeout(context.Background(), appctl.RPCTimeout) - defer cancelFunc() - client, err := appctl.NewClientLifecycleRPCClient(timedctx) - if err != nil { - return fmt.Errorf(stderror.CreateClientLifecycleRPCClientFailedErr, err) - } - info, err := client.GetSessionInfo(timedctx, &appctlpb.Empty{}) + info, err := client.GetSessionInfo(ctx, &appctlpb.Empty{}) if err != nil { return fmt.Errorf(stderror.GetConnectionsFailedErr, err) } @@ -690,18 +746,17 @@ var clientGetConnectionsFunc = func(s []string) error { } var clientGetThreadDumpFunc = func(s []string) error { - if err := appctl.IsClientDaemonRunning(context.Background()); err != nil { - log.Infof(stderror.ClientNotRunning) - return nil + ctx, cancelFunc := context.WithTimeout(context.Background(), appctl.RPCTimeout) + defer cancelFunc() + client, running, err := newClientLifecycleRPCClient(ctx) + if !running { + return fmt.Errorf(stderror.ClientNotRunning) + } + if err != nil { + return err } - timedctx, cancelFunc := context.WithTimeout(context.Background(), appctl.RPCTimeout) - defer cancelFunc() - client, err := 
appctl.NewClientLifecycleRPCClient(timedctx) - if err != nil { - return fmt.Errorf(stderror.CreateClientLifecycleRPCClientFailedErr, err) - } - dump, err := client.GetThreadDump(timedctx, &appctlpb.Empty{}) + dump, err := client.GetThreadDump(ctx, &appctlpb.Empty{}) if err != nil { return fmt.Errorf(stderror.GetThreadDumpFailedErr, err) } @@ -710,18 +765,17 @@ var clientGetThreadDumpFunc = func(s []string) error { } var clientGetHeapProfileFunc = func(s []string) error { - if err := appctl.IsClientDaemonRunning(context.Background()); err != nil { - log.Infof(stderror.ClientNotRunning) - return nil + ctx, cancelFunc := context.WithTimeout(context.Background(), appctl.RPCTimeout) + defer cancelFunc() + client, running, err := newClientLifecycleRPCClient(ctx) + if !running { + return fmt.Errorf(stderror.ClientNotRunning) + } + if err != nil { + return err } - timedctx, cancelFunc := context.WithTimeout(context.Background(), appctl.RPCTimeout) - defer cancelFunc() - client, err := appctl.NewClientLifecycleRPCClient(timedctx) - if err != nil { - return fmt.Errorf(stderror.CreateClientLifecycleRPCClientFailedErr, err) - } - if _, err := client.GetHeapProfile(timedctx, &appctlpb.ProfileSavePath{FilePath: proto.String(s[3])}); err != nil { + if _, err := client.GetHeapProfile(ctx, &appctlpb.ProfileSavePath{FilePath: proto.String(s[3])}); err != nil { return fmt.Errorf(stderror.GetHeapProfileFailedErr, err) } log.Infof("heap profile is saved to %q", s[3]) @@ -729,18 +783,17 @@ var clientGetHeapProfileFunc = func(s []string) error { } var clientGetMemoryStatisticsFunc = func(s []string) error { - if err := appctl.IsClientDaemonRunning(context.Background()); err != nil { - log.Infof(stderror.ClientNotRunning) - return nil + ctx, cancelFunc := context.WithTimeout(context.Background(), appctl.RPCTimeout) + defer cancelFunc() + client, running, err := newClientLifecycleRPCClient(ctx) + if !running { + return fmt.Errorf(stderror.ClientNotRunning) + } + if err != nil { + return 
err } - timedctx, cancelFunc := context.WithTimeout(context.Background(), appctl.RPCTimeout) - defer cancelFunc() - client, err := appctl.NewClientLifecycleRPCClient(timedctx) - if err != nil { - return fmt.Errorf(stderror.CreateClientLifecycleRPCClientFailedErr, err) - } - memStats, err := client.GetMemoryStatistics(timedctx, &appctlpb.Empty{}) + memStats, err := client.GetMemoryStatistics(ctx, &appctlpb.Empty{}) if err != nil { return fmt.Errorf(stderror.GetMemoryStatisticsFailedErr, err) } @@ -749,18 +802,17 @@ var clientGetMemoryStatisticsFunc = func(s []string) error { } var clientStartCPUProfileFunc = func(s []string) error { - if err := appctl.IsClientDaemonRunning(context.Background()); err != nil { - log.Infof(stderror.ClientNotRunning) - return nil + ctx, cancelFunc := context.WithTimeout(context.Background(), appctl.RPCTimeout) + defer cancelFunc() + client, running, err := newClientLifecycleRPCClient(ctx) + if !running { + return fmt.Errorf(stderror.ClientNotRunning) + } + if err != nil { + return err } - timedctx, cancelFunc := context.WithTimeout(context.Background(), appctl.RPCTimeout) - defer cancelFunc() - client, err := appctl.NewClientLifecycleRPCClient(timedctx) - if err != nil { - return fmt.Errorf(stderror.CreateClientLifecycleRPCClientFailedErr, err) - } - if _, err := client.StartCPUProfile(timedctx, &appctlpb.ProfileSavePath{FilePath: proto.String(s[4])}); err != nil { + if _, err := client.StartCPUProfile(ctx, &appctlpb.ProfileSavePath{FilePath: proto.String(s[4])}); err != nil { return fmt.Errorf(stderror.StartCPUProfileFailedErr, err) } log.Infof("CPU profile will be saved to %q", s[4]) @@ -768,17 +820,30 @@ var clientStartCPUProfileFunc = func(s []string) error { } var clientStopCPUProfileFunc = func(s []string) error { - if err := appctl.IsClientDaemonRunning(context.Background()); err != nil { - log.Infof(stderror.ClientNotRunning) - return nil + ctx, cancelFunc := context.WithTimeout(context.Background(), appctl.RPCTimeout) + defer 
cancelFunc() + client, running, err := newClientLifecycleRPCClient(ctx) + if !running { + return fmt.Errorf(stderror.ClientNotRunning) + } + if err != nil { + return err } - timedctx, cancelFunc := context.WithTimeout(context.Background(), appctl.RPCTimeout) - defer cancelFunc() - client, err := appctl.NewClientLifecycleRPCClient(timedctx) - if err != nil { - return fmt.Errorf(stderror.CreateClientLifecycleRPCClientFailedErr, err) - } - client.StopCPUProfile(timedctx, &appctlpb.Empty{}) + client.StopCPUProfile(ctx, &appctlpb.Empty{}) return nil } + +// newClientLifecycleRPCClient returns a new client lifecycle RPC client. +// No RPC client is returned if mieru is not running. +func newClientLifecycleRPCClient(ctx context.Context) (client appctlpb.ClientLifecycleServiceClient, running bool, err error) { + if err := appctl.IsClientDaemonRunning(ctx); err != nil { + return nil, false, nil + } + running = true + client, err = appctl.NewClientLifecycleRPCClient(ctx) + if err != nil { + return nil, true, fmt.Errorf(stderror.CreateClientLifecycleRPCClientFailedErr, err) + } + return +} diff --git a/mieru/pkg/cli/server.go b/mieru/pkg/cli/server.go index 7197cf8208..23630bd209 100644 --- a/mieru/pkg/cli/server.go +++ b/mieru/pkg/cli/server.go @@ -360,7 +360,7 @@ var serverRunFunc = func(s []string) error { if config == nil { config, err = appctl.LoadServerConfig() if err != nil { - return fmt.Errorf(stderror.LoadServerConfigFailedErr, err) + return fmt.Errorf(stderror.GetServerConfigFailedErr, err) } } diff --git a/mieru/pkg/stderror/template.go b/mieru/pkg/stderror/template.go index 2f9521edb2..d78c7dd9a5 100644 --- a/mieru/pkg/stderror/template.go +++ b/mieru/pkg/stderror/template.go @@ -38,8 +38,6 @@ const ( GetThreadDumpFailedErr = "get thread dump failed: %w" InvalidPortBindingsErr = "invalid port bindings: %w" InvalidTransportProtocol = "invalid transport protocol" - LoadClientConfigFailedErr = "load mieru client config failed: %w" - LoadServerConfigFailedErr = "load 
mieru server config failed: %w" LookupIPFailedErr = "look up IP address failed: %w" ParseIPFailed = "parse IP address failed" ReloadServerFailedErr = "reload mieru server failed: %w" diff --git a/mihomo/adapter/inbound/listen.go b/mihomo/adapter/inbound/listen.go index 18dc1bc242..edbccea70a 100644 --- a/mihomo/adapter/inbound/listen.go +++ b/mihomo/adapter/inbound/listen.go @@ -3,22 +3,10 @@ package inbound import ( "context" "net" - - "github.com/metacubex/tfo-go" ) -var ( - lc = tfo.ListenConfig{ - DisableTFO: true, - } -) - -func SetTfo(open bool) { - lc.DisableTFO = !open -} - func SetMPTCP(open bool) { - setMultiPathTCP(&lc.ListenConfig, open) + setMultiPathTCP(getListenConfig(), open) } func ListenContext(ctx context.Context, network, address string) (net.Listener, error) { diff --git a/mihomo/adapter/inbound/listen_unix.go b/mihomo/adapter/inbound/listen_unix.go new file mode 100644 index 0000000000..bb78adb222 --- /dev/null +++ b/mihomo/adapter/inbound/listen_unix.go @@ -0,0 +1,23 @@ +//go:build unix + +package inbound + +import ( + "net" + + "github.com/metacubex/tfo-go" +) + +var ( + lc = tfo.ListenConfig{ + DisableTFO: true, + } +) + +func SetTfo(open bool) { + lc.DisableTFO = !open +} + +func getListenConfig() *net.ListenConfig { + return &lc.ListenConfig +} diff --git a/mihomo/adapter/inbound/listen_windows.go b/mihomo/adapter/inbound/listen_windows.go new file mode 100644 index 0000000000..a4223e2b58 --- /dev/null +++ b/mihomo/adapter/inbound/listen_windows.go @@ -0,0 +1,15 @@ +package inbound + +import ( + "net" +) + +var ( + lc = net.ListenConfig{} +) + +func SetTfo(open bool) {} + +func getListenConfig() *net.ListenConfig { + return &lc +} diff --git a/mihomo/component/dialer/tfo.go b/mihomo/component/dialer/tfo.go index 76fe94d021..bc32b38a74 100644 --- a/mihomo/component/dialer/tfo.go +++ b/mihomo/component/dialer/tfo.go @@ -5,12 +5,8 @@ import ( "io" "net" "time" - - "github.com/metacubex/tfo-go" ) -var DisableTFO = false - type tfoConn struct 
{ net.Conn closed bool @@ -124,16 +120,3 @@ func (c *tfoConn) ReaderReplaceable() bool { func (c *tfoConn) WriterReplaceable() bool { return c.Conn != nil } - -func dialTFO(ctx context.Context, netDialer net.Dialer, network, address string) (net.Conn, error) { - ctx, cancel := context.WithTimeout(context.Background(), DefaultTCPTimeout) - dialer := tfo.Dialer{Dialer: netDialer, DisableTFO: false} - return &tfoConn{ - dialed: make(chan bool, 1), - cancel: cancel, - ctx: ctx, - dialFn: func(ctx context.Context, earlyData []byte) (net.Conn, error) { - return dialer.DialContext(ctx, network, address, earlyData) - }, - }, nil -} diff --git a/mihomo/component/dialer/tfo_unix.go b/mihomo/component/dialer/tfo_unix.go new file mode 100644 index 0000000000..b8908849e8 --- /dev/null +++ b/mihomo/component/dialer/tfo_unix.go @@ -0,0 +1,25 @@ +//go:build unix + +package dialer + +import ( + "context" + "net" + + "github.com/metacubex/tfo-go" +) + +const DisableTFO = false + +func dialTFO(ctx context.Context, netDialer net.Dialer, network, address string) (net.Conn, error) { + ctx, cancel := context.WithTimeout(context.Background(), DefaultTCPTimeout) + dialer := tfo.Dialer{Dialer: netDialer, DisableTFO: false} + return &tfoConn{ + dialed: make(chan bool, 1), + cancel: cancel, + ctx: ctx, + dialFn: func(ctx context.Context, earlyData []byte) (net.Conn, error) { + return dialer.DialContext(ctx, network, address, earlyData) + }, + }, nil +} diff --git a/mihomo/component/dialer/tfo_windows.go b/mihomo/component/dialer/tfo_windows.go index 632661186c..f1dddcf44e 100644 --- a/mihomo/component/dialer/tfo_windows.go +++ b/mihomo/component/dialer/tfo_windows.go @@ -1,11 +1,12 @@ package dialer -import "github.com/metacubex/mihomo/constant/features" +import ( + "context" + "net" +) -func init() { - // According to MSDN, this option is available since Windows 10, 1607 - // https://msdn.microsoft.com/en-us/library/windows/desktop/ms738596(v=vs.85).aspx - if features.WindowsMajorVersion < 
10 || (features.WindowsMajorVersion == 10 && features.WindowsBuildNumber < 14393) { - DisableTFO = true - } +const DisableTFO = true + +func dialTFO(ctx context.Context, netDialer net.Dialer, network, address string) (net.Conn, error) { + return netDialer.DialContext(ctx, network, address) } diff --git a/openwrt-packages/luci-app-unblockneteasemusic/root/etc/init.d/unblockneteasemusic b/openwrt-packages/luci-app-unblockneteasemusic/root/etc/init.d/unblockneteasemusic index fa653a0bc1..8c8c51113e 100755 --- a/openwrt-packages/luci-app-unblockneteasemusic/root/etc/init.d/unblockneteasemusic +++ b/openwrt-packages/luci-app-unblockneteasemusic/root/etc/init.d/unblockneteasemusic @@ -76,7 +76,6 @@ start_service() { local update_time config_get update_time "config" "update_time" "3" sed -i "/$NAME/d" /etc/crontabs/root - echo -e "30 2 * * * /etc/init.d/unblockneteasemusic" >> "/etc/crontabs/root" ! is_enabled "config" "auto_update" || echo -e "0 ${update_time} * * * $UNM_DIR/update.sh update_core" >> "/etc/crontabs/root" /etc/init.d/cron restart diff --git a/openwrt-passwall/luci-app-passwall/luasrc/model/cbi/passwall/client/acl.lua b/openwrt-passwall/luci-app-passwall/luasrc/model/cbi/passwall/client/acl.lua index c303efe35f..6b4319ef93 100644 --- a/openwrt-passwall/luci-app-passwall/luasrc/model/cbi/passwall/client/acl.lua +++ b/openwrt-passwall/luci-app-passwall/luasrc/model/cbi/passwall/client/acl.lua @@ -3,7 +3,6 @@ local appname = "passwall" local sys = api.sys m = Map(appname) -api.set_apply_on_parse(m) s = m:section(TypedSection, "global", translate("ACLs"), "" .. translate("ACLs is a tools which used to designate specific IP proxy mode.") .. 
"") s.anonymous = true diff --git a/openwrt-passwall/luci-app-passwall/luasrc/model/cbi/passwall/client/acl_config.lua b/openwrt-passwall/luci-app-passwall/luasrc/model/cbi/passwall/client/acl_config.lua index 22a76c6602..6b10651fe1 100644 --- a/openwrt-passwall/luci-app-passwall/luasrc/model/cbi/passwall/client/acl_config.lua +++ b/openwrt-passwall/luci-app-passwall/luasrc/model/cbi/passwall/client/acl_config.lua @@ -13,7 +13,6 @@ local port_validate = function(self, value, t) end m = Map(appname) -api.set_apply_on_parse(m) local nodes_table = {} for k, e in ipairs(api.get_valid_nodes()) do diff --git a/openwrt-passwall/luci-app-passwall/luasrc/model/cbi/passwall/client/app_update.lua b/openwrt-passwall/luci-app-passwall/luasrc/model/cbi/passwall/client/app_update.lua index 8a59a43106..9d47562106 100644 --- a/openwrt-passwall/luci-app-passwall/luasrc/model/cbi/passwall/client/app_update.lua +++ b/openwrt-passwall/luci-app-passwall/luasrc/model/cbi/passwall/client/app_update.lua @@ -2,7 +2,6 @@ local api = require "luci.passwall.api" local appname = "passwall" m = Map(appname) -api.set_apply_on_parse(m) -- [[ App Settings ]]-- s = m:section(TypedSection, "global_app", translate("App Update"), diff --git a/openwrt-passwall/luci-app-passwall/luasrc/model/cbi/passwall/client/global.lua b/openwrt-passwall/luci-app-passwall/luasrc/model/cbi/passwall/client/global.lua index d2b8593b67..89378eec45 100644 --- a/openwrt-passwall/luci-app-passwall/luasrc/model/cbi/passwall/client/global.lua +++ b/openwrt-passwall/luci-app-passwall/luasrc/model/cbi/passwall/client/global.lua @@ -9,7 +9,6 @@ local has_chnlist = api.fs.access("/usr/share/passwall/rules/chnlist") local has_chnroute = api.fs.access("/usr/share/passwall/rules/chnroute") m = Map(appname) -api.set_apply_on_parse(m) local nodes_table = {} for k, e in ipairs(api.get_valid_nodes()) do diff --git a/openwrt-passwall/luci-app-passwall/luasrc/model/cbi/passwall/client/haproxy.lua 
b/openwrt-passwall/luci-app-passwall/luasrc/model/cbi/passwall/client/haproxy.lua index 563df4d0a6..4f3bd9a962 100644 --- a/openwrt-passwall/luci-app-passwall/luasrc/model/cbi/passwall/client/haproxy.lua +++ b/openwrt-passwall/luci-app-passwall/luasrc/model/cbi/passwall/client/haproxy.lua @@ -16,7 +16,6 @@ for k, e in ipairs(api.get_valid_nodes()) do end m = Map(appname) -api.set_apply_on_parse(m) -- [[ Haproxy Settings ]]-- s = m:section(TypedSection, "global_haproxy") diff --git a/openwrt-passwall/luci-app-passwall/luasrc/model/cbi/passwall/client/node_config.lua b/openwrt-passwall/luci-app-passwall/luasrc/model/cbi/passwall/client/node_config.lua index 4f1ff1a17b..98504b8b2a 100644 --- a/openwrt-passwall/luci-app-passwall/luasrc/model/cbi/passwall/client/node_config.lua +++ b/openwrt-passwall/luci-app-passwall/luasrc/model/cbi/passwall/client/node_config.lua @@ -10,7 +10,6 @@ end m = Map(appname, translate("Node Config")) m.redirect = api.url() -api.set_apply_on_parse(m) s = m:section(NamedSection, arg[1], "nodes", "") s.addremove = false diff --git a/openwrt-passwall/luci-app-passwall/luasrc/model/cbi/passwall/client/node_list.lua b/openwrt-passwall/luci-app-passwall/luasrc/model/cbi/passwall/client/node_list.lua index 041814f7da..b37587f5ef 100644 --- a/openwrt-passwall/luci-app-passwall/luasrc/model/cbi/passwall/client/node_list.lua +++ b/openwrt-passwall/luci-app-passwall/luasrc/model/cbi/passwall/client/node_list.lua @@ -4,7 +4,6 @@ local sys = api.sys local datatypes = api.datatypes m = Map(appname) -api.set_apply_on_parse(m) -- [[ Other Settings ]]-- s = m:section(TypedSection, "global_other") diff --git a/openwrt-passwall/luci-app-passwall/luasrc/model/cbi/passwall/client/node_subscribe.lua b/openwrt-passwall/luci-app-passwall/luasrc/model/cbi/passwall/client/node_subscribe.lua index 9682502650..c8688e224f 100644 --- a/openwrt-passwall/luci-app-passwall/luasrc/model/cbi/passwall/client/node_subscribe.lua +++ 
b/openwrt-passwall/luci-app-passwall/luasrc/model/cbi/passwall/client/node_subscribe.lua @@ -44,7 +44,6 @@ if has_hysteria2 then end m = Map(appname) -api.set_apply_on_parse(m) -- [[ Subscribe Settings ]]-- s = m:section(TypedSection, "global_subscribe", "") diff --git a/openwrt-passwall/luci-app-passwall/luasrc/model/cbi/passwall/client/node_subscribe_config.lua b/openwrt-passwall/luci-app-passwall/luasrc/model/cbi/passwall/client/node_subscribe_config.lua index 4d07ef468e..d4d8c2c48e 100644 --- a/openwrt-passwall/luci-app-passwall/luasrc/model/cbi/passwall/client/node_subscribe_config.lua +++ b/openwrt-passwall/luci-app-passwall/luasrc/model/cbi/passwall/client/node_subscribe_config.lua @@ -45,7 +45,6 @@ end m = Map(appname) m.redirect = api.url("node_subscribe") -api.set_apply_on_parse(m) s = m:section(NamedSection, arg[1]) s.addremove = false diff --git a/openwrt-passwall/luci-app-passwall/luasrc/model/cbi/passwall/client/other.lua b/openwrt-passwall/luci-app-passwall/luasrc/model/cbi/passwall/client/other.lua index 72997c3442..04b70f30fe 100644 --- a/openwrt-passwall/luci-app-passwall/luasrc/model/cbi/passwall/client/other.lua +++ b/openwrt-passwall/luci-app-passwall/luasrc/model/cbi/passwall/client/other.lua @@ -11,7 +11,6 @@ local port_validate = function(self, value, t) end m = Map(appname) -api.set_apply_on_parse(m) -- [[ Delay Settings ]]-- s = m:section(TypedSection, "global_delay", translate("Delay Settings")) diff --git a/openwrt-passwall/luci-app-passwall/luasrc/model/cbi/passwall/client/rule.lua b/openwrt-passwall/luci-app-passwall/luasrc/model/cbi/passwall/client/rule.lua index 8e6947d646..505f6890bb 100644 --- a/openwrt-passwall/luci-app-passwall/luasrc/model/cbi/passwall/client/rule.lua +++ b/openwrt-passwall/luci-app-passwall/luasrc/model/cbi/passwall/client/rule.lua @@ -4,8 +4,6 @@ local has_xray = api.finded_com("xray") local has_singbox = api.finded_com("singbox") m = Map(appname) -api.set_apply_on_parse(m) - -- [[ Rule Settings ]]-- s = 
m:section(TypedSection, "global_rules", translate("Rule status")) s.anonymous = true diff --git a/openwrt-passwall/luci-app-passwall/luasrc/model/cbi/passwall/client/rule_list.lua b/openwrt-passwall/luci-app-passwall/luasrc/model/cbi/passwall/client/rule_list.lua index f6570bf797..92a9178d1f 100644 --- a/openwrt-passwall/luci-app-passwall/luasrc/model/cbi/passwall/client/rule_list.lua +++ b/openwrt-passwall/luci-app-passwall/luasrc/model/cbi/passwall/client/rule_list.lua @@ -9,7 +9,6 @@ local chnlist_path = "/usr/share/passwall/rules/chnlist" local chnroute_path = "/usr/share/passwall/rules/chnroute" m = Map(appname) -api.set_apply_on_parse(m) -- [[ Rule List Settings ]]-- s = m:section(TypedSection, "global_rules") diff --git a/openwrt-passwall/luci-app-passwall/luasrc/model/cbi/passwall/client/shunt_rules.lua b/openwrt-passwall/luci-app-passwall/luasrc/model/cbi/passwall/client/shunt_rules.lua index 8e516384de..ba7735e6eb 100644 --- a/openwrt-passwall/luci-app-passwall/luasrc/model/cbi/passwall/client/shunt_rules.lua +++ b/openwrt-passwall/luci-app-passwall/luasrc/model/cbi/passwall/client/shunt_rules.lua @@ -4,7 +4,6 @@ local datatypes = api.datatypes m = Map(appname, "Sing-Box/Xray " .. 
translate("Shunt Rule")) m.redirect = api.url() -api.set_apply_on_parse(m) s = m:section(NamedSection, arg[1], "shunt_rules", "") s.addremove = false diff --git a/openwrt-passwall/luci-app-passwall/luasrc/model/cbi/passwall/client/socks_config.lua b/openwrt-passwall/luci-app-passwall/luasrc/model/cbi/passwall/client/socks_config.lua index b390eac2a3..62d7495e31 100644 --- a/openwrt-passwall/luci-app-passwall/luasrc/model/cbi/passwall/client/socks_config.lua +++ b/openwrt-passwall/luci-app-passwall/luasrc/model/cbi/passwall/client/socks_config.lua @@ -5,7 +5,6 @@ local has_singbox = api.finded_com("singbox") local has_xray = api.finded_com("xray") m = Map(appname) -api.set_apply_on_parse(m) local nodes_table = {} for k, e in ipairs(api.get_valid_nodes()) do diff --git a/openwrt-passwall/luci-app-passwall/luasrc/model/cbi/passwall/server/index.lua b/openwrt-passwall/luci-app-passwall/luasrc/model/cbi/passwall/server/index.lua index 5491e42497..d18b754442 100644 --- a/openwrt-passwall/luci-app-passwall/luasrc/model/cbi/passwall/server/index.lua +++ b/openwrt-passwall/luci-app-passwall/luasrc/model/cbi/passwall/server/index.lua @@ -1,7 +1,6 @@ local api = require "luci.passwall.api" m = Map("passwall_server", translate("Server-Side")) -api.set_apply_on_parse(m) t = m:section(NamedSection, "global", "global") t.anonymous = true diff --git a/openwrt-passwall/luci-app-passwall/luasrc/model/cbi/passwall/server/user.lua b/openwrt-passwall/luci-app-passwall/luasrc/model/cbi/passwall/server/user.lua index 7c462591bf..b4371db336 100644 --- a/openwrt-passwall/luci-app-passwall/luasrc/model/cbi/passwall/server/user.lua +++ b/openwrt-passwall/luci-app-passwall/luasrc/model/cbi/passwall/server/user.lua @@ -4,7 +4,6 @@ local types_dir = "/usr/lib/lua/luci/model/cbi/passwall/server/type/" m = Map("passwall_server", translate("Server Config")) m.redirect = api.url("server") -api.set_apply_on_parse(m) s = m:section(NamedSection, arg[1], "user", "") s.addremove = false diff --git 
a/openwrt-passwall/luci-app-passwall/luasrc/passwall/api.lua b/openwrt-passwall/luci-app-passwall/luasrc/passwall/api.lua index 0a3f8e1cd6..d0b2f857a5 100644 --- a/openwrt-passwall/luci-app-passwall/luasrc/passwall/api.lua +++ b/openwrt-passwall/luci-app-passwall/luasrc/passwall/api.lua @@ -999,22 +999,6 @@ function to_check_self() } end -function is_js_luci() - return sys.call('[ -f "/www/luci-static/resources/uci.js" ]') == 0 -end - -function set_apply_on_parse(map) - if is_js_luci() == true then - map.apply_on_parse = false - map.on_after_apply = function(self) - if self.redirect then - os.execute("sleep 1") - luci.http.redirect(self.redirect) - end - end - end -end - function luci_types(id, m, s, type_name, option_prefix) local rewrite_option_table = {} for key, value in pairs(s.fields) do diff --git a/sing-box/docs/changelog.md b/sing-box/docs/changelog.md index 0add2ca7af..1fc656b522 100644 --- a/sing-box/docs/changelog.md +++ b/sing-box/docs/changelog.md @@ -2,6 +2,14 @@ icon: material/alert-decagram --- +#### 1.10.0-alpha.3 + +* Fix auto redirect **1** + +**1**: + +Tun inbound with `auto_route` and `auto_redirect` now works as expected on routers without intervention. + #### 1.10.0-alpha.2 * Move auto redirect to Tun **1** diff --git a/sing-box/docs/configuration/inbound/tun.md b/sing-box/docs/configuration/inbound/tun.md index cd5f735ffe..4e475cc448 100644 --- a/sing-box/docs/configuration/inbound/tun.md +++ b/sing-box/docs/configuration/inbound/tun.md @@ -88,8 +88,8 @@ icon: material/new-box "match_domain": [] } }, - - ... // Listen Fields + ... + // Listen Fields } ``` @@ -150,7 +150,7 @@ Enforce strict routing rules when `auto_route` is enabled: *In Linux*: * Let unsupported network unreachable -* Route all connections to tun +* Make ICMP traffic route to tun instead of upstream interfaces It prevents address leaks and makes DNS hijacking work on Android. 
diff --git a/sing-box/docs/configuration/inbound/tun.zh.md b/sing-box/docs/configuration/inbound/tun.zh.md index dd5a457b8c..0a916f20bd 100644 --- a/sing-box/docs/configuration/inbound/tun.zh.md +++ b/sing-box/docs/configuration/inbound/tun.zh.md @@ -150,7 +150,7 @@ tun 接口的 IPv6 前缀。 *在 Linux 中*: * 让不支持的网络无法到达 -* 将所有连接路由到 tun +* 使 ICMP 流量路由到 tun 而不是上游接口 它可以防止地址泄漏,并使 DNS 劫持在 Android 上工作。 diff --git a/sing-box/go.mod b/sing-box/go.mod index 56eba8b241..1ba6601ad3 100644 --- a/sing-box/go.mod +++ b/sing-box/go.mod @@ -33,7 +33,7 @@ require ( github.com/sagernet/sing-shadowsocks v0.2.6 github.com/sagernet/sing-shadowsocks2 v0.2.0 github.com/sagernet/sing-shadowtls v0.1.4 - github.com/sagernet/sing-tun v0.3.0-beta.6 + github.com/sagernet/sing-tun v0.4.0-beta.2 github.com/sagernet/sing-vmess v0.1.8 github.com/sagernet/smux v0.0.0-20231208180855-7041f6ea79e7 github.com/sagernet/tfo-go v0.0.0-20231209031829-7b5343ac1dc6 @@ -45,6 +45,7 @@ require ( go.uber.org/zap v1.27.0 go4.org/netipx v0.0.0-20231129151722-fdeea329fbba golang.org/x/crypto v0.23.0 + golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 golang.org/x/net v0.25.0 golang.org/x/sys v0.20.0 golang.zx2c4.com/wireguard/wgctrl v0.0.0-20230429144221-925a1e7659e6 @@ -84,7 +85,6 @@ require ( github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74 // indirect github.com/zeebo/blake3 v0.2.3 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 // indirect golang.org/x/mod v0.17.0 // indirect golang.org/x/sync v0.7.0 // indirect golang.org/x/text v0.15.0 // indirect diff --git a/sing-box/go.sum b/sing-box/go.sum index 7e401733d5..46acbc87fd 100644 --- a/sing-box/go.sum +++ b/sing-box/go.sum @@ -120,8 +120,8 @@ github.com/sagernet/sing-shadowsocks2 v0.2.0 h1:wpZNs6wKnR7mh1wV9OHwOyUr21VkS3wK github.com/sagernet/sing-shadowsocks2 v0.2.0/go.mod h1:RnXS0lExcDAovvDeniJ4IKa2IuChrdipolPYWBv9hWQ= github.com/sagernet/sing-shadowtls v0.1.4 
h1:aTgBSJEgnumzFenPvc+kbD9/W0PywzWevnVpEx6Tw3k= github.com/sagernet/sing-shadowtls v0.1.4/go.mod h1:F8NBgsY5YN2beQavdgdm1DPlhaKQlaL6lpDdcBglGK4= -github.com/sagernet/sing-tun v0.3.0-beta.6 h1:L11kMrM7UfUW0pzQiU66Fffh4o86KZc1SFGbkYi8Ma8= -github.com/sagernet/sing-tun v0.3.0-beta.6/go.mod h1:DxLIyhjWU/HwGYoX0vNGg2c5QgTQIakphU1MuERR5tQ= +github.com/sagernet/sing-tun v0.4.0-beta.2 h1:Czf5w73shqnbbLFDUyrFkGnm5B2EuYZB5D8bMP2d0lk= +github.com/sagernet/sing-tun v0.4.0-beta.2/go.mod h1:DxLIyhjWU/HwGYoX0vNGg2c5QgTQIakphU1MuERR5tQ= github.com/sagernet/sing-vmess v0.1.8 h1:XVWad1RpTy9b5tPxdm5MCU8cGfrTGdR8qCq6HV2aCNc= github.com/sagernet/sing-vmess v0.1.8/go.mod h1:vhx32UNzTDUkNwOyIjcZQohre1CaytquC5mPplId8uA= github.com/sagernet/smux v0.0.0-20231208180855-7041f6ea79e7 h1:DImB4lELfQhplLTxeq2z31Fpv8CQqqrUwTbrIRumZqQ= diff --git a/sing-box/inbound/tun_auto_redirect.go b/sing-box/inbound/tun_auto_redirect.go index ac071271c6..ed75d1abf8 100644 --- a/sing-box/inbound/tun_auto_redirect.go +++ b/sing-box/inbound/tun_auto_redirect.go @@ -13,10 +13,15 @@ import ( C "github.com/sagernet/sing-box/constant" "github.com/sagernet/sing-box/option" "github.com/sagernet/sing-tun" + "github.com/sagernet/sing/common" + "github.com/sagernet/sing/common/control" E "github.com/sagernet/sing/common/exceptions" F "github.com/sagernet/sing/common/format" M "github.com/sagernet/sing/common/metadata" N "github.com/sagernet/sing/common/network" + "github.com/sagernet/sing/common/x/list" + + "golang.org/x/exp/slices" ) const ( @@ -27,12 +32,18 @@ const ( type tunAutoRedirect struct { myInboundAdapter - tunOptions *tun.Options - iptablesPath string - androidSu bool - suPath string - enableIPv6 bool - ip6tablesPath string + tunOptions *tun.Options + interfaceFinder control.InterfaceFinder + networkMonitor tun.NetworkUpdateMonitor + networkCallback *list.Element[tun.NetworkUpdateCallback] + enableIPv4 bool + enableIPv6 bool + localAddresses4 []netip.Prefix + localAddresses6 []netip.Prefix + iptablesPath 
string + ip6tablesPath string + androidSu bool + suPath string } func newAutoRedirect(t *Tun) (*tunAutoRedirect, error) { @@ -47,39 +58,41 @@ func newAutoRedirect(t *Tun) (*tunAutoRedirect, error) { router: t.router, logger: t.logger, tag: t.tag, - listenOptions: option.ListenOptions{ - Listen: option.NewListenAddress(netip.AddrFrom4([4]byte{127, 0, 0, 1})), - }, }, - tunOptions: &t.tunOptions, + tunOptions: &t.tunOptions, + interfaceFinder: t.router.InterfaceFinder(), + networkMonitor: t.router.NetworkMonitor(), } server.connHandler = server - if C.IsAndroid { - server.iptablesPath = "/system/bin/iptables" - userId := os.Getuid() - if userId != 0 { - var ( - suPath string - err error - ) - if t.platformInterface != nil { - suPath, err = exec.LookPath("/bin/su") - } else { - suPath, err = exec.LookPath("su") + if len(t.tunOptions.Inet4Address) > 0 { + server.enableIPv4 = true + if C.IsAndroid { + server.iptablesPath = "/system/bin/iptables" + userId := os.Getuid() + if userId != 0 { + var ( + suPath string + err error + ) + if t.platformInterface != nil { + suPath, err = exec.LookPath("/bin/su") + } else { + suPath, err = exec.LookPath("su") + } + if err == nil { + server.androidSu = true + server.suPath = suPath + } else { + return nil, E.Extend(E.Cause(err, "root permission is required for auto redirect"), os.Getenv("PATH")) + } } - if err == nil { - server.androidSu = true - server.suPath = suPath - } else { - return nil, E.Extend(E.Cause(err, "root permission is required for auto redirect"), os.Getenv("PATH")) + } else { + iptablesPath, err := exec.LookPath("iptables") + if err != nil { + return nil, E.Cause(err, "iptables is required") } + server.iptablesPath = iptablesPath } - } else { - iptablesPath, err := exec.LookPath("iptables") - if err != nil { - return nil, E.Cause(err, "iptables is required") - } - server.iptablesPath = iptablesPath } if !C.IsAndroid && len(t.tunOptions.Inet6Address) > 0 { err := server.initializeIP6Tables() @@ -87,6 +100,15 @@ func 
newAutoRedirect(t *Tun) (*tunAutoRedirect, error) { t.logger.Debug("device has no ip6tables nat support: ", err) } } + var listenAddr netip.Addr + if C.IsAndroid { + listenAddr = netip.AddrFrom4([4]byte{127, 0, 0, 1}) + } else if server.enableIPv6 { + listenAddr = netip.IPv6Unspecified() + } else { + listenAddr = netip.IPv4Unspecified() + } + server.listenOptions.Listen = option.NewListenAddress(listenAddr) return server, nil } @@ -95,7 +117,7 @@ func (t *tunAutoRedirect) initializeIP6Tables() error { if err != nil { return err } - output, err := exec.Command(ip6tablesPath, "-t nat -L", tableNameOutput).CombinedOutput() + /*output, err := exec.Command(ip6tablesPath, "-t nat -L", tableNameOutput).CombinedOutput() switch exitErr := err.(type) { case nil: case *exec.ExitError: @@ -104,7 +126,7 @@ func (t *tunAutoRedirect) initializeIP6Tables() error { } default: return err - } + }*/ t.ip6tablesPath = ip6tablesPath t.enableIPv6 = true return nil @@ -115,25 +137,79 @@ func (t *tunAutoRedirect) Start(tunName string) error { if err != nil { return E.Cause(err, "start redirect server") } - t.cleanupIPTables(t.iptablesPath) + if t.enableIPv4 { + t.cleanupIPTables(t.iptablesPath) + } if t.enableIPv6 { t.cleanupIPTables(t.ip6tablesPath) } - err = t.setupIPTables(t.iptablesPath, tunName) + err = t.updateInterfaces(false) if err != nil { return err } + if t.enableIPv4 { + err = t.setupIPTables(t.iptablesPath, tunName) + if err != nil { + return err + } + } if t.enableIPv6 { err = t.setupIPTables(t.ip6tablesPath, tunName) if err != nil { return err } } + t.networkCallback = t.networkMonitor.RegisterCallback(func() { + rErr := t.updateInterfaces(true) + if rErr != nil { + t.logger.Error("recreate prerouting rules: ", rErr) + } + }) + return nil +} + +func (t *tunAutoRedirect) updateInterfaces(recreate bool) error { + addresses := common.Filter(common.FlatMap(common.Filter(t.interfaceFinder.Interfaces(), func(it control.Interface) bool { + return it.Name != t.tunOptions.Name + }), 
func(it control.Interface) []netip.Prefix { + return it.Addresses + }), func(it netip.Prefix) bool { + address := it.Addr() + return !(address.IsLoopback() || address.IsLinkLocalUnicast()) + }) + oldLocalAddresses4 := t.localAddresses4 + oldLocalAddresses6 := t.localAddresses6 + localAddresses4 := common.Filter(addresses, func(it netip.Prefix) bool { return it.Addr().Is4() }) + localAddresses6 := common.Filter(addresses, func(it netip.Prefix) bool { return it.Addr().Is6() }) + t.localAddresses4 = localAddresses4 + t.localAddresses6 = localAddresses6 + if !recreate || t.androidSu { + return nil + } + if t.enableIPv4 { + if !slices.Equal(localAddresses4, oldLocalAddresses4) { + err := t.setupIPTablesPreRouting(t.iptablesPath, true) + if err != nil { + return err + } + } + } + if t.enableIPv6 { + if !slices.Equal(localAddresses6, oldLocalAddresses6) { + err := t.setupIPTablesPreRouting(t.ip6tablesPath, true) + if err != nil { + return err + } + } + } return nil } func (t *tunAutoRedirect) Close() error { - t.cleanupIPTables(t.iptablesPath) + t.networkMonitor.UnregisterCallback(t.networkCallback) + if t.enableIPv4 { + t.cleanupIPTables(t.iptablesPath) + } if t.enableIPv6 { t.cleanupIPTables(t.ip6tablesPath) } @@ -186,7 +262,7 @@ func (t *tunAutoRedirect) setupIPTables(iptablesPath string, tunName string) err return err } // PREROUTING - err = t.setupIPTablesPreRouting(iptablesPath) + err = t.setupIPTablesPreRouting(iptablesPath, false) if err != nil { return err } @@ -194,8 +270,13 @@ func (t *tunAutoRedirect) setupIPTables(iptablesPath string, tunName string) err return nil } -func (t *tunAutoRedirect) setupIPTablesPreRouting(iptablesPath string) error { - err := t.runShell(iptablesPath, "-t nat -N", tableNamePreRouteing) +func (t *tunAutoRedirect) setupIPTablesPreRouting(iptablesPath string, recreate bool) error { + var err error + if !recreate { + err = t.runShell(iptablesPath, "-t nat -N", tableNamePreRouteing) + } else { + err = t.runShell(iptablesPath, "-t nat 
-F", tableNamePreRouteing) + } if err != nil { return err } @@ -225,7 +306,7 @@ func (t *tunAutoRedirect) setupIPTablesPreRouting(iptablesPath string) error { if len(t.tunOptions.ExcludeInterface) > 0 { for _, name := range t.tunOptions.ExcludeInterface { err = t.runShell(iptablesPath, "-t nat -A", tableNamePreRouteing, - "-o", name, "-j RETURN") + "-i", name, "-j RETURN") if err != nil { return err } @@ -240,15 +321,16 @@ func (t *tunAutoRedirect) setupIPTablesPreRouting(iptablesPath string) error { } } } - for _, netIf := range t.router.(adapter.Router).InterfaceFinder().Interfaces() { - for _, addr := range netIf.Addresses { - if (t.iptablesPath == iptablesPath) != addr.Addr().Is4() { - continue - } - err = t.runShell(iptablesPath, "-t nat -A", tableNamePreRouteing, "-d", addr.String(), "-j RETURN") - if err != nil { - return err - } + var addresses []netip.Prefix + if t.iptablesPath == iptablesPath { + addresses = t.localAddresses4 + } else { + addresses = t.localAddresses6 + } + for _, address := range addresses { + err = t.runShell(iptablesPath, "-t nat -A", tableNamePreRouteing, "-d", address.String(), "-j RETURN") + if err != nil { + return err } } if len(routeAddress) > 0 { @@ -262,7 +344,7 @@ func (t *tunAutoRedirect) setupIPTablesPreRouting(iptablesPath string) error { } else if len(t.tunOptions.IncludeInterface) > 0 || len(t.tunOptions.IncludeUID) > 0 { for _, name := range t.tunOptions.IncludeInterface { err = t.runShell(iptablesPath, "-t nat -A", tableNamePreRouteing, - "-o", name, "-p tcp -j REDIRECT --to-ports", M.AddrPortFromNet(t.tcpListener.Addr()).Port()) + "-i", name, "-p tcp -j REDIRECT --to-ports", M.AddrPortFromNet(t.tcpListener.Addr()).Port()) if err != nil { return err } diff --git a/small/luci-app-passwall/luasrc/model/cbi/passwall/client/acl.lua b/small/luci-app-passwall/luasrc/model/cbi/passwall/client/acl.lua index c303efe35f..6b4319ef93 100644 --- a/small/luci-app-passwall/luasrc/model/cbi/passwall/client/acl.lua +++ 
b/small/luci-app-passwall/luasrc/model/cbi/passwall/client/acl.lua @@ -3,7 +3,6 @@ local appname = "passwall" local sys = api.sys m = Map(appname) -api.set_apply_on_parse(m) s = m:section(TypedSection, "global", translate("ACLs"), "" .. translate("ACLs is a tools which used to designate specific IP proxy mode.") .. "") s.anonymous = true diff --git a/small/luci-app-passwall/luasrc/model/cbi/passwall/client/acl_config.lua b/small/luci-app-passwall/luasrc/model/cbi/passwall/client/acl_config.lua index 22a76c6602..6b10651fe1 100644 --- a/small/luci-app-passwall/luasrc/model/cbi/passwall/client/acl_config.lua +++ b/small/luci-app-passwall/luasrc/model/cbi/passwall/client/acl_config.lua @@ -13,7 +13,6 @@ local port_validate = function(self, value, t) end m = Map(appname) -api.set_apply_on_parse(m) local nodes_table = {} for k, e in ipairs(api.get_valid_nodes()) do diff --git a/small/luci-app-passwall/luasrc/model/cbi/passwall/client/app_update.lua b/small/luci-app-passwall/luasrc/model/cbi/passwall/client/app_update.lua index 8a59a43106..9d47562106 100644 --- a/small/luci-app-passwall/luasrc/model/cbi/passwall/client/app_update.lua +++ b/small/luci-app-passwall/luasrc/model/cbi/passwall/client/app_update.lua @@ -2,7 +2,6 @@ local api = require "luci.passwall.api" local appname = "passwall" m = Map(appname) -api.set_apply_on_parse(m) -- [[ App Settings ]]-- s = m:section(TypedSection, "global_app", translate("App Update"), diff --git a/small/luci-app-passwall/luasrc/model/cbi/passwall/client/global.lua b/small/luci-app-passwall/luasrc/model/cbi/passwall/client/global.lua index d2b8593b67..89378eec45 100644 --- a/small/luci-app-passwall/luasrc/model/cbi/passwall/client/global.lua +++ b/small/luci-app-passwall/luasrc/model/cbi/passwall/client/global.lua @@ -9,7 +9,6 @@ local has_chnlist = api.fs.access("/usr/share/passwall/rules/chnlist") local has_chnroute = api.fs.access("/usr/share/passwall/rules/chnroute") m = Map(appname) -api.set_apply_on_parse(m) local nodes_table = 
{} for k, e in ipairs(api.get_valid_nodes()) do diff --git a/small/luci-app-passwall/luasrc/model/cbi/passwall/client/haproxy.lua b/small/luci-app-passwall/luasrc/model/cbi/passwall/client/haproxy.lua index 563df4d0a6..4f3bd9a962 100644 --- a/small/luci-app-passwall/luasrc/model/cbi/passwall/client/haproxy.lua +++ b/small/luci-app-passwall/luasrc/model/cbi/passwall/client/haproxy.lua @@ -16,7 +16,6 @@ for k, e in ipairs(api.get_valid_nodes()) do end m = Map(appname) -api.set_apply_on_parse(m) -- [[ Haproxy Settings ]]-- s = m:section(TypedSection, "global_haproxy") diff --git a/small/luci-app-passwall/luasrc/model/cbi/passwall/client/node_config.lua b/small/luci-app-passwall/luasrc/model/cbi/passwall/client/node_config.lua index 4f1ff1a17b..98504b8b2a 100644 --- a/small/luci-app-passwall/luasrc/model/cbi/passwall/client/node_config.lua +++ b/small/luci-app-passwall/luasrc/model/cbi/passwall/client/node_config.lua @@ -10,7 +10,6 @@ end m = Map(appname, translate("Node Config")) m.redirect = api.url() -api.set_apply_on_parse(m) s = m:section(NamedSection, arg[1], "nodes", "") s.addremove = false diff --git a/small/luci-app-passwall/luasrc/model/cbi/passwall/client/node_list.lua b/small/luci-app-passwall/luasrc/model/cbi/passwall/client/node_list.lua index 041814f7da..b37587f5ef 100644 --- a/small/luci-app-passwall/luasrc/model/cbi/passwall/client/node_list.lua +++ b/small/luci-app-passwall/luasrc/model/cbi/passwall/client/node_list.lua @@ -4,7 +4,6 @@ local sys = api.sys local datatypes = api.datatypes m = Map(appname) -api.set_apply_on_parse(m) -- [[ Other Settings ]]-- s = m:section(TypedSection, "global_other") diff --git a/small/luci-app-passwall/luasrc/model/cbi/passwall/client/node_subscribe.lua b/small/luci-app-passwall/luasrc/model/cbi/passwall/client/node_subscribe.lua index 9682502650..c8688e224f 100644 --- a/small/luci-app-passwall/luasrc/model/cbi/passwall/client/node_subscribe.lua +++ 
b/small/luci-app-passwall/luasrc/model/cbi/passwall/client/node_subscribe.lua @@ -44,7 +44,6 @@ if has_hysteria2 then end m = Map(appname) -api.set_apply_on_parse(m) -- [[ Subscribe Settings ]]-- s = m:section(TypedSection, "global_subscribe", "") diff --git a/small/luci-app-passwall/luasrc/model/cbi/passwall/client/node_subscribe_config.lua b/small/luci-app-passwall/luasrc/model/cbi/passwall/client/node_subscribe_config.lua index 4d07ef468e..d4d8c2c48e 100644 --- a/small/luci-app-passwall/luasrc/model/cbi/passwall/client/node_subscribe_config.lua +++ b/small/luci-app-passwall/luasrc/model/cbi/passwall/client/node_subscribe_config.lua @@ -45,7 +45,6 @@ end m = Map(appname) m.redirect = api.url("node_subscribe") -api.set_apply_on_parse(m) s = m:section(NamedSection, arg[1]) s.addremove = false diff --git a/small/luci-app-passwall/luasrc/model/cbi/passwall/client/other.lua b/small/luci-app-passwall/luasrc/model/cbi/passwall/client/other.lua index 72997c3442..04b70f30fe 100644 --- a/small/luci-app-passwall/luasrc/model/cbi/passwall/client/other.lua +++ b/small/luci-app-passwall/luasrc/model/cbi/passwall/client/other.lua @@ -11,7 +11,6 @@ local port_validate = function(self, value, t) end m = Map(appname) -api.set_apply_on_parse(m) -- [[ Delay Settings ]]-- s = m:section(TypedSection, "global_delay", translate("Delay Settings")) diff --git a/small/luci-app-passwall/luasrc/model/cbi/passwall/client/rule.lua b/small/luci-app-passwall/luasrc/model/cbi/passwall/client/rule.lua index 8e6947d646..505f6890bb 100644 --- a/small/luci-app-passwall/luasrc/model/cbi/passwall/client/rule.lua +++ b/small/luci-app-passwall/luasrc/model/cbi/passwall/client/rule.lua @@ -4,8 +4,6 @@ local has_xray = api.finded_com("xray") local has_singbox = api.finded_com("singbox") m = Map(appname) -api.set_apply_on_parse(m) - -- [[ Rule Settings ]]-- s = m:section(TypedSection, "global_rules", translate("Rule status")) s.anonymous = true diff --git 
a/small/luci-app-passwall/luasrc/model/cbi/passwall/client/rule_list.lua b/small/luci-app-passwall/luasrc/model/cbi/passwall/client/rule_list.lua index f6570bf797..92a9178d1f 100644 --- a/small/luci-app-passwall/luasrc/model/cbi/passwall/client/rule_list.lua +++ b/small/luci-app-passwall/luasrc/model/cbi/passwall/client/rule_list.lua @@ -9,7 +9,6 @@ local chnlist_path = "/usr/share/passwall/rules/chnlist" local chnroute_path = "/usr/share/passwall/rules/chnroute" m = Map(appname) -api.set_apply_on_parse(m) -- [[ Rule List Settings ]]-- s = m:section(TypedSection, "global_rules") diff --git a/small/luci-app-passwall/luasrc/model/cbi/passwall/client/shunt_rules.lua b/small/luci-app-passwall/luasrc/model/cbi/passwall/client/shunt_rules.lua index 8e516384de..ba7735e6eb 100644 --- a/small/luci-app-passwall/luasrc/model/cbi/passwall/client/shunt_rules.lua +++ b/small/luci-app-passwall/luasrc/model/cbi/passwall/client/shunt_rules.lua @@ -4,7 +4,6 @@ local datatypes = api.datatypes m = Map(appname, "Sing-Box/Xray " .. 
translate("Shunt Rule")) m.redirect = api.url() -api.set_apply_on_parse(m) s = m:section(NamedSection, arg[1], "shunt_rules", "") s.addremove = false diff --git a/small/luci-app-passwall/luasrc/model/cbi/passwall/client/socks_config.lua b/small/luci-app-passwall/luasrc/model/cbi/passwall/client/socks_config.lua index b390eac2a3..62d7495e31 100644 --- a/small/luci-app-passwall/luasrc/model/cbi/passwall/client/socks_config.lua +++ b/small/luci-app-passwall/luasrc/model/cbi/passwall/client/socks_config.lua @@ -5,7 +5,6 @@ local has_singbox = api.finded_com("singbox") local has_xray = api.finded_com("xray") m = Map(appname) -api.set_apply_on_parse(m) local nodes_table = {} for k, e in ipairs(api.get_valid_nodes()) do diff --git a/small/luci-app-passwall/luasrc/model/cbi/passwall/server/index.lua b/small/luci-app-passwall/luasrc/model/cbi/passwall/server/index.lua index 5491e42497..d18b754442 100644 --- a/small/luci-app-passwall/luasrc/model/cbi/passwall/server/index.lua +++ b/small/luci-app-passwall/luasrc/model/cbi/passwall/server/index.lua @@ -1,7 +1,6 @@ local api = require "luci.passwall.api" m = Map("passwall_server", translate("Server-Side")) -api.set_apply_on_parse(m) t = m:section(NamedSection, "global", "global") t.anonymous = true diff --git a/small/luci-app-passwall/luasrc/model/cbi/passwall/server/user.lua b/small/luci-app-passwall/luasrc/model/cbi/passwall/server/user.lua index 7c462591bf..b4371db336 100644 --- a/small/luci-app-passwall/luasrc/model/cbi/passwall/server/user.lua +++ b/small/luci-app-passwall/luasrc/model/cbi/passwall/server/user.lua @@ -4,7 +4,6 @@ local types_dir = "/usr/lib/lua/luci/model/cbi/passwall/server/type/" m = Map("passwall_server", translate("Server Config")) m.redirect = api.url("server") -api.set_apply_on_parse(m) s = m:section(NamedSection, arg[1], "user", "") s.addremove = false diff --git a/small/luci-app-passwall/luasrc/passwall/api.lua b/small/luci-app-passwall/luasrc/passwall/api.lua index 0a3f8e1cd6..d0b2f857a5 100644 
--- a/small/luci-app-passwall/luasrc/passwall/api.lua +++ b/small/luci-app-passwall/luasrc/passwall/api.lua @@ -999,22 +999,6 @@ function to_check_self() } end -function is_js_luci() - return sys.call('[ -f "/www/luci-static/resources/uci.js" ]') == 0 -end - -function set_apply_on_parse(map) - if is_js_luci() == true then - map.apply_on_parse = false - map.on_after_apply = function(self) - if self.redirect then - os.execute("sleep 1") - luci.http.redirect(self.redirect) - end - end - end -end - function luci_types(id, m, s, type_name, option_prefix) local rewrite_option_table = {} for key, value in pairs(s.fields) do diff --git a/small/redsocks2/Makefile b/small/redsocks2/Makefile index f68f88ed65..88b8a752f2 100644 --- a/small/redsocks2/Makefile +++ b/small/redsocks2/Makefile @@ -10,9 +10,9 @@ PKG_RELEASE:=1 PKG_SOURCE_PROTO:=git PKG_SOURCE_URL:=https://github.com/semigodking/redsocks.git -PKG_SOURCE_DATE:=2024-01-27 -PKG_SOURCE_VERSION:=92dbff008a54540159bbb4c0ff19ccf224155d76 -PKG_MIRROR_HASH:=6c45324e824fd261eb919592207b368c8a2668c01ef882bd348868362ea80f44 +PKG_SOURCE_DATE:=2024-05-28 +PKG_SOURCE_VERSION:=c8e1e6c4c1d623b2e540528ac9efd06dde952006 +PKG_MIRROR_HASH:=b1e64e3af162ed91976eec9fa07ccd569ee8369f896bb2a19b8507eb01f4f769 PKG_MAINTAINER:=semigodking PKG_LICENSE:=Apache-2.0 diff --git a/small/v2ray-geodata/Makefile b/small/v2ray-geodata/Makefile index b2b762b055..715e21f107 100644 --- a/small/v2ray-geodata/Makefile +++ b/small/v2ray-geodata/Makefile @@ -12,13 +12,13 @@ PKG_MAINTAINER:=Tianling Shen include $(INCLUDE_DIR)/package.mk -GEOIP_VER:=202405230041 +GEOIP_VER:=202405300042 GEOIP_FILE:=geoip.dat.$(GEOIP_VER) define Download/geoip URL:=https://github.com/v2fly/geoip/releases/download/$(GEOIP_VER)/ URL_FILE:=geoip.dat FILE:=$(GEOIP_FILE) - HASH:=0401b0a1b82ad0d01c119f311d7ae0e0bae4d928f287251df2a98281d173f3d7 + HASH:=ee22e254e7cb9a2e45d8851a70022662c15c739604c379029ae8f6a19a3ccc4f endef GEOSITE_VER:=20240508170917 diff --git 
a/v2rayng/V2rayNG/app/src/main/kotlin/com/v2ray/ang/service/SubscriptionUpdater.kt b/v2rayng/V2rayNG/app/src/main/kotlin/com/v2ray/ang/service/SubscriptionUpdater.kt index 6b836c1f83..9cd6e1aeaa 100644 --- a/v2rayng/V2rayNG/app/src/main/kotlin/com/v2ray/ang/service/SubscriptionUpdater.kt +++ b/v2rayng/V2rayNG/app/src/main/kotlin/com/v2ray/ang/service/SubscriptionUpdater.kt @@ -67,7 +67,7 @@ object SubscriptionUpdater { } fun importBatchConfig(server: String?, subid: String = "") { - val append = subid.isEmpty() + val append = false val count = AngConfigManager.importBatchConfig(server, subid, append) if (count <= 0) { diff --git a/v2rayng/V2rayNG/app/src/main/kotlin/com/v2ray/ang/ui/MainActivity.kt b/v2rayng/V2rayNG/app/src/main/kotlin/com/v2ray/ang/ui/MainActivity.kt index 072f2b66c8..e0668a3c1f 100644 --- a/v2rayng/V2rayNG/app/src/main/kotlin/com/v2ray/ang/ui/MainActivity.kt +++ b/v2rayng/V2rayNG/app/src/main/kotlin/com/v2ray/ang/ui/MainActivity.kt @@ -31,6 +31,7 @@ import com.v2ray.ang.AppConfig import com.v2ray.ang.AppConfig.ANG_PACKAGE import com.v2ray.ang.R import com.v2ray.ang.databinding.ActivityMainBinding +import com.v2ray.ang.databinding.LayoutProgressBinding import com.v2ray.ang.dto.EConfigType import com.v2ray.ang.extension.toast import com.v2ray.ang.helper.SimpleItemTouchHelperCallback @@ -39,7 +40,9 @@ import com.v2ray.ang.util.AngConfigManager import com.v2ray.ang.util.MmkvManager import com.v2ray.ang.util.Utils import com.v2ray.ang.viewmodel.MainViewModel +import com.v2ray.ang.viewmodel.SubViewModel import kotlinx.coroutines.Dispatchers +import kotlinx.coroutines.delay import kotlinx.coroutines.launch import me.drakeet.support.toast.ToastCompat import rx.Observable @@ -61,6 +64,7 @@ class MainActivity : BaseActivity(), NavigationView.OnNavigationItemSelectedList } private var mItemTouchHelper: ItemTouchHelper? 
= null val mainViewModel: MainViewModel by viewModels() + val subViewModel: SubViewModel by viewModels() override fun onCreate(savedInstanceState: Bundle?) { super.onCreate(savedInstanceState) @@ -281,11 +285,6 @@ class MainActivity : BaseActivity(), NavigationView.OnNavigationItemSelectedList true } -// R.id.sub_setting -> { -// startActivity() -// true -// } - R.id.sub_update -> { importConfigViaSub() true @@ -423,26 +422,24 @@ class MainActivity : BaseActivity(), NavigationView.OnNavigationItemSelectedList return true } - fun importBatchConfig(server: String?, subid: String = "") { - val subid2 = if(subid.isNullOrEmpty()){ - mainViewModel.subscriptionId - }else{ - subid - } - val append = subid.isNullOrEmpty() + fun importBatchConfig(server: String?) { + val dialog = AlertDialog.Builder(this) + .setView(LayoutProgressBinding.inflate(layoutInflater).root) + .setCancelable(false) + .show() - var count = AngConfigManager.importBatchConfig(server, subid2, append) - if (count <= 0) { - count = AngConfigManager.importBatchConfig(Utils.decode(server!!), subid2, append) - } - if (count <= 0) { - count = AngConfigManager.appendCustomConfigServer(server, subid2) - } - if (count > 0) { - toast(R.string.toast_success) - mainViewModel.reloadServerList() - } else { - toast(R.string.toast_failure) + lifecycleScope.launch(Dispatchers.IO) { + val count = subViewModel.importBatchConfig(server, mainViewModel.subscriptionId, true) + delay(500L) + launch(Dispatchers.Main) { + if (count > 0) { + toast(R.string.toast_success) + mainViewModel.reloadServerList() + } else { + toast(R.string.toast_failure) + } + dialog.dismiss() + } } } @@ -520,55 +517,24 @@ class MainActivity : BaseActivity(), NavigationView.OnNavigationItemSelectedList /** * import config from sub */ - fun importConfigViaSub() - : Boolean { - try { - toast(R.string.title_sub_update) - MmkvManager.decodeSubscriptions().forEach { - if (TextUtils.isEmpty(it.first) - || TextUtils.isEmpty(it.second.remarks) - || 
TextUtils.isEmpty(it.second.url) - ) { - return@forEach - } - if (!it.second.enabled) { - return@forEach - } - val url = Utils.idnToASCII(it.second.url) - if (!Utils.isValidUrl(url)) { - return@forEach - } - Log.d(ANG_PACKAGE, url) - lifecycleScope.launch(Dispatchers.IO) { - var configText = try { - Utils.getUrlContentWithCustomUserAgent(url) - } catch (e: Exception) { - e.printStackTrace() - "" - } - if(configText.isEmpty()) { - configText = try { - val httpPort = Utils.parseInt(settingsStorage?.decodeString(AppConfig.PREF_HTTP_PORT), AppConfig.PORT_HTTP.toInt()) - Utils.getUrlContentWithCustomUserAgent(url, httpPort) - } catch (e: Exception) { - e.printStackTrace() - "" - } - } - if(configText.isEmpty()) { - launch(Dispatchers.Main) { - toast("\"" + it.second.remarks + "\" " + getString(R.string.toast_failure)) - } - return@launch - } - launch(Dispatchers.Main) { - importBatchConfig(configText, it.first) - } + fun importConfigViaSub() : Boolean { + val dialog = AlertDialog.Builder(this) + .setView(LayoutProgressBinding.inflate(layoutInflater).root) + .setCancelable(false) + .show() + + lifecycleScope.launch(Dispatchers.IO) { + val count = subViewModel.updateConfigViaSubAll() + delay(500L) + launch(Dispatchers.Main) { + if (count > 0) { + toast(R.string.toast_success) + mainViewModel.reloadServerList() + } else { + toast(R.string.toast_failure) } + dialog.dismiss() } - } catch (e: Exception) { - e.printStackTrace() - return false } return true } diff --git a/v2rayng/V2rayNG/app/src/main/kotlin/com/v2ray/ang/ui/SubSettingActivity.kt b/v2rayng/V2rayNG/app/src/main/kotlin/com/v2ray/ang/ui/SubSettingActivity.kt index 469af3be1d..18c9ed7732 100644 --- a/v2rayng/V2rayNG/app/src/main/kotlin/com/v2ray/ang/ui/SubSettingActivity.kt +++ b/v2rayng/V2rayNG/app/src/main/kotlin/com/v2ray/ang/ui/SubSettingActivity.kt @@ -1,20 +1,30 @@ package com.v2ray.ang.ui import android.content.Intent -import androidx.recyclerview.widget.LinearLayoutManager +import android.os.Bundle import 
android.view.Menu import android.view.MenuItem +import androidx.activity.viewModels +import androidx.appcompat.app.AlertDialog +import androidx.lifecycle.lifecycleScope +import androidx.recyclerview.widget.LinearLayoutManager import com.v2ray.ang.R -import android.os.Bundle import com.v2ray.ang.databinding.ActivitySubSettingBinding +import com.v2ray.ang.databinding.LayoutProgressBinding import com.v2ray.ang.dto.SubscriptionItem +import com.v2ray.ang.extension.toast import com.v2ray.ang.util.MmkvManager +import com.v2ray.ang.viewmodel.SubViewModel +import kotlinx.coroutines.Dispatchers +import kotlinx.coroutines.delay +import kotlinx.coroutines.launch class SubSettingActivity : BaseActivity() { private lateinit var binding: ActivitySubSettingBinding - var subscriptions:List> = listOf() + var subscriptions: List> = listOf() private val adapter by lazy { SubSettingRecyclerAdapter(this) } + val subViewModel: SubViewModel by viewModels() override fun onCreate(savedInstanceState: Bundle?) { super.onCreate(savedInstanceState) @@ -37,9 +47,6 @@ class SubSettingActivity : BaseActivity() { override fun onCreateOptionsMenu(menu: Menu): Boolean { menuInflater.inflate(R.menu.action_sub_setting, menu) - menu.findItem(R.id.del_config)?.isVisible = false - menu.findItem(R.id.save_config)?.isVisible = false - return super.onCreateOptionsMenu(menu) } @@ -48,6 +55,30 @@ class SubSettingActivity : BaseActivity() { startActivity(Intent(this, SubEditActivity::class.java)) true } + + R.id.sub_update -> { + val dialog = AlertDialog.Builder(this) + .setView(LayoutProgressBinding.inflate(layoutInflater).root) + .setCancelable(false) + .show() + + lifecycleScope.launch(Dispatchers.IO) { + val count = subViewModel.updateConfigViaSubAll() + delay(500L) + launch(Dispatchers.Main) { + if (count > 0) { + toast(R.string.toast_success) + } else { + toast(R.string.toast_failure) + } + dialog.dismiss() + } + } + + true + } + else -> super.onOptionsItemSelected(item) + } } diff --git 
a/v2rayng/V2rayNG/app/src/main/kotlin/com/v2ray/ang/util/AngConfigManager.kt b/v2rayng/V2rayNG/app/src/main/kotlin/com/v2ray/ang/util/AngConfigManager.kt index ac4d1f687b..94e09e4754 100644 --- a/v2rayng/V2rayNG/app/src/main/kotlin/com/v2ray/ang/util/AngConfigManager.kt +++ b/v2rayng/V2rayNG/app/src/main/kotlin/com/v2ray/ang/util/AngConfigManager.kt @@ -3,7 +3,6 @@ package com.v2ray.ang.util import android.content.Context import android.graphics.Bitmap import android.text.TextUtils -import android.util.Log import com.google.gson.Gson import com.google.gson.GsonBuilder import com.google.gson.JsonPrimitive @@ -11,20 +10,18 @@ import com.google.gson.JsonSerializationContext import com.google.gson.JsonSerializer import com.google.gson.reflect.TypeToken import com.tencent.mmkv.MMKV -import com.v2ray.ang.AppConfig import com.v2ray.ang.AppConfig.PROTOCOL_HTTP import com.v2ray.ang.AppConfig.PROTOCOL_HTTPS -import com.v2ray.ang.AppConfig.WIREGUARD_LOCAL_ADDRESS_V4 -import com.v2ray.ang.AppConfig.WIREGUARD_LOCAL_MTU import com.v2ray.ang.R import com.v2ray.ang.dto.* -import com.v2ray.ang.dto.V2rayConfig.Companion.DEFAULT_SECURITY -import com.v2ray.ang.dto.V2rayConfig.Companion.TLS -import com.v2ray.ang.extension.idnHost -import com.v2ray.ang.extension.removeWhiteSpace import com.v2ray.ang.util.MmkvManager.KEY_SELECTED_SERVER +import com.v2ray.ang.util.fmt.ShadowsocksFmt +import com.v2ray.ang.util.fmt.SocksFmt +import com.v2ray.ang.util.fmt.TrojanFmt +import com.v2ray.ang.util.fmt.VlessFmt +import com.v2ray.ang.util.fmt.VmessFmt +import com.v2ray.ang.util.fmt.WireguardFmt import java.lang.reflect.Type -import java.net.URI import java.util.* object AngConfigManager { @@ -219,250 +216,28 @@ object AngConfigManager { //maybe Subscription if (TextUtils.isEmpty(subid) - && (str.startsWith(PROTOCOL_HTTP) || str.startsWith(PROTOCOL_HTTPS))) { + && (str.startsWith(PROTOCOL_HTTP) || str.startsWith(PROTOCOL_HTTPS)) + ) { MmkvManager.importUrlAsSubscription(str) return 0 } - var config: 
ServerConfig? = null - val allowInsecure = settingsStorage?.decodeBool(AppConfig.PREF_ALLOW_INSECURE) ?: false - if (str.startsWith(EConfigType.VMESS.protocolScheme)) { - config = ServerConfig.create(EConfigType.VMESS) - val streamSetting = config.outboundBean?.streamSettings ?: return -1 - - - if (!tryParseNewVmess(str, config, allowInsecure)) { - if (str.indexOf("?") > 0) { - if (!tryResolveVmess4Kitsunebi(str, config)) { - return R.string.toast_incorrect_protocol - } - } else { - var result = str.replace(EConfigType.VMESS.protocolScheme, "") - result = Utils.decode(result) - if (TextUtils.isEmpty(result)) { - return R.string.toast_decoding_failed - } - val vmessQRCode = Gson().fromJson(result, VmessQRCode::class.java) - // Although VmessQRCode fields are non null, looks like Gson may still create null fields - if (TextUtils.isEmpty(vmessQRCode.add) - || TextUtils.isEmpty(vmessQRCode.port) - || TextUtils.isEmpty(vmessQRCode.id) - || TextUtils.isEmpty(vmessQRCode.net) - ) { - return R.string.toast_incorrect_protocol - } - - config.remarks = vmessQRCode.ps - config.outboundBean?.settings?.vnext?.get(0)?.let { vnext -> - vnext.address = vmessQRCode.add - vnext.port = Utils.parseInt(vmessQRCode.port) - vnext.users[0].id = vmessQRCode.id - vnext.users[0].security = - if (TextUtils.isEmpty(vmessQRCode.scy)) DEFAULT_SECURITY else vmessQRCode.scy - vnext.users[0].alterId = Utils.parseInt(vmessQRCode.aid) - } - val sni = streamSetting.populateTransportSettings( - vmessQRCode.net, - vmessQRCode.type, - vmessQRCode.host, - vmessQRCode.path, - vmessQRCode.path, - vmessQRCode.host, - vmessQRCode.path, - vmessQRCode.type, - vmessQRCode.path, - vmessQRCode.host - ) - - val fingerprint = vmessQRCode.fp ?: streamSetting.tlsSettings?.fingerprint - streamSetting.populateTlsSettings( - vmessQRCode.tls, allowInsecure, - if (TextUtils.isEmpty(vmessQRCode.sni)) sni else vmessQRCode.sni, - fingerprint, vmessQRCode.alpn, null, null, null - ) - } - } + val config = if 
(str.startsWith(EConfigType.VMESS.protocolScheme)) { + VmessFmt.parseVmess(str) } else if (str.startsWith(EConfigType.SHADOWSOCKS.protocolScheme)) { - config = ServerConfig.create(EConfigType.SHADOWSOCKS) - if (!tryResolveResolveSip002(str, config)) { - var result = str.replace(EConfigType.SHADOWSOCKS.protocolScheme, "") - val indexSplit = result.indexOf("#") - if (indexSplit > 0) { - try { - config.remarks = - Utils.urlDecode(result.substring(indexSplit + 1, result.length)) - } catch (e: Exception) { - e.printStackTrace() - } - - result = result.substring(0, indexSplit) - } - - //part decode - val indexS = result.indexOf("@") - result = if (indexS > 0) { - Utils.decode(result.substring(0, indexS)) + result.substring( - indexS, - result.length - ) - } else { - Utils.decode(result) - } - - val legacyPattern = "^(.+?):(.*)@(.+?):(\\d+?)/?$".toRegex() - val match = legacyPattern.matchEntire(result) - ?: return R.string.toast_incorrect_protocol - - config.outboundBean?.settings?.servers?.get(0)?.let { server -> - server.address = match.groupValues[3].removeSurrounding("[", "]") - server.port = match.groupValues[4].toInt() - server.password = match.groupValues[2] - server.method = match.groupValues[1].lowercase() - } - } + ShadowsocksFmt.parseShadowsocks(str) } else if (str.startsWith(EConfigType.SOCKS.protocolScheme)) { - var result = str.replace(EConfigType.SOCKS.protocolScheme, "") - val indexSplit = result.indexOf("#") - config = ServerConfig.create(EConfigType.SOCKS) - if (indexSplit > 0) { - try { - config.remarks = - Utils.urlDecode(result.substring(indexSplit + 1, result.length)) - } catch (e: Exception) { - e.printStackTrace() - } - - result = result.substring(0, indexSplit) - } - - //part decode - val indexS = result.indexOf("@") - if (indexS > 0) { - result = Utils.decode(result.substring(0, indexS)) + result.substring( - indexS, - result.length - ) - } else { - result = Utils.decode(result) - } - - val legacyPattern = "^(.*):(.*)@(.+?):(\\d+?)$".toRegex() - 
val match = - legacyPattern.matchEntire(result) ?: return R.string.toast_incorrect_protocol - - config.outboundBean?.settings?.servers?.get(0)?.let { server -> - server.address = match.groupValues[3].removeSurrounding("[", "]") - server.port = match.groupValues[4].toInt() - val socksUsersBean = - V2rayConfig.OutboundBean.OutSettingsBean.ServersBean.SocksUsersBean() - socksUsersBean.user = match.groupValues[1] - socksUsersBean.pass = match.groupValues[2] - server.users = listOf(socksUsersBean) - } + SocksFmt.parseSocks(str) } else if (str.startsWith(EConfigType.TROJAN.protocolScheme)) { - val uri = URI(Utils.fixIllegalUrl(str)) - config = ServerConfig.create(EConfigType.TROJAN) - config.remarks = Utils.urlDecode(uri.fragment ?: "") - - var flow = "" - var fingerprint = config.outboundBean?.streamSettings?.tlsSettings?.fingerprint - if (uri.rawQuery != null) { - val queryParam = uri.rawQuery.split("&") - .associate { it.split("=").let { (k, v) -> k to Utils.urlDecode(v) } } - - val sni = config.outboundBean?.streamSettings?.populateTransportSettings( - queryParam["type"] ?: "tcp", - queryParam["headerType"], - queryParam["host"], - queryParam["path"], - queryParam["seed"], - queryParam["quicSecurity"], - queryParam["key"], - queryParam["mode"], - queryParam["serviceName"], - queryParam["authority"] - ) - fingerprint = queryParam["fp"] ?: "" - config.outboundBean?.streamSettings?.populateTlsSettings( - queryParam["security"] ?: TLS, - allowInsecure, queryParam["sni"] ?: sni!!, fingerprint, queryParam["alpn"], - null, null, null - ) - flow = queryParam["flow"] ?: "" - } else { - config.outboundBean?.streamSettings?.populateTlsSettings( - TLS, allowInsecure, "", - fingerprint, null, null, null, null - ) - } - - config.outboundBean?.settings?.servers?.get(0)?.let { server -> - server.address = uri.idnHost - server.port = uri.port - server.password = uri.userInfo - server.flow = flow - } + TrojanFmt.parseTrojan(str) } else if 
(str.startsWith(EConfigType.VLESS.protocolScheme)) { - val uri = URI(Utils.fixIllegalUrl(str)) - val queryParam = uri.rawQuery.split("&") - .associate { it.split("=").let { (k, v) -> k to Utils.urlDecode(v) } } - config = ServerConfig.create(EConfigType.VLESS) - val streamSetting = config.outboundBean?.streamSettings ?: return -1 - - config.remarks = Utils.urlDecode(uri.fragment ?: "") - config.outboundBean?.settings?.vnext?.get(0)?.let { vnext -> - vnext.address = uri.idnHost - vnext.port = uri.port - vnext.users[0].id = uri.userInfo - vnext.users[0].encryption = queryParam["encryption"] ?: "none" - vnext.users[0].flow = queryParam["flow"] ?: "" - } - - val sni = streamSetting.populateTransportSettings( - queryParam["type"] ?: "tcp", - queryParam["headerType"], - queryParam["host"], - queryParam["path"], - queryParam["seed"], - queryParam["quicSecurity"], - queryParam["key"], - queryParam["mode"], - queryParam["serviceName"], - queryParam["authority"] - ) - streamSetting.populateTlsSettings( - queryParam["security"] ?: "", - allowInsecure, - queryParam["sni"] ?: sni, - queryParam["fp"] ?: "", - queryParam["alpn"], - queryParam["pbk"] ?: "", - queryParam["sid"] ?: "", - queryParam["spx"] ?: "" - ) + VlessFmt.parseVless(str) } else if (str.startsWith(EConfigType.WIREGUARD.protocolScheme)) { - val uri = URI(Utils.fixIllegalUrl(str)) - config = ServerConfig.create(EConfigType.WIREGUARD) - config.remarks = Utils.urlDecode(uri.fragment ?: "") - - if (uri.rawQuery != null) { - val queryParam = uri.rawQuery.split("&") - .associate { it.split("=").let { (k, v) -> k to Utils.urlDecode(v) } } - - config.outboundBean?.settings?.let { wireguard -> - wireguard.secretKey = uri.userInfo - wireguard.address = - (queryParam["address"] ?: WIREGUARD_LOCAL_ADDRESS_V4).removeWhiteSpace() - .split(",") - wireguard.peers?.get(0)?.publicKey = queryParam["publickey"] ?: "" - wireguard.peers?.get(0)?.endpoint = "${uri.idnHost}:${uri.port}" - wireguard.mtu = Utils.parseInt(queryParam["mtu"] 
?: WIREGUARD_LOCAL_MTU) - wireguard.reserved = - (queryParam["reserved"] ?: "0,0,0").removeWhiteSpace().split(",") - .map { it.toInt() } - } - } + WireguardFmt.parseWireguard(str) + } else { + null } + if (config == null) { return R.string.toast_incorrect_protocol } @@ -485,171 +260,6 @@ object AngConfigManager { return 0 } - private fun tryParseNewVmess( - uriString: String, - config: ServerConfig, - allowInsecure: Boolean - ): Boolean { - return runCatching { - val uri = URI(Utils.fixIllegalUrl(uriString)) - check(uri.scheme == "vmess") - val (_, protocol, tlsStr, uuid, alterId) = - Regex("(tcp|http|ws|kcp|quic|grpc)(\\+tls)?:([0-9a-z]{8}-[0-9a-z]{4}-[0-9a-z]{4}-[0-9a-z]{4}-[0-9a-z]{12})") - .matchEntire(uri.userInfo)?.groupValues - ?: error("parse user info fail.") - val tls = tlsStr.isNotBlank() - val queryParam = uri.rawQuery.split("&") - .associate { it.split("=").let { (k, v) -> k to Utils.urlDecode(v) } } - - val streamSetting = config.outboundBean?.streamSettings ?: return false - config.remarks = Utils.urlDecode(uri.fragment ?: "") - config.outboundBean.settings?.vnext?.get(0)?.let { vnext -> - vnext.address = uri.idnHost - vnext.port = uri.port - vnext.users[0].id = uuid - vnext.users[0].security = DEFAULT_SECURITY - vnext.users[0].alterId = alterId.toInt() - } - var fingerprint = streamSetting.tlsSettings?.fingerprint - val sni = streamSetting.populateTransportSettings(protocol, - queryParam["type"], - queryParam["host"]?.split("|")?.get(0) ?: "", - queryParam["path"]?.takeIf { it.trim() != "/" } ?: "", - queryParam["seed"], - queryParam["security"], - queryParam["key"], - queryParam["mode"], - queryParam["serviceName"], - queryParam["authority"]) - streamSetting.populateTlsSettings( - if (tls) TLS else "", allowInsecure, sni, fingerprint, null, - null, null, null - ) - true - }.getOrElse { false } - } - - private fun tryResolveVmess4Kitsunebi(server: String, config: ServerConfig): Boolean { - - var result = 
server.replace(EConfigType.VMESS.protocolScheme, "") - val indexSplit = result.indexOf("?") - if (indexSplit > 0) { - result = result.substring(0, indexSplit) - } - result = Utils.decode(result) - - val arr1 = result.split('@') - if (arr1.count() != 2) { - return false - } - val arr21 = arr1[0].split(':') - val arr22 = arr1[1].split(':') - if (arr21.count() != 2) { - return false - } - - config.remarks = "Alien" - config.outboundBean?.settings?.vnext?.get(0)?.let { vnext -> - vnext.address = arr22[0] - vnext.port = Utils.parseInt(arr22[1]) - vnext.users[0].id = arr21[1] - vnext.users[0].security = arr21[0] - vnext.users[0].alterId = 0 - } - return true - } - - private fun tryResolveResolveSip002(str: String, config: ServerConfig): Boolean { - try { - val uri = URI(Utils.fixIllegalUrl(str)) - config.remarks = Utils.urlDecode(uri.fragment ?: "") - - val method: String - val password: String - if (uri.userInfo.contains(":")) { - val arrUserInfo = uri.userInfo.split(":").map { it.trim() } - if (arrUserInfo.count() != 2) { - return false - } - method = arrUserInfo[0] - password = Utils.urlDecode(arrUserInfo[1]) - } else { - val base64Decode = Utils.decode(uri.userInfo) - val arrUserInfo = base64Decode.split(":").map { it.trim() } - if (arrUserInfo.count() < 2) { - return false - } - method = arrUserInfo[0] - password = base64Decode.substringAfter(":") - } - - val query = Utils.urlDecode(uri.query ?: "") - if (query != "") { - val queryPairs = HashMap() - val pairs = query.split(";") - Log.d(AppConfig.ANG_PACKAGE, pairs.toString()) - for (pair in pairs) { - val idx = pair.indexOf("=") - if (idx == -1) { - queryPairs[Utils.urlDecode(pair)] = ""; - } else { - queryPairs[Utils.urlDecode(pair.substring(0, idx))] = - Utils.urlDecode(pair.substring(idx + 1)) - } - } - Log.d(AppConfig.ANG_PACKAGE, queryPairs.toString()) - var sni: String? 
= "" - if (queryPairs["plugin"] == "obfs-local" && queryPairs["obfs"] == "http") { - sni = config.outboundBean?.streamSettings?.populateTransportSettings( - "tcp", - "http", - queryPairs["obfs-host"], - queryPairs["path"], - null, - null, - null, - null, - null, - null - ) - } else if (queryPairs["plugin"] == "v2ray-plugin") { - var network = "ws"; - if (queryPairs["mode"] == "quic") { - network = "quic"; - } - sni = config.outboundBean?.streamSettings?.populateTransportSettings( - network, - null, - queryPairs["host"], - queryPairs["path"], - null, - null, - null, - null, - null, - null - ) - } - if ("tls" in queryPairs) { - config.outboundBean?.streamSettings?.populateTlsSettings( - "tls", false, sni ?: "", null, null, null, null, null - ) - } - - } - - config.outboundBean?.settings?.servers?.get(0)?.let { server -> - server.address = uri.idnHost - server.port = uri.port - server.password = password - server.method = method - } - return true - } catch (e: Exception) { - Log.d(AppConfig.ANG_PACKAGE, e.toString()) - return false - } - } /** * share config @@ -657,211 +267,15 @@ object AngConfigManager { private fun shareConfig(guid: String): String { try { val config = MmkvManager.decodeServerConfig(guid) ?: return "" - val outbound = config.getProxyOutbound() ?: return "" - val streamSetting = - outbound.streamSettings ?: V2rayConfig.OutboundBean.StreamSettingsBean() - if (config.configType != EConfigType.WIREGUARD) { - if (outbound.streamSettings == null) return "" - } + return config.configType.protocolScheme + when (config.configType) { - EConfigType.VMESS -> { - val vmessQRCode = VmessQRCode() - vmessQRCode.v = "2" - vmessQRCode.ps = config.remarks - vmessQRCode.add = outbound.getServerAddress().orEmpty() - vmessQRCode.port = outbound.getServerPort().toString() - vmessQRCode.id = outbound.getPassword().orEmpty() - vmessQRCode.aid = - outbound.settings?.vnext?.get(0)?.users?.get(0)?.alterId.toString() - vmessQRCode.scy = - 
outbound.settings?.vnext?.get(0)?.users?.get(0)?.security.toString() - vmessQRCode.net = streamSetting.network - vmessQRCode.tls = streamSetting.security - vmessQRCode.sni = streamSetting.tlsSettings?.serverName.orEmpty() - vmessQRCode.alpn = - Utils.removeWhiteSpace(streamSetting.tlsSettings?.alpn?.joinToString()) - .orEmpty() - vmessQRCode.fp = streamSetting.tlsSettings?.fingerprint.orEmpty() - outbound.getTransportSettingDetails()?.let { transportDetails -> - vmessQRCode.type = transportDetails[0] - vmessQRCode.host = transportDetails[1] - vmessQRCode.path = transportDetails[2] - } - val json = Gson().toJson(vmessQRCode) - Utils.encode(json) - } - + EConfigType.VMESS -> VmessFmt.toUri(config) EConfigType.CUSTOM -> "" - - EConfigType.SHADOWSOCKS -> { - val remark = "#" + Utils.urlEncode(config.remarks) - val pw = - Utils.encode("${outbound.getSecurityEncryption()}:${outbound.getPassword()}") - val url = String.format( - "%s@%s:%s", - pw, - Utils.getIpv6Address(outbound.getServerAddress()!!), - outbound.getServerPort() - ) - url + remark - } - - EConfigType.SOCKS -> { - val remark = "#" + Utils.urlEncode(config.remarks) - val pw = - if (outbound.settings?.servers?.get(0)?.users?.get(0)?.user != null) - "${outbound.settings?.servers?.get(0)?.users?.get(0)?.user}:${outbound.getPassword()}" - else - ":" - val url = String.format( - "%s@%s:%s", - Utils.encode(pw), - Utils.getIpv6Address(outbound.getServerAddress()!!), - outbound.getServerPort() - ) - url + remark - } - - EConfigType.VLESS, - EConfigType.TROJAN -> { - val remark = "#" + Utils.urlEncode(config.remarks) - - val dicQuery = HashMap() - if (config.configType == EConfigType.VLESS) { - outbound.settings?.vnext?.get(0)?.users?.get(0)?.flow?.let { - if (!TextUtils.isEmpty(it)) { - dicQuery["flow"] = it - } - } - dicQuery["encryption"] = - if (outbound.getSecurityEncryption().isNullOrEmpty()) "none" - else outbound.getSecurityEncryption().orEmpty() - } else if (config.configType == EConfigType.TROJAN) { - 
config.outboundBean?.settings?.servers?.get(0)?.flow?.let { - if (!TextUtils.isEmpty(it)) { - dicQuery["flow"] = it - } - } - } - - dicQuery["security"] = streamSetting.security.ifEmpty { "none" } - (streamSetting.tlsSettings - ?: streamSetting.realitySettings)?.let { tlsSetting -> - if (!TextUtils.isEmpty(tlsSetting.serverName)) { - dicQuery["sni"] = tlsSetting.serverName - } - if (!tlsSetting.alpn.isNullOrEmpty() && tlsSetting.alpn.isNotEmpty()) { - dicQuery["alpn"] = - Utils.removeWhiteSpace(tlsSetting.alpn.joinToString()).orEmpty() - } - if (!TextUtils.isEmpty(tlsSetting.fingerprint)) { - dicQuery["fp"] = tlsSetting.fingerprint!! - } - if (!TextUtils.isEmpty(tlsSetting.publicKey)) { - dicQuery["pbk"] = tlsSetting.publicKey!! - } - if (!TextUtils.isEmpty(tlsSetting.shortId)) { - dicQuery["sid"] = tlsSetting.shortId!! - } - if (!TextUtils.isEmpty(tlsSetting.spiderX)) { - dicQuery["spx"] = Utils.urlEncode(tlsSetting.spiderX!!) - } - } - dicQuery["type"] = - streamSetting.network.ifEmpty { V2rayConfig.DEFAULT_NETWORK } - - outbound.getTransportSettingDetails()?.let { transportDetails -> - when (streamSetting.network) { - "tcp" -> { - dicQuery["headerType"] = transportDetails[0].ifEmpty { "none" } - if (!TextUtils.isEmpty(transportDetails[1])) { - dicQuery["host"] = Utils.urlEncode(transportDetails[1]) - } - } - - "kcp" -> { - dicQuery["headerType"] = transportDetails[0].ifEmpty { "none" } - if (!TextUtils.isEmpty(transportDetails[2])) { - dicQuery["seed"] = Utils.urlEncode(transportDetails[2]) - } - } - - "ws", "httpupgrade" -> { - if (!TextUtils.isEmpty(transportDetails[1])) { - dicQuery["host"] = Utils.urlEncode(transportDetails[1]) - } - if (!TextUtils.isEmpty(transportDetails[2])) { - dicQuery["path"] = Utils.urlEncode(transportDetails[2]) - } - } - - "http", "h2" -> { - dicQuery["type"] = "http" - if (!TextUtils.isEmpty(transportDetails[1])) { - dicQuery["host"] = Utils.urlEncode(transportDetails[1]) - } - if (!TextUtils.isEmpty(transportDetails[2])) { - 
dicQuery["path"] = Utils.urlEncode(transportDetails[2]) - } - } - - "quic" -> { - dicQuery["headerType"] = transportDetails[0].ifEmpty { "none" } - dicQuery["quicSecurity"] = Utils.urlEncode(transportDetails[1]) - dicQuery["key"] = Utils.urlEncode(transportDetails[2]) - } - - "grpc" -> { - dicQuery["mode"] = transportDetails[0] - dicQuery["authority"] = Utils.urlEncode(transportDetails[1]) - dicQuery["serviceName"] = Utils.urlEncode(transportDetails[2]) - } - } - } - val query = "?" + dicQuery.toList().joinToString( - separator = "&", - transform = { it.first + "=" + it.second }) - - val url = String.format( - "%s@%s:%s", - outbound.getPassword(), - Utils.getIpv6Address(outbound.getServerAddress()!!), - outbound.getServerPort() - ) - url + query + remark - } - - EConfigType.WIREGUARD -> { - val remark = "#" + Utils.urlEncode(config.remarks) - - val dicQuery = HashMap() - dicQuery["publickey"] = - Utils.urlEncode(outbound.settings?.peers?.get(0)?.publicKey.toString()) - if (outbound.settings?.reserved != null) { - dicQuery["reserved"] = Utils.urlEncode( - Utils.removeWhiteSpace(outbound.settings?.reserved?.joinToString()) - .toString() - ) - } - dicQuery["address"] = Utils.urlEncode( - Utils.removeWhiteSpace((outbound.settings?.address as List<*>).joinToString()) - .toString() - ) - if (outbound.settings?.mtu != null) { - dicQuery["mtu"] = outbound.settings?.mtu.toString() - } - val query = "?" 
+ dicQuery.toList().joinToString( - separator = "&", - transform = { it.first + "=" + it.second }) - - val url = String.format( - "%s@%s:%s", - Utils.urlEncode(outbound.getPassword().toString()), - Utils.getIpv6Address(outbound.getServerAddress()!!), - outbound.getServerPort() - ) - url + query + remark - } + EConfigType.SHADOWSOCKS -> ShadowsocksFmt.toUri(config) + EConfigType.SOCKS -> SocksFmt.toUri(config) + EConfigType.VLESS-> VlessFmt.toUri(config) + EConfigType.TROJAN-> TrojanFmt.toUri(config) + EConfigType.WIREGUARD -> WireguardFmt.toUri(config) } } catch (e: Exception) { e.printStackTrace() @@ -1023,7 +437,7 @@ object AngConfigManager { return 0 } - fun appendCustomConfigServer(server: String?, subid: String): Int { + fun appendCustomConfigServer(server: String?, subid: String): Int { if (server == null) { return 0 } @@ -1052,7 +466,8 @@ object AngConfigManager { var count = 0 for (srv in serverList) { val config = ServerConfig.create(EConfigType.CUSTOM) - config.fullConfig = Gson().fromJson(Gson().toJson(srv), V2rayConfig::class.java) + config.fullConfig = + Gson().fromJson(Gson().toJson(srv), V2rayConfig::class.java) config.remarks = config.fullConfig?.remarks ?: ("%04d-".format(count + 1) + System.currentTimeMillis() .toString()) diff --git a/v2rayng/V2rayNG/app/src/main/kotlin/com/v2ray/ang/util/fmt/ShadowsocksFmt.kt b/v2rayng/V2rayNG/app/src/main/kotlin/com/v2ray/ang/util/fmt/ShadowsocksFmt.kt new file mode 100644 index 0000000000..f79ccb0815 --- /dev/null +++ b/v2rayng/V2rayNG/app/src/main/kotlin/com/v2ray/ang/util/fmt/ShadowsocksFmt.kt @@ -0,0 +1,159 @@ +package com.v2ray.ang.util.fmt + +import android.util.Log +import com.v2ray.ang.AppConfig +import com.v2ray.ang.dto.EConfigType +import com.v2ray.ang.dto.ServerConfig +import com.v2ray.ang.extension.idnHost +import com.v2ray.ang.util.Utils +import java.net.URI + +object ShadowsocksFmt { + fun parseShadowsocks(str: String): ServerConfig? 
{ + val config = ServerConfig.create(EConfigType.SHADOWSOCKS) + if (!tryResolveResolveSip002(str, config)) { + var result = str.replace(EConfigType.SHADOWSOCKS.protocolScheme, "") + val indexSplit = result.indexOf("#") + if (indexSplit > 0) { + try { + config.remarks = + Utils.urlDecode(result.substring(indexSplit + 1, result.length)) + } catch (e: Exception) { + e.printStackTrace() + } + + result = result.substring(0, indexSplit) + } + + //part decode + val indexS = result.indexOf("@") + result = if (indexS > 0) { + Utils.decode(result.substring(0, indexS)) + result.substring( + indexS, + result.length + ) + } else { + Utils.decode(result) + } + + val legacyPattern = "^(.+?):(.*)@(.+?):(\\d+?)/?$".toRegex() + val match = legacyPattern.matchEntire(result) + ?: return null + + config.outboundBean?.settings?.servers?.get(0)?.let { server -> + server.address = match.groupValues[3].removeSurrounding("[", "]") + server.port = match.groupValues[4].toInt() + server.password = match.groupValues[2] + server.method = match.groupValues[1].lowercase() + } + } + return config + } + + fun toUri(config: ServerConfig): String { + val outbound = config.getProxyOutbound() ?: return "" + val remark = "#" + Utils.urlEncode(config.remarks) + val pw = + Utils.encode("${outbound.getSecurityEncryption()}:${outbound.getPassword()}") + val url = String.format( + "%s@%s:%s", + pw, + Utils.getIpv6Address(outbound.getServerAddress()!!), + outbound.getServerPort() + ) + return url + remark + } + + private fun tryResolveResolveSip002(str: String, config: ServerConfig): Boolean { + try { + val uri = URI(Utils.fixIllegalUrl(str)) + config.remarks = Utils.urlDecode(uri.fragment ?: "") + + val method: String + val password: String + if (uri.userInfo.contains(":")) { + val arrUserInfo = uri.userInfo.split(":").map { it.trim() } + if (arrUserInfo.count() != 2) { + return false + } + method = arrUserInfo[0] + password = Utils.urlDecode(arrUserInfo[1]) + } else { + val base64Decode = 
Utils.decode(uri.userInfo) + val arrUserInfo = base64Decode.split(":").map { it.trim() } + if (arrUserInfo.count() < 2) { + return false + } + method = arrUserInfo[0] + password = base64Decode.substringAfter(":") + } + + val query = Utils.urlDecode(uri.query ?: "") + if (query != "") { + val queryPairs = HashMap() + val pairs = query.split(";") + Log.d(AppConfig.ANG_PACKAGE, pairs.toString()) + for (pair in pairs) { + val idx = pair.indexOf("=") + if (idx == -1) { + queryPairs[Utils.urlDecode(pair)] = ""; + } else { + queryPairs[Utils.urlDecode(pair.substring(0, idx))] = + Utils.urlDecode(pair.substring(idx + 1)) + } + } + Log.d(AppConfig.ANG_PACKAGE, queryPairs.toString()) + var sni: String? = "" + if (queryPairs["plugin"] == "obfs-local" && queryPairs["obfs"] == "http") { + sni = config.outboundBean?.streamSettings?.populateTransportSettings( + "tcp", + "http", + queryPairs["obfs-host"], + queryPairs["path"], + null, + null, + null, + null, + null, + null + ) + } else if (queryPairs["plugin"] == "v2ray-plugin") { + var network = "ws"; + if (queryPairs["mode"] == "quic") { + network = "quic"; + } + sni = config.outboundBean?.streamSettings?.populateTransportSettings( + network, + null, + queryPairs["host"], + queryPairs["path"], + null, + null, + null, + null, + null, + null + ) + } + if ("tls" in queryPairs) { + config.outboundBean?.streamSettings?.populateTlsSettings( + "tls", false, sni ?: "", null, null, null, null, null + ) + } + + } + + config.outboundBean?.settings?.servers?.get(0)?.let { server -> + server.address = uri.idnHost + server.port = uri.port + server.password = password + server.method = method + } + return true + } catch (e: Exception) { + Log.d(AppConfig.ANG_PACKAGE, e.toString()) + return false + } + } + +} \ No newline at end of file diff --git a/v2rayng/V2rayNG/app/src/main/kotlin/com/v2ray/ang/util/fmt/SocksFmt.kt b/v2rayng/V2rayNG/app/src/main/kotlin/com/v2ray/ang/util/fmt/SocksFmt.kt new file mode 100644 index 0000000000..420f8a68f9 --- 
/dev/null +++ b/v2rayng/V2rayNG/app/src/main/kotlin/com/v2ray/ang/util/fmt/SocksFmt.kt @@ -0,0 +1,69 @@ +package com.v2ray.ang.util.fmt + +import com.v2ray.ang.dto.EConfigType +import com.v2ray.ang.dto.ServerConfig +import com.v2ray.ang.dto.V2rayConfig +import com.v2ray.ang.util.Utils + +object SocksFmt { + fun parseSocks(str: String): ServerConfig? { + val config = ServerConfig.create(EConfigType.SOCKS) + var result = str.replace(EConfigType.SOCKS.protocolScheme, "") + val indexSplit = result.indexOf("#") + + if (indexSplit > 0) { + try { + config.remarks = + Utils.urlDecode(result.substring(indexSplit + 1, result.length)) + } catch (e: Exception) { + e.printStackTrace() + } + + result = result.substring(0, indexSplit) + } + + //part decode + val indexS = result.indexOf("@") + if (indexS > 0) { + result = Utils.decode(result.substring(0, indexS)) + result.substring( + indexS, + result.length + ) + } else { + result = Utils.decode(result) + } + + val legacyPattern = "^(.*):(.*)@(.+?):(\\d+?)$".toRegex() + val match = + legacyPattern.matchEntire(result) ?: return null + + config.outboundBean?.settings?.servers?.get(0)?.let { server -> + server.address = match.groupValues[3].removeSurrounding("[", "]") + server.port = match.groupValues[4].toInt() + val socksUsersBean = + V2rayConfig.OutboundBean.OutSettingsBean.ServersBean.SocksUsersBean() + socksUsersBean.user = match.groupValues[1] + socksUsersBean.pass = match.groupValues[2] + server.users = listOf(socksUsersBean) + } + + return config + } + + fun toUri(config: ServerConfig): String { + val outbound = config.getProxyOutbound() ?: return "" + val remark = "#" + Utils.urlEncode(config.remarks) + val pw = + if (outbound.settings?.servers?.get(0)?.users?.get(0)?.user != null) + "${outbound.settings?.servers?.get(0)?.users?.get(0)?.user}:${outbound.getPassword()}" + else + ":" + val url = String.format( + "%s@%s:%s", + Utils.encode(pw), + Utils.getIpv6Address(outbound.getServerAddress()!!), + outbound.getServerPort() + 
) + return url + remark + } +} \ No newline at end of file diff --git a/v2rayng/V2rayNG/app/src/main/kotlin/com/v2ray/ang/util/fmt/TrojanFmt.kt b/v2rayng/V2rayNG/app/src/main/kotlin/com/v2ray/ang/util/fmt/TrojanFmt.kt new file mode 100644 index 0000000000..b3365fcb62 --- /dev/null +++ b/v2rayng/V2rayNG/app/src/main/kotlin/com/v2ray/ang/util/fmt/TrojanFmt.kt @@ -0,0 +1,170 @@ +package com.v2ray.ang.util.fmt + +import android.text.TextUtils +import com.tencent.mmkv.MMKV +import com.v2ray.ang.AppConfig +import com.v2ray.ang.dto.EConfigType +import com.v2ray.ang.dto.ServerConfig +import com.v2ray.ang.dto.V2rayConfig +import com.v2ray.ang.extension.idnHost +import com.v2ray.ang.util.MmkvManager +import com.v2ray.ang.util.Utils +import java.net.URI + +object TrojanFmt { + private val settingsStorage by lazy { + MMKV.mmkvWithID( + MmkvManager.ID_SETTING, + MMKV.MULTI_PROCESS_MODE + ) + } + + fun parseTrojan(str: String): ServerConfig? { + val allowInsecure = settingsStorage?.decodeBool(AppConfig.PREF_ALLOW_INSECURE) ?: false + val config = ServerConfig.create(EConfigType.TROJAN) + + val uri = URI(Utils.fixIllegalUrl(str)) + config.remarks = Utils.urlDecode(uri.fragment ?: "") + + var flow = "" + var fingerprint = config.outboundBean?.streamSettings?.tlsSettings?.fingerprint + if (uri.rawQuery.isNullOrEmpty()) { + config.outboundBean?.streamSettings?.populateTlsSettings( + V2rayConfig.TLS, allowInsecure, "", + fingerprint, null, null, null, null + ) + } else { + val queryParam = uri.rawQuery.split("&") + .associate { it.split("=").let { (k, v) -> k to Utils.urlDecode(v) } } + + val sni = config.outboundBean?.streamSettings?.populateTransportSettings( + queryParam["type"] ?: "tcp", + queryParam["headerType"], + queryParam["host"], + queryParam["path"], + queryParam["seed"], + queryParam["quicSecurity"], + queryParam["key"], + queryParam["mode"], + queryParam["serviceName"], + queryParam["authority"] + ) + fingerprint = queryParam["fp"] ?: "" + 
config.outboundBean?.streamSettings?.populateTlsSettings( + queryParam["security"] ?: V2rayConfig.TLS, + allowInsecure, queryParam["sni"] ?: sni!!, fingerprint, queryParam["alpn"], + null, null, null + ) + flow = queryParam["flow"] ?: "" + } + + config.outboundBean?.settings?.servers?.get(0)?.let { server -> + server.address = uri.idnHost + server.port = uri.port + server.password = uri.userInfo + server.flow = flow + } + + return config + } + + fun toUri(config: ServerConfig): String { + val outbound = config.getProxyOutbound() ?: return "" + val streamSetting = outbound.streamSettings ?: V2rayConfig.OutboundBean.StreamSettingsBean() + + val remark = "#" + Utils.urlEncode(config.remarks) + val dicQuery = HashMap() + config.outboundBean?.settings?.servers?.get(0)?.flow?.let { + if (!TextUtils.isEmpty(it)) { + dicQuery["flow"] = it + } + + } + + dicQuery["security"] = streamSetting.security.ifEmpty { "none" } + (streamSetting.tlsSettings + ?: streamSetting.realitySettings)?.let { tlsSetting -> + if (!TextUtils.isEmpty(tlsSetting.serverName)) { + dicQuery["sni"] = tlsSetting.serverName + } + if (!tlsSetting.alpn.isNullOrEmpty() && tlsSetting.alpn.isNotEmpty()) { + dicQuery["alpn"] = + Utils.removeWhiteSpace(tlsSetting.alpn.joinToString()).orEmpty() + } + if (!TextUtils.isEmpty(tlsSetting.fingerprint)) { + dicQuery["fp"] = tlsSetting.fingerprint!! + } + if (!TextUtils.isEmpty(tlsSetting.publicKey)) { + dicQuery["pbk"] = tlsSetting.publicKey!! + } + if (!TextUtils.isEmpty(tlsSetting.shortId)) { + dicQuery["sid"] = tlsSetting.shortId!! + } + if (!TextUtils.isEmpty(tlsSetting.spiderX)) { + dicQuery["spx"] = Utils.urlEncode(tlsSetting.spiderX!!) 
+ } + } + dicQuery["type"] = + streamSetting.network.ifEmpty { V2rayConfig.DEFAULT_NETWORK } + + outbound.getTransportSettingDetails()?.let { transportDetails -> + when (streamSetting.network) { + "tcp" -> { + dicQuery["headerType"] = transportDetails[0].ifEmpty { "none" } + if (!TextUtils.isEmpty(transportDetails[1])) { + dicQuery["host"] = Utils.urlEncode(transportDetails[1]) + } + } + + "kcp" -> { + dicQuery["headerType"] = transportDetails[0].ifEmpty { "none" } + if (!TextUtils.isEmpty(transportDetails[2])) { + dicQuery["seed"] = Utils.urlEncode(transportDetails[2]) + } + } + + "ws", "httpupgrade" -> { + if (!TextUtils.isEmpty(transportDetails[1])) { + dicQuery["host"] = Utils.urlEncode(transportDetails[1]) + } + if (!TextUtils.isEmpty(transportDetails[2])) { + dicQuery["path"] = Utils.urlEncode(transportDetails[2]) + } + } + + "http", "h2" -> { + dicQuery["type"] = "http" + if (!TextUtils.isEmpty(transportDetails[1])) { + dicQuery["host"] = Utils.urlEncode(transportDetails[1]) + } + if (!TextUtils.isEmpty(transportDetails[2])) { + dicQuery["path"] = Utils.urlEncode(transportDetails[2]) + } + } + + "quic" -> { + dicQuery["headerType"] = transportDetails[0].ifEmpty { "none" } + dicQuery["quicSecurity"] = Utils.urlEncode(transportDetails[1]) + dicQuery["key"] = Utils.urlEncode(transportDetails[2]) + } + + "grpc" -> { + dicQuery["mode"] = transportDetails[0] + dicQuery["authority"] = Utils.urlEncode(transportDetails[1]) + dicQuery["serviceName"] = Utils.urlEncode(transportDetails[2]) + } + } + } + val query = "?" 
+ dicQuery.toList().joinToString( + separator = "&", + transform = { it.first + "=" + it.second }) + + val url = String.format( + "%s@%s:%s", + outbound.getPassword(), + Utils.getIpv6Address(outbound.getServerAddress()!!), + outbound.getServerPort() + ) + return url + query + remark + } +} \ No newline at end of file diff --git a/v2rayng/V2rayNG/app/src/main/kotlin/com/v2ray/ang/util/fmt/VlessFmt.kt b/v2rayng/V2rayNG/app/src/main/kotlin/com/v2ray/ang/util/fmt/VlessFmt.kt new file mode 100644 index 0000000000..e689a31e83 --- /dev/null +++ b/v2rayng/V2rayNG/app/src/main/kotlin/com/v2ray/ang/util/fmt/VlessFmt.kt @@ -0,0 +1,170 @@ +package com.v2ray.ang.util.fmt + +import android.text.TextUtils +import com.tencent.mmkv.MMKV +import com.v2ray.ang.AppConfig +import com.v2ray.ang.dto.EConfigType +import com.v2ray.ang.dto.ServerConfig +import com.v2ray.ang.dto.V2rayConfig +import com.v2ray.ang.extension.idnHost +import com.v2ray.ang.util.MmkvManager +import com.v2ray.ang.util.Utils +import java.net.URI + +object VlessFmt { + private val settingsStorage by lazy { + MMKV.mmkvWithID( + MmkvManager.ID_SETTING, + MMKV.MULTI_PROCESS_MODE + ) + } + + fun parseVless(str: String): ServerConfig? 
{ + val allowInsecure = settingsStorage?.decodeBool(AppConfig.PREF_ALLOW_INSECURE) ?: false + val config = ServerConfig.create(EConfigType.VLESS) + + val uri = URI(Utils.fixIllegalUrl(str)) + if (uri.rawQuery.isNullOrEmpty()) return null + val queryParam = uri.rawQuery.split("&") + .associate { it.split("=").let { (k, v) -> k to Utils.urlDecode(v) } } + + val streamSetting = config.outboundBean?.streamSettings ?: return null + + config.remarks = Utils.urlDecode(uri.fragment ?: "") + config.outboundBean?.settings?.vnext?.get(0)?.let { vnext -> + vnext.address = uri.idnHost + vnext.port = uri.port + vnext.users[0].id = uri.userInfo + vnext.users[0].encryption = queryParam["encryption"] ?: "none" + vnext.users[0].flow = queryParam["flow"] ?: "" + } + + val sni = streamSetting.populateTransportSettings( + queryParam["type"] ?: "tcp", + queryParam["headerType"], + queryParam["host"], + queryParam["path"], + queryParam["seed"], + queryParam["quicSecurity"], + queryParam["key"], + queryParam["mode"], + queryParam["serviceName"], + queryParam["authority"] + ) + streamSetting.populateTlsSettings( + queryParam["security"] ?: "", + allowInsecure, + queryParam["sni"] ?: sni, + queryParam["fp"] ?: "", + queryParam["alpn"], + queryParam["pbk"] ?: "", + queryParam["sid"] ?: "", + queryParam["spx"] ?: "" + ) + + return config + } + + fun toUri(config: ServerConfig): String { + val outbound = config.getProxyOutbound() ?: return "" + val streamSetting = outbound.streamSettings ?: V2rayConfig.OutboundBean.StreamSettingsBean() + + val remark = "#" + Utils.urlEncode(config.remarks) + val dicQuery = HashMap() + outbound.settings?.vnext?.get(0)?.users?.get(0)?.flow?.let { + if (!TextUtils.isEmpty(it)) { + dicQuery["flow"] = it + } + } + dicQuery["encryption"] = + if (outbound.getSecurityEncryption().isNullOrEmpty()) "none" + else outbound.getSecurityEncryption().orEmpty() + + + dicQuery["security"] = streamSetting.security.ifEmpty { "none" } + (streamSetting.tlsSettings + ?: 
streamSetting.realitySettings)?.let { tlsSetting -> + if (!TextUtils.isEmpty(tlsSetting.serverName)) { + dicQuery["sni"] = tlsSetting.serverName + } + if (!tlsSetting.alpn.isNullOrEmpty() && tlsSetting.alpn.isNotEmpty()) { + dicQuery["alpn"] = + Utils.removeWhiteSpace(tlsSetting.alpn.joinToString()).orEmpty() + } + if (!TextUtils.isEmpty(tlsSetting.fingerprint)) { + dicQuery["fp"] = tlsSetting.fingerprint!! + } + if (!TextUtils.isEmpty(tlsSetting.publicKey)) { + dicQuery["pbk"] = tlsSetting.publicKey!! + } + if (!TextUtils.isEmpty(tlsSetting.shortId)) { + dicQuery["sid"] = tlsSetting.shortId!! + } + if (!TextUtils.isEmpty(tlsSetting.spiderX)) { + dicQuery["spx"] = Utils.urlEncode(tlsSetting.spiderX!!) + } + } + dicQuery["type"] = + streamSetting.network.ifEmpty { V2rayConfig.DEFAULT_NETWORK } + + outbound.getTransportSettingDetails()?.let { transportDetails -> + when (streamSetting.network) { + "tcp" -> { + dicQuery["headerType"] = transportDetails[0].ifEmpty { "none" } + if (!TextUtils.isEmpty(transportDetails[1])) { + dicQuery["host"] = Utils.urlEncode(transportDetails[1]) + } + } + + "kcp" -> { + dicQuery["headerType"] = transportDetails[0].ifEmpty { "none" } + if (!TextUtils.isEmpty(transportDetails[2])) { + dicQuery["seed"] = Utils.urlEncode(transportDetails[2]) + } + } + + "ws", "httpupgrade" -> { + if (!TextUtils.isEmpty(transportDetails[1])) { + dicQuery["host"] = Utils.urlEncode(transportDetails[1]) + } + if (!TextUtils.isEmpty(transportDetails[2])) { + dicQuery["path"] = Utils.urlEncode(transportDetails[2]) + } + } + + "http", "h2" -> { + dicQuery["type"] = "http" + if (!TextUtils.isEmpty(transportDetails[1])) { + dicQuery["host"] = Utils.urlEncode(transportDetails[1]) + } + if (!TextUtils.isEmpty(transportDetails[2])) { + dicQuery["path"] = Utils.urlEncode(transportDetails[2]) + } + } + + "quic" -> { + dicQuery["headerType"] = transportDetails[0].ifEmpty { "none" } + dicQuery["quicSecurity"] = Utils.urlEncode(transportDetails[1]) + dicQuery["key"] = 
Utils.urlEncode(transportDetails[2]) + } + + "grpc" -> { + dicQuery["mode"] = transportDetails[0] + dicQuery["authority"] = Utils.urlEncode(transportDetails[1]) + dicQuery["serviceName"] = Utils.urlEncode(transportDetails[2]) + } + } + } + val query = "?" + dicQuery.toList().joinToString( + separator = "&", + transform = { it.first + "=" + it.second }) + + val url = String.format( + "%s@%s:%s", + outbound.getPassword(), + Utils.getIpv6Address(outbound.getServerAddress()!!), + outbound.getServerPort() + ) + return url + query + remark + } +} \ No newline at end of file diff --git a/v2rayng/V2rayNG/app/src/main/kotlin/com/v2ray/ang/util/fmt/VmessFmt.kt b/v2rayng/V2rayNG/app/src/main/kotlin/com/v2ray/ang/util/fmt/VmessFmt.kt new file mode 100644 index 0000000000..83dbe67905 --- /dev/null +++ b/v2rayng/V2rayNG/app/src/main/kotlin/com/v2ray/ang/util/fmt/VmessFmt.kt @@ -0,0 +1,193 @@ +package com.v2ray.ang.util.fmt + +import android.text.TextUtils +import android.util.Log +import com.google.gson.Gson +import com.tencent.mmkv.MMKV +import com.v2ray.ang.AppConfig +import com.v2ray.ang.dto.EConfigType +import com.v2ray.ang.dto.ServerConfig +import com.v2ray.ang.dto.V2rayConfig +import com.v2ray.ang.dto.VmessQRCode +import com.v2ray.ang.extension.idnHost +import com.v2ray.ang.util.MmkvManager +import com.v2ray.ang.util.Utils +import java.net.URI + +object VmessFmt { + private val settingsStorage by lazy { + MMKV.mmkvWithID( + MmkvManager.ID_SETTING, MMKV.MULTI_PROCESS_MODE + ) + } + + fun parseVmess(str: String): ServerConfig? 
{ + val allowInsecure = settingsStorage?.decodeBool(AppConfig.PREF_ALLOW_INSECURE) ?: false + val config = ServerConfig.create(EConfigType.VMESS) + val streamSetting = config.outboundBean?.streamSettings ?: return null + + if (!tryParseNewVmess(str, config, allowInsecure)) { + if (str.indexOf("?") > 0) { + if (!tryResolveVmess4Kitsunebi(str, config)) { + Log.d(AppConfig.ANG_PACKAGE, "R.string.toast_incorrect_protocol") + return null + } + } else { + var result = str.replace(EConfigType.VMESS.protocolScheme, "") + result = Utils.decode(result) + if (TextUtils.isEmpty(result)) { + Log.d(AppConfig.ANG_PACKAGE, "R.string.toast_decoding_failed") + return null + } + val vmessQRCode = Gson().fromJson(result, VmessQRCode::class.java) + // Although VmessQRCode fields are non null, looks like Gson may still create null fields + if (TextUtils.isEmpty(vmessQRCode.add) || TextUtils.isEmpty(vmessQRCode.port) || TextUtils.isEmpty( + vmessQRCode.id + ) || TextUtils.isEmpty(vmessQRCode.net) + ) { + Log.d(AppConfig.ANG_PACKAGE, "R.string.toast_incorrect_protocol") + return null + } + + config.remarks = vmessQRCode.ps + config.outboundBean?.settings?.vnext?.get(0)?.let { vnext -> + vnext.address = vmessQRCode.add + vnext.port = Utils.parseInt(vmessQRCode.port) + vnext.users[0].id = vmessQRCode.id + vnext.users[0].security = + if (TextUtils.isEmpty(vmessQRCode.scy)) V2rayConfig.DEFAULT_SECURITY else vmessQRCode.scy + vnext.users[0].alterId = Utils.parseInt(vmessQRCode.aid) + } + val sni = streamSetting.populateTransportSettings( + vmessQRCode.net, + vmessQRCode.type, + vmessQRCode.host, + vmessQRCode.path, + vmessQRCode.path, + vmessQRCode.host, + vmessQRCode.path, + vmessQRCode.type, + vmessQRCode.path, + vmessQRCode.host + ) + + val fingerprint = vmessQRCode.fp ?: streamSetting.tlsSettings?.fingerprint + streamSetting.populateTlsSettings( + vmessQRCode.tls, + allowInsecure, + if (TextUtils.isEmpty(vmessQRCode.sni)) sni else vmessQRCode.sni, + fingerprint, + vmessQRCode.alpn, + null, 
+ null, + null + ) + } + } + return config + } + + fun toUri(config: ServerConfig): String { + val outbound = config.getProxyOutbound() ?: return "" + val streamSetting = outbound.streamSettings ?: V2rayConfig.OutboundBean.StreamSettingsBean() + + val vmessQRCode = VmessQRCode() + vmessQRCode.v = "2" + vmessQRCode.ps = config.remarks + vmessQRCode.add = outbound.getServerAddress().orEmpty() + vmessQRCode.port = outbound.getServerPort().toString() + vmessQRCode.id = outbound.getPassword().orEmpty() + vmessQRCode.aid = outbound.settings?.vnext?.get(0)?.users?.get(0)?.alterId.toString() + vmessQRCode.scy = outbound.settings?.vnext?.get(0)?.users?.get(0)?.security.toString() + vmessQRCode.net = streamSetting.network + vmessQRCode.tls = streamSetting.security + vmessQRCode.sni = streamSetting.tlsSettings?.serverName.orEmpty() + vmessQRCode.alpn = + Utils.removeWhiteSpace(streamSetting.tlsSettings?.alpn?.joinToString()).orEmpty() + vmessQRCode.fp = streamSetting.tlsSettings?.fingerprint.orEmpty() + outbound.getTransportSettingDetails()?.let { transportDetails -> + vmessQRCode.type = transportDetails[0] + vmessQRCode.host = transportDetails[1] + vmessQRCode.path = transportDetails[2] + } + val json = Gson().toJson(vmessQRCode) + return Utils.encode(json) + } + + private fun tryParseNewVmess( + uriString: String, config: ServerConfig, allowInsecure: Boolean + ): Boolean { + return runCatching { + val uri = URI(Utils.fixIllegalUrl(uriString)) + check(uri.scheme == "vmess") + val (_, protocol, tlsStr, uuid, alterId) = Regex("(tcp|http|ws|kcp|quic|grpc)(\\+tls)?:([0-9a-z]{8}-[0-9a-z]{4}-[0-9a-z]{4}-[0-9a-z]{4}-[0-9a-z]{12})").matchEntire( + uri.userInfo + )?.groupValues ?: error("parse user info fail.") + val tls = tlsStr.isNotBlank() + val queryParam = uri.rawQuery.split("&") + .associate { it.split("=").let { (k, v) -> k to Utils.urlDecode(v) } } + + val streamSetting = config.outboundBean?.streamSettings ?: return false + config.remarks = Utils.urlDecode(uri.fragment ?: 
"") + config.outboundBean.settings?.vnext?.get(0)?.let { vnext -> + vnext.address = uri.idnHost + vnext.port = uri.port + vnext.users[0].id = uuid + vnext.users[0].security = V2rayConfig.DEFAULT_SECURITY + vnext.users[0].alterId = alterId.toInt() + } + var fingerprint = streamSetting.tlsSettings?.fingerprint + val sni = streamSetting.populateTransportSettings(protocol, + queryParam["type"], + queryParam["host"]?.split("|")?.get(0) ?: "", + queryParam["path"]?.takeIf { it.trim() != "/" } ?: "", + queryParam["seed"], + queryParam["security"], + queryParam["key"], + queryParam["mode"], + queryParam["serviceName"], + queryParam["authority"]) + streamSetting.populateTlsSettings( + if (tls) V2rayConfig.TLS else "", + allowInsecure, + sni, + fingerprint, + null, + null, + null, + null + ) + true + }.getOrElse { false } + } + + private fun tryResolveVmess4Kitsunebi(server: String, config: ServerConfig): Boolean { + + var result = server.replace(EConfigType.VMESS.protocolScheme, "") + val indexSplit = result.indexOf("?") + if (indexSplit > 0) { + result = result.substring(0, indexSplit) + } + result = Utils.decode(result) + + val arr1 = result.split('@') + if (arr1.count() != 2) { + return false + } + val arr21 = arr1[0].split(':') + val arr22 = arr1[1].split(':') + if (arr21.count() != 2) { + return false + } + + config.remarks = "Alien" + config.outboundBean?.settings?.vnext?.get(0)?.let { vnext -> + vnext.address = arr22[0] + vnext.port = Utils.parseInt(arr22[1]) + vnext.users[0].id = arr21[1] + vnext.users[0].security = arr21[0] + vnext.users[0].alterId = 0 + } + return true + } +} \ No newline at end of file diff --git a/v2rayng/V2rayNG/app/src/main/kotlin/com/v2ray/ang/util/fmt/WireguardFmt.kt b/v2rayng/V2rayNG/app/src/main/kotlin/com/v2ray/ang/util/fmt/WireguardFmt.kt new file mode 100644 index 0000000000..90c1ebe9ee --- /dev/null +++ b/v2rayng/V2rayNG/app/src/main/kotlin/com/v2ray/ang/util/fmt/WireguardFmt.kt @@ -0,0 +1,71 @@ +package com.v2ray.ang.util.fmt + 
+import com.v2ray.ang.AppConfig +import com.v2ray.ang.dto.EConfigType +import com.v2ray.ang.dto.ServerConfig +import com.v2ray.ang.extension.idnHost +import com.v2ray.ang.extension.removeWhiteSpace +import com.v2ray.ang.util.Utils +import java.net.URI + +object WireguardFmt { + fun parseWireguard(str: String): ServerConfig? { + val config = ServerConfig.create(EConfigType.WIREGUARD) + val uri = URI(Utils.fixIllegalUrl(str)) + config.remarks = Utils.urlDecode(uri.fragment ?: "") + + if (uri.rawQuery != null) { + val queryParam = uri.rawQuery.split("&") + .associate { it.split("=").let { (k, v) -> k to Utils.urlDecode(v) } } + + config.outboundBean?.settings?.let { wireguard -> + wireguard.secretKey = uri.userInfo + wireguard.address = + (queryParam["address"] + ?: AppConfig.WIREGUARD_LOCAL_ADDRESS_V4).removeWhiteSpace() + .split(",") + wireguard.peers?.get(0)?.publicKey = queryParam["publickey"] ?: "" + wireguard.peers?.get(0)?.endpoint = "${uri.idnHost}:${uri.port}" + wireguard.mtu = Utils.parseInt(queryParam["mtu"] ?: AppConfig.WIREGUARD_LOCAL_MTU) + wireguard.reserved = + (queryParam["reserved"] ?: "0,0,0").removeWhiteSpace().split(",") + .map { it.toInt() } + } + } + + return config + } + + fun toUri(config: ServerConfig): String { + val outbound = config.getProxyOutbound() ?: return "" + + val remark = "#" + Utils.urlEncode(config.remarks) + val dicQuery = HashMap() + dicQuery["publickey"] = + Utils.urlEncode(outbound.settings?.peers?.get(0)?.publicKey.toString()) + if (outbound.settings?.reserved != null) { + dicQuery["reserved"] = Utils.urlEncode( + Utils.removeWhiteSpace(outbound.settings?.reserved?.joinToString()) + .toString() + ) + } + dicQuery["address"] = Utils.urlEncode( + Utils.removeWhiteSpace((outbound.settings?.address as List<*>).joinToString()) + .toString() + ) + if (outbound.settings?.mtu != null) { + dicQuery["mtu"] = outbound.settings?.mtu.toString() + } + val query = "?" 
+ dicQuery.toList().joinToString( + separator = "&", + transform = { it.first + "=" + it.second }) + + val url = String.format( + "%s@%s:%s", + Utils.urlEncode(outbound.getPassword().toString()), + Utils.getIpv6Address(outbound.getServerAddress()!!), + outbound.getServerPort() + ) + return url + query + remark + } +} \ No newline at end of file diff --git a/v2rayng/V2rayNG/app/src/main/kotlin/com/v2ray/ang/viewmodel/SubViewModel.kt b/v2rayng/V2rayNG/app/src/main/kotlin/com/v2ray/ang/viewmodel/SubViewModel.kt new file mode 100644 index 0000000000..b2f5cd42d8 --- /dev/null +++ b/v2rayng/V2rayNG/app/src/main/kotlin/com/v2ray/ang/viewmodel/SubViewModel.kt @@ -0,0 +1,93 @@ +package com.v2ray.ang.viewmodel + +import android.app.Application +import android.text.TextUtils +import android.util.Log +import androidx.lifecycle.AndroidViewModel +import com.tencent.mmkv.MMKV +import com.v2ray.ang.AppConfig +import com.v2ray.ang.dto.SubscriptionItem +import com.v2ray.ang.util.AngConfigManager +import com.v2ray.ang.util.MmkvManager +import com.v2ray.ang.util.Utils +import kotlinx.coroutines.CoroutineScope +import kotlinx.coroutines.Dispatchers + +class SubViewModel(application: Application) : AndroidViewModel(application) { + private val settingsStorage by lazy { + MMKV.mmkvWithID( + MmkvManager.ID_SETTING, + MMKV.MULTI_PROCESS_MODE + ) + } + + private val tcpingTestScope by lazy { CoroutineScope(Dispatchers.IO) } + + fun updateConfigViaSubAll(): Int { + var count = 0 + try { + MmkvManager.decodeSubscriptions().forEach { + count += updateConfigViaSub(it) + } + } catch (e: Exception) { + e.printStackTrace() + return 0 + } + return count + } + + fun updateConfigViaSub(it: Pair): Int { + try { + if (TextUtils.isEmpty(it.first) + || TextUtils.isEmpty(it.second.remarks) + || TextUtils.isEmpty(it.second.url) + ) { + return 0 + } + if (!it.second.enabled) { + return 0 + } + val url = Utils.idnToASCII(it.second.url) + if (!Utils.isValidUrl(url)) { + return 0 + } + 
Log.d(AppConfig.ANG_PACKAGE, url) + var configText = try { + Utils.getUrlContentWithCustomUserAgent(url) + } catch (e: Exception) { + e.printStackTrace() + "" + } + if (configText.isEmpty()) { + configText = try { + val httpPort = Utils.parseInt( + settingsStorage?.decodeString(AppConfig.PREF_HTTP_PORT), + AppConfig.PORT_HTTP.toInt() + ) + Utils.getUrlContentWithCustomUserAgent(url, httpPort) + } catch (e: Exception) { + e.printStackTrace() + "" + } + } + if (configText.isEmpty()) { + return 0 + } + return importBatchConfig(configText, it.first, false) + } catch (e: Exception) { + e.printStackTrace() + return 0 + } + } + + fun importBatchConfig(server: String?, subid: String = "", append: Boolean): Int { + var count = AngConfigManager.importBatchConfig(server, subid, append) + if (count <= 0) { + count = AngConfigManager.importBatchConfig(Utils.decode(server!!), subid, append) + } + if (count <= 0) { + count = AngConfigManager.appendCustomConfigServer(server, subid) + } + return count + } +} diff --git a/v2rayng/V2rayNG/app/src/main/res/layout/layout_progress.xml b/v2rayng/V2rayNG/app/src/main/res/layout/layout_progress.xml new file mode 100644 index 0000000000..78ff04c7c0 --- /dev/null +++ b/v2rayng/V2rayNG/app/src/main/res/layout/layout_progress.xml @@ -0,0 +1,16 @@ + + + + + + + \ No newline at end of file diff --git a/v2rayng/V2rayNG/app/src/main/res/menu/action_sub_setting.xml b/v2rayng/V2rayNG/app/src/main/res/menu/action_sub_setting.xml index 9477de42bb..3179cb1a5e 100644 --- a/v2rayng/V2rayNG/app/src/main/res/menu/action_sub_setting.xml +++ b/v2rayng/V2rayNG/app/src/main/res/menu/action_sub_setting.xml @@ -7,13 +7,8 @@ android:title="@string/menu_item_add_config" app:showAsAction="always" /> - \ No newline at end of file diff --git a/v2rayng/V2rayNG/app/src/main/res/values-ru/strings.xml b/v2rayng/V2rayNG/app/src/main/res/values-ru/strings.xml index 1602aabab1..c2b561ba0a 100644 --- a/v2rayng/V2rayNG/app/src/main/res/values-ru/strings.xml +++ 
b/v2rayng/V2rayNG/app/src/main/res/values-ru/strings.xml @@ -149,8 +149,8 @@ Внутренняя DNS (необязательно) DNS - True delay test url (http/https) - Url + Сервис проверки времени отклика (HTTP/HTTPS) + URL Разрешать подключения из LAN Другие устройства могут подключаться, используя ваш IP-адрес, чтобы использовать прокси по протоколам SOCKS/HTTP. Используйте только в доверенной сети, чтобы избежать несанкционированного подключения. diff --git a/yass/.github/workflows/compiler.yml b/yass/.github/workflows/compiler.yml index 9ee577a0d2..effe72a704 100644 --- a/yass/.github/workflows/compiler.yml +++ b/yass/.github/workflows/compiler.yml @@ -225,7 +225,7 @@ jobs: rm -f *.tar.gz - name: Build run: | - ./tools/build --variant gui --arch ${{ matrix.arch }} --system mingw -build-test -use-tcmalloc=false \ + ./tools/build --variant gui --arch ${{ matrix.arch }} --system mingw -build-test \ --clang-path $PWD/third_party/llvm-mingw-20231128-ucrt-ubuntu-20.04-x86_64 -no-packaging - name: Populate depedencies (Tests-i686) if: ${{ matrix.arch == 'i686' }} diff --git a/yass/.gitmodules b/yass/.gitmodules index d74d6d9024..899116124d 100644 --- a/yass/.gitmodules +++ b/yass/.gitmodules @@ -65,3 +65,6 @@ [submodule "third_party/gperftools"] path = third_party/gperftools url = https://github.com/Chilledheart/gperftools +[submodule "third_party/mimalloc"] + path = third_party/mimalloc + url = https://github.com/Chilledheart/mimalloc diff --git a/yass/CMakeLists.txt b/yass/CMakeLists.txt index aff9be8588..287b47a6ca 100644 --- a/yass/CMakeLists.txt +++ b/yass/CMakeLists.txt @@ -434,11 +434,21 @@ option(BUILD_SHARED_LIBS "Build with shared libraries." OFF) option(BUILD_TESTS "Build with test." 
OFF) option(OPTIMIZED_PROTOC "Force protobuf compiler to be built with optimization" OFF) -option(USE_TCMALLOC "Build with tcmalloc" OFF) +cmake_dependent_option( + USE_TCMALLOC "Build with tcmalloc" OFF + "LINUX" OFF) cmake_dependent_option( USE_SYSTEM_TCMALLOC "Build with system or vendored tcmalloc" OFF USE_TCMALLOC OFF) +cmake_dependent_option( + USE_MIMALLOC "Use mimalloc" OFF + "NOT USE_TCMALLOC; NOT WIN32; NOT IOS; NOT ANDROID; NOT OHOS" OFF) + +cmake_dependent_option( + USE_SYSTEM_MIMALLOC "Use system or vendored mimalloc" OFF + USE_MIMALLOC OFF) + option(USE_LIBCXX "Build with libc++" ON) option(USE_NGHTTP2 "Build with libnghttp2" ON) @@ -1908,6 +1918,9 @@ endif() if (NOT WIN32 AND NOT APPLE) check_library_exists(c pipe2 "" HAVE_PIPE2) check_library_exists(c dup3 "" HAVE_DUP3) + # for android mallinfo2 is alias to mallinfo + check_symbol_exists(mallinfo malloc.h HAVE_MALLINFO) + check_symbol_exists(mallinfo2 malloc.h HAVE_MALLINFO2) endif() if (HAVE_PIPE2) add_definitions(-DHAVE_PIPE2) @@ -1915,6 +1928,12 @@ endif() if (HAVE_DUP3) add_definitions(-DHAVE_DUP3) endif() +if (HAVE_MALLINFO) + add_definitions(-DHAVE_MALLINFO) +endif() +if (HAVE_MALLINFO2) + add_definitions(-DHAVE_MALLINFO2) +endif() # ***************************************************************************************** # Core Support Libraries @@ -2320,24 +2339,10 @@ set(SUPPORT_LIBS # only support platform: linux # mac: __thread limitation # + if (USE_TCMALLOC AND NOT USE_SYSTEM_TCMALLOC) - if (WIN32 OR CMAKE_SYSTEM_NAME STREQUAL "Linux" OR CMAKE_SYSTEM_NAME STREQUAL "FreeBSD") - if (NOT (OS_X86 OR OS_X64 OR OS_ARM OR OS_AARCH64 OR OS_MIPS OR OS_MIPS64 OR OS_RISCV64 OR OS_LOONGARCH64)) - message(WARNING "tcmalloc: arch ${CMAKE_SYSTEM_PROCESSOR} is not supported, disabling...") - set(USE_TCMALLOC OFF) - endif() - # FIXME tcmalloc's preamble patcher doesn't support arm64 assembly - if (WIN32 AND OS_AARCH64) - message(WARNING "tcmalloc: arm64 windows is not supported, disabling...") - 
set(USE_TCMALLOC OFF) - endif() - # FIXME windows binaries doesn't pass UtilsTest.GetHomeDir for both mingw and msvc builds - if (WIN32) - message(WARNING "tcmalloc: mingw and msvc builds are not supported, disabling...") - set(USE_TCMALLOC OFF) - endif() - else() - message(WARNING "tcmalloc: only Linux and FreeBSD are supported, disabling...") + if (NOT (OS_X86 OR OS_X64 OR OS_ARM OR OS_AARCH64 OR OS_MIPS OR OS_MIPS64 OR OS_RISCV64 OR OS_LOONGARCH64)) + message(WARNING "tcmalloc: arch ${CMAKE_SYSTEM_PROCESSOR} is not supported, disabling...") set(USE_TCMALLOC OFF) endif() endif() @@ -2389,6 +2394,7 @@ elseif (USE_TCMALLOC) ${SUPPORT_DEFINITIONS} ) set(SUPPORT_INCLUDE_DIRS + ${CMAKE_CURRENT_BINARY_DIR}/third_party/gperftools third_party/gperftools/src ${SUPPORT_INCLUDE_DIRS} ) @@ -2406,6 +2412,69 @@ elseif (USE_TCMALLOC) endif() endif() +# ***************************************************************************************** +# mimalloc Library +# ***************************************************************************************** + +if (USE_SYSTEM_MIMALLOC) + find_package(PkgConfig) + if (PKG_CONFIG_FOUND) + pkg_check_modules(MIMALLOC mimalloc) + endif() + if(NOT MIMALLOC_FOUND) + message(STATUS "System mimalloc not found, using bundled one") + set(USE_SYSTEM_MIMALLOC FALSE) + endif() + if (USE_LIBCXX) + message(STATUS "Cannot use system mimalloc with custom libc++, using bundled one") + set(USE_SYSTEM_MIMALLOC FALSE) + endif() +endif() + +# By default, we build a bundled mimalloc and statically-link it to +# mold. If you want to dynamically link to the system's +# libmimalloc.so, pass -DUSE_SYSTEM_MIMALLOC=ON. 
+if (USE_MIMALLOC AND USE_SYSTEM_MIMALLOC) + message(STATUS "Compiling with system tcmalloc support") + list(APPEND YASS_APP_FEATURES "system mimalloc") + + set(MIMALLOC_LIB mimalloc) + + set(SUPPORT_DEFINITIONS + HAVE_MIMALLOC + ${SUPPORT_DEFINITIONS} + ) + set(SUPPORT_LIBS + ${SUPPORT_LIBS} + ${MIMALLOC_LIB} + ) + +elseif(USE_MIMALLOC) + message(STATUS "Compiling with bundled mimalloc support") + + set(MI_BUILD_STATIC ON CACHE INTERNAL "") + set(MI_BUILD_SHARED OFF CACHE INTERNAL "") + set(MI_BUILD_OBJECT OFF CACHE INTERNAL "") + set(MI_BUILD_TESTS OFF CACHE INTERNAL "") + set(MI_ENABLE_INSTALL OFF CACHE INTERNAL "") + set(MI_LIBC_MUSL "${USE_MUSL}" CACHE INTERNAL "") + add_subdirectory(third_party/mimalloc EXCLUDE_FROM_ALL) + target_compile_definitions(mimalloc-static PRIVATE MI_USE_ENVIRON=0) + + set(MIMALLOC_LIB mimalloc-static) + + set(SUPPORT_DEFINITIONS + HAVE_MIMALLOC + ${SUPPORT_DEFINITIONS} + ) + set(SUPPORT_LIBS + ${SUPPORT_LIBS} + ${MIMALLOC_LIB} + ) + + list(APPEND YASS_APP_FEATURES "mimalloc") +endif() + # ***************************************************************************************** # re2 Library # ***************************************************************************************** @@ -2657,9 +2726,6 @@ foreach(source ${BCM_ASM_SOURCES}) set_source_files_properties(${source} PROPERTIES GENERATED true) endforeach() -set_target_properties(bssl PROPERTIES EXCLUDE_FROM_ALL TRUE) -set_target_properties(decrepit PROPERTIES EXCLUDE_FROM_ALL TRUE) - if (BORINGSSL_BUILD_TESTS) add_custom_target(check_boringssl COMMAND ${GO_EXECUTABLE} run util/all_tests.go -build-dir @@ -3716,6 +3782,10 @@ if (USE_TCMALLOC) target_link_libraries(yass_core PUBLIC ${TCMALLOC_LIB}) endif() +if (USE_MIMALLOC) + target_link_libraries(yass_core PUBLIC ${MIMALLOC_LIB}) +endif() + target_include_directories(yass_core PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/third_party/googleurl-override ${CMAKE_CURRENT_SOURCE_DIR}/third_party/googleurl/polyfills @@ -4640,6 +4710,7 @@ 
if(NOT CMAKE_SKIP_INSTALL_RULES) file(READ third_party/gperftools/COPYING _TCMALLOC_LICENSE) file(READ third_party/tun2proxy/LICENSE _TUN2PROXY_LICENSE) file(READ third_party/zlib/LICENSE _ZLIB_LICENSE) + file(READ third_party/mimalloc/LICENSE _MIMALLOC_LICENSE) set(LICENSE_FILE "${CMAKE_CURRENT_BINARY_DIR}/LICENSE") configure_file("LICENSE.cmake.in" "${LICENSE_FILE}" @ONLY) @@ -4662,6 +4733,7 @@ if (BUILD_TESTS) src/net/cipher_test.cpp src/net/c-ares_test.cpp src/net/padding_test.cpp + src/net/dns_addrinfo_helper_test.cpp src/net/dns_message_test.cpp src/net/doh_resolver_test.cpp src/net/dot_resolver_test.cpp diff --git a/yass/LICENSE.cmake.in b/yass/LICENSE.cmake.in index e1e78d6a96..38b8e88857 100644 --- a/yass/LICENSE.cmake.in +++ b/yass/LICENSE.cmake.in @@ -79,3 +79,7 @@ === zlib == @_ZLIB_LICENSE@ + +=== mimalloc == + +@_MIMALLOC_LICENSE@ diff --git a/yass/src/cli/cli.cpp b/yass/src/cli/cli.cpp index a64071ae68..8fa6499cf8 100644 --- a/yass/src/cli/cli.cpp +++ b/yass/src/cli/cli.cpp @@ -178,14 +178,14 @@ int main(int argc, const char* argv[]) { #ifdef SIGQUIT signals.add(SIGQUIT, ec); #endif -#if defined(HAVE_TCMALLOC) && defined(SIGUSR1) +#if defined(SIGUSR1) signals.add(SIGUSR1, ec); #endif std::function cb; cb = [&](asio::error_code /*ec*/, int signal_number) { -#if defined(HAVE_TCMALLOC) && defined(SIGUSR1) +#if defined(SIGUSR1) if (signal_number == SIGUSR1) { - PrintTcmallocStats(); + PrintMallocStats(); signals.async_wait(cb); return; } diff --git a/yass/src/config/config_version.cpp b/yass/src/config/config_version.cpp index 9d68a76571..389a23dd8f 100644 --- a/yass/src/config/config_version.cpp +++ b/yass/src/config/config_version.cpp @@ -15,6 +15,14 @@ #include "feature.h" #include "version.h" +#ifdef HAVE_TCMALLOC +#include +#endif + +#ifdef HAVE_MIMALLOC +#include +#endif + namespace config { static void ParseConfigFileOption(int argc, const char** argv) { @@ -59,6 +67,12 @@ static void ParseConfigFileOption(int argc, const char** argv) { std::cout 
<< absl::flags_internal::ShortProgramInvocationName() << " " << YASS_APP_TAG << std::endl; std::cout << "Last Change: " << YASS_APP_LAST_CHANGE << std::endl; std::cout << "Features: " << YASS_APP_FEATURES << std::endl; +#ifdef HAVE_TCMALLOC + std::cerr << "TCMALLOC: " << tc_version(nullptr, nullptr, nullptr) << std::endl; +#endif +#ifdef HAVE_MIMALLOC + std::cerr << "MIMALLOC: " << mi_version() << std::endl; +#endif #ifndef NDEBUG std::cout << "Debug build (NDEBUG not #defined)" << std::endl; #endif @@ -72,6 +86,12 @@ static void ParseConfigFileOption(int argc, const char** argv) { std::cerr << "Application starting: " << YASS_APP_TAG << std::endl; std::cerr << "Last Change: " << YASS_APP_LAST_CHANGE << std::endl; std::cerr << "Features: " << YASS_APP_FEATURES << std::endl; +#ifdef HAVE_TCMALLOC + std::cerr << "TCMALLOC: " << tc_version(nullptr, nullptr, nullptr) << std::endl; +#endif +#ifdef HAVE_MIMALLOC + std::cerr << "MIMALLOC: " << mi_version() << std::endl; +#endif #ifndef NDEBUG std::cerr << "Debug build (NDEBUG not #defined)\n" << std::endl; #endif @@ -96,6 +116,12 @@ void ReadConfigFileAndArguments(int argc, const char** argv) { LOG(WARNING) << "Application starting: " << YASS_APP_TAG << " type: " << ProgramTypeToStr(pType); LOG(WARNING) << "Last Change: " << YASS_APP_LAST_CHANGE; LOG(WARNING) << "Features: " << YASS_APP_FEATURES; +#ifdef HAVE_TCMALLOC + LOG(WARNING) << "TCMALLOC: " << tc_version(nullptr, nullptr, nullptr); +#endif +#ifdef HAVE_MIMALLOC + LOG(WARNING) << "MIMALLOC: " << mi_version(); +#endif #ifndef NDEBUG LOG(WARNING) << "Debug build (NDEBUG not #defined)\n"; #endif diff --git a/yass/src/core/utils.cpp b/yass/src/core/utils.cpp index 04ef400400..7e06af615e 100644 --- a/yass/src/core/utils.cpp +++ b/yass/src/core/utils.cpp @@ -18,6 +18,14 @@ #include #endif +#ifdef HAVE_MIMALLOC +#include +#endif + +#if defined(HAVE_MALLINFO) || defined(HAVE_MALLINFO2) || BUILDFLAG(IS_FREEBSD) +#include +#endif + #include #include #include @@ -385,8 +393,8 
@@ bool IsProgramConsole(int fd) { } #endif +void PrintMallocStats() { #ifdef HAVE_TCMALLOC -void PrintTcmallocStats() { constexpr const char* properties[] = { "generic.current_allocated_bytes", "generic.heap_size", @@ -402,8 +410,39 @@ void PrintTcmallocStats() { LOG(ERROR) << "TCMALLOC: " << property << " = " << size << " bytes"; } } -} +#elif defined(HAVE_MIMALLOC) + auto printer = [](const char* msg, void* arg) { LOG(ERROR) << "MIMALLOC: " << msg; }; + mi_stats_print_out(printer, nullptr); +#elif defined(HAVE_MALLINFO2) && !defined(MEMORY_SANITIZER) + struct mallinfo2 info = mallinfo2(); + LOG(ERROR) << "MALLOC: non-mmapped space allocated from system: " << info.arena; + LOG(ERROR) << "MALLOC: number of free chunks: " << info.ordblks; + LOG(ERROR) << "MALLOC: number of fastbin blocks: " << info.smblks; + LOG(ERROR) << "MALLOC: number of mmapped regions: " << info.hblks; + LOG(ERROR) << "MALLOC: space in mmapped regions: " << info.hblkhd; + LOG(ERROR) << "MALLOC: space available in freed fastbin blocks: " << info.fsmblks; + LOG(ERROR) << "MALLOC: total allocated space: " << info.uordblks; + LOG(ERROR) << "MALLOC: total free space: " << info.fordblks; + LOG(ERROR) << "MALLOC: top-most, releasable (via malloc_trim) space: " << info.keepcost; +#elif defined(HAVE_MALLINFO) && !defined(MEMORY_SANITIZER) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wdeprecated-declarations" + struct mallinfo info = mallinfo(); +#pragma GCC diagnostic pop + LOG(ERROR) << "MALLOC: non-mmapped space allocated from system: " << info.arena; + LOG(ERROR) << "MALLOC: number of free chunks: " << info.ordblks; + LOG(ERROR) << "MALLOC: number of fastbin blocks: " << info.smblks; + LOG(ERROR) << "MALLOC: number of mmapped regions: " << info.hblks; + LOG(ERROR) << "MALLOC: space in mmapped regions: " << info.hblkhd; + LOG(ERROR) << "MALLOC: space available in freed fastbin blocks: " << info.fsmblks; + LOG(ERROR) << "MALLOC: total allocated space: " << info.uordblks; + LOG(ERROR) 
<< "MALLOC: total free space: " << info.fordblks; + LOG(ERROR) << "MALLOC: top-most, releasable (via malloc_trim) space: " << info.keepcost; +#elif BUILDFLAG(IS_FREEBSD) + auto printer = [](void* data, const char* msg) { LOG(ERROR) << "MALLOC: " << msg; }; + malloc_stats_print(printer, nullptr, nullptr); #endif +} template static void HumanReadableByteCountBinT(T* ss, uint64_t bytes) { diff --git a/yass/src/core/utils.hpp b/yass/src/core/utils.hpp index 1c7b4c7d26..8a13a690b1 100644 --- a/yass/src/core/utils.hpp +++ b/yass/src/core/utils.hpp @@ -212,9 +212,7 @@ PlatformFile OpenReadFile(const std::string& path); PlatformFile OpenReadFile(const std::wstring& path); #endif -#ifdef HAVE_TCMALLOC -void PrintTcmallocStats(); -#endif +void PrintMallocStats(); #ifdef __APPLE__ #if BUILDFLAG(IS_IOS) diff --git a/yass/src/net/dns_addrinfo_helper.cpp b/yass/src/net/dns_addrinfo_helper.cpp index a1055b9681..f32854129d 100644 --- a/yass/src/net/dns_addrinfo_helper.cpp +++ b/yass/src/net/dns_addrinfo_helper.cpp @@ -20,22 +20,17 @@ namespace net { using namespace dns_message; +using namespace std::string_view_literals; + /* RFC6761 6.3 says : The domain "localhost." and any names falling within ".localhost." 
*/ -bool is_localhost(const std::string& host) { +bool is_localhost(std::string_view host) { if (host.empty()) { return false; } - if (host == "localhost") { + if (host == "localhost"sv) { return true; } - constexpr const char suffix[] = ".localhost"; - constexpr const int suffixLength = std::size(suffix) - 1; - static_assert(suffixLength == 10); - if (host.size() < suffixLength) { - return false; - } - - return 0 == host.compare(host.size() - suffixLength, suffixLength, suffix, suffixLength); + return host.ends_with(".localhost"sv); } // TODO more strictly we should load loopback address from system first diff --git a/yass/src/net/dns_addrinfo_helper.hpp b/yass/src/net/dns_addrinfo_helper.hpp index 85e523da29..ec913874e1 100644 --- a/yass/src/net/dns_addrinfo_helper.hpp +++ b/yass/src/net/dns_addrinfo_helper.hpp @@ -11,7 +11,7 @@ extern "C" struct addrinfo; namespace net { -bool is_localhost(const std::string& host); +bool is_localhost(std::string_view host); struct addrinfo* addrinfo_loopback(bool is_ipv6, int port); struct addrinfo* addrinfo_dup(bool is_ipv6, const net::dns_message::response& response, int port); void addrinfo_freedup(struct addrinfo* addrinfo); diff --git a/yass/src/net/dns_addrinfo_helper_test.cpp b/yass/src/net/dns_addrinfo_helper_test.cpp new file mode 100644 index 0000000000..02a4ad4961 --- /dev/null +++ b/yass/src/net/dns_addrinfo_helper_test.cpp @@ -0,0 +1,51 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Chilledheart */ + +#include + +#include "net/dns_addrinfo_helper.hpp" + +using namespace net; + +TEST(DnsAddrInfoHelper, IsLocalhost) { + EXPECT_TRUE(is_localhost("localhost")); + EXPECT_FALSE(is_localhost("badlocalhost")); + EXPECT_FALSE(is_localhost("localhostbad")); + EXPECT_TRUE(is_localhost(".localhost")); + EXPECT_TRUE(is_localhost("good.localhost")); + EXPECT_FALSE(is_localhost(".localhostbad")); + EXPECT_FALSE(is_localhost(".badlocalhost")); + EXPECT_FALSE(is_localhost(".bad")); +} + +TEST(DnsAddrInfoHelper, 
LoopbackIpv4) { + struct addrinfo* addr = addrinfo_loopback(false, 80); + EXPECT_EQ(AF_INET, addr->ai_family); + ASSERT_EQ(sizeof(struct sockaddr_in), (size_t)addr->ai_addrlen); + EXPECT_EQ(nullptr, addr->ai_canonname); + EXPECT_EQ(nullptr, addr->ai_next); + struct sockaddr_in* in = (struct sockaddr_in*)addr->ai_addr; + EXPECT_EQ(AF_INET, in->sin_family); + EXPECT_EQ(htons(80), in->sin_port); + asio::ip::address_v4::bytes_type addrv; + static_assert(sizeof(struct in_addr) == sizeof(asio::ip::address_v4::bytes_type)); + memcpy(&addrv, &in->sin_addr, sizeof(asio::ip::address_v4::bytes_type)); + EXPECT_TRUE(asio::ip::make_address_v4(addrv).is_loopback()); + addrinfo_freedup(addr); +} + +TEST(DnsAddrInfoHelper, LoopbackIpv6) { + struct addrinfo* addr = addrinfo_loopback(true, 80); + EXPECT_EQ(AF_INET6, addr->ai_family); + ASSERT_EQ(sizeof(struct sockaddr_in6), (size_t)addr->ai_addrlen); + EXPECT_EQ(nullptr, addr->ai_canonname); + EXPECT_EQ(nullptr, addr->ai_next); + struct sockaddr_in6* in6 = (struct sockaddr_in6*)addr->ai_addr; + EXPECT_EQ(AF_INET6, in6->sin6_family); + EXPECT_EQ(htons(80), in6->sin6_port); + asio::ip::address_v6::bytes_type addrv6; + static_assert(sizeof(struct in6_addr) == sizeof(asio::ip::address_v6::bytes_type)); + memcpy(&addrv6, &in6->sin6_addr, sizeof(asio::ip::address_v6::bytes_type)); + EXPECT_TRUE(asio::ip::make_address_v6(addrv6).is_loopback()); + addrinfo_freedup(addr); +} diff --git a/yass/src/net/doh_request.cpp b/yass/src/net/doh_request.cpp index 617b352423..6a404e4860 100644 --- a/yass/src/net/doh_request.cpp +++ b/yass/src/net/doh_request.cpp @@ -39,7 +39,7 @@ void DoHRequest::DoRequest(dns_message::DNStype dns_type, const std::string& hos cb_ = std::move(cb); if (is_localhost(host_)) { - VLOG(3) << "DoH Request: is_localhost host: " << host_; + VLOG(3) << "DoH Request: localhost host: " << host_; scoped_refptr self(this); asio::post(io_context_, [this, self]() { struct addrinfo* addrinfo = addrinfo_loopback(dns_type_ == 
dns_message::DNS_TYPE_AAAA, port_); diff --git a/yass/src/net/dot_request.cpp b/yass/src/net/dot_request.cpp index 6fc9cc9b72..e697c3812f 100644 --- a/yass/src/net/dot_request.cpp +++ b/yass/src/net/dot_request.cpp @@ -38,7 +38,7 @@ void DoTRequest::DoRequest(dns_message::DNStype dns_type, const std::string& hos cb_ = std::move(cb); if (is_localhost(host_)) { - VLOG(3) << "DoT Request: is_localhost host: " << host_; + VLOG(3) << "DoT Request: localhost host: " << host_; scoped_refptr self(this); asio::post(io_context_, [this, self]() { struct addrinfo* addrinfo = addrinfo_loopback(dns_type_ == dns_message::DNS_TYPE_AAAA, port_); diff --git a/yass/src/server/server.cpp b/yass/src/server/server.cpp index ee8cb007cc..14197c5188 100644 --- a/yass/src/server/server.cpp +++ b/yass/src/server/server.cpp @@ -148,14 +148,14 @@ int main(int argc, const char* argv[]) { #ifdef SIGQUIT signals.add(SIGQUIT, ec); #endif -#if defined(HAVE_TCMALLOC) && defined(SIGUSR1) +#if defined(SIGUSR1) signals.add(SIGUSR1, ec); #endif std::function cb; cb = [&](asio::error_code /*ec*/, int signal_number) { -#if defined(HAVE_TCMALLOC) && defined(SIGUSR1) +#if defined(SIGUSR1) if (signal_number == SIGUSR1) { - PrintTcmallocStats(); + PrintMallocStats(); signals.async_wait(cb); return; } diff --git a/yass/src/ss_test.cpp b/yass/src/ss_test.cpp index 2c9a673cd1..079d74fd94 100644 --- a/yass/src/ss_test.cpp +++ b/yass/src/ss_test.cpp @@ -800,9 +800,7 @@ int main(int argc, char** argv) { int ret = RUN_ALL_TESTS(); -#ifdef HAVE_TCMALLOC - PrintTcmallocStats(); -#endif + PrintMallocStats(); #ifdef HAVE_CURL curl_global_cleanup(); diff --git a/yass/third_party/benchmark-winxp-fix.patch b/yass/third_party/benchmark-winxp-fix.patch index e28a84c80a..ab508e231d 100644 --- a/yass/third_party/benchmark-winxp-fix.patch +++ b/yass/third_party/benchmark-winxp-fix.patch @@ -1,21 +1,15 @@ -From f9dae25d4721574d55a75e79c29615f3407717a2 Mon Sep 17 00:00:00 2001 -From: Chilledheart -Date: Fri, 21 Jul 2023 16:00:26 
+0800 -Subject: [PATCH] allow windows xp build - ---- - src/sysinfo.cc | 8 +++++--- - src/timers.cc | 2 +- - 2 files changed, 6 insertions(+), 4 deletions(-) - diff --git a/src/sysinfo.cc b/src/sysinfo.cc -index 4578cb0..3ad29eb 100644 +index 7261e2a..962ad00 100644 --- a/src/sysinfo.cc +++ b/src/sysinfo.cc -@@ -15,12 +15,15 @@ +@@ -15,16 +15,15 @@ #include "internal_macros.h" #ifdef BENCHMARK_OS_WINDOWS +-#if !defined(WINVER) || WINVER < 0x0600 +-#undef WINVER +-#define WINVER 0x0600 +-#endif // WINVER handling +struct IUnknown; #include #undef StrCat // Don't let StrCat in string_util.h be renamed to lstrcatA @@ -29,7 +23,7 @@ index 4578cb0..3ad29eb 100644 #else #include #if !defined(BENCHMARK_OS_FUCHSIA) && !defined(BENCHMARK_OS_QURT) -@@ -733,8 +736,7 @@ double GetCPUCyclesPerSecond(CPUInfo::Scaling scaling) { +@@ -748,8 +747,7 @@ double GetCPUCyclesPerSecond(CPUInfo::Scaling scaling) { // In NT, read MHz from the registry. If we fail to do so or we're in win9x // then make a crude estimate. 
DWORD data, data_size = sizeof(data); @@ -40,7 +34,7 @@ index 4578cb0..3ad29eb 100644 "HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0", "~MHz", nullptr, &data, &data_size))) diff --git a/src/timers.cc b/src/timers.cc -index b23feea..a871002 100644 +index d0821f3..3390e38 100644 --- a/src/timers.cc +++ b/src/timers.cc @@ -17,9 +17,9 @@ @@ -54,6 +48,3 @@ index b23feea..a871002 100644 #include #else #include --- -2.40.1.windows.1 - diff --git a/yass/third_party/benchmark/.clang-tidy b/yass/third_party/benchmark/.clang-tidy index 56938a598d..1e229e582e 100644 --- a/yass/third_party/benchmark/.clang-tidy +++ b/yass/third_party/benchmark/.clang-tidy @@ -2,6 +2,5 @@ Checks: 'clang-analyzer-*,readability-redundant-*,performance-*' WarningsAsErrors: 'clang-analyzer-*,readability-redundant-*,performance-*' HeaderFilterRegex: '.*' -AnalyzeTemporaryDtors: false FormatStyle: none User: user diff --git a/yass/third_party/benchmark/.github/install_bazel.sh b/yass/third_party/benchmark/.github/install_bazel.sh index 2b1f4e726c..1b0d63c98e 100644 --- a/yass/third_party/benchmark/.github/install_bazel.sh +++ b/yass/third_party/benchmark/.github/install_bazel.sh @@ -3,11 +3,10 @@ if ! bazel version; then if [ "$arch" == "aarch64" ]; then arch="arm64" fi - echo "Installing wget and downloading $arch Bazel binary from GitHub releases." - yum install -y wget - wget "https://github.com/bazelbuild/bazel/releases/download/6.3.0/bazel-6.3.0-linux-$arch" -O /usr/local/bin/bazel - chmod +x /usr/local/bin/bazel + echo "Downloading $arch Bazel binary from GitHub releases." 
+ curl -L -o $HOME/bin/bazel --create-dirs "https://github.com/bazelbuild/bazel/releases/download/7.1.1/bazel-7.1.1-linux-$arch" + chmod +x $HOME/bin/bazel else - # bazel is installed for the correct architecture + # Bazel is installed for the correct architecture exit 0 fi diff --git a/yass/third_party/benchmark/.github/libcxx-setup.sh b/yass/third_party/benchmark/.github/libcxx-setup.sh index 8773b9c407..9aaf96af4b 100755 --- a/yass/third_party/benchmark/.github/libcxx-setup.sh +++ b/yass/third_party/benchmark/.github/libcxx-setup.sh @@ -3,7 +3,7 @@ set -e # Checkout LLVM sources -git clone --depth=1 https://github.com/llvm/llvm-project.git llvm-project +git clone --depth=1 --branch llvmorg-16.0.6 https://github.com/llvm/llvm-project.git llvm-project ## Setup libc++ options if [ -z "$BUILD_32_BITS" ]; then diff --git a/yass/third_party/benchmark/.github/workflows/bazel.yml b/yass/third_party/benchmark/.github/workflows/bazel.yml index 1cdc38c97e..a669cda84c 100644 --- a/yass/third_party/benchmark/.github/workflows/bazel.yml +++ b/yass/third_party/benchmark/.github/workflows/bazel.yml @@ -14,7 +14,7 @@ jobs: os: [ubuntu-latest, macos-latest, windows-latest] bzlmod: [false, true] steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: mount bazel cache uses: actions/cache@v3 diff --git a/yass/third_party/benchmark/.github/workflows/build-and-test.yml b/yass/third_party/benchmark/.github/workflows/build-and-test.yml index b35200a000..95e0482aea 100644 --- a/yass/third_party/benchmark/.github/workflows/build-and-test.yml +++ b/yass/third_party/benchmark/.github/workflows/build-and-test.yml @@ -102,13 +102,60 @@ jobs: - name: build run: cmake --build _build/ --config ${{ matrix.build_type }} - - name: setup test environment - # Make sure gmock and benchmark DLLs can be found - run: > - echo "$((Get-Item .).FullName)/_build/bin/${{ matrix.build_type }}" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append; - echo "$((Get-Item 
.).FullName)/_build/src/${{ matrix.build_type }}" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append; - - name: test run: ctest --test-dir _build/ -C ${{ matrix.build_type }} -VV + msys2: + name: ${{ matrix.os }}.${{ matrix.build_type }}.${{ matrix.lib }}.${{ matrix.msys2.msystem }} + runs-on: ${{ matrix.os }} + defaults: + run: + shell: msys2 {0} + strategy: + fail-fast: false + matrix: + os: [ windows-latest ] + msys2: + - { msystem: MINGW64, arch: x86_64, family: GNU, compiler: g++ } + - { msystem: MINGW32, arch: i686, family: GNU, compiler: g++ } + - { msystem: CLANG64, arch: x86_64, family: LLVM, compiler: clang++ } + - { msystem: CLANG32, arch: i686, family: LLVM, compiler: clang++ } + - { msystem: UCRT64, arch: x86_64, family: GNU, compiler: g++ } + build_type: + - Debug + - Release + lib: + - shared + - static + steps: + - uses: actions/checkout@v2 + + - name: Install Base Dependencies + uses: msys2/setup-msys2@v2 + with: + cache: false + msystem: ${{ matrix.msys2.msystem }} + update: true + install: >- + git + base-devel + pacboy: >- + cc:p + cmake:p + ninja:p + + - name: configure cmake + env: + CXX: ${{ matrix.msys2.compiler }} + run: > + cmake -S . 
-B _build/ + -GNinja + -DBENCHMARK_DOWNLOAD_DEPENDENCIES=ON + -DBUILD_SHARED_LIBS=${{ matrix.lib == 'shared' }} + + - name: build + run: cmake --build _build/ --config ${{ matrix.build_type }} + + - name: test + run: ctest --test-dir _build/ -C ${{ matrix.build_type }} -VV diff --git a/yass/third_party/benchmark/.github/workflows/clang-format-lint.yml b/yass/third_party/benchmark/.github/workflows/clang-format-lint.yml index 77ce1f8cd4..328fe36cc7 100644 --- a/yass/third_party/benchmark/.github/workflows/clang-format-lint.yml +++ b/yass/third_party/benchmark/.github/workflows/clang-format-lint.yml @@ -4,7 +4,8 @@ on: pull_request: {} jobs: - build: + job: + name: check-clang-format runs-on: ubuntu-latest steps: diff --git a/yass/third_party/benchmark/.github/workflows/pre-commit.yml b/yass/third_party/benchmark/.github/workflows/pre-commit.yml new file mode 100644 index 0000000000..5d65b9948f --- /dev/null +++ b/yass/third_party/benchmark/.github/workflows/pre-commit.yml @@ -0,0 +1,38 @@ +name: python + Bazel pre-commit checks + +on: + push: + branches: [ main ] + pull_request: + branches: [ main ] + +jobs: + pre-commit: + runs-on: ubuntu-latest + env: + MYPY_CACHE_DIR: "${{ github.workspace }}/.cache/mypy" + RUFF_CACHE_DIR: "${{ github.workspace }}/.cache/ruff" + PRE_COMMIT_HOME: "${{ github.workspace }}/.cache/pre-commit" + + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: 3.11 + cache: pip + cache-dependency-path: pyproject.toml + - name: Install dependencies + run: python -m pip install ".[dev]" + - name: Cache pre-commit tools + uses: actions/cache@v3 + with: + path: | + ${{ env.MYPY_CACHE_DIR }} + ${{ env.RUFF_CACHE_DIR }} + ${{ env.PRE_COMMIT_HOME }} + key: ${{ runner.os }}-${{ hashFiles('.pre-commit-config.yaml') }}-linter-cache + - name: Run pre-commit checks + run: pre-commit run --all-files --verbose --show-diff-on-failure diff --git 
a/yass/third_party/benchmark/.github/workflows/pylint.yml b/yass/third_party/benchmark/.github/workflows/pylint.yml deleted file mode 100644 index c6939b50f3..0000000000 --- a/yass/third_party/benchmark/.github/workflows/pylint.yml +++ /dev/null @@ -1,28 +0,0 @@ -name: pylint - -on: - push: - branches: [ main ] - pull_request: - branches: [ main ] - -jobs: - pylint: - - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v3 - - name: Set up Python 3.8 - uses: actions/setup-python@v1 - with: - python-version: 3.8 - - - name: Install dependencies - run: | - python -m pip install --upgrade pip - pip install pylint pylint-exit conan - - - name: Run pylint - run: | - pylint `find . -name '*.py'|xargs` || pylint-exit $? diff --git a/yass/third_party/benchmark/.github/workflows/test_bindings.yml b/yass/third_party/benchmark/.github/workflows/test_bindings.yml index e01bb7b014..436a8f90e5 100644 --- a/yass/third_party/benchmark/.github/workflows/test_bindings.yml +++ b/yass/third_party/benchmark/.github/workflows/test_bindings.yml @@ -13,17 +13,18 @@ jobs: strategy: fail-fast: false matrix: - os: [ ubuntu-latest, macos-latest, windows-2019 ] + os: [ ubuntu-latest, macos-latest, windows-latest ] steps: - - uses: actions/checkout@v3 - - name: Set up Python - uses: actions/setup-python@v4 + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + - name: Set up Python 3.11 + uses: actions/setup-python@v5 with: python-version: 3.11 - - name: Install GBM Python bindings on ${{ matrix.os}} - run: - python -m pip install wheel . + - name: Install GBM Python bindings on ${{ matrix.os }} + run: python -m pip install . 
- name: Run bindings example on ${{ matrix.os }} run: python bindings/python/google_benchmark/example.py diff --git a/yass/third_party/benchmark/.github/workflows/wheels.yml b/yass/third_party/benchmark/.github/workflows/wheels.yml index 1f73bff4b2..8b772cd8b9 100644 --- a/yass/third_party/benchmark/.github/workflows/wheels.yml +++ b/yass/third_party/benchmark/.github/workflows/wheels.yml @@ -12,20 +12,19 @@ jobs: runs-on: ubuntu-latest steps: - name: Check out repo - uses: actions/checkout@v3 - - - name: Install Python 3.11 - uses: actions/setup-python@v4 + uses: actions/checkout@v4 with: - python-version: 3.11 - - - name: Build and check sdist - run: | - python setup.py sdist - - name: Upload sdist - uses: actions/upload-artifact@v3 + fetch-depth: 0 + - name: Install Python 3.12 + uses: actions/setup-python@v5 with: - name: dist + python-version: 3.12 + - run: python -m pip install build + - name: Build sdist + run: python -m build --sdist + - uses: actions/upload-artifact@v4 + with: + name: dist-sdist path: dist/*.tar.gz build_wheels: @@ -33,47 +32,59 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - os: [ubuntu-latest, macos-latest, windows-2019] + os: [ubuntu-latest, macos-13, macos-14, windows-latest] steps: - name: Check out Google Benchmark - uses: actions/checkout@v3 + uses: actions/checkout@v4 + with: + fetch-depth: 0 - name: Set up QEMU if: runner.os == 'Linux' - uses: docker/setup-qemu-action@v2 + uses: docker/setup-qemu-action@v3 with: platforms: all - name: Build wheels on ${{ matrix.os }} using cibuildwheel - uses: pypa/cibuildwheel@v2.14.1 + uses: pypa/cibuildwheel@v2.17 env: - CIBW_BUILD: 'cp38-* cp39-* cp310-* cp311-*' + CIBW_BUILD: "cp38-* cp39-* cp310-* cp311-* cp312-*" CIBW_SKIP: "*-musllinux_*" - CIBW_TEST_SKIP: "*-macosx_arm64" - CIBW_ARCHS_LINUX: x86_64 aarch64 - CIBW_ARCHS_MACOS: x86_64 arm64 - CIBW_ARCHS_WINDOWS: AMD64 + CIBW_TEST_SKIP: "cp38-macosx_*:arm64" + CIBW_ARCHS_LINUX: auto64 aarch64 + CIBW_ARCHS_WINDOWS: auto64 
CIBW_BEFORE_ALL_LINUX: bash .github/install_bazel.sh + # Grab the rootless Bazel installation inside the container. + CIBW_ENVIRONMENT_LINUX: PATH=$PATH:$HOME/bin CIBW_TEST_COMMAND: python {project}/bindings/python/google_benchmark/example.py - name: Upload Google Benchmark ${{ matrix.os }} wheels - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 + with: + name: dist-${{ matrix.os }} + path: wheelhouse/*.whl + + merge_wheels: + name: Merge all built wheels into one artifact + runs-on: ubuntu-latest + needs: build_wheels + steps: + - name: Merge wheels + uses: actions/upload-artifact/merge@v4 with: name: dist - path: ./wheelhouse/*.whl + pattern: dist-* + delete-merged: true pypi_upload: name: Publish google-benchmark wheels to PyPI - needs: [build_sdist, build_wheels] + needs: [merge_wheels] runs-on: ubuntu-latest + permissions: + id-token: write steps: - - uses: actions/download-artifact@v3 - with: - name: dist - path: dist - - - uses: pypa/gh-action-pypi-publish@v1.6.4 - with: - user: __token__ - password: ${{ secrets.PYPI_PASSWORD }} + - uses: actions/download-artifact@v4 + with: + path: dist + - uses: pypa/gh-action-pypi-publish@v1 diff --git a/yass/third_party/benchmark/.gitignore b/yass/third_party/benchmark/.gitignore index 704f56c257..24a1fb6d74 100644 --- a/yass/third_party/benchmark/.gitignore +++ b/yass/third_party/benchmark/.gitignore @@ -46,6 +46,7 @@ rules.ninja # bazel output symlinks. bazel-* +MODULE.bazel.lock # out-of-source build top-level folders. 
build/ diff --git a/yass/third_party/benchmark/.pre-commit-config.yaml b/yass/third_party/benchmark/.pre-commit-config.yaml new file mode 100644 index 0000000000..93455ab60d --- /dev/null +++ b/yass/third_party/benchmark/.pre-commit-config.yaml @@ -0,0 +1,18 @@ +repos: + - repo: https://github.com/keith/pre-commit-buildifier + rev: 6.4.0 + hooks: + - id: buildifier + - id: buildifier-lint + - repo: https://github.com/pre-commit/mirrors-mypy + rev: v1.8.0 + hooks: + - id: mypy + types_or: [ python, pyi ] + args: [ "--ignore-missing-imports", "--scripts-are-modules" ] + - repo: https://github.com/astral-sh/ruff-pre-commit + rev: v0.3.1 + hooks: + - id: ruff + args: [ --fix, --exit-non-zero-on-fix ] + - id: ruff-format \ No newline at end of file diff --git a/yass/third_party/benchmark/.ycm_extra_conf.py b/yass/third_party/benchmark/.ycm_extra_conf.py index 5649ddcc74..caf257f054 100644 --- a/yass/third_party/benchmark/.ycm_extra_conf.py +++ b/yass/third_party/benchmark/.ycm_extra_conf.py @@ -1,25 +1,30 @@ import os + import ycm_core # These are the compilation flags that will be used in case there's no # compilation database set (by default, one is not set). # CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR. flags = [ -'-Wall', -'-Werror', -'-pedantic-errors', -'-std=c++0x', -'-fno-strict-aliasing', -'-O3', -'-DNDEBUG', -# ...and the same thing goes for the magic -x option which specifies the -# language that the files to be compiled are written in. This is mostly -# relevant for c++ headers. -# For a C project, you would set this to 'c' instead of 'c++'. -'-x', 'c++', -'-I', 'include', -'-isystem', '/usr/include', -'-isystem', '/usr/local/include', + "-Wall", + "-Werror", + "-pedantic-errors", + "-std=c++0x", + "-fno-strict-aliasing", + "-O3", + "-DNDEBUG", + # ...and the same thing goes for the magic -x option which specifies the + # language that the files to be compiled are written in. This is mostly + # relevant for c++ headers. 
+ # For a C project, you would set this to 'c' instead of 'c++'. + "-x", + "c++", + "-I", + "include", + "-isystem", + "/usr/include", + "-isystem", + "/usr/local/include", ] @@ -29,87 +34,87 @@ flags = [ # # Most projects will NOT need to set this to anything; you can just change the # 'flags' list of compilation flags. Notice that YCM itself uses that approach. -compilation_database_folder = '' +compilation_database_folder = "" -if os.path.exists( compilation_database_folder ): - database = ycm_core.CompilationDatabase( compilation_database_folder ) +if os.path.exists(compilation_database_folder): + database = ycm_core.CompilationDatabase(compilation_database_folder) else: - database = None + database = None + +SOURCE_EXTENSIONS = [".cc"] -SOURCE_EXTENSIONS = [ '.cc' ] def DirectoryOfThisScript(): - return os.path.dirname( os.path.abspath( __file__ ) ) + return os.path.dirname(os.path.abspath(__file__)) -def MakeRelativePathsInFlagsAbsolute( flags, working_directory ): - if not working_directory: - return list( flags ) - new_flags = [] - make_next_absolute = False - path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ] - for flag in flags: - new_flag = flag +def MakeRelativePathsInFlagsAbsolute(flags, working_directory): + if not working_directory: + return list(flags) + new_flags = [] + make_next_absolute = False + path_flags = ["-isystem", "-I", "-iquote", "--sysroot="] + for flag in flags: + new_flag = flag - if make_next_absolute: - make_next_absolute = False - if not flag.startswith( '/' ): - new_flag = os.path.join( working_directory, flag ) + if make_next_absolute: + make_next_absolute = False + if not flag.startswith("/"): + new_flag = os.path.join(working_directory, flag) - for path_flag in path_flags: - if flag == path_flag: - make_next_absolute = True - break + for path_flag in path_flags: + if flag == path_flag: + make_next_absolute = True + break - if flag.startswith( path_flag ): - path = flag[ len( path_flag ): ] - new_flag = path_flag + 
os.path.join( working_directory, path ) - break + if flag.startswith(path_flag): + path = flag[len(path_flag) :] + new_flag = path_flag + os.path.join(working_directory, path) + break - if new_flag: - new_flags.append( new_flag ) - return new_flags + if new_flag: + new_flags.append(new_flag) + return new_flags -def IsHeaderFile( filename ): - extension = os.path.splitext( filename )[ 1 ] - return extension in [ '.h', '.hxx', '.hpp', '.hh' ] +def IsHeaderFile(filename): + extension = os.path.splitext(filename)[1] + return extension in [".h", ".hxx", ".hpp", ".hh"] -def GetCompilationInfoForFile( filename ): - # The compilation_commands.json file generated by CMake does not have entries - # for header files. So we do our best by asking the db for flags for a - # corresponding source file, if any. If one exists, the flags for that file - # should be good enough. - if IsHeaderFile( filename ): - basename = os.path.splitext( filename )[ 0 ] - for extension in SOURCE_EXTENSIONS: - replacement_file = basename + extension - if os.path.exists( replacement_file ): - compilation_info = database.GetCompilationInfoForFile( - replacement_file ) - if compilation_info.compiler_flags_: - return compilation_info - return None - return database.GetCompilationInfoForFile( filename ) +def GetCompilationInfoForFile(filename): + # The compilation_commands.json file generated by CMake does not have entries + # for header files. So we do our best by asking the db for flags for a + # corresponding source file, if any. If one exists, the flags for that file + # should be good enough. 
+ if IsHeaderFile(filename): + basename = os.path.splitext(filename)[0] + for extension in SOURCE_EXTENSIONS: + replacement_file = basename + extension + if os.path.exists(replacement_file): + compilation_info = database.GetCompilationInfoForFile( + replacement_file + ) + if compilation_info.compiler_flags_: + return compilation_info + return None + return database.GetCompilationInfoForFile(filename) -def FlagsForFile( filename, **kwargs ): - if database: - # Bear in mind that compilation_info.compiler_flags_ does NOT return a - # python list, but a "list-like" StringVec object - compilation_info = GetCompilationInfoForFile( filename ) - if not compilation_info: - return None +def FlagsForFile(filename, **kwargs): + if database: + # Bear in mind that compilation_info.compiler_flags_ does NOT return a + # python list, but a "list-like" StringVec object + compilation_info = GetCompilationInfoForFile(filename) + if not compilation_info: + return None - final_flags = MakeRelativePathsInFlagsAbsolute( - compilation_info.compiler_flags_, - compilation_info.compiler_working_dir_ ) - else: - relative_to = DirectoryOfThisScript() - final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to ) + final_flags = MakeRelativePathsInFlagsAbsolute( + compilation_info.compiler_flags_, + compilation_info.compiler_working_dir_, + ) + else: + relative_to = DirectoryOfThisScript() + final_flags = MakeRelativePathsInFlagsAbsolute(flags, relative_to) - return { - 'flags': final_flags, - 'do_cache': True - } + return {"flags": final_flags, "do_cache": True} diff --git a/yass/third_party/benchmark/AUTHORS b/yass/third_party/benchmark/AUTHORS index d08c1fdb87..2170e46fd4 100644 --- a/yass/third_party/benchmark/AUTHORS +++ b/yass/third_party/benchmark/AUTHORS @@ -31,6 +31,7 @@ Evgeny Safronov Fabien Pichot Federico Ficarelli Felix Homann +Gergely Meszaros Gergő Szitár Google Inc. 
Henrique Bucher diff --git a/yass/third_party/benchmark/BUILD.bazel b/yass/third_party/benchmark/BUILD.bazel index 60d31d2f2e..15d836998c 100644 --- a/yass/third_party/benchmark/BUILD.bazel +++ b/yass/third_party/benchmark/BUILD.bazel @@ -1,5 +1,22 @@ licenses(["notice"]) +COPTS = [ + "-pedantic", + "-pedantic-errors", + "-std=c++11", + "-Wall", + "-Wconversion", + "-Wextra", + "-Wshadow", + # "-Wshorten-64-to-32", + "-Wfloat-equal", + "-fstrict-aliasing", + ## assert() are used a lot in tests upstream, which may be optimised out leading to + ## unused-variable warning. + "-Wno-unused-variable", + "-Werror=old-style-cast", +] + config_setting( name = "qnx", constraint_values = ["@platforms//os:qnx"], @@ -45,28 +62,35 @@ cc_library( "include/benchmark/benchmark.h", "include/benchmark/export.h", ], - linkopts = select({ - ":windows": ["-DEFAULTLIB:shlwapi.lib"], - "//conditions:default": ["-pthread"], - }), copts = select({ ":windows": [], - "//conditions:default": ["-Werror=old-style-cast"], + "//conditions:default": COPTS, }), - strip_include_prefix = "include", - visibility = ["//visibility:public"], - # Only static linking is allowed; no .so will be produced. - # Using `defines` (i.e. not `local_defines`) means that no - # dependent rules need to bother about defining the macro. - linkstatic = True, defines = [ "BENCHMARK_STATIC_DEFINE", + "BENCHMARK_VERSION=\\\"" + (module_version() if module_version() != None else "") + "\\\"", ] + select({ ":perfcounters": ["HAVE_LIBPFM"], "//conditions:default": [], }), + linkopts = select({ + ":windows": ["-DEFAULTLIB:shlwapi.lib"], + "//conditions:default": ["-pthread"], + }), + # Only static linking is allowed; no .so will be produced. + # Using `defines` (i.e. not `local_defines`) means that no + # dependent rules need to bother about defining the macro. 
+ linkstatic = True, + local_defines = [ + # Turn on Large-file Support + "_FILE_OFFSET_BITS=64", + "_LARGEFILE64_SOURCE", + "_LARGEFILE_SOURCE", + ], + strip_include_prefix = "include", + visibility = ["//visibility:public"], deps = select({ - ":perfcounters": ["@libpfm//:libpfm"], + ":perfcounters": ["@libpfm"], "//conditions:default": [], }), ) @@ -74,7 +98,10 @@ cc_library( cc_library( name = "benchmark_main", srcs = ["src/benchmark_main.cc"], - hdrs = ["include/benchmark/benchmark.h", "include/benchmark/export.h"], + hdrs = [ + "include/benchmark/benchmark.h", + "include/benchmark/export.h", + ], strip_include_prefix = "include", visibility = ["//visibility:public"], deps = [":benchmark"], diff --git a/yass/third_party/benchmark/CMakeLists.txt b/yass/third_party/benchmark/CMakeLists.txt index ffd7deeb2f..71396edacb 100644 --- a/yass/third_party/benchmark/CMakeLists.txt +++ b/yass/third_party/benchmark/CMakeLists.txt @@ -1,7 +1,7 @@ # Require CMake 3.10. If available, use the policies up to CMake 3.22. cmake_minimum_required (VERSION 3.10...3.22) -project (benchmark VERSION 1.8.3 LANGUAGES CXX) +project (benchmark VERSION 1.8.4 LANGUAGES CXX) option(BENCHMARK_ENABLE_TESTING "Enable testing of the benchmark library." ON) option(BENCHMARK_ENABLE_EXCEPTIONS "Enable the use of exceptions in the benchmark library." ON) @@ -21,7 +21,7 @@ if(BENCHMARK_FORCE_WERROR) set(BENCHMARK_ENABLE_WERROR ON) endif(BENCHMARK_FORCE_WERROR) -if(NOT MSVC) +if(NOT (MSVC OR CMAKE_CXX_SIMULATE_ID STREQUAL "MSVC")) option(BENCHMARK_BUILD_32_BITS "Build a 32 bit version of the library." 
OFF) else() set(BENCHMARK_BUILD_32_BITS OFF CACHE BOOL "Build a 32 bit version of the library - unsupported when using MSVC)" FORCE) @@ -45,7 +45,7 @@ option(BENCHMARK_ENABLE_LIBPFM "Enable performance counters provided by libpfm" set(CMAKE_CXX_VISIBILITY_PRESET hidden) set(CMAKE_VISIBILITY_INLINES_HIDDEN ON) -if(MSVC) +if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") # As of CMake 3.18, CMAKE_SYSTEM_PROCESSOR is not set properly for MSVC and # cross-compilation (e.g. Host=x86_64, target=aarch64) requires using the # undocumented, but working variable. @@ -66,7 +66,7 @@ function(should_enable_assembly_tests) return() endif() endif() - if (MSVC) + if (MSVC OR CMAKE_CXX_SIMULATE_ID STREQUAL "MSVC") return() elseif(NOT CMAKE_SYSTEM_PROCESSOR MATCHES "x86_64") return() @@ -105,16 +105,26 @@ get_git_version(GIT_VERSION) # If no git version can be determined, use the version # from the project() command if ("${GIT_VERSION}" STREQUAL "0.0.0") - set(VERSION "${benchmark_VERSION}") + set(VERSION "v${benchmark_VERSION}") else() set(VERSION "${GIT_VERSION}") endif() + +# Normalize version: drop "v" prefix, replace first "-" with ".", +# drop everything after second "-" (including said "-"). 
+string(STRIP ${VERSION} VERSION) +if(VERSION MATCHES v[^-]*-) + string(REGEX REPLACE "v([^-]*)-([0-9]+)-.*" "\\1.\\2" NORMALIZED_VERSION ${VERSION}) +else() + string(REGEX REPLACE "v(.*)" "\\1" NORMALIZED_VERSION ${VERSION}) +endif() + # Tell the user what versions we are using -message(STATUS "Google Benchmark version: ${VERSION}") +message(STATUS "Google Benchmark version: ${VERSION}, normalized to ${NORMALIZED_VERSION}") # The version of the libraries -set(GENERIC_LIB_VERSION ${VERSION}) -string(SUBSTRING ${VERSION} 0 1 GENERIC_LIB_SOVERSION) +set(GENERIC_LIB_VERSION ${NORMALIZED_VERSION}) +string(SUBSTRING ${NORMALIZED_VERSION} 0 1 GENERIC_LIB_SOVERSION) # Import our CMake modules include(AddCXXCompilerFlag) @@ -128,7 +138,7 @@ if (BENCHMARK_BUILD_32_BITS) add_required_cxx_compiler_flag(-m32) endif() -if (MSVC) +if (MSVC OR CMAKE_CXX_SIMULATE_ID STREQUAL "MSVC") set(BENCHMARK_CXX_STANDARD 14) else() set(BENCHMARK_CXX_STANDARD 11) @@ -170,12 +180,17 @@ if (MSVC) set(CMAKE_EXE_LINKER_FLAGS_MINSIZEREL "${CMAKE_EXE_LINKER_FLAGS_MINSIZEREL} /LTCG") endif() else() + # Turn on Large-file Support + add_definitions(-D_FILE_OFFSET_BITS=64) + add_definitions(-D_LARGEFILE64_SOURCE) + add_definitions(-D_LARGEFILE_SOURCE) # Turn compiler warnings up to 11 add_cxx_compiler_flag(-Wall) add_cxx_compiler_flag(-Wextra) add_cxx_compiler_flag(-Wshadow) add_cxx_compiler_flag(-Wfloat-equal) add_cxx_compiler_flag(-Wold-style-cast) + add_cxx_compiler_flag(-Wconversion) if(BENCHMARK_ENABLE_WERROR) add_cxx_compiler_flag(-Werror) endif() @@ -312,7 +327,7 @@ find_package(Threads REQUIRED) cxx_feature_check(PTHREAD_AFFINITY) if (BENCHMARK_ENABLE_LIBPFM) - find_package(PFM) + find_package(PFM REQUIRED) endif() # Set up directories diff --git a/yass/third_party/benchmark/CONTRIBUTORS b/yass/third_party/benchmark/CONTRIBUTORS index 95bcad019b..9ca2caa3ee 100644 --- a/yass/third_party/benchmark/CONTRIBUTORS +++ b/yass/third_party/benchmark/CONTRIBUTORS @@ -51,10 +51,12 @@ Fanbo Meng Federico 
Ficarelli Felix Homann Geoffrey Martin-Noble +Gergely Meszaros Gergő Szitár Hannes Hauswedell Henrique Bucher Ismael Jimenez Martinez +Iakov Sergeev Jern-Kuan Leong JianXiong Zhou Joao Paulo Magalhaes diff --git a/yass/third_party/benchmark/MODULE.bazel b/yass/third_party/benchmark/MODULE.bazel index 37a5f5de5e..0624a34f01 100644 --- a/yass/third_party/benchmark/MODULE.bazel +++ b/yass/third_party/benchmark/MODULE.bazel @@ -1,11 +1,16 @@ -module(name = "google_benchmark", version="1.8.3") +module( + name = "google_benchmark", + version = "1.8.4", +) + +bazel_dep(name = "bazel_skylib", version = "1.5.0") +bazel_dep(name = "platforms", version = "0.0.8") +bazel_dep(name = "rules_foreign_cc", version = "0.10.1") +bazel_dep(name = "rules_cc", version = "0.0.9") + +bazel_dep(name = "rules_python", version = "0.31.0", dev_dependency = True) +bazel_dep(name = "googletest", version = "1.12.1", dev_dependency = True, repo_name = "com_google_googletest") -bazel_dep(name = "bazel_skylib", version = "1.4.1") -bazel_dep(name = "platforms", version = "0.0.6") -bazel_dep(name = "rules_foreign_cc", version = "0.9.0") -bazel_dep(name = "rules_cc", version = "0.0.6") -bazel_dep(name = "rules_python", version = "0.24.0", dev_dependency = True) -bazel_dep(name = "googletest", version = "1.12.1", repo_name = "com_google_googletest", dev_dependency = True) bazel_dep(name = "libpfm", version = "4.11.0") # Register a toolchain for Python 3.9 to be able to build numpy. Python @@ -14,11 +19,23 @@ bazel_dep(name = "libpfm", version = "4.11.0") # of relying on the changing default version from rules_python. 
python = use_extension("@rules_python//python/extensions:python.bzl", "python", dev_dependency = True) +python.toolchain(python_version = "3.8") python.toolchain(python_version = "3.9") +python.toolchain(python_version = "3.10") +python.toolchain(python_version = "3.11") +python.toolchain( + is_default = True, + python_version = "3.12", +) pip = use_extension("@rules_python//python/extensions:pip.bzl", "pip", dev_dependency = True) pip.parse( - hub_name="tools_pip_deps", + hub_name = "tools_pip_deps", python_version = "3.9", - requirements_lock="//tools:requirements.txt") + requirements_lock = "//tools:requirements.txt", +) use_repo(pip, "tools_pip_deps") + +# -- bazel_dep definitions -- # + +bazel_dep(name = "nanobind_bazel", version = "1.0.0", dev_dependency = True) diff --git a/yass/third_party/benchmark/WORKSPACE b/yass/third_party/benchmark/WORKSPACE index 833590f289..503202465e 100644 --- a/yass/third_party/benchmark/WORKSPACE +++ b/yass/third_party/benchmark/WORKSPACE @@ -8,15 +8,17 @@ load("@rules_foreign_cc//foreign_cc:repositories.bzl", "rules_foreign_cc_depende rules_foreign_cc_dependencies() -load("@rules_python//python:pip.bzl", pip3_install="pip_install") +load("@rules_python//python:repositories.bzl", "py_repositories") -pip3_install( - name = "tools_pip_deps", - requirements = "//tools:requirements.txt", +py_repositories() + +load("@rules_python//python:pip.bzl", "pip_parse") + +pip_parse( + name = "tools_pip_deps", + requirements_lock = "//tools:requirements.txt", ) -new_local_repository( - name = "python_headers", - build_file = "@//bindings/python:python_headers.BUILD", - path = "", # May be overwritten by setup.py. 
-) +load("@tools_pip_deps//:requirements.bzl", "install_deps") + +install_deps() diff --git a/yass/third_party/benchmark/bazel/benchmark_deps.bzl b/yass/third_party/benchmark/bazel/benchmark_deps.bzl index 667065f9b7..4fb45a538d 100644 --- a/yass/third_party/benchmark/bazel/benchmark_deps.bzl +++ b/yass/third_party/benchmark/bazel/benchmark_deps.bzl @@ -1,5 +1,9 @@ -load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") +""" +This file contains the Bazel build dependencies for Google Benchmark (both C++ source and Python bindings). +""" + load("@bazel_tools//tools/build_defs/repo:git.bzl", "new_git_repository") +load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") def benchmark_deps(): """Loads dependencies required to build Google Benchmark.""" @@ -7,48 +11,41 @@ def benchmark_deps(): if "bazel_skylib" not in native.existing_rules(): http_archive( name = "bazel_skylib", - sha256 = "f7be3474d42aae265405a592bb7da8e171919d74c16f082a5457840f06054728", + sha256 = "cd55a062e763b9349921f0f5db8c3933288dc8ba4f76dd9416aac68acee3cb94", urls = [ - "https://mirror.bazel.build/github.com/bazelbuild/bazel-skylib/releases/download/1.2.1/bazel-skylib-1.2.1.tar.gz", - "https://github.com/bazelbuild/bazel-skylib/releases/download/1.2.1/bazel-skylib-1.2.1.tar.gz", + "https://mirror.bazel.build/github.com/bazelbuild/bazel-skylib/releases/download/1.5.0/bazel-skylib-1.5.0.tar.gz", + "https://github.com/bazelbuild/bazel-skylib/releases/download/1.5.0/bazel-skylib-1.5.0.tar.gz", ], ) if "rules_foreign_cc" not in native.existing_rules(): http_archive( name = "rules_foreign_cc", - sha256 = "bcd0c5f46a49b85b384906daae41d277b3dc0ff27c7c752cc51e43048a58ec83", - strip_prefix = "rules_foreign_cc-0.7.1", - url = "https://github.com/bazelbuild/rules_foreign_cc/archive/0.7.1.tar.gz", + sha256 = "476303bd0f1b04cc311fc258f1708a5f6ef82d3091e53fd1977fa20383425a6a", + strip_prefix = "rules_foreign_cc-0.10.1", + url = 
"https://github.com/bazelbuild/rules_foreign_cc/releases/download/0.10.1/rules_foreign_cc-0.10.1.tar.gz", ) if "rules_python" not in native.existing_rules(): http_archive( name = "rules_python", - url = "https://github.com/bazelbuild/rules_python/releases/download/0.1.0/rules_python-0.1.0.tar.gz", - sha256 = "b6d46438523a3ec0f3cead544190ee13223a52f6a6765a29eae7b7cc24cc83a0", - ) - - if "com_google_absl" not in native.existing_rules(): - http_archive( - name = "com_google_absl", - sha256 = "f41868f7a938605c92936230081175d1eae87f6ea2c248f41077c8f88316f111", - strip_prefix = "abseil-cpp-20200225.2", - urls = ["https://github.com/abseil/abseil-cpp/archive/20200225.2.tar.gz"], + sha256 = "e85ae30de33625a63eca7fc40a94fea845e641888e52f32b6beea91e8b1b2793", + strip_prefix = "rules_python-0.27.1", + url = "https://github.com/bazelbuild/rules_python/releases/download/0.27.1/rules_python-0.27.1.tar.gz", ) if "com_google_googletest" not in native.existing_rules(): new_git_repository( name = "com_google_googletest", remote = "https://github.com/google/googletest.git", - tag = "release-1.11.0", + tag = "release-1.12.1", ) if "nanobind" not in native.existing_rules(): new_git_repository( name = "nanobind", remote = "https://github.com/wjakob/nanobind.git", - tag = "v1.4.0", + tag = "v1.8.0", build_file = "@//bindings/python:nanobind.BUILD", recursive_init_submodules = True, ) diff --git a/yass/third_party/benchmark/bindings/python/BUILD b/yass/third_party/benchmark/bindings/python/BUILD deleted file mode 100644 index 9559a76b30..0000000000 --- a/yass/third_party/benchmark/bindings/python/BUILD +++ /dev/null @@ -1,3 +0,0 @@ -exports_files(glob(["*.BUILD"])) -exports_files(["build_defs.bzl"]) - diff --git a/yass/third_party/benchmark/bindings/python/build_defs.bzl b/yass/third_party/benchmark/bindings/python/build_defs.bzl deleted file mode 100644 index 009820afd0..0000000000 --- a/yass/third_party/benchmark/bindings/python/build_defs.bzl +++ /dev/null @@ -1,25 +0,0 @@ 
-_SHARED_LIB_SUFFIX = { - "//conditions:default": ".so", - "//:windows": ".dll", -} - -def py_extension(name, srcs, hdrs = [], copts = [], features = [], deps = []): - for shared_lib_suffix in _SHARED_LIB_SUFFIX.values(): - shared_lib_name = name + shared_lib_suffix - native.cc_binary( - name = shared_lib_name, - linkshared = True, - linkstatic = True, - srcs = srcs + hdrs, - copts = copts, - features = features, - deps = deps, - ) - - return native.py_library( - name = name, - data = select({ - platform: [name + shared_lib_suffix] - for platform, shared_lib_suffix in _SHARED_LIB_SUFFIX.items() - }), - ) diff --git a/yass/third_party/benchmark/bindings/python/google_benchmark/BUILD b/yass/third_party/benchmark/bindings/python/google_benchmark/BUILD index 89ec76e0d5..0c8e3c103f 100644 --- a/yass/third_party/benchmark/bindings/python/google_benchmark/BUILD +++ b/yass/third_party/benchmark/bindings/python/google_benchmark/BUILD @@ -1,4 +1,4 @@ -load("//bindings/python:build_defs.bzl", "py_extension") +load("@nanobind_bazel//:build_defs.bzl", "nanobind_extension") py_library( name = "google_benchmark", @@ -9,22 +9,10 @@ py_library( ], ) -py_extension( +nanobind_extension( name = "_benchmark", srcs = ["benchmark.cc"], - copts = [ - "-fexceptions", - "-fno-strict-aliasing", - ], - features = [ - "-use_header_modules", - "-parse_headers", - ], - deps = [ - "//:benchmark", - "@nanobind", - "@python_headers", - ], + deps = ["//:benchmark"], ) py_test( @@ -37,4 +25,3 @@ py_test( ":google_benchmark", ], ) - diff --git a/yass/third_party/benchmark/bindings/python/google_benchmark/__init__.py b/yass/third_party/benchmark/bindings/python/google_benchmark/__init__.py index 642d78a7f4..c1393b4e58 100644 --- a/yass/third_party/benchmark/bindings/python/google_benchmark/__init__.py +++ b/yass/third_party/benchmark/bindings/python/google_benchmark/__init__.py @@ -26,50 +26,30 @@ Example usage: if __name__ == '__main__': benchmark.main() """ + import atexit from absl import app + from 
google_benchmark import _benchmark from google_benchmark._benchmark import ( - Counter, - kNanosecond, - kMicrosecond, - kMillisecond, - kSecond, - oNone, - o1, - oN, - oNSquared, - oNCubed, - oLogN, - oNLogN, - oAuto, - oLambda, - State, + Counter as Counter, + State as State, + kMicrosecond as kMicrosecond, + kMillisecond as kMillisecond, + kNanosecond as kNanosecond, + kSecond as kSecond, + o1 as o1, + oAuto as oAuto, + oLambda as oLambda, + oLogN as oLogN, + oN as oN, + oNCubed as oNCubed, + oNLogN as oNLogN, + oNone as oNone, + oNSquared as oNSquared, ) - - -__all__ = [ - "register", - "main", - "Counter", - "kNanosecond", - "kMicrosecond", - "kMillisecond", - "kSecond", - "oNone", - "o1", - "oN", - "oNSquared", - "oNCubed", - "oLogN", - "oNLogN", - "oAuto", - "oLambda", - "State", -] - -__version__ = "1.8.3" +from google_benchmark.version import __version__ as __version__ class __OptionMaker: @@ -97,7 +77,6 @@ class __OptionMaker: # The function that get returned on @option.range(start=0, limit=1<<5). def __builder_method(*args, **kwargs): - # The decorator that get called, either with the benchmared function # or the previous Options def __decorator(func_or_options): diff --git a/yass/third_party/benchmark/bindings/python/google_benchmark/example.py b/yass/third_party/benchmark/bindings/python/google_benchmark/example.py index d95a0438d6..b5b2f88ff3 100644 --- a/yass/third_party/benchmark/bindings/python/google_benchmark/example.py +++ b/yass/third_party/benchmark/bindings/python/google_benchmark/example.py @@ -38,6 +38,7 @@ def sum_million(state): while state: sum(range(1_000_000)) + @benchmark.register def pause_timing(state): """Pause timing every iteration.""" @@ -85,7 +86,9 @@ def custom_counters(state): # Set a counter as a rate. state.counters["foo_rate"] = Counter(num_foo, Counter.kIsRate) # Set a counter as an inverse of rate. 
- state.counters["foo_inv_rate"] = Counter(num_foo, Counter.kIsRate | Counter.kInvert) + state.counters["foo_inv_rate"] = Counter( + num_foo, Counter.kIsRate | Counter.kInvert + ) # Set a counter as a thread-average quantity. state.counters["foo_avg"] = Counter(num_foo, Counter.kAvgThreads) # There's also a combined flag: diff --git a/yass/third_party/benchmark/bindings/python/google_benchmark/version.py b/yass/third_party/benchmark/bindings/python/google_benchmark/version.py new file mode 100644 index 0000000000..a324693e2d --- /dev/null +++ b/yass/third_party/benchmark/bindings/python/google_benchmark/version.py @@ -0,0 +1,7 @@ +from importlib.metadata import PackageNotFoundError, version + +try: + __version__ = version("google-benchmark") +except PackageNotFoundError: + # package is not installed + pass diff --git a/yass/third_party/benchmark/bindings/python/nanobind.BUILD b/yass/third_party/benchmark/bindings/python/nanobind.BUILD deleted file mode 100644 index cd9faf99bb..0000000000 --- a/yass/third_party/benchmark/bindings/python/nanobind.BUILD +++ /dev/null @@ -1,17 +0,0 @@ -cc_library( - name = "nanobind", - srcs = glob([ - "src/*.cpp" - ]), - copts = ["-fexceptions"], - includes = ["include", "ext/robin_map/include"], - textual_hdrs = glob( - [ - "include/**/*.h", - "src/*.h", - "ext/robin_map/include/tsl/*.h", - ], - ), - deps = ["@python_headers"], - visibility = ["//visibility:public"], -) diff --git a/yass/third_party/benchmark/bindings/python/python_headers.BUILD b/yass/third_party/benchmark/bindings/python/python_headers.BUILD deleted file mode 100644 index 9c34cf6ca4..0000000000 --- a/yass/third_party/benchmark/bindings/python/python_headers.BUILD +++ /dev/null @@ -1,6 +0,0 @@ -cc_library( - name = "python_headers", - hdrs = glob(["**/*.h"]), - includes = ["."], - visibility = ["//visibility:public"], -) diff --git a/yass/third_party/benchmark/cmake/GetGitVersion.cmake b/yass/third_party/benchmark/cmake/GetGitVersion.cmake index 
04a1f9b70d..b0210103b2 100644 --- a/yass/third_party/benchmark/cmake/GetGitVersion.cmake +++ b/yass/third_party/benchmark/cmake/GetGitVersion.cmake @@ -20,38 +20,16 @@ set(__get_git_version INCLUDED) function(get_git_version var) if(GIT_EXECUTABLE) - execute_process(COMMAND ${GIT_EXECUTABLE} describe --tags --match "v[0-9]*.[0-9]*.[0-9]*" --abbrev=8 + execute_process(COMMAND ${GIT_EXECUTABLE} describe --tags --match "v[0-9]*.[0-9]*.[0-9]*" --abbrev=8 --dirty WORKING_DIRECTORY ${PROJECT_SOURCE_DIR} RESULT_VARIABLE status - OUTPUT_VARIABLE GIT_DESCRIBE_VERSION + OUTPUT_VARIABLE GIT_VERSION ERROR_QUIET) if(status) - set(GIT_DESCRIBE_VERSION "v0.0.0") + set(GIT_VERSION "v0.0.0") endif() - - string(STRIP ${GIT_DESCRIBE_VERSION} GIT_DESCRIBE_VERSION) - if(GIT_DESCRIBE_VERSION MATCHES v[^-]*-) - string(REGEX REPLACE "v([^-]*)-([0-9]+)-.*" "\\1.\\2" GIT_VERSION ${GIT_DESCRIBE_VERSION}) - else() - string(REGEX REPLACE "v(.*)" "\\1" GIT_VERSION ${GIT_DESCRIBE_VERSION}) - endif() - - # Work out if the repository is dirty - execute_process(COMMAND ${GIT_EXECUTABLE} update-index -q --refresh - WORKING_DIRECTORY ${PROJECT_SOURCE_DIR} - OUTPUT_QUIET - ERROR_QUIET) - execute_process(COMMAND ${GIT_EXECUTABLE} diff-index --name-only HEAD -- - WORKING_DIRECTORY ${PROJECT_SOURCE_DIR} - OUTPUT_VARIABLE GIT_DIFF_INDEX - ERROR_QUIET) - string(COMPARE NOTEQUAL "${GIT_DIFF_INDEX}" "" GIT_DIRTY) - if (${GIT_DIRTY}) - set(GIT_DESCRIBE_VERSION "${GIT_DESCRIBE_VERSION}-dirty") - endif() - message(STATUS "git version: ${GIT_DESCRIBE_VERSION} normalized to ${GIT_VERSION}") else() - set(GIT_VERSION "0.0.0") + set(GIT_VERSION "v0.0.0") endif() set(${var} ${GIT_VERSION} PARENT_SCOPE) diff --git a/yass/third_party/benchmark/cmake/benchmark_main.pc.in b/yass/third_party/benchmark/cmake/benchmark_main.pc.in new file mode 100644 index 0000000000..a90f3cd060 --- /dev/null +++ b/yass/third_party/benchmark/cmake/benchmark_main.pc.in @@ -0,0 +1,7 @@ +libdir=@CMAKE_INSTALL_FULL_LIBDIR@ + +Name: 
@PROJECT_NAME@ +Description: Google microbenchmark framework (with main() function) +Version: @VERSION@ +Requires: benchmark +Libs: -L${libdir} -lbenchmark_main diff --git a/yass/third_party/benchmark/docs/python_bindings.md b/yass/third_party/benchmark/docs/python_bindings.md index 6a7aab0a29..d9c5d2d3f6 100644 --- a/yass/third_party/benchmark/docs/python_bindings.md +++ b/yass/third_party/benchmark/docs/python_bindings.md @@ -3,7 +3,7 @@ Python bindings are available as wheels on [PyPI](https://pypi.org/project/google-benchmark/) for importing and using Google Benchmark directly in Python. Currently, pre-built wheels exist for macOS (both ARM64 and Intel x86), Linux x86-64 and 64-bit Windows. -Supported Python versions are Python 3.7 - 3.10. +Supported Python versions are Python 3.8 - 3.12. To install Google Benchmark's Python bindings, run: @@ -25,9 +25,9 @@ python3 -m venv venv --system-site-packages source venv/bin/activate # .\venv\Scripts\Activate.ps1 on Windows # upgrade Python's system-wide packages -python -m pip install --upgrade pip setuptools wheel -# builds the wheel and stores it in the directory "wheelhouse". -python -m pip wheel . -w wheelhouse +python -m pip install --upgrade pip build +# builds the wheel and stores it in the directory "dist". +python -m build ``` NB: Building wheels from source requires Bazel. For platform-specific instructions on how to install Bazel, diff --git a/yass/third_party/benchmark/docs/reducing_variance.md b/yass/third_party/benchmark/docs/reducing_variance.md index e566ab9852..105f96e769 100644 --- a/yass/third_party/benchmark/docs/reducing_variance.md +++ b/yass/third_party/benchmark/docs/reducing_variance.md @@ -14,8 +14,6 @@ you might want to disable the CPU frequency scaling while running the benchmark, as well as consider other ways to stabilize the performance of your system while benchmarking. -See [Reducing Variance](reducing_variance.md) for more information. 
- Exactly how to do this depends on the Linux distribution, desktop environment, and installed programs. Specific details are a moving target, so we will not attempt to exhaustively document them here. @@ -67,7 +65,7 @@ program. Reducing sources of variance is OS and architecture dependent, which is one reason some companies maintain machines dedicated to performance testing. -Some of the easier and and effective ways of reducing variance on a typical +Some of the easier and effective ways of reducing variance on a typical Linux workstation are: 1. Use the performance governor as [discussed @@ -89,7 +87,7 @@ above](user_guide#disabling-cpu-frequency-scaling). 4. Close other programs that do non-trivial things based on timers, such as your web browser, desktop environment, etc. 5. Reduce the working set of your benchmark to fit within the L1 cache, but - do be aware that this may lead you to optimize for an unrelistic + do be aware that this may lead you to optimize for an unrealistic situation. Further resources on this topic: diff --git a/yass/third_party/benchmark/docs/releasing.md b/yass/third_party/benchmark/docs/releasing.md index cdf415997a..09bf93764d 100644 --- a/yass/third_party/benchmark/docs/releasing.md +++ b/yass/third_party/benchmark/docs/releasing.md @@ -8,9 +8,8 @@ * `git log $(git describe --abbrev=0 --tags)..HEAD` gives you the list of commits between the last annotated tag and HEAD * Pick the most interesting. -* Create one last commit that updates the version saved in `CMakeLists.txt`, `MODULE.bazel` - and the `__version__` variable in `bindings/python/google_benchmark/__init__.py`to the - release version you're creating. (This version will be used if benchmark is installed +* Create one last commit that updates the version saved in `CMakeLists.txt` and `MODULE.bazel` + to the release version you're creating. (This version will be used if benchmark is installed from the archive you'll be creating in the next step.) 
``` @@ -21,16 +20,6 @@ project (benchmark VERSION 1.8.0 LANGUAGES CXX) module(name = "com_github_google_benchmark", version="1.8.0") ``` -```python -# bindings/python/google_benchmark/__init__.py - -# ... - -__version__ = "1.8.0" # <-- change this to the release version you are creating - -# ... -``` - * Create a release through github's interface * Note this will create a lightweight tag. * Update this to an annotated tag: @@ -38,4 +27,5 @@ __version__ = "1.8.0" # <-- change this to the release version you are creating * `git tag -a -f ` * `git push --force --tags origin` * Confirm that the "Build and upload Python wheels" action runs to completion - * run it manually if it hasn't run + * Run it manually if it hasn't run. + * IMPORTANT: When re-running manually, make sure to select the newly created `` as the workflow version in the "Run workflow" tab on the GitHub Actions page. diff --git a/yass/third_party/benchmark/docs/user_guide.md b/yass/third_party/benchmark/docs/user_guide.md index 2ceb13eb59..d22a906909 100644 --- a/yass/third_party/benchmark/docs/user_guide.md +++ b/yass/third_party/benchmark/docs/user_guide.md @@ -28,6 +28,8 @@ [Templated Benchmarks](#templated-benchmarks) +[Templated Benchmarks that take arguments](#templated-benchmarks-with-arguments) + [Fixtures](#fixtures) [Custom Counters](#custom-counters) @@ -574,6 +576,30 @@ Three macros are provided for adding benchmark templates. #define BENCHMARK_TEMPLATE2(func, arg1, arg2) ``` + + +## Templated Benchmarks that take arguments + +Sometimes there is a need to template benchmarks, and provide arguments to them. 
+ +```c++ +template void BM_Sequential_With_Step(benchmark::State& state, int step) { + Q q; + typename Q::value_type v; + for (auto _ : state) { + for (int i = state.range(0); i-=step; ) + q.push(v); + for (int e = state.range(0); e-=step; ) + q.Wait(&v); + } + // actually messages, not bytes: + state.SetBytesProcessed( + static_cast(state.iterations())*state.range(0)); +} + +BENCHMARK_TEMPLATE1_CAPTURE(BM_Sequential, WaitQueue, Step1, 1)->Range(1<<0, 1<<10); +``` + ## Fixtures @@ -591,10 +617,10 @@ For Example: ```c++ class MyFixture : public benchmark::Fixture { public: - void SetUp(const ::benchmark::State& state) { + void SetUp(::benchmark::State& state) { } - void TearDown(const ::benchmark::State& state) { + void TearDown(::benchmark::State& state) { } }; diff --git a/yass/third_party/benchmark/include/benchmark/benchmark.h b/yass/third_party/benchmark/include/benchmark/benchmark.h index e3857e717f..08cfe29da3 100644 --- a/yass/third_party/benchmark/include/benchmark/benchmark.h +++ b/yass/third_party/benchmark/include/benchmark/benchmark.h @@ -302,6 +302,9 @@ class BenchmarkReporter; // Default number of minimum benchmark running time in seconds. const char kDefaultMinTimeStr[] = "0.5s"; +// Returns the version of the library. +BENCHMARK_EXPORT std::string GetBenchmarkVersion(); + BENCHMARK_EXPORT void PrintDefaultHelp(); BENCHMARK_EXPORT void Initialize(int* argc, char** argv, @@ -341,7 +344,7 @@ BENCHMARK_EXPORT BenchmarkReporter* CreateDefaultDisplayReporter(); // The second and third overload use the specified 'display_reporter' and // 'file_reporter' respectively. 'file_reporter' will write to the file // specified -// by '--benchmark_output'. If '--benchmark_output' is not given the +// by '--benchmark_out'. If '--benchmark_out' is not given the // 'file_reporter' is ignored. // // RETURNS: The number of matching benchmarks. 
@@ -584,6 +587,12 @@ inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp const& value) { inline BENCHMARK_ALWAYS_INLINE void ClobberMemory() { _ReadWriteBarrier(); } #endif #else +#ifdef BENCHMARK_HAS_CXX11 +template +inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp&& value) { + internal::UseCharPointer(&reinterpret_cast(value)); +} +#else template BENCHMARK_DEPRECATED_MSG( "The const-ref version of this method can permit " @@ -591,6 +600,12 @@ BENCHMARK_DEPRECATED_MSG( inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp const& value) { internal::UseCharPointer(&reinterpret_cast(value)); } + +template +inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp& value) { + internal::UseCharPointer(&reinterpret_cast(value)); +} +#endif // FIXME Add ClobberMemory() for non-gnu and non-msvc compilers, before C++11. #endif @@ -660,13 +675,15 @@ typedef std::map UserCounters; // calculated automatically to the best fit. enum BigO { oNone, o1, oN, oNSquared, oNCubed, oLogN, oNLogN, oAuto, oLambda }; +typedef int64_t ComplexityN; + typedef int64_t IterationCount; enum StatisticUnit { kTime, kPercentage }; // BigOFunc is passed to a benchmark in order to specify the asymptotic // computational complexity for the benchmark. -typedef double(BigOFunc)(IterationCount); +typedef double(BigOFunc)(ComplexityN); // StatisticsFunc is passed to a benchmark in order to compute some descriptive // statistics over all the measurements of some type @@ -734,13 +751,13 @@ class BENCHMARK_EXPORT State { // have been called previously. // // NOTE: KeepRunning may not be used after calling either of these functions. - BENCHMARK_ALWAYS_INLINE StateIterator begin(); - BENCHMARK_ALWAYS_INLINE StateIterator end(); + inline BENCHMARK_ALWAYS_INLINE StateIterator begin(); + inline BENCHMARK_ALWAYS_INLINE StateIterator end(); // Returns true if the benchmark should continue through another iteration. // NOTE: A benchmark may not return from the test until KeepRunning() has // returned false. 
- bool KeepRunning(); + inline bool KeepRunning(); // Returns true iff the benchmark should run n more iterations. // REQUIRES: 'n' > 0. @@ -752,7 +769,7 @@ class BENCHMARK_EXPORT State { // while (state.KeepRunningBatch(1000)) { // // process 1000 elements // } - bool KeepRunningBatch(IterationCount n); + inline bool KeepRunningBatch(IterationCount n); // REQUIRES: timer is running and 'SkipWithMessage(...)' or // 'SkipWithError(...)' has not been called by the current thread. @@ -863,10 +880,12 @@ class BENCHMARK_EXPORT State { // and complexity_n will // represent the length of N. BENCHMARK_ALWAYS_INLINE - void SetComplexityN(int64_t complexity_n) { complexity_n_ = complexity_n; } + void SetComplexityN(ComplexityN complexity_n) { + complexity_n_ = complexity_n; + } BENCHMARK_ALWAYS_INLINE - int64_t complexity_length_n() const { return complexity_n_; } + ComplexityN complexity_length_n() const { return complexity_n_; } // If this routine is called with items > 0, then an items/s // label is printed on the benchmark report line for the currently @@ -955,7 +974,7 @@ class BENCHMARK_EXPORT State { // items we don't need on the first cache line std::vector range_; - int64_t complexity_n_; + ComplexityN complexity_n_; public: // Container for user-defined counters. @@ -970,7 +989,7 @@ class BENCHMARK_EXPORT State { void StartKeepRunning(); // Implementation of KeepRunning() and KeepRunningBatch(). // is_batch must be true unless n is 1. - bool KeepRunningInternal(IterationCount n, bool is_batch); + inline bool KeepRunningInternal(IterationCount n, bool is_batch); void FinishKeepRunning(); const std::string name_; @@ -1504,7 +1523,7 @@ class Fixture : public internal::Benchmark { // /* Registers a benchmark named "BM_takes_args/int_string_test` */ // BENCHMARK_CAPTURE(BM_takes_args, int_string_test, 42, std::string("abc")); #define BENCHMARK_CAPTURE(func, test_case_name, ...) 
\ - BENCHMARK_PRIVATE_DECLARE(func) = \ + BENCHMARK_PRIVATE_DECLARE(_benchmark_) = \ (::benchmark::internal::RegisterBenchmarkInternal( \ new ::benchmark::internal::FunctionBenchmark( \ #func "/" #test_case_name, \ @@ -1541,6 +1560,31 @@ class Fixture : public internal::Benchmark { #define BENCHMARK_TEMPLATE(n, a) BENCHMARK_TEMPLATE1(n, a) #endif +#ifdef BENCHMARK_HAS_CXX11 +// This will register a benchmark for a templatized function, +// with the additional arguments specified by `...`. +// +// For example: +// +// template ` +// void BM_takes_args(benchmark::State& state, ExtraArgs&&... extra_args) { +// [...] +//} +// /* Registers a benchmark named "BM_takes_args/int_string_test` */ +// BENCHMARK_TEMPLATE1_CAPTURE(BM_takes_args, void, int_string_test, 42, +// std::string("abc")); +#define BENCHMARK_TEMPLATE1_CAPTURE(func, a, test_case_name, ...) \ + BENCHMARK_CAPTURE(func, test_case_name, __VA_ARGS__) + +#define BENCHMARK_TEMPLATE2_CAPTURE(func, a, b, test_case_name, ...) \ + BENCHMARK_PRIVATE_DECLARE(func) = \ + (::benchmark::internal::RegisterBenchmarkInternal( \ + new ::benchmark::internal::FunctionBenchmark( \ + #func "<" #a "," #b ">" \ + "/" #test_case_name, \ + [](::benchmark::State& st) { func(st, __VA_ARGS__); }))) +#endif // BENCHMARK_HAS_CXX11 + #define BENCHMARK_PRIVATE_DECLARE_F(BaseClass, Method) \ class BaseClass##_##Method##_Benchmark : public BaseClass { \ public: \ @@ -1748,6 +1792,7 @@ class BENCHMARK_EXPORT BenchmarkReporter { real_accumulated_time(0), cpu_accumulated_time(0), max_heapbytes_used(0), + use_real_time_for_initial_big_o(false), complexity(oNone), complexity_lambda(), complexity_n(0), @@ -1790,10 +1835,14 @@ class BENCHMARK_EXPORT BenchmarkReporter { // This is set to 0.0 if memory tracing is not enabled. double max_heapbytes_used; + // By default Big-O is computed for CPU time, but that is not what you want + // to happen when manual time was requested, which is stored as real time. 
+ bool use_real_time_for_initial_big_o; + // Keep track of arguments to compute asymptotic complexity BigO complexity; BigOFunc* complexity_lambda; - int64_t complexity_n; + ComplexityN complexity_n; // what statistics to compute from the measurements const std::vector* statistics; diff --git a/yass/third_party/benchmark/pyproject.toml b/yass/third_party/benchmark/pyproject.toml index fe8770bc78..62507a8703 100644 --- a/yass/third_party/benchmark/pyproject.toml +++ b/yass/third_party/benchmark/pyproject.toml @@ -1,5 +1,5 @@ [build-system] -requires = ["setuptools", "wheel"] +requires = ["setuptools", "setuptools-scm[toml]", "wheel"] build-backend = "setuptools.build_meta" [project] @@ -22,6 +22,7 @@ classifiers = [ "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", "Topic :: Software Development :: Testing", "Topic :: System :: Benchmark", ] @@ -32,6 +33,11 @@ dependencies = [ "absl-py>=0.7.1", ] +[project.optional-dependencies] +dev = [ + "pre-commit>=3.3.3", +] + [project.urls] Homepage = "https://github.com/google/benchmark" Documentation = "https://github.com/google/benchmark/tree/main/docs" @@ -46,5 +52,35 @@ zip-safe = false where = ["bindings/python"] [tool.setuptools.dynamic] -version = { attr = "google_benchmark.__version__" } readme = { file = "README.md", content-type = "text/markdown" } + +[tool.setuptools_scm] + +[tool.mypy] +check_untyped_defs = true +disallow_incomplete_defs = true +pretty = true +python_version = "3.11" +strict_optional = false +warn_unreachable = true + +[[tool.mypy.overrides]] +module = ["yaml"] +ignore_missing_imports = true + +[tool.ruff] +# explicitly tell ruff the source directory to correctly identify first-party package. +src = ["bindings/python"] + +line-length = 80 +target-version = "py311" + +[tool.ruff.lint] +# Enable pycodestyle (`E`, `W`), Pyflakes (`F`), and isort (`I`) codes by default. 
+select = ["E", "F", "I", "W"] +ignore = [ + "E501", # line too long +] + +[tool.ruff.lint.isort] +combine-as-imports = true diff --git a/yass/third_party/benchmark/setup.py b/yass/third_party/benchmark/setup.py index b02a6a7012..40cdc8d339 100644 --- a/yass/third_party/benchmark/setup.py +++ b/yass/third_party/benchmark/setup.py @@ -1,46 +1,70 @@ import contextlib import os import platform +import re import shutil -import sysconfig from pathlib import Path +from typing import Any, Generator import setuptools from setuptools.command import build_ext - -PYTHON_INCLUDE_PATH_PLACEHOLDER = "" - IS_WINDOWS = platform.system() == "Windows" IS_MAC = platform.system() == "Darwin" +IS_LINUX = platform.system() == "Linux" + +# hardcoded SABI-related options. Requires that each Python interpreter +# (hermetic or not) participating is of the same major-minor version. +version_tuple = tuple(int(i) for i in platform.python_version_tuple()) +py_limited_api = version_tuple >= (3, 12) +options = {"bdist_wheel": {"py_limited_api": "cp312"}} if py_limited_api else {} + + +def is_cibuildwheel() -> bool: + return os.getenv("CIBUILDWHEEL") is not None @contextlib.contextmanager -def temp_fill_include_path(fp: str): - """Temporarily set the Python include path in a file.""" - with open(fp, "r+") as f: - try: - content = f.read() - replaced = content.replace( - PYTHON_INCLUDE_PATH_PLACEHOLDER, - Path(sysconfig.get_paths()['include']).as_posix(), +def _maybe_patch_toolchains() -> Generator[None, None, None]: + """ + Patch rules_python toolchains to ignore root user error + when run in a Docker container on Linux in cibuildwheel. + """ + + def fmt_toolchain_args(matchobj): + suffix = "ignore_root_user_error = True" + callargs = matchobj.group(1) + # toolchain def is broken over multiple lines + if callargs.endswith("\n"): + callargs = callargs + " " + suffix + ",\n" + # toolchain def is on one line. 
+ else: + callargs = callargs + ", " + suffix + return "python.toolchain(" + callargs + ")" + + CIBW_LINUX = is_cibuildwheel() and IS_LINUX + try: + if CIBW_LINUX: + module_bazel = Path("MODULE.bazel") + content: str = module_bazel.read_text() + module_bazel.write_text( + re.sub( + r"python.toolchain\(([\w\"\s,.=]*)\)", + fmt_toolchain_args, + content, + ) ) - f.seek(0) - f.write(replaced) - f.truncate() - yield - finally: - # revert to the original content after exit - f.seek(0) - f.write(content) - f.truncate() + yield + finally: + if CIBW_LINUX: + module_bazel.write_text(content) class BazelExtension(setuptools.Extension): """A C/C++ extension that is defined as a Bazel BUILD target.""" - def __init__(self, name: str, bazel_target: str): - super().__init__(name=name, sources=[]) + def __init__(self, name: str, bazel_target: str, **kwargs: Any): + super().__init__(name=name, sources=[], **kwargs) self.bazel_target = bazel_target stripped_target = bazel_target.split("//")[-1] @@ -53,53 +77,62 @@ class BuildBazelExtension(build_ext.build_ext): def run(self): for ext in self.extensions: self.bazel_build(ext) - build_ext.build_ext.run(self) + super().run() + # explicitly call `bazel shutdown` for graceful exit + self.spawn(["bazel", "shutdown"]) - def bazel_build(self, ext: BazelExtension): + def copy_extensions_to_source(self): + """ + Copy generated extensions into the source tree. + This is done in the ``bazel_build`` method, so it's not necessary to + do again in the `build_ext` base class. + """ + pass + + def bazel_build(self, ext: BazelExtension) -> None: """Runs the bazel build to create the package.""" - with temp_fill_include_path("WORKSPACE"): - temp_path = Path(self.build_temp) + temp_path = Path(self.build_temp) + # omit the patch version to avoid build errors if the toolchain is not + # yet registered in the current @rules_python version. + # patch version differences should be fine. 
+ python_version = ".".join(platform.python_version_tuple()[:2]) - bazel_argv = [ - "bazel", - "build", - ext.bazel_target, - f"--symlink_prefix={temp_path / 'bazel-'}", - f"--compilation_mode={'dbg' if self.debug else 'opt'}", - # C++17 is required by nanobind - f"--cxxopt={'/std:c++17' if IS_WINDOWS else '-std=c++17'}", - ] + bazel_argv = [ + "bazel", + "build", + ext.bazel_target, + f"--symlink_prefix={temp_path / 'bazel-'}", + f"--compilation_mode={'dbg' if self.debug else 'opt'}", + # C++17 is required by nanobind + f"--cxxopt={'/std:c++17' if IS_WINDOWS else '-std=c++17'}", + f"--@rules_python//python/config_settings:python_version={python_version}", + ] - if IS_WINDOWS: - # Link with python*.lib. - for library_dir in self.library_dirs: - bazel_argv.append("--linkopt=/LIBPATH:" + library_dir) - elif IS_MAC: - if platform.machine() == "x86_64": - # C++17 needs macOS 10.14 at minimum - bazel_argv.append("--macos_minimum_os=10.14") + if ext.py_limited_api: + bazel_argv += ["--@nanobind_bazel//:py-limited-api=cp312"] - # cross-compilation for Mac ARM64 on GitHub Mac x86 runners. - # ARCHFLAGS is set by cibuildwheel before macOS wheel builds. - archflags = os.getenv("ARCHFLAGS", "") - if "arm64" in archflags: - bazel_argv.append("--cpu=darwin_arm64") - bazel_argv.append("--macos_cpus=arm64") - - elif platform.machine() == "arm64": - bazel_argv.append("--macos_minimum_os=11.0") + if IS_WINDOWS: + # Link with python*.lib. 
+ for library_dir in self.library_dirs: + bazel_argv.append("--linkopt=/LIBPATH:" + library_dir) + elif IS_MAC: + # C++17 needs macOS 10.14 at minimum + bazel_argv.append("--macos_minimum_os=10.14") + with _maybe_patch_toolchains(): self.spawn(bazel_argv) - shared_lib_suffix = '.dll' if IS_WINDOWS else '.so' - ext_name = ext.target_name + shared_lib_suffix - ext_bazel_bin_path = temp_path / 'bazel-bin' / ext.relpath / ext_name + if IS_WINDOWS: + suffix = ".pyd" + else: + suffix = ".abi3.so" if ext.py_limited_api else ".so" - ext_dest_path = Path(self.get_ext_fullpath(ext.name)) - shutil.copyfile(ext_bazel_bin_path, ext_dest_path) - - # explicitly call `bazel shutdown` for graceful exit - self.spawn(["bazel", "shutdown"]) + ext_name = ext.target_name + suffix + ext_bazel_bin_path = temp_path / "bazel-bin" / ext.relpath / ext_name + ext_dest_path = Path(self.get_ext_fullpath(ext.name)).with_name( + ext_name + ) + shutil.copyfile(ext_bazel_bin_path, ext_dest_path) setuptools.setup( @@ -108,6 +141,8 @@ setuptools.setup( BazelExtension( name="google_benchmark._benchmark", bazel_target="//bindings/python/google_benchmark:_benchmark", + py_limited_api=py_limited_api, ) ], + options=options, ) diff --git a/yass/third_party/benchmark/src/CMakeLists.txt b/yass/third_party/benchmark/src/CMakeLists.txt index daf82fb131..5551099b2a 100644 --- a/yass/third_party/benchmark/src/CMakeLists.txt +++ b/yass/third_party/benchmark/src/CMakeLists.txt @@ -28,6 +28,13 @@ target_include_directories(benchmark PUBLIC $ ) +set_property( + SOURCE benchmark.cc + APPEND + PROPERTY COMPILE_DEFINITIONS + BENCHMARK_VERSION="${VERSION}" +) + # libpfm, if available if (PFM_FOUND) target_link_libraries(benchmark PRIVATE PFM::libpfm) @@ -79,6 +86,7 @@ set(generated_dir "${PROJECT_BINARY_DIR}") set(version_config "${generated_dir}/${PROJECT_NAME}ConfigVersion.cmake") set(project_config "${generated_dir}/${PROJECT_NAME}Config.cmake") set(pkg_config "${generated_dir}/${PROJECT_NAME}.pc") 
+set(pkg_config_main "${generated_dir}/${PROJECT_NAME}_main.pc") set(targets_to_export benchmark benchmark_main) set(targets_export_name "${PROJECT_NAME}Targets") @@ -98,6 +106,7 @@ write_basic_package_version_file( ) configure_file("${PROJECT_SOURCE_DIR}/cmake/benchmark.pc.in" "${pkg_config}" @ONLY) +configure_file("${PROJECT_SOURCE_DIR}/cmake/benchmark_main.pc.in" "${pkg_config_main}" @ONLY) export ( TARGETS ${targets_to_export} @@ -126,7 +135,7 @@ if (BENCHMARK_ENABLE_INSTALL) DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}") install( - FILES "${pkg_config}" + FILES "${pkg_config}" "${pkg_config_main}" DESTINATION "${CMAKE_INSTALL_LIBDIR}/pkgconfig") install( diff --git a/yass/third_party/benchmark/src/benchmark.cc b/yass/third_party/benchmark/src/benchmark.cc index 6139e59d05..337bb3faa7 100644 --- a/yass/third_party/benchmark/src/benchmark.cc +++ b/yass/third_party/benchmark/src/benchmark.cc @@ -152,8 +152,16 @@ BENCHMARK_EXPORT std::map*& GetGlobalContext() { return global_context; } -// FIXME: wouldn't LTO mess this up? -void UseCharPointer(char const volatile*) {} +static void const volatile* volatile global_force_escape_pointer; + +// FIXME: Verify if LTO still messes this up? +void UseCharPointer(char const volatile* const v) { + // We want to escape the pointer `v` so that the compiler can not eliminate + // computations that produced it. To do that, we escape the pointer by storing + // it into a volatile variable, since generally, volatile store, is not + // something the compiler is allowed to elide. 
+ global_force_escape_pointer = reinterpret_cast(v); +} } // namespace internal @@ -399,7 +407,8 @@ void RunBenchmarks(const std::vector& benchmarks, benchmarks_with_threads += (benchmark.threads() > 1); runners.emplace_back(benchmark, &perfcounters, reports_for_family); int num_repeats_of_this_instance = runners.back().GetNumRepeats(); - num_repetitions_total += num_repeats_of_this_instance; + num_repetitions_total += + static_cast(num_repeats_of_this_instance); if (reports_for_family) reports_for_family->num_runs_total += num_repeats_of_this_instance; } @@ -577,12 +586,16 @@ size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter, Err << "A custom file reporter was provided but " "--benchmark_out= was not specified." << std::endl; + Out.flush(); + Err.flush(); std::exit(1); } if (!fname.empty()) { output_file.open(fname); if (!output_file.is_open()) { Err << "invalid file name: '" << fname << "'" << std::endl; + Out.flush(); + Err.flush(); std::exit(1); } if (!file_reporter) { @@ -597,10 +610,16 @@ size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter, } std::vector benchmarks; - if (!FindBenchmarksInternal(spec, &benchmarks, &Err)) return 0; + if (!FindBenchmarksInternal(spec, &benchmarks, &Err)) { + Out.flush(); + Err.flush(); + return 0; + } if (benchmarks.empty()) { Err << "Failed to match any benchmarks against regex: " << spec << "\n"; + Out.flush(); + Err.flush(); return 0; } @@ -611,6 +630,8 @@ size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter, internal::RunBenchmarks(benchmarks, display_reporter, file_reporter); } + Out.flush(); + Err.flush(); return benchmarks.size(); } @@ -736,6 +757,14 @@ int InitializeStreams() { } // end namespace internal +std::string GetBenchmarkVersion() { +#ifdef BENCHMARK_VERSION + return {BENCHMARK_VERSION}; +#else + return {""}; +#endif +} + void PrintDefaultHelp() { fprintf(stdout, "benchmark" diff --git a/yass/third_party/benchmark/src/benchmark_register.cc 
b/yass/third_party/benchmark/src/benchmark_register.cc index e447c9a2d3..8ade048225 100644 --- a/yass/third_party/benchmark/src/benchmark_register.cc +++ b/yass/third_party/benchmark/src/benchmark_register.cc @@ -482,8 +482,9 @@ int Benchmark::ArgsCnt() const { const char* Benchmark::GetArgName(int arg) const { BM_CHECK_GE(arg, 0); - BM_CHECK_LT(arg, static_cast(arg_names_.size())); - return arg_names_[arg].c_str(); + size_t uarg = static_cast(arg); + BM_CHECK_LT(uarg, arg_names_.size()); + return arg_names_[uarg].c_str(); } TimeUnit Benchmark::GetTimeUnit() const { diff --git a/yass/third_party/benchmark/src/benchmark_register.h b/yass/third_party/benchmark/src/benchmark_register.h index 53367c707c..be50265f72 100644 --- a/yass/third_party/benchmark/src/benchmark_register.h +++ b/yass/third_party/benchmark/src/benchmark_register.h @@ -24,7 +24,7 @@ typename std::vector::iterator AddPowers(std::vector* dst, T lo, T hi, static const T kmax = std::numeric_limits::max(); // Space out the values in multiples of "mult" - for (T i = static_cast(1); i <= hi; i *= static_cast(mult)) { + for (T i = static_cast(1); i <= hi; i = static_cast(i * mult)) { if (i >= lo) { dst->push_back(i); } @@ -52,7 +52,7 @@ void AddNegatedPowers(std::vector* dst, T lo, T hi, int mult) { const auto it = AddPowers(dst, hi_complement, lo_complement, mult); - std::for_each(it, dst->end(), [](T& t) { t *= -1; }); + std::for_each(it, dst->end(), [](T& t) { t = static_cast(t * -1); }); std::reverse(it, dst->end()); } diff --git a/yass/third_party/benchmark/src/benchmark_runner.cc b/yass/third_party/benchmark/src/benchmark_runner.cc index f7ae424397..a74bdadd3e 100644 --- a/yass/third_party/benchmark/src/benchmark_runner.cc +++ b/yass/third_party/benchmark/src/benchmark_runner.cc @@ -64,7 +64,7 @@ MemoryManager* memory_manager = nullptr; namespace { -static constexpr IterationCount kMaxIterations = 1000000000; +static constexpr IterationCount kMaxIterations = 1000000000000; const double 
kDefaultMinTime = std::strtod(::benchmark::kDefaultMinTimeStr, /*p_end*/ nullptr); @@ -96,6 +96,7 @@ BenchmarkReporter::Run CreateRunReport( } else { report.real_accumulated_time = results.real_time_used; } + report.use_real_time_for_initial_big_o = b.use_manual_time(); report.cpu_accumulated_time = results.cpu_time_used; report.complexity_n = results.complexity_n; report.complexity = b.complexity(); @@ -108,7 +109,7 @@ BenchmarkReporter::Run CreateRunReport( report.memory_result = memory_result; report.allocs_per_iter = memory_iterations ? static_cast(memory_result->num_allocs) / - memory_iterations + static_cast(memory_iterations) : 0; } @@ -234,7 +235,7 @@ BenchmarkRunner::BenchmarkRunner( has_explicit_iteration_count(b.iterations() != 0 || parsed_benchtime_flag.tag == BenchTimeType::ITERS), - pool(b.threads() - 1), + pool(static_cast(b.threads() - 1)), iters(has_explicit_iteration_count ? ComputeIters(b_, parsed_benchtime_flag) : 1), @@ -325,8 +326,8 @@ IterationCount BenchmarkRunner::PredictNumItersNeeded( // So what seems to be the sufficiently-large iteration count? Round up. const IterationCount max_next_iters = static_cast( - std::lround(std::max(multiplier * static_cast(i.iters), - static_cast(i.iters) + 1.0))); + std::llround(std::max(multiplier * static_cast(i.iters), + static_cast(i.iters) + 1.0))); // But we do have *some* limits though.. const IterationCount next_iters = std::min(max_next_iters, kMaxIterations); diff --git a/yass/third_party/benchmark/src/colorprint.cc b/yass/third_party/benchmark/src/colorprint.cc index 0bfd67041d..abc71492f7 100644 --- a/yass/third_party/benchmark/src/colorprint.cc +++ b/yass/third_party/benchmark/src/colorprint.cc @@ -140,12 +140,12 @@ void ColorPrintf(std::ostream& out, LogColor color, const char* fmt, // We need to flush the stream buffers into the console before each // SetConsoleTextAttribute call lest it affect the text that is already // printed but has not yet reached the console. 
- fflush(stdout); + out.flush(); SetConsoleTextAttribute(stdout_handle, GetPlatformColorCode(color) | FOREGROUND_INTENSITY); - vprintf(fmt, args); + out << FormatString(fmt, args); - fflush(stdout); + out.flush(); // Restores the text color. SetConsoleTextAttribute(stdout_handle, old_color_attrs); #else diff --git a/yass/third_party/benchmark/src/complexity.cc b/yass/third_party/benchmark/src/complexity.cc index 825c57394a..eee3122646 100644 --- a/yass/third_party/benchmark/src/complexity.cc +++ b/yass/third_party/benchmark/src/complexity.cc @@ -37,12 +37,14 @@ BigOFunc* FittingCurve(BigO complexity) { return [](IterationCount n) -> double { return std::pow(n, 3); }; case oLogN: /* Note: can't use log2 because Android's GNU STL lacks it */ - return - [](IterationCount n) { return kLog2E * log(static_cast(n)); }; + return [](IterationCount n) { + return kLog2E * std::log(static_cast(n)); + }; case oNLogN: /* Note: can't use log2 because Android's GNU STL lacks it */ return [](IterationCount n) { - return kLog2E * n * log(static_cast(n)); + return kLog2E * static_cast(n) * + std::log(static_cast(n)); }; case o1: default: @@ -75,12 +77,12 @@ std::string GetBigOString(BigO complexity) { // given by the lambda expression. // - n : Vector containing the size of the benchmark tests. // - time : Vector containing the times for the benchmark tests. -// - fitting_curve : lambda expression (e.g. [](int64_t n) {return n; };). +// - fitting_curve : lambda expression (e.g. [](ComplexityN n) {return n; };). 
// For a deeper explanation on the algorithm logic, please refer to // https://en.wikipedia.org/wiki/Least_squares#Least_squares,_regression_analysis_and_statistics -LeastSq MinimalLeastSq(const std::vector& n, +LeastSq MinimalLeastSq(const std::vector& n, const std::vector& time, BigOFunc* fitting_curve) { double sigma_gn_squared = 0.0; @@ -105,12 +107,12 @@ LeastSq MinimalLeastSq(const std::vector& n, double rms = 0.0; for (size_t i = 0; i < n.size(); ++i) { double fit = result.coef * fitting_curve(n[i]); - rms += pow((time[i] - fit), 2); + rms += std::pow((time[i] - fit), 2); } // Normalized RMS by the mean of the observed values - double mean = sigma_time / n.size(); - result.rms = sqrt(rms / n.size()) / mean; + double mean = sigma_time / static_cast(n.size()); + result.rms = std::sqrt(rms / static_cast(n.size())) / mean; return result; } @@ -122,7 +124,7 @@ LeastSq MinimalLeastSq(const std::vector& n, // - complexity : If different than oAuto, the fitting curve will stick to // this one. If it is oAuto, it will be calculated the best // fitting curve. -LeastSq MinimalLeastSq(const std::vector& n, +LeastSq MinimalLeastSq(const std::vector& n, const std::vector& time, const BigO complexity) { BM_CHECK_EQ(n.size(), time.size()); BM_CHECK_GE(n.size(), 2); // Do not compute fitting curve is less than two @@ -162,7 +164,7 @@ std::vector ComputeBigO( if (reports.size() < 2) return results; // Accumulators. 
- std::vector n; + std::vector n; std::vector real_time; std::vector cpu_time; @@ -171,8 +173,10 @@ std::vector ComputeBigO( BM_CHECK_GT(run.complexity_n, 0) << "Did you forget to call SetComplexityN?"; n.push_back(run.complexity_n); - real_time.push_back(run.real_accumulated_time / run.iterations); - cpu_time.push_back(run.cpu_accumulated_time / run.iterations); + real_time.push_back(run.real_accumulated_time / + static_cast(run.iterations)); + cpu_time.push_back(run.cpu_accumulated_time / + static_cast(run.iterations)); } LeastSq result_cpu; @@ -182,8 +186,19 @@ std::vector ComputeBigO( result_cpu = MinimalLeastSq(n, cpu_time, reports[0].complexity_lambda); result_real = MinimalLeastSq(n, real_time, reports[0].complexity_lambda); } else { - result_cpu = MinimalLeastSq(n, cpu_time, reports[0].complexity); - result_real = MinimalLeastSq(n, real_time, result_cpu.complexity); + const BigO* InitialBigO = &reports[0].complexity; + const bool use_real_time_for_initial_big_o = + reports[0].use_real_time_for_initial_big_o; + if (use_real_time_for_initial_big_o) { + result_real = MinimalLeastSq(n, real_time, *InitialBigO); + InitialBigO = &result_real.complexity; + // The Big-O complexity for CPU time must have the same Big-O function! + } + result_cpu = MinimalLeastSq(n, cpu_time, *InitialBigO); + InitialBigO = &result_cpu.complexity; + if (!use_real_time_for_initial_big_o) { + result_real = MinimalLeastSq(n, real_time, *InitialBigO); + } } // Drop the 'args' when reporting complexity. 
diff --git a/yass/third_party/benchmark/src/console_reporter.cc b/yass/third_party/benchmark/src/console_reporter.cc index 10e05e133e..35c3de2a4d 100644 --- a/yass/third_party/benchmark/src/console_reporter.cc +++ b/yass/third_party/benchmark/src/console_reporter.cc @@ -42,11 +42,15 @@ bool ConsoleReporter::ReportContext(const Context& context) { PrintBasicContext(&GetErrorStream(), context); #ifdef BENCHMARK_OS_WINDOWS - if ((output_options_ & OO_Color) && &std::cout != &GetOutputStream()) { - GetErrorStream() - << "Color printing is only supported for stdout on windows." - " Disabling color printing\n"; - output_options_ = static_cast(output_options_ & ~OO_Color); + if ((output_options_ & OO_Color)) { + auto stdOutBuf = std::cout.rdbuf(); + auto outStreamBuf = GetOutputStream().rdbuf(); + if (stdOutBuf != outStreamBuf) { + GetErrorStream() + << "Color printing is only supported for stdout on windows." + " Disabling color printing\n"; + output_options_ = static_cast(output_options_ & ~OO_Color); + } } #endif diff --git a/yass/third_party/benchmark/src/counter.cc b/yass/third_party/benchmark/src/counter.cc index cf5b78ee3a..aa14cd8092 100644 --- a/yass/third_party/benchmark/src/counter.cc +++ b/yass/third_party/benchmark/src/counter.cc @@ -27,10 +27,10 @@ double Finish(Counter const& c, IterationCount iterations, double cpu_time, v /= num_threads; } if (c.flags & Counter::kIsIterationInvariant) { - v *= iterations; + v *= static_cast(iterations); } if (c.flags & Counter::kAvgIterations) { - v /= iterations; + v /= static_cast(iterations); } if (c.flags & Counter::kInvert) { // Invert is *always* last. 
diff --git a/yass/third_party/benchmark/src/csv_reporter.cc b/yass/third_party/benchmark/src/csv_reporter.cc index 7b56da107e..4b39e2c52f 100644 --- a/yass/third_party/benchmark/src/csv_reporter.cc +++ b/yass/third_party/benchmark/src/csv_reporter.cc @@ -122,13 +122,21 @@ void CSVReporter::PrintRunData(const Run& run) { } Out << ","; - Out << run.GetAdjustedRealTime() << ","; - Out << run.GetAdjustedCPUTime() << ","; + if (run.run_type != Run::RT_Aggregate || + run.aggregate_unit == StatisticUnit::kTime) { + Out << run.GetAdjustedRealTime() << ","; + Out << run.GetAdjustedCPUTime() << ","; + } else { + assert(run.aggregate_unit == StatisticUnit::kPercentage); + Out << run.real_accumulated_time << ","; + Out << run.cpu_accumulated_time << ","; + } // Do not print timeLabel on bigO and RMS report if (run.report_big_o) { Out << GetBigOString(run.complexity); - } else if (!run.report_rms) { + } else if (!run.report_rms && + run.aggregate_unit != StatisticUnit::kPercentage) { Out << GetTimeUnitString(run.time_unit); } Out << ","; diff --git a/yass/third_party/benchmark/src/cycleclock.h b/yass/third_party/benchmark/src/cycleclock.h index ae1ef2d2d2..a25843760b 100644 --- a/yass/third_party/benchmark/src/cycleclock.h +++ b/yass/third_party/benchmark/src/cycleclock.h @@ -70,7 +70,7 @@ inline BENCHMARK_ALWAYS_INLINE int64_t Now() { // frequency scaling). Also note that when the Mac sleeps, this // counter pauses; it does not continue counting, nor does it // reset to zero. - return mach_absolute_time(); + return static_cast(mach_absolute_time()); #elif defined(BENCHMARK_OS_EMSCRIPTEN) // this goes above x86-specific code because old versions of Emscripten // define __x86_64__, although they have nothing to do with it. 
@@ -82,7 +82,7 @@ inline BENCHMARK_ALWAYS_INLINE int64_t Now() { #elif defined(__x86_64__) || defined(__amd64__) uint64_t low, high; __asm__ volatile("rdtsc" : "=a"(low), "=d"(high)); - return (high << 32) | low; + return static_cast((high << 32) | low); #elif defined(__powerpc__) || defined(__ppc__) // This returns a time-base, which is not always precisely a cycle-count. #if defined(__powerpc64__) || defined(__ppc64__) @@ -181,23 +181,25 @@ inline BENCHMARK_ALWAYS_INLINE int64_t Now() { #elif defined(__s390__) // Covers both s390 and s390x. // Return the CPU clock. uint64_t tsc; -#if defined(BENCHMARK_OS_ZOS) && defined(COMPILER_IBMXL) - // z/OS XL compiler HLASM syntax. +#if defined(BENCHMARK_OS_ZOS) + // z/OS HLASM syntax. asm(" stck %0" : "=m"(tsc) : : "cc"); #else + // Linux on Z syntax. asm("stck %0" : "=Q"(tsc) : : "cc"); #endif return tsc; #elif defined(__riscv) // RISC-V - // Use RDCYCLE (and RDCYCLEH on riscv32) + // Use RDTIME (and RDTIMEH on riscv32). + // RDCYCLE is a privileged instruction since Linux 6.6. #if __riscv_xlen == 32 uint32_t cycles_lo, cycles_hi0, cycles_hi1; // This asm also includes the PowerPC overflow handling strategy, as above. // Implemented in assembly because Clang insisted on branching. 
asm volatile( - "rdcycleh %0\n" - "rdcycle %1\n" - "rdcycleh %2\n" + "rdtimeh %0\n" + "rdtime %1\n" + "rdtimeh %2\n" "sub %0, %0, %2\n" "seqz %0, %0\n" "sub %0, zero, %0\n" @@ -206,7 +208,7 @@ inline BENCHMARK_ALWAYS_INLINE int64_t Now() { return (static_cast(cycles_hi1) << 32) | cycles_lo; #else uint64_t cycles; - asm volatile("rdcycle %0" : "=r"(cycles)); + asm volatile("rdtime %0" : "=r"(cycles)); return cycles; #endif #elif defined(__e2k__) || defined(__elbrus__) @@ -217,10 +219,20 @@ inline BENCHMARK_ALWAYS_INLINE int64_t Now() { uint64_t pcycle; asm volatile("%0 = C15:14" : "=r"(pcycle)); return static_cast(pcycle); +#elif defined(__alpha__) + // Alpha has a cycle counter, the PCC register, but it is an unsigned 32-bit + // integer and thus wraps every ~4s, making using it for tick counts + // unreliable beyond this time range. The real-time clock is low-precision, + // roughtly ~1ms, but it is the only option that can reasonable count + // indefinitely. + struct timeval tv; + gettimeofday(&tv, nullptr); + return static_cast(tv.tv_sec) * 1000000 + tv.tv_usec; #else -// The soft failover to a generic implementation is automatic only for ARM. -// For other platforms the developer is expected to make an attempt to create -// a fast implementation and use generic version if nothing better is available. + // The soft failover to a generic implementation is automatic only for ARM. + // For other platforms the developer is expected to make an attempt to create + // a fast implementation and use generic version if nothing better is + // available. 
#error You need to define CycleTimer for your OS and CPU #endif } diff --git a/yass/third_party/benchmark/src/internal_macros.h b/yass/third_party/benchmark/src/internal_macros.h index 8dd7d0c650..f4894ba8e6 100644 --- a/yass/third_party/benchmark/src/internal_macros.h +++ b/yass/third_party/benchmark/src/internal_macros.h @@ -11,11 +11,7 @@ #endif #if defined(__clang__) - #if defined(__ibmxl__) - #if !defined(COMPILER_IBMXL) - #define COMPILER_IBMXL - #endif - #elif !defined(COMPILER_CLANG) + #if !defined(COMPILER_CLANG) #define COMPILER_CLANG #endif #elif defined(_MSC_VER) diff --git a/yass/third_party/benchmark/src/json_reporter.cc b/yass/third_party/benchmark/src/json_reporter.cc index 6559dfd5e6..b8c8c94c08 100644 --- a/yass/third_party/benchmark/src/json_reporter.cc +++ b/yass/third_party/benchmark/src/json_reporter.cc @@ -167,12 +167,19 @@ bool JSONReporter::ReportContext(const Context& context) { } out << "],\n"; + out << indent << FormatKV("library_version", GetBenchmarkVersion()); + out << ",\n"; + #if defined(NDEBUG) const char build_type[] = "release"; #else const char build_type[] = "debug"; #endif out << indent << FormatKV("library_build_type", build_type); + out << ",\n"; + + // NOTE: our json schema is not strictly tied to the library version! 
+ out << indent << FormatKV("json_schema_version", int64_t(1)); std::map* global_context = internal::GetGlobalContext(); diff --git a/yass/third_party/benchmark/src/perf_counters.cc b/yass/third_party/benchmark/src/perf_counters.cc index 417acdb18f..2eb97eb46a 100644 --- a/yass/third_party/benchmark/src/perf_counters.cc +++ b/yass/third_party/benchmark/src/perf_counters.cc @@ -39,7 +39,8 @@ size_t PerfCounterValues::Read(const std::vector& leaders) { auto read_bytes = ::read(lead, ptr, size); if (read_bytes >= ssize_t(sizeof(uint64_t))) { // Actual data bytes are all bytes minus initial padding - std::size_t data_bytes = read_bytes - sizeof(uint64_t); + std::size_t data_bytes = + static_cast(read_bytes) - sizeof(uint64_t); // This should be very cheap since it's in hot cache std::memmove(ptr, ptr + sizeof(uint64_t), data_bytes); // Increment our counters @@ -254,7 +255,7 @@ bool PerfCounters::IsCounterSupported(const std::string&) { return false; } PerfCounters PerfCounters::Create( const std::vector& counter_names) { if (!counter_names.empty()) { - GetErrorLogInstance() << "Performance counters not supported."; + GetErrorLogInstance() << "Performance counters not supported.\n"; } return NoCounters(); } diff --git a/yass/third_party/benchmark/src/statistics.cc b/yass/third_party/benchmark/src/statistics.cc index 844e926895..16b60261fd 100644 --- a/yass/third_party/benchmark/src/statistics.cc +++ b/yass/third_party/benchmark/src/statistics.cc @@ -32,7 +32,7 @@ auto StatisticsSum = [](const std::vector& v) { double StatisticsMean(const std::vector& v) { if (v.empty()) return 0.0; - return StatisticsSum(v) * (1.0 / v.size()); + return StatisticsSum(v) * (1.0 / static_cast(v.size())); } double StatisticsMedian(const std::vector& v) { @@ -71,8 +71,11 @@ double StatisticsStdDev(const std::vector& v) { // Sample standard deviation is undefined for n = 1 if (v.size() == 1) return 0.0; - const double avg_squares = SumSquares(v) * (1.0 / v.size()); - return Sqrt(v.size() / 
(v.size() - 1.0) * (avg_squares - Sqr(mean))); + const double avg_squares = + SumSquares(v) * (1.0 / static_cast(v.size())); + return Sqrt(static_cast(v.size()) / + (static_cast(v.size()) - 1.0) * + (avg_squares - Sqr(mean))); } double StatisticsCV(const std::vector& v) { @@ -81,6 +84,8 @@ double StatisticsCV(const std::vector& v) { const auto stddev = StatisticsStdDev(v); const auto mean = StatisticsMean(v); + if (std::fpclassify(mean) == FP_ZERO) return 0.0; + return stddev / mean; } @@ -92,7 +97,7 @@ std::vector ComputeStats( auto error_count = std::count_if(reports.begin(), reports.end(), [](Run const& run) { return run.skipped; }); - if (reports.size() - error_count < 2) { + if (reports.size() - static_cast(error_count) < 2) { // We don't report aggregated data if there was a single run. return results; } @@ -174,7 +179,7 @@ std::vector ComputeStats( // Similarly, if there are N repetitions with 1 iterations each, // an aggregate will be computed over N measurements, not 1. // Thus it is best to simply use the count of separate reports. 
- data.iterations = reports.size(); + data.iterations = static_cast(reports.size()); data.real_accumulated_time = Stat.compute_(real_accumulated_time_stat); data.cpu_accumulated_time = Stat.compute_(cpu_accumulated_time_stat); diff --git a/yass/third_party/benchmark/src/string_util.cc b/yass/third_party/benchmark/src/string_util.cc index c69e40a813..9ba63a700a 100644 --- a/yass/third_party/benchmark/src/string_util.cc +++ b/yass/third_party/benchmark/src/string_util.cc @@ -56,7 +56,7 @@ void ToExponentAndMantissa(double val, int precision, double one_k, scaled /= one_k; if (scaled <= big_threshold) { mantissa_stream << scaled; - *exponent = i + 1; + *exponent = static_cast(i + 1); *mantissa = mantissa_stream.str(); return; } diff --git a/yass/third_party/benchmark/src/sysinfo.cc b/yass/third_party/benchmark/src/sysinfo.cc index 922e83ac92..7261e2a96b 100644 --- a/yass/third_party/benchmark/src/sysinfo.cc +++ b/yass/third_party/benchmark/src/sysinfo.cc @@ -15,6 +15,10 @@ #include "internal_macros.h" #ifdef BENCHMARK_OS_WINDOWS +#if !defined(WINVER) || WINVER < 0x0600 +#undef WINVER +#define WINVER 0x0600 +#endif // WINVER handling #include #undef StrCat // Don't let StrCat in string_util.h be renamed to lstrcatA #include @@ -158,7 +162,7 @@ ValueUnion GetSysctlImp(std::string const& name) { mib[1] = HW_CPUSPEED; } - if (sysctl(mib, 2, buff.data(), &buff.Size, nullptr, 0) == -1) { + if (sysctl(mib, 2, buff.data(), &buff.size, nullptr, 0) == -1) { return ValueUnion(); } return buff; @@ -346,7 +350,7 @@ std::vector GetCacheSizesWindows() { CPUInfo::CacheInfo C; C.num_sharing = static_cast(b.count()); C.level = cache.Level; - C.size = cache.Size; + C.size = static_cast(cache.Size); C.type = "Unknown"; switch (cache.Type) { case CacheUnified: @@ -456,6 +460,8 @@ std::string GetSystemName() { #define HOST_NAME_MAX 256 #elif defined(BENCHMARK_OS_SOLARIS) #define HOST_NAME_MAX MAXHOSTNAMELEN +#elif defined(BENCHMARK_OS_ZOS) +#define HOST_NAME_MAX _POSIX_HOST_NAME_MAX #else 
#pragma message("HOST_NAME_MAX not defined. using 64") #define HOST_NAME_MAX 64 @@ -468,27 +474,25 @@ std::string GetSystemName() { #endif // Catch-all POSIX block. } -int GetNumCPUs() { +int GetNumCPUsImpl() { #ifdef BENCHMARK_HAS_SYSCTL int num_cpu = -1; if (GetSysctl("hw.ncpu", &num_cpu)) return num_cpu; - fprintf(stderr, "Err: %s\n", strerror(errno)); - std::exit(EXIT_FAILURE); + PrintErrorAndDie("Err: ", strerror(errno)); #elif defined(BENCHMARK_OS_WINDOWS) SYSTEM_INFO sysinfo; // Use memset as opposed to = {} to avoid GCC missing initializer false // positives. std::memset(&sysinfo, 0, sizeof(SYSTEM_INFO)); GetSystemInfo(&sysinfo); - return sysinfo.dwNumberOfProcessors; // number of logical - // processors in the current - // group + // number of logical processors in the current group + return static_cast(sysinfo.dwNumberOfProcessors); #elif defined(BENCHMARK_OS_SOLARIS) // Returns -1 in case of a failure. long num_cpu = sysconf(_SC_NPROCESSORS_ONLN); if (num_cpu < 0) { - fprintf(stderr, "sysconf(_SC_NPROCESSORS_ONLN) failed with error: %s\n", - strerror(errno)); + PrintErrorAndDie("sysconf(_SC_NPROCESSORS_ONLN) failed with error: ", + strerror(errno)); } return (int)num_cpu; #elif defined(BENCHMARK_OS_QNX) @@ -504,10 +508,13 @@ int GetNumCPUs() { int max_id = -1; std::ifstream f("/proc/cpuinfo"); if (!f.is_open()) { - std::cerr << "failed to open /proc/cpuinfo\n"; - return -1; + PrintErrorAndDie("Failed to open /proc/cpuinfo"); } +#if defined(__alpha__) + const std::string Key = "cpus detected"; +#else const std::string Key = "processor"; +#endif std::string ln; while (std::getline(f, ln)) { if (ln.empty()) continue; @@ -530,12 +537,10 @@ int GetNumCPUs() { } } if (f.bad()) { - std::cerr << "Failure reading /proc/cpuinfo\n"; - return -1; + PrintErrorAndDie("Failure reading /proc/cpuinfo"); } if (!f.eof()) { - std::cerr << "Failed to read to end of /proc/cpuinfo\n"; - return -1; + PrintErrorAndDie("Failed to read to end of /proc/cpuinfo"); } f.close(); @@ 
-549,6 +554,16 @@ int GetNumCPUs() { BENCHMARK_UNREACHABLE(); } +int GetNumCPUs() { + const int num_cpus = GetNumCPUsImpl(); + if (num_cpus < 1) { + PrintErrorAndDie( + "Unable to extract number of CPUs. If your platform uses " + "/proc/cpuinfo, custom support may need to be added."); + } + return num_cpus; +} + class ThreadAffinityGuard final { public: ThreadAffinityGuard() : reset_affinity(SetAffinity()) { @@ -651,7 +666,7 @@ double GetCPUCyclesPerSecond(CPUInfo::Scaling scaling) { &freq)) { // The value is in kHz (as the file name suggests). For example, on a // 2GHz warpstation, the file contains the value "2000000". - return freq * 1000.0; + return static_cast(freq) * 1000.0; } const double error_value = -1; @@ -719,9 +734,9 @@ double GetCPUCyclesPerSecond(CPUInfo::Scaling scaling) { #endif unsigned long long hz = 0; #if defined BENCHMARK_OS_OPENBSD - if (GetSysctl(freqStr, &hz)) return hz * 1000000; + if (GetSysctl(freqStr, &hz)) return static_cast(hz * 1000000); #else - if (GetSysctl(freqStr, &hz)) return hz; + if (GetSysctl(freqStr, &hz)) return static_cast(hz); #endif fprintf(stderr, "Unable to determine clock rate from sysctl: %s: %s\n", freqStr, strerror(errno)); @@ -771,8 +786,9 @@ double GetCPUCyclesPerSecond(CPUInfo::Scaling scaling) { kstat_close(kc); return clock_hz; #elif defined(BENCHMARK_OS_QNX) - return static_cast((int64_t)(SYSPAGE_ENTRY(cpuinfo)->speed) * - (int64_t)(1000 * 1000)); + return static_cast( + static_cast(SYSPAGE_ENTRY(cpuinfo)->speed) * + static_cast(1000 * 1000)); #elif defined(BENCHMARK_OS_QURT) // QuRT doesn't provide any API to query Hexagon frequency. 
return 1000000000; @@ -820,7 +836,7 @@ std::vector GetLoadAvg() { !(defined(__ANDROID__) && __ANDROID_API__ < 29) static constexpr int kMaxSamples = 3; std::vector res(kMaxSamples, 0.0); - const int nelem = getloadavg(res.data(), kMaxSamples); + const size_t nelem = static_cast(getloadavg(res.data(), kMaxSamples)); if (nelem < 1) { res.clear(); } else { diff --git a/yass/third_party/benchmark/src/timers.cc b/yass/third_party/benchmark/src/timers.cc index b23feea8ba..d0821f3166 100644 --- a/yass/third_party/benchmark/src/timers.cc +++ b/yass/third_party/benchmark/src/timers.cc @@ -102,7 +102,8 @@ double MakeTime(thread_basic_info_data_t const& info) { #endif #if defined(CLOCK_PROCESS_CPUTIME_ID) || defined(CLOCK_THREAD_CPUTIME_ID) double MakeTime(struct timespec const& ts) { - return ts.tv_sec + (static_cast(ts.tv_nsec) * 1e-9); + return static_cast(ts.tv_sec) + + (static_cast(ts.tv_nsec) * 1e-9); } #endif @@ -181,6 +182,9 @@ double ThreadCPUUsage() { // RTEMS doesn't support CLOCK_THREAD_CPUTIME_ID. See // https://github.com/RTEMS/rtems/blob/master/cpukit/posix/src/clockgettime.c return ProcessCPUUsage(); +#elif defined(BENCHMARK_OS_ZOS) + // z/OS doesn't support CLOCK_THREAD_CPUTIME_ID. + return ProcessCPUUsage(); #elif defined(BENCHMARK_OS_SOLARIS) struct rusage ru; if (getrusage(RUSAGE_LWP, &ru) == 0) return MakeTime(ru); @@ -241,9 +245,9 @@ std::string LocalDateTimeString() { tz_offset_sign = '-'; } - tz_len = + tz_len = static_cast( ::snprintf(tz_offset, sizeof(tz_offset), "%c%02li:%02li", - tz_offset_sign, offset_minutes / 100, offset_minutes % 100); + tz_offset_sign, offset_minutes / 100, offset_minutes % 100)); BM_CHECK(tz_len == kTzOffsetLen); ((void)tz_len); // Prevent unused variable warning in optimized build. 
} else { diff --git a/yass/third_party/benchmark/test/BUILD b/yass/third_party/benchmark/test/BUILD index ea34fd4646..b245fa7622 100644 --- a/yass/third_party/benchmark/test/BUILD +++ b/yass/third_party/benchmark/test/BUILD @@ -18,6 +18,10 @@ TEST_COPTS = [ # "-Wshorten-64-to-32", "-Wfloat-equal", "-fstrict-aliasing", + ## assert() are used a lot in tests upstream, which may be optimised out leading to + ## unused-variable warning. + "-Wno-unused-variable", + "-Werror=old-style-cast", ] # Some of the issues with DoNotOptimize only occur when optimization is enabled @@ -32,6 +36,7 @@ PER_SRC_TEST_ARGS = { "repetitions_test.cc": [" --benchmark_repetitions=3"], "spec_arg_test.cc": ["--benchmark_filter=BM_NotChosen"], "spec_arg_verbosity_test.cc": ["--v=42"], + "complexity_test.cc": ["--benchmark_min_time=1000000x"], } cc_library( diff --git a/yass/third_party/benchmark/test/CMakeLists.txt b/yass/third_party/benchmark/test/CMakeLists.txt index fd88131988..1de175f98d 100644 --- a/yass/third_party/benchmark/test/CMakeLists.txt +++ b/yass/third_party/benchmark/test/CMakeLists.txt @@ -5,6 +5,8 @@ set(THREADS_PREFER_PTHREAD_FLAG ON) find_package(Threads REQUIRED) include(CheckCXXCompilerFlag) +add_cxx_compiler_flag(-Wno-unused-variable) + # NOTE: Some tests use `` to perform the test. Therefore we must # strip -DNDEBUG from the default CMake flags in DEBUG mode. 
string(TOUPPER "${CMAKE_BUILD_TYPE}" uppercase_CMAKE_BUILD_TYPE) @@ -62,30 +64,38 @@ macro(compile_output_test name) ${BENCHMARK_CXX_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT}) endmacro(compile_output_test) +macro(benchmark_add_test) + add_test(${ARGV}) + if(WIN32 AND BUILD_SHARED_LIBS) + cmake_parse_arguments(TEST "" "NAME" "" ${ARGN}) + set_tests_properties(${TEST_NAME} PROPERTIES ENVIRONMENT_MODIFICATION "PATH=path_list_prepend:$") + endif() +endmacro(benchmark_add_test) + # Demonstration executable compile_benchmark_test(benchmark_test) -add_test(NAME benchmark COMMAND benchmark_test --benchmark_min_time=0.01s) +benchmark_add_test(NAME benchmark COMMAND benchmark_test --benchmark_min_time=0.01s) compile_benchmark_test(spec_arg_test) -add_test(NAME spec_arg COMMAND spec_arg_test --benchmark_filter=BM_NotChosen) +benchmark_add_test(NAME spec_arg COMMAND spec_arg_test --benchmark_filter=BM_NotChosen) compile_benchmark_test(spec_arg_verbosity_test) -add_test(NAME spec_arg_verbosity COMMAND spec_arg_verbosity_test --v=42) +benchmark_add_test(NAME spec_arg_verbosity COMMAND spec_arg_verbosity_test --v=42) compile_benchmark_test(benchmark_setup_teardown_test) -add_test(NAME benchmark_setup_teardown COMMAND benchmark_setup_teardown_test) +benchmark_add_test(NAME benchmark_setup_teardown COMMAND benchmark_setup_teardown_test) compile_benchmark_test(filter_test) macro(add_filter_test name filter expect) - add_test(NAME ${name} COMMAND filter_test --benchmark_min_time=0.01s --benchmark_filter=${filter} ${expect}) - add_test(NAME ${name}_list_only COMMAND filter_test --benchmark_list_tests --benchmark_filter=${filter} ${expect}) + benchmark_add_test(NAME ${name} COMMAND filter_test --benchmark_min_time=0.01s --benchmark_filter=${filter} ${expect}) + benchmark_add_test(NAME ${name}_list_only COMMAND filter_test --benchmark_list_tests --benchmark_filter=${filter} ${expect}) endmacro(add_filter_test) compile_benchmark_test(benchmark_min_time_flag_time_test) -add_test(NAME 
min_time_flag_time COMMAND benchmark_min_time_flag_time_test) +benchmark_add_test(NAME min_time_flag_time COMMAND benchmark_min_time_flag_time_test) compile_benchmark_test(benchmark_min_time_flag_iters_test) -add_test(NAME min_time_flag_iters COMMAND benchmark_min_time_flag_iters_test) +benchmark_add_test(NAME min_time_flag_iters COMMAND benchmark_min_time_flag_iters_test) add_filter_test(filter_simple "Foo" 3) add_filter_test(filter_simple_negative "-Foo" 2) @@ -107,19 +117,19 @@ add_filter_test(filter_regex_end ".*Ba$" 1) add_filter_test(filter_regex_end_negative "-.*Ba$" 4) compile_benchmark_test(options_test) -add_test(NAME options_benchmarks COMMAND options_test --benchmark_min_time=0.01s) +benchmark_add_test(NAME options_benchmarks COMMAND options_test --benchmark_min_time=0.01s) compile_benchmark_test(basic_test) -add_test(NAME basic_benchmark COMMAND basic_test --benchmark_min_time=0.01s) +benchmark_add_test(NAME basic_benchmark COMMAND basic_test --benchmark_min_time=0.01s) compile_output_test(repetitions_test) -add_test(NAME repetitions_benchmark COMMAND repetitions_test --benchmark_min_time=0.01s --benchmark_repetitions=3) +benchmark_add_test(NAME repetitions_benchmark COMMAND repetitions_test --benchmark_min_time=0.01s --benchmark_repetitions=3) compile_benchmark_test(diagnostics_test) -add_test(NAME diagnostics_test COMMAND diagnostics_test --benchmark_min_time=0.01s) +benchmark_add_test(NAME diagnostics_test COMMAND diagnostics_test --benchmark_min_time=0.01s) compile_benchmark_test(skip_with_error_test) -add_test(NAME skip_with_error_test COMMAND skip_with_error_test --benchmark_min_time=0.01s) +benchmark_add_test(NAME skip_with_error_test COMMAND skip_with_error_test --benchmark_min_time=0.01s) compile_benchmark_test(donotoptimize_test) # Enable errors for deprecated deprecations (DoNotOptimize(Tp const& value)). 
@@ -132,58 +142,58 @@ check_cxx_compiler_flag(-O3 BENCHMARK_HAS_O3_FLAG) if (BENCHMARK_HAS_O3_FLAG) set_target_properties(donotoptimize_test PROPERTIES COMPILE_FLAGS "-O3") endif() -add_test(NAME donotoptimize_test COMMAND donotoptimize_test --benchmark_min_time=0.01s) +benchmark_add_test(NAME donotoptimize_test COMMAND donotoptimize_test --benchmark_min_time=0.01s) compile_benchmark_test(fixture_test) -add_test(NAME fixture_test COMMAND fixture_test --benchmark_min_time=0.01s) +benchmark_add_test(NAME fixture_test COMMAND fixture_test --benchmark_min_time=0.01s) compile_benchmark_test(register_benchmark_test) -add_test(NAME register_benchmark_test COMMAND register_benchmark_test --benchmark_min_time=0.01s) +benchmark_add_test(NAME register_benchmark_test COMMAND register_benchmark_test --benchmark_min_time=0.01s) compile_benchmark_test(map_test) -add_test(NAME map_test COMMAND map_test --benchmark_min_time=0.01s) +benchmark_add_test(NAME map_test COMMAND map_test --benchmark_min_time=0.01s) compile_benchmark_test(multiple_ranges_test) -add_test(NAME multiple_ranges_test COMMAND multiple_ranges_test --benchmark_min_time=0.01s) +benchmark_add_test(NAME multiple_ranges_test COMMAND multiple_ranges_test --benchmark_min_time=0.01s) compile_benchmark_test(args_product_test) -add_test(NAME args_product_test COMMAND args_product_test --benchmark_min_time=0.01s) +benchmark_add_test(NAME args_product_test COMMAND args_product_test --benchmark_min_time=0.01s) compile_benchmark_test_with_main(link_main_test) -add_test(NAME link_main_test COMMAND link_main_test --benchmark_min_time=0.01s) +benchmark_add_test(NAME link_main_test COMMAND link_main_test --benchmark_min_time=0.01s) compile_output_test(reporter_output_test) -add_test(NAME reporter_output_test COMMAND reporter_output_test --benchmark_min_time=0.01s) +benchmark_add_test(NAME reporter_output_test COMMAND reporter_output_test --benchmark_min_time=0.01s) compile_output_test(templated_fixture_test) -add_test(NAME 
templated_fixture_test COMMAND templated_fixture_test --benchmark_min_time=0.01s) +benchmark_add_test(NAME templated_fixture_test COMMAND templated_fixture_test --benchmark_min_time=0.01s) compile_output_test(user_counters_test) -add_test(NAME user_counters_test COMMAND user_counters_test --benchmark_min_time=0.01s) +benchmark_add_test(NAME user_counters_test COMMAND user_counters_test --benchmark_min_time=0.01s) compile_output_test(perf_counters_test) -add_test(NAME perf_counters_test COMMAND perf_counters_test --benchmark_min_time=0.01s --benchmark_perf_counters=CYCLES,BRANCHES) +benchmark_add_test(NAME perf_counters_test COMMAND perf_counters_test --benchmark_min_time=0.01s --benchmark_perf_counters=CYCLES,INSTRUCTIONS) compile_output_test(internal_threading_test) -add_test(NAME internal_threading_test COMMAND internal_threading_test --benchmark_min_time=0.01s) +benchmark_add_test(NAME internal_threading_test COMMAND internal_threading_test --benchmark_min_time=0.01s) compile_output_test(report_aggregates_only_test) -add_test(NAME report_aggregates_only_test COMMAND report_aggregates_only_test --benchmark_min_time=0.01s) +benchmark_add_test(NAME report_aggregates_only_test COMMAND report_aggregates_only_test --benchmark_min_time=0.01s) compile_output_test(display_aggregates_only_test) -add_test(NAME display_aggregates_only_test COMMAND display_aggregates_only_test --benchmark_min_time=0.01s) +benchmark_add_test(NAME display_aggregates_only_test COMMAND display_aggregates_only_test --benchmark_min_time=0.01s) compile_output_test(user_counters_tabular_test) -add_test(NAME user_counters_tabular_test COMMAND user_counters_tabular_test --benchmark_counters_tabular=true --benchmark_min_time=0.01s) +benchmark_add_test(NAME user_counters_tabular_test COMMAND user_counters_tabular_test --benchmark_counters_tabular=true --benchmark_min_time=0.01s) compile_output_test(user_counters_thousands_test) -add_test(NAME user_counters_thousands_test COMMAND 
user_counters_thousands_test --benchmark_min_time=0.01s) +benchmark_add_test(NAME user_counters_thousands_test COMMAND user_counters_thousands_test --benchmark_min_time=0.01s) compile_output_test(memory_manager_test) -add_test(NAME memory_manager_test COMMAND memory_manager_test --benchmark_min_time=0.01s) +benchmark_add_test(NAME memory_manager_test COMMAND memory_manager_test --benchmark_min_time=0.01s) # MSVC does not allow to set the language standard to C++98/03. -if(NOT CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") +if(NOT (MSVC OR CMAKE_CXX_SIMULATE_ID STREQUAL "MSVC")) compile_benchmark_test(cxx03_test) set_target_properties(cxx03_test PROPERTIES @@ -205,17 +215,11 @@ if(NOT CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") set(DISABLE_LTO_WARNINGS "${DISABLE_LTO_WARNINGS} -Wno-lto-type-mismatch") endif() set_target_properties(cxx03_test PROPERTIES LINK_FLAGS "${DISABLE_LTO_WARNINGS}") - add_test(NAME cxx03 COMMAND cxx03_test --benchmark_min_time=0.01s) + benchmark_add_test(NAME cxx03 COMMAND cxx03_test --benchmark_min_time=0.01s) endif() -# Attempt to work around flaky test failures when running on Appveyor servers. 
-if (DEFINED ENV{APPVEYOR}) - set(COMPLEXITY_MIN_TIME "0.5s") -else() - set(COMPLEXITY_MIN_TIME "0.01s") -endif() compile_output_test(complexity_test) -add_test(NAME complexity_benchmark COMMAND complexity_test --benchmark_min_time=${COMPLEXITY_MIN_TIME}) +benchmark_add_test(NAME complexity_benchmark COMMAND complexity_test --benchmark_min_time=1000000x) ############################################################################### # GoogleTest Unit Tests @@ -230,7 +234,12 @@ if (BENCHMARK_ENABLE_GTEST_TESTS) macro(add_gtest name) compile_gtest(${name}) - add_test(NAME ${name} COMMAND ${name}) + benchmark_add_test(NAME ${name} COMMAND ${name}) + if(WIN32 AND BUILD_SHARED_LIBS) + set_tests_properties(${name} PROPERTIES + ENVIRONMENT_MODIFICATION "PATH=path_list_prepend:$;PATH=path_list_prepend:$" + ) + endif() endmacro() add_gtest(benchmark_gtest) diff --git a/yass/third_party/benchmark/test/basic_test.cc b/yass/third_party/benchmark/test/basic_test.cc index cba1b0f992..c25bec7ddd 100644 --- a/yass/third_party/benchmark/test/basic_test.cc +++ b/yass/third_party/benchmark/test/basic_test.cc @@ -5,7 +5,7 @@ void BM_empty(benchmark::State& state) { for (auto _ : state) { - auto iterations = state.iterations(); + auto iterations = double(state.iterations()) * double(state.iterations()); benchmark::DoNotOptimize(iterations); } } diff --git a/yass/third_party/benchmark/test/benchmark_gtest.cc b/yass/third_party/benchmark/test/benchmark_gtest.cc index 2c9e555d92..0aa2552c1e 100644 --- a/yass/third_party/benchmark/test/benchmark_gtest.cc +++ b/yass/third_party/benchmark/test/benchmark_gtest.cc @@ -38,7 +38,7 @@ TEST(AddRangeTest, Advanced64) { TEST(AddRangeTest, FullRange8) { std::vector dst; - AddRange(&dst, int8_t{1}, std::numeric_limits::max(), int8_t{8}); + AddRange(&dst, int8_t{1}, std::numeric_limits::max(), 8); EXPECT_THAT( dst, testing::ElementsAre(int8_t{1}, int8_t{8}, int8_t{64}, int8_t{127})); } diff --git a/yass/third_party/benchmark/test/benchmark_test.cc 
b/yass/third_party/benchmark/test/benchmark_test.cc index 94590d5e41..8b14017d03 100644 --- a/yass/third_party/benchmark/test/benchmark_test.cc +++ b/yass/third_party/benchmark/test/benchmark_test.cc @@ -16,6 +16,7 @@ #include #include #include +#include #include #include @@ -226,6 +227,31 @@ void BM_non_template_args(benchmark::State& state, int, double) { } BENCHMARK_CAPTURE(BM_non_template_args, basic_test, 0, 0); +template +void BM_template2_capture(benchmark::State& state, ExtraArgs&&... extra_args) { + static_assert(std::is_same::value, ""); + static_assert(std::is_same::value, ""); + static_assert(std::is_same::value, ""); + unsigned int dummy[sizeof...(ExtraArgs)] = {extra_args...}; + assert(dummy[0] == 42); + for (auto _ : state) { + } +} +BENCHMARK_TEMPLATE2_CAPTURE(BM_template2_capture, void, char*, foo, 42U); +BENCHMARK_CAPTURE((BM_template2_capture), foo, 42U); + +template +void BM_template1_capture(benchmark::State& state, ExtraArgs&&... extra_args) { + static_assert(std::is_same::value, ""); + static_assert(std::is_same::value, ""); + unsigned long dummy[sizeof...(ExtraArgs)] = {extra_args...}; + assert(dummy[0] == 24); + for (auto _ : state) { + } +} +BENCHMARK_TEMPLATE1_CAPTURE(BM_template1_capture, void, foo, 24UL); +BENCHMARK_CAPTURE(BM_template1_capture, foo, 24UL); + #endif // BENCHMARK_HAS_CXX11 static void BM_DenseThreadRanges(benchmark::State& st) { diff --git a/yass/third_party/benchmark/test/complexity_test.cc b/yass/third_party/benchmark/test/complexity_test.cc index 76891e07b4..0729d15aa7 100644 --- a/yass/third_party/benchmark/test/complexity_test.cc +++ b/yass/third_party/benchmark/test/complexity_test.cc @@ -69,35 +69,44 @@ int AddComplexityTest(const std::string &test_name, void BM_Complexity_O1(benchmark::State &state) { for (auto _ : state) { - for (int i = 0; i < 1024; ++i) { - benchmark::DoNotOptimize(i); + // This test requires a non-zero CPU time to avoid divide-by-zero + benchmark::DoNotOptimize(state.iterations()); + double 
tmp = static_cast(state.iterations()); + benchmark::DoNotOptimize(tmp); + for (benchmark::IterationCount i = 0; i < state.iterations(); ++i) { + benchmark::DoNotOptimize(state.iterations()); + tmp *= static_cast(state.iterations()); + benchmark::DoNotOptimize(tmp); } + + // always 1ns per iteration + state.SetIterationTime(42 * 1e-9); } state.SetComplexityN(state.range(0)); } -BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->Complexity(benchmark::o1); -BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->Complexity(); BENCHMARK(BM_Complexity_O1) ->Range(1, 1 << 18) + ->UseManualTime() + ->Complexity(benchmark::o1); +BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->UseManualTime()->Complexity(); +BENCHMARK(BM_Complexity_O1) + ->Range(1, 1 << 18) + ->UseManualTime() ->Complexity([](benchmark::IterationCount) { return 1.0; }); -const char *one_test_name = "BM_Complexity_O1"; -const char *big_o_1_test_name = "BM_Complexity_O1_BigO"; -const char *rms_o_1_test_name = "BM_Complexity_O1_RMS"; -const char *enum_big_o_1 = "\\([0-9]+\\)"; -// FIXME: Tolerate both '(1)' and 'lgN' as output when the complexity is auto -// deduced. 
-// See https://github.com/google/benchmark/issues/272 -const char *auto_big_o_1 = "(\\([0-9]+\\))|(lgN)"; +const char *one_test_name = "BM_Complexity_O1/manual_time"; +const char *big_o_1_test_name = "BM_Complexity_O1/manual_time_BigO"; +const char *rms_o_1_test_name = "BM_Complexity_O1/manual_time_RMS"; +const char *enum_auto_big_o_1 = "\\([0-9]+\\)"; const char *lambda_big_o_1 = "f\\(N\\)"; // Add enum tests ADD_COMPLEXITY_CASES(one_test_name, big_o_1_test_name, rms_o_1_test_name, - enum_big_o_1, /*family_index=*/0); + enum_auto_big_o_1, /*family_index=*/0); -// Add auto enum tests +// Add auto tests ADD_COMPLEXITY_CASES(one_test_name, big_o_1_test_name, rms_o_1_test_name, - auto_big_o_1, /*family_index=*/1); + enum_auto_big_o_1, /*family_index=*/1); // Add lambda tests ADD_COMPLEXITY_CASES(one_test_name, big_o_1_test_name, rms_o_1_test_name, @@ -107,43 +116,44 @@ ADD_COMPLEXITY_CASES(one_test_name, big_o_1_test_name, rms_o_1_test_name, // --------------------------- Testing BigO O(N) --------------------------- // // ========================================================================= // -std::vector ConstructRandomVector(int64_t size) { - std::vector v; - v.reserve(static_cast(size)); - for (int i = 0; i < size; ++i) { - v.push_back(static_cast(std::rand() % size)); - } - return v; -} - void BM_Complexity_O_N(benchmark::State &state) { - auto v = ConstructRandomVector(state.range(0)); - // Test worst case scenario (item not in vector) - const int64_t item_not_in_vector = state.range(0) * 2; for (auto _ : state) { - auto it = std::find(v.begin(), v.end(), item_not_in_vector); - benchmark::DoNotOptimize(it); + // This test requires a non-zero CPU time to avoid divide-by-zero + benchmark::DoNotOptimize(state.iterations()); + double tmp = static_cast(state.iterations()); + benchmark::DoNotOptimize(tmp); + for (benchmark::IterationCount i = 0; i < state.iterations(); ++i) { + benchmark::DoNotOptimize(state.iterations()); + tmp *= 
static_cast(state.iterations()); + benchmark::DoNotOptimize(tmp); + } + + // 1ns per iteration per entry + state.SetIterationTime(static_cast(state.range(0)) * 42 * 1e-9); } state.SetComplexityN(state.range(0)); } BENCHMARK(BM_Complexity_O_N) ->RangeMultiplier(2) - ->Range(1 << 10, 1 << 16) + ->Range(1 << 10, 1 << 20) + ->UseManualTime() ->Complexity(benchmark::oN); BENCHMARK(BM_Complexity_O_N) ->RangeMultiplier(2) - ->Range(1 << 10, 1 << 16) + ->Range(1 << 10, 1 << 20) + ->UseManualTime() + ->Complexity(); +BENCHMARK(BM_Complexity_O_N) + ->RangeMultiplier(2) + ->Range(1 << 10, 1 << 20) + ->UseManualTime() ->Complexity([](benchmark::IterationCount n) -> double { return static_cast(n); }); -BENCHMARK(BM_Complexity_O_N) - ->RangeMultiplier(2) - ->Range(1 << 10, 1 << 16) - ->Complexity(); -const char *n_test_name = "BM_Complexity_O_N"; -const char *big_o_n_test_name = "BM_Complexity_O_N_BigO"; -const char *rms_o_n_test_name = "BM_Complexity_O_N_RMS"; +const char *n_test_name = "BM_Complexity_O_N/manual_time"; +const char *big_o_n_test_name = "BM_Complexity_O_N/manual_time_BigO"; +const char *rms_o_n_test_name = "BM_Complexity_O_N/manual_time_RMS"; const char *enum_auto_big_o_n = "N"; const char *lambda_big_o_n = "f\\(N\\)"; @@ -151,40 +161,57 @@ const char *lambda_big_o_n = "f\\(N\\)"; ADD_COMPLEXITY_CASES(n_test_name, big_o_n_test_name, rms_o_n_test_name, enum_auto_big_o_n, /*family_index=*/3); +// Add auto tests +ADD_COMPLEXITY_CASES(n_test_name, big_o_n_test_name, rms_o_n_test_name, + enum_auto_big_o_n, /*family_index=*/4); + // Add lambda tests ADD_COMPLEXITY_CASES(n_test_name, big_o_n_test_name, rms_o_n_test_name, - lambda_big_o_n, /*family_index=*/4); + lambda_big_o_n, /*family_index=*/5); // ========================================================================= // -// ------------------------- Testing BigO O(N*lgN) ------------------------- // +// ------------------------- Testing BigO O(NlgN) ------------------------- // // 
========================================================================= // +static const double kLog2E = 1.44269504088896340736; static void BM_Complexity_O_N_log_N(benchmark::State &state) { - auto v = ConstructRandomVector(state.range(0)); for (auto _ : state) { - std::sort(v.begin(), v.end()); + // This test requires a non-zero CPU time to avoid divide-by-zero + benchmark::DoNotOptimize(state.iterations()); + double tmp = static_cast(state.iterations()); + benchmark::DoNotOptimize(tmp); + for (benchmark::IterationCount i = 0; i < state.iterations(); ++i) { + benchmark::DoNotOptimize(state.iterations()); + tmp *= static_cast(state.iterations()); + benchmark::DoNotOptimize(tmp); + } + + state.SetIterationTime(static_cast(state.range(0)) * kLog2E * + std::log(state.range(0)) * 42 * 1e-9); } state.SetComplexityN(state.range(0)); } -static const double kLog2E = 1.44269504088896340736; BENCHMARK(BM_Complexity_O_N_log_N) ->RangeMultiplier(2) - ->Range(1 << 10, 1 << 16) + ->Range(1 << 10, 1U << 24) + ->UseManualTime() ->Complexity(benchmark::oNLogN); BENCHMARK(BM_Complexity_O_N_log_N) ->RangeMultiplier(2) - ->Range(1 << 10, 1 << 16) - ->Complexity([](benchmark::IterationCount n) { - return kLog2E * static_cast(n) * log(static_cast(n)); - }); + ->Range(1 << 10, 1U << 24) + ->UseManualTime() + ->Complexity(); BENCHMARK(BM_Complexity_O_N_log_N) ->RangeMultiplier(2) - ->Range(1 << 10, 1 << 16) - ->Complexity(); + ->Range(1 << 10, 1U << 24) + ->UseManualTime() + ->Complexity([](benchmark::IterationCount n) { + return kLog2E * static_cast(n) * std::log(static_cast(n)); + }); -const char *n_lg_n_test_name = "BM_Complexity_O_N_log_N"; -const char *big_o_n_lg_n_test_name = "BM_Complexity_O_N_log_N_BigO"; -const char *rms_o_n_lg_n_test_name = "BM_Complexity_O_N_log_N_RMS"; +const char *n_lg_n_test_name = "BM_Complexity_O_N_log_N/manual_time"; +const char *big_o_n_lg_n_test_name = "BM_Complexity_O_N_log_N/manual_time_BigO"; +const char *rms_o_n_lg_n_test_name = 
"BM_Complexity_O_N_log_N/manual_time_RMS"; const char *enum_auto_big_o_n_lg_n = "NlgN"; const char *lambda_big_o_n_lg_n = "f\\(N\\)"; @@ -193,10 +220,15 @@ ADD_COMPLEXITY_CASES(n_lg_n_test_name, big_o_n_lg_n_test_name, rms_o_n_lg_n_test_name, enum_auto_big_o_n_lg_n, /*family_index=*/6); -// Add lambda tests +// NOTE: auto big-o is wron.g +ADD_COMPLEXITY_CASES(n_lg_n_test_name, big_o_n_lg_n_test_name, + rms_o_n_lg_n_test_name, enum_auto_big_o_n_lg_n, + /*family_index=*/7); + +//// Add lambda tests ADD_COMPLEXITY_CASES(n_lg_n_test_name, big_o_n_lg_n_test_name, rms_o_n_lg_n_test_name, lambda_big_o_n_lg_n, - /*family_index=*/7); + /*family_index=*/8); // ========================================================================= // // -------- Testing formatting of Complexity with captured args ------------ // @@ -205,21 +237,31 @@ ADD_COMPLEXITY_CASES(n_lg_n_test_name, big_o_n_lg_n_test_name, void BM_ComplexityCaptureArgs(benchmark::State &state, int n) { for (auto _ : state) { // This test requires a non-zero CPU time to avoid divide-by-zero - auto iterations = state.iterations(); - benchmark::DoNotOptimize(iterations); + benchmark::DoNotOptimize(state.iterations()); + double tmp = static_cast(state.iterations()); + benchmark::DoNotOptimize(tmp); + for (benchmark::IterationCount i = 0; i < state.iterations(); ++i) { + benchmark::DoNotOptimize(state.iterations()); + tmp *= static_cast(state.iterations()); + benchmark::DoNotOptimize(tmp); + } + + state.SetIterationTime(static_cast(state.range(0)) * 42 * 1e-9); } state.SetComplexityN(n); } BENCHMARK_CAPTURE(BM_ComplexityCaptureArgs, capture_test, 100) + ->UseManualTime() ->Complexity(benchmark::oN) ->Ranges({{1, 2}, {3, 4}}); const std::string complexity_capture_name = - "BM_ComplexityCaptureArgs/capture_test"; + "BM_ComplexityCaptureArgs/capture_test/manual_time"; ADD_COMPLEXITY_CASES(complexity_capture_name, complexity_capture_name + "_BigO", - complexity_capture_name + "_RMS", "N", /*family_index=*/9); + 
complexity_capture_name + "_RMS", "N", + /*family_index=*/9); // ========================================================================= // // --------------------------- TEST CASES END ------------------------------ // diff --git a/yass/third_party/benchmark/test/diagnostics_test.cc b/yass/third_party/benchmark/test/diagnostics_test.cc index 0cd3edbd42..7c68a98929 100644 --- a/yass/third_party/benchmark/test/diagnostics_test.cc +++ b/yass/third_party/benchmark/test/diagnostics_test.cc @@ -49,7 +49,7 @@ void BM_diagnostic_test(benchmark::State& state) { if (called_once == false) try_invalid_pause_resume(state); for (auto _ : state) { - auto iterations = state.iterations(); + auto iterations = double(state.iterations()) * double(state.iterations()); benchmark::DoNotOptimize(iterations); } @@ -65,7 +65,7 @@ void BM_diagnostic_test_keep_running(benchmark::State& state) { if (called_once == false) try_invalid_pause_resume(state); while (state.KeepRunning()) { - auto iterations = state.iterations(); + auto iterations = double(state.iterations()) * double(state.iterations()); benchmark::DoNotOptimize(iterations); } diff --git a/yass/third_party/benchmark/test/link_main_test.cc b/yass/third_party/benchmark/test/link_main_test.cc index e806500a9a..131937eebc 100644 --- a/yass/third_party/benchmark/test/link_main_test.cc +++ b/yass/third_party/benchmark/test/link_main_test.cc @@ -2,7 +2,7 @@ void BM_empty(benchmark::State& state) { for (auto _ : state) { - auto iterations = state.iterations(); + auto iterations = double(state.iterations()) * double(state.iterations()); benchmark::DoNotOptimize(iterations); } } diff --git a/yass/third_party/benchmark/test/memory_manager_test.cc b/yass/third_party/benchmark/test/memory_manager_test.cc index d94bd5161b..4df674d586 100644 --- a/yass/third_party/benchmark/test/memory_manager_test.cc +++ b/yass/third_party/benchmark/test/memory_manager_test.cc @@ -14,7 +14,7 @@ class TestMemoryManager : public benchmark::MemoryManager { void 
BM_empty(benchmark::State& state) { for (auto _ : state) { - auto iterations = state.iterations(); + auto iterations = double(state.iterations()) * double(state.iterations()); benchmark::DoNotOptimize(iterations); } } diff --git a/yass/third_party/benchmark/test/output_test_helper.cc b/yass/third_party/benchmark/test/output_test_helper.cc index 25673700aa..265f28aae7 100644 --- a/yass/third_party/benchmark/test/output_test_helper.cc +++ b/yass/third_party/benchmark/test/output_test_helper.cc @@ -65,6 +65,7 @@ SubMap& GetSubstitutions() { {"%csv_us_report", "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",us,,,,,"}, {"%csv_ms_report", "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",ms,,,,,"}, {"%csv_s_report", "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",s,,,,,"}, + {"%csv_cv_report", "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",,,,,,"}, {"%csv_bytes_report", "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",ns," + safe_dec_re + ",,,,"}, {"%csv_items_report", diff --git a/yass/third_party/benchmark/test/perf_counters_gtest.cc b/yass/third_party/benchmark/test/perf_counters_gtest.cc index 54c78635b8..2e63049285 100644 --- a/yass/third_party/benchmark/test/perf_counters_gtest.cc +++ b/yass/third_party/benchmark/test/perf_counters_gtest.cc @@ -41,7 +41,7 @@ TEST(PerfCountersTest, NegativeTest) { return; } EXPECT_TRUE(PerfCounters::Initialize()); - // Sanity checks + // Safety checks // Create() will always create a valid object, even if passed no or // wrong arguments as the new behavior is to warn and drop unsupported // counters diff --git a/yass/third_party/benchmark/test/perf_counters_test.cc b/yass/third_party/benchmark/test/perf_counters_test.cc index b0a3ab0619..3cc593e629 100644 --- a/yass/third_party/benchmark/test/perf_counters_test.cc +++ b/yass/third_party/benchmark/test/perf_counters_test.cc @@ -14,7 +14,7 @@ BM_DECLARE_string(benchmark_perf_counters); static void BM_Simple(benchmark::State& state) { for (auto _ : state) { - auto iterations = 
state.iterations(); + auto iterations = double(state.iterations()) * double(state.iterations()); benchmark::DoNotOptimize(iterations); } } diff --git a/yass/third_party/benchmark/test/reporter_output_test.cc b/yass/third_party/benchmark/test/reporter_output_test.cc index 2eb545a8de..7867165d1f 100644 --- a/yass/third_party/benchmark/test/reporter_output_test.cc +++ b/yass/third_party/benchmark/test/reporter_output_test.cc @@ -55,6 +55,9 @@ static int AddContextCases() { {{"Load Average: (%float, ){0,2}%float$", MR_Next}}); } AddCases(TC_JSONOut, {{"\"load_avg\": \\[(%float,?){0,3}],$", MR_Next}}); + AddCases(TC_JSONOut, {{"\"library_version\": \".*\",$", MR_Next}}); + AddCases(TC_JSONOut, {{"\"library_build_type\": \".*\",$", MR_Next}}); + AddCases(TC_JSONOut, {{"\"json_schema_version\": 1$", MR_Next}}); return 0; } int dummy_register = AddContextCases(); @@ -93,7 +96,7 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_basic\",%csv_report$"}}); void BM_bytes_per_second(benchmark::State& state) { for (auto _ : state) { // This test requires a non-zero CPU time to avoid divide-by-zero - auto iterations = state.iterations(); + auto iterations = double(state.iterations()) * double(state.iterations()); benchmark::DoNotOptimize(iterations); } state.SetBytesProcessed(1); @@ -125,7 +128,7 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_bytes_per_second\",%csv_bytes_report$"}}); void BM_items_per_second(benchmark::State& state) { for (auto _ : state) { // This test requires a non-zero CPU time to avoid divide-by-zero - auto iterations = state.iterations(); + auto iterations = double(state.iterations()) * double(state.iterations()); benchmark::DoNotOptimize(iterations); } state.SetItemsProcessed(1); @@ -406,7 +409,7 @@ ADD_CASES(TC_ConsoleOut, {{"^BM_BigArgs/1073741824 %console_report$"}, void BM_Complexity_O1(benchmark::State& state) { for (auto _ : state) { // This test requires a non-zero CPU time to avoid divide-by-zero - auto iterations = state.iterations(); + auto iterations = 
double(state.iterations()) * double(state.iterations()); benchmark::DoNotOptimize(iterations); } state.SetComplexityN(state.range(0)); @@ -1088,7 +1091,7 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_UserPercentStats/iterations:5/repeats:3/" {"^\"BM_UserPercentStats/iterations:5/repeats:3/" "manual_time_stddev\",%csv_report$"}, {"^\"BM_UserPercentStats/iterations:5/repeats:3/" - "manual_time_\",%csv_report$"}}); + "manual_time_\",%csv_cv_report$"}}); // ========================================================================= // // ------------------------- Testing StrEscape JSON ------------------------ // diff --git a/yass/third_party/benchmark/test/skip_with_error_test.cc b/yass/third_party/benchmark/test/skip_with_error_test.cc index b4c5e154c4..2139a19e25 100644 --- a/yass/third_party/benchmark/test/skip_with_error_test.cc +++ b/yass/third_party/benchmark/test/skip_with_error_test.cc @@ -143,7 +143,7 @@ ADD_CASES("BM_error_during_running_ranged_for", void BM_error_after_running(benchmark::State& state) { for (auto _ : state) { - auto iterations = state.iterations(); + auto iterations = double(state.iterations()) * double(state.iterations()); benchmark::DoNotOptimize(iterations); } if (state.thread_index() <= (state.threads() / 2)) diff --git a/yass/third_party/benchmark/test/statistics_gtest.cc b/yass/third_party/benchmark/test/statistics_gtest.cc index 1de2d87d4b..48c77260fd 100644 --- a/yass/third_party/benchmark/test/statistics_gtest.cc +++ b/yass/third_party/benchmark/test/statistics_gtest.cc @@ -28,8 +28,8 @@ TEST(StatisticsTest, StdDev) { TEST(StatisticsTest, CV) { EXPECT_DOUBLE_EQ(benchmark::StatisticsCV({101, 101, 101, 101}), 0.0); EXPECT_DOUBLE_EQ(benchmark::StatisticsCV({1, 2, 3}), 1. 
/ 2.); - EXPECT_DOUBLE_EQ(benchmark::StatisticsCV({2.5, 2.4, 3.3, 4.2, 5.1}), - 0.32888184094918121); + ASSERT_NEAR(benchmark::StatisticsCV({2.5, 2.4, 3.3, 4.2, 5.1}), + 0.32888184094918121, 1e-15); } } // end namespace diff --git a/yass/third_party/benchmark/test/user_counters_tabular_test.cc b/yass/third_party/benchmark/test/user_counters_tabular_test.cc index c98b769af2..cfc1ab069c 100644 --- a/yass/third_party/benchmark/test/user_counters_tabular_test.cc +++ b/yass/third_party/benchmark/test/user_counters_tabular_test.cc @@ -63,6 +63,9 @@ ADD_CASES(TC_CSVOut, {{"%csv_header," void BM_Counters_Tabular(benchmark::State& state) { for (auto _ : state) { + // This test requires a non-zero CPU time to avoid divide-by-zero + auto iterations = double(state.iterations()) * double(state.iterations()); + benchmark::DoNotOptimize(iterations); } namespace bm = benchmark; state.counters.insert({ @@ -330,7 +333,7 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_Tabular/repeats:2/threads:1_stddev\",%csv_report," "%float,%float,%float,%float,%float,%float$"}}); ADD_CASES(TC_CSVOut, - {{"^\"BM_Counters_Tabular/repeats:2/threads:1_cv\",%csv_report," + {{"^\"BM_Counters_Tabular/repeats:2/threads:1_cv\",%csv_cv_report," "%float,%float,%float,%float,%float,%float$"}}); ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_Tabular/repeats:2/threads:2\",%csv_report," @@ -348,7 +351,7 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_Tabular/repeats:2/threads:2_stddev\",%csv_report," "%float,%float,%float,%float,%float,%float$"}}); ADD_CASES(TC_CSVOut, - {{"^\"BM_Counters_Tabular/repeats:2/threads:2_cv\",%csv_report," + {{"^\"BM_Counters_Tabular/repeats:2/threads:2_cv\",%csv_cv_report," "%float,%float,%float,%float,%float,%float$"}}); // VS2013 does not allow this function to be passed as a lambda argument // to CHECK_BENCHMARK_RESULTS() @@ -372,7 +375,7 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_Tabular/repeats:2/threads:2$", void BM_CounterRates_Tabular(benchmark::State& state) { for (auto _ : state) { // This test 
requires a non-zero CPU time to avoid divide-by-zero - auto iterations = state.iterations(); + auto iterations = double(state.iterations()) * double(state.iterations()); benchmark::DoNotOptimize(iterations); } namespace bm = benchmark; diff --git a/yass/third_party/benchmark/test/user_counters_test.cc b/yass/third_party/benchmark/test/user_counters_test.cc index 4cd8ee3739..22252acbf6 100644 --- a/yass/third_party/benchmark/test/user_counters_test.cc +++ b/yass/third_party/benchmark/test/user_counters_test.cc @@ -67,7 +67,7 @@ int num_calls1 = 0; void BM_Counters_WithBytesAndItemsPSec(benchmark::State& state) { for (auto _ : state) { // This test requires a non-zero CPU time to avoid divide-by-zero - auto iterations = state.iterations(); + auto iterations = double(state.iterations()) * double(state.iterations()); benchmark::DoNotOptimize(iterations); } state.counters["foo"] = 1; @@ -119,7 +119,7 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_WithBytesAndItemsPSec", void BM_Counters_Rate(benchmark::State& state) { for (auto _ : state) { // This test requires a non-zero CPU time to avoid divide-by-zero - auto iterations = state.iterations(); + auto iterations = double(state.iterations()) * double(state.iterations()); benchmark::DoNotOptimize(iterations); } namespace bm = benchmark; @@ -163,7 +163,7 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_Rate", &CheckRate); void BM_Invert(benchmark::State& state) { for (auto _ : state) { // This test requires a non-zero CPU time to avoid divide-by-zero - auto iterations = state.iterations(); + auto iterations = double(state.iterations()) * double(state.iterations()); benchmark::DoNotOptimize(iterations); } namespace bm = benchmark; @@ -204,7 +204,7 @@ CHECK_BENCHMARK_RESULTS("BM_Invert", &CheckInvert); void BM_Counters_InvertedRate(benchmark::State& state) { for (auto _ : state) { // This test requires a non-zero CPU time to avoid divide-by-zero - auto iterations = state.iterations(); + auto iterations = double(state.iterations()) * 
double(state.iterations()); benchmark::DoNotOptimize(iterations); } namespace bm = benchmark; @@ -333,7 +333,7 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_AvgThreads/threads:%int", void BM_Counters_AvgThreadsRate(benchmark::State& state) { for (auto _ : state) { // This test requires a non-zero CPU time to avoid divide-by-zero - auto iterations = state.iterations(); + auto iterations = double(state.iterations()) * double(state.iterations()); benchmark::DoNotOptimize(iterations); } namespace bm = benchmark; @@ -421,7 +421,7 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_IterationInvariant", void BM_Counters_kIsIterationInvariantRate(benchmark::State& state) { for (auto _ : state) { // This test requires a non-zero CPU time to avoid divide-by-zero - auto iterations = state.iterations(); + auto iterations = double(state.iterations()) * double(state.iterations()); benchmark::DoNotOptimize(iterations); } namespace bm = benchmark; @@ -513,7 +513,7 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_AvgIterations", &CheckAvgIterations); void BM_Counters_kAvgIterationsRate(benchmark::State& state) { for (auto _ : state) { // This test requires a non-zero CPU time to avoid divide-by-zero - auto iterations = state.iterations(); + auto iterations = double(state.iterations()) * double(state.iterations()); benchmark::DoNotOptimize(iterations); } namespace bm = benchmark; diff --git a/yass/third_party/benchmark/tools/BUILD.bazel b/yass/third_party/benchmark/tools/BUILD.bazel index d25caa79ae..8ef6a86598 100644 --- a/yass/third_party/benchmark/tools/BUILD.bazel +++ b/yass/third_party/benchmark/tools/BUILD.bazel @@ -4,14 +4,15 @@ py_library( name = "gbench", srcs = glob(["gbench/*.py"]), deps = [ - requirement("numpy"), - requirement("scipy"), + requirement("numpy"), + requirement("scipy"), ], ) py_binary( name = "compare", srcs = ["compare.py"], + imports = ["."], python_version = "PY3", deps = [ ":gbench", diff --git a/yass/third_party/benchmark/tools/compare.py 
b/yass/third_party/benchmark/tools/compare.py index e5eeb247e6..7572520cc0 100755 --- a/yass/third_party/benchmark/tools/compare.py +++ b/yass/third_party/benchmark/tools/compare.py @@ -1,17 +1,20 @@ #!/usr/bin/env python3 -import unittest +# type: ignore + """ compare.py - versatile benchmark output compare tool """ import argparse -from argparse import ArgumentParser import json -import sys import os +import sys +import unittest +from argparse import ArgumentParser + import gbench -from gbench import util, report +from gbench import report, util def check_inputs(in1, in2, flags): @@ -20,163 +23,203 @@ def check_inputs(in1, in2, flags): """ in1_kind, in1_err = util.classify_input_file(in1) in2_kind, in2_err = util.classify_input_file(in2) - output_file = util.find_benchmark_flag('--benchmark_out=', flags) - output_type = util.find_benchmark_flag('--benchmark_out_format=', flags) - if in1_kind == util.IT_Executable and in2_kind == util.IT_Executable and output_file: - print(("WARNING: '--benchmark_out=%s' will be passed to both " - "benchmarks causing it to be overwritten") % output_file) + output_file = util.find_benchmark_flag("--benchmark_out=", flags) + output_type = util.find_benchmark_flag("--benchmark_out_format=", flags) + if ( + in1_kind == util.IT_Executable + and in2_kind == util.IT_Executable + and output_file + ): + print( + ( + "WARNING: '--benchmark_out=%s' will be passed to both " + "benchmarks causing it to be overwritten" + ) + % output_file + ) if in1_kind == util.IT_JSON and in2_kind == util.IT_JSON: # When both sides are JSON the only supported flag is # --benchmark_filter= - for flag in util.remove_benchmark_flags('--benchmark_filter=', flags): - print("WARNING: passing %s has no effect since both " - "inputs are JSON" % flag) - if output_type is not None and output_type != 'json': - print(("ERROR: passing '--benchmark_out_format=%s' to 'compare.py`" - " is not supported.") % output_type) + for flag in 
util.remove_benchmark_flags("--benchmark_filter=", flags): + print( + "WARNING: passing %s has no effect since both " + "inputs are JSON" % flag + ) + if output_type is not None and output_type != "json": + print( + ( + "ERROR: passing '--benchmark_out_format=%s' to 'compare.py`" + " is not supported." + ) + % output_type + ) sys.exit(1) def create_parser(): parser = ArgumentParser( - description='versatile benchmark output compare tool') - - parser.add_argument( - '-a', - '--display_aggregates_only', - dest='display_aggregates_only', - action="store_true", - help="If there are repetitions, by default, we display everything - the" - " actual runs, and the aggregates computed. Sometimes, it is " - "desirable to only view the aggregates. E.g. when there are a lot " - "of repetitions. Do note that only the display is affected. " - "Internally, all the actual runs are still used, e.g. for U test.") - - parser.add_argument( - '--no-color', - dest='color', - default=True, - action="store_false", - help="Do not use colors in the terminal output" + description="versatile benchmark output compare tool" ) parser.add_argument( - '-d', - '--dump_to_json', - dest='dump_to_json', - help="Additionally, dump benchmark comparison output to this file in JSON format.") + "-a", + "--display_aggregates_only", + dest="display_aggregates_only", + action="store_true", + help="If there are repetitions, by default, we display everything - the" + " actual runs, and the aggregates computed. Sometimes, it is " + "desirable to only view the aggregates. E.g. when there are a lot " + "of repetitions. Do note that only the display is affected. " + "Internally, all the actual runs are still used, e.g. 
for U test.", + ) + + parser.add_argument( + "--no-color", + dest="color", + default=True, + action="store_false", + help="Do not use colors in the terminal output", + ) + + parser.add_argument( + "-d", + "--dump_to_json", + dest="dump_to_json", + help="Additionally, dump benchmark comparison output to this file in JSON format.", + ) utest = parser.add_argument_group() utest.add_argument( - '--no-utest', - dest='utest', + "--no-utest", + dest="utest", default=True, action="store_false", - help="The tool can do a two-tailed Mann-Whitney U test with the null hypothesis that it is equally likely that a randomly selected value from one sample will be less than or greater than a randomly selected value from a second sample.\nWARNING: requires **LARGE** (no less than {}) number of repetitions to be meaningful!\nThe test is being done by default, if at least {} repetitions were done.\nThis option can disable the U Test.".format(report.UTEST_OPTIMAL_REPETITIONS, report.UTEST_MIN_REPETITIONS)) + help="The tool can do a two-tailed Mann-Whitney U test with the null hypothesis that it is equally likely that a randomly selected value from one sample will be less than or greater than a randomly selected value from a second sample.\nWARNING: requires **LARGE** (no less than {}) number of repetitions to be meaningful!\nThe test is being done by default, if at least {} repetitions were done.\nThis option can disable the U Test.".format( + report.UTEST_OPTIMAL_REPETITIONS, report.UTEST_MIN_REPETITIONS + ), + ) alpha_default = 0.05 utest.add_argument( "--alpha", - dest='utest_alpha', + dest="utest_alpha", default=alpha_default, type=float, - help=("significance level alpha. if the calculated p-value is below this value, then the result is said to be statistically significant and the null hypothesis is rejected.\n(default: %0.4f)") % - alpha_default) + help=( + "significance level alpha. 
if the calculated p-value is below this value, then the result is said to be statistically significant and the null hypothesis is rejected.\n(default: %0.4f)" + ) + % alpha_default, + ) subparsers = parser.add_subparsers( - help='This tool has multiple modes of operation:', - dest='mode') + help="This tool has multiple modes of operation:", dest="mode" + ) parser_a = subparsers.add_parser( - 'benchmarks', - help='The most simple use-case, compare all the output of these two benchmarks') - baseline = parser_a.add_argument_group( - 'baseline', 'The benchmark baseline') + "benchmarks", + help="The most simple use-case, compare all the output of these two benchmarks", + ) + baseline = parser_a.add_argument_group("baseline", "The benchmark baseline") baseline.add_argument( - 'test_baseline', - metavar='test_baseline', - type=argparse.FileType('r'), + "test_baseline", + metavar="test_baseline", + type=argparse.FileType("r"), nargs=1, - help='A benchmark executable or JSON output file') + help="A benchmark executable or JSON output file", + ) contender = parser_a.add_argument_group( - 'contender', 'The benchmark that will be compared against the baseline') + "contender", "The benchmark that will be compared against the baseline" + ) contender.add_argument( - 'test_contender', - metavar='test_contender', - type=argparse.FileType('r'), + "test_contender", + metavar="test_contender", + type=argparse.FileType("r"), nargs=1, - help='A benchmark executable or JSON output file') + help="A benchmark executable or JSON output file", + ) parser_a.add_argument( - 'benchmark_options', - metavar='benchmark_options', + "benchmark_options", + metavar="benchmark_options", nargs=argparse.REMAINDER, - help='Arguments to pass when running benchmark executables') + help="Arguments to pass when running benchmark executables", + ) parser_b = subparsers.add_parser( - 'filters', help='Compare filter one with the filter two of benchmark') - baseline = parser_b.add_argument_group( - 'baseline', 
'The benchmark baseline') + "filters", help="Compare filter one with the filter two of benchmark" + ) + baseline = parser_b.add_argument_group("baseline", "The benchmark baseline") baseline.add_argument( - 'test', - metavar='test', - type=argparse.FileType('r'), + "test", + metavar="test", + type=argparse.FileType("r"), nargs=1, - help='A benchmark executable or JSON output file') + help="A benchmark executable or JSON output file", + ) baseline.add_argument( - 'filter_baseline', - metavar='filter_baseline', + "filter_baseline", + metavar="filter_baseline", type=str, nargs=1, - help='The first filter, that will be used as baseline') + help="The first filter, that will be used as baseline", + ) contender = parser_b.add_argument_group( - 'contender', 'The benchmark that will be compared against the baseline') + "contender", "The benchmark that will be compared against the baseline" + ) contender.add_argument( - 'filter_contender', - metavar='filter_contender', + "filter_contender", + metavar="filter_contender", type=str, nargs=1, - help='The second filter, that will be compared against the baseline') + help="The second filter, that will be compared against the baseline", + ) parser_b.add_argument( - 'benchmark_options', - metavar='benchmark_options', + "benchmark_options", + metavar="benchmark_options", nargs=argparse.REMAINDER, - help='Arguments to pass when running benchmark executables') + help="Arguments to pass when running benchmark executables", + ) parser_c = subparsers.add_parser( - 'benchmarksfiltered', - help='Compare filter one of first benchmark with filter two of the second benchmark') - baseline = parser_c.add_argument_group( - 'baseline', 'The benchmark baseline') + "benchmarksfiltered", + help="Compare filter one of first benchmark with filter two of the second benchmark", + ) + baseline = parser_c.add_argument_group("baseline", "The benchmark baseline") baseline.add_argument( - 'test_baseline', - metavar='test_baseline', - 
type=argparse.FileType('r'), + "test_baseline", + metavar="test_baseline", + type=argparse.FileType("r"), nargs=1, - help='A benchmark executable or JSON output file') + help="A benchmark executable or JSON output file", + ) baseline.add_argument( - 'filter_baseline', - metavar='filter_baseline', + "filter_baseline", + metavar="filter_baseline", type=str, nargs=1, - help='The first filter, that will be used as baseline') + help="The first filter, that will be used as baseline", + ) contender = parser_c.add_argument_group( - 'contender', 'The benchmark that will be compared against the baseline') + "contender", "The benchmark that will be compared against the baseline" + ) contender.add_argument( - 'test_contender', - metavar='test_contender', - type=argparse.FileType('r'), + "test_contender", + metavar="test_contender", + type=argparse.FileType("r"), nargs=1, - help='The second benchmark executable or JSON output file, that will be compared against the baseline') + help="The second benchmark executable or JSON output file, that will be compared against the baseline", + ) contender.add_argument( - 'filter_contender', - metavar='filter_contender', + "filter_contender", + metavar="filter_contender", type=str, nargs=1, - help='The second filter, that will be compared against the baseline') + help="The second filter, that will be compared against the baseline", + ) parser_c.add_argument( - 'benchmark_options', - metavar='benchmark_options', + "benchmark_options", + metavar="benchmark_options", nargs=argparse.REMAINDER, - help='Arguments to pass when running benchmark executables') + help="Arguments to pass when running benchmark executables", + ) return parser @@ -191,16 +234,16 @@ def main(): assert not unknown_args benchmark_options = args.benchmark_options - if args.mode == 'benchmarks': + if args.mode == "benchmarks": test_baseline = args.test_baseline[0].name test_contender = args.test_contender[0].name - filter_baseline = '' - filter_contender = '' + 
filter_baseline = "" + filter_contender = "" # NOTE: if test_baseline == test_contender, you are analyzing the stdev - description = 'Comparing %s to %s' % (test_baseline, test_contender) - elif args.mode == 'filters': + description = "Comparing %s to %s" % (test_baseline, test_contender) + elif args.mode == "filters": test_baseline = args.test[0].name test_contender = args.test[0].name filter_baseline = args.filter_baseline[0] @@ -209,9 +252,12 @@ def main(): # NOTE: if filter_baseline == filter_contender, you are analyzing the # stdev - description = 'Comparing %s to %s (from %s)' % ( - filter_baseline, filter_contender, args.test[0].name) - elif args.mode == 'benchmarksfiltered': + description = "Comparing %s to %s (from %s)" % ( + filter_baseline, + filter_contender, + args.test[0].name, + ) + elif args.mode == "benchmarksfiltered": test_baseline = args.test_baseline[0].name test_contender = args.test_contender[0].name filter_baseline = args.filter_baseline[0] @@ -220,8 +266,12 @@ def main(): # NOTE: if test_baseline == test_contender and # filter_baseline == filter_contender, you are analyzing the stdev - description = 'Comparing %s (from %s) to %s (from %s)' % ( - filter_baseline, test_baseline, filter_contender, test_contender) + description = "Comparing %s (from %s) to %s (from %s)" % ( + filter_baseline, + test_baseline, + filter_contender, + test_contender, + ) else: # should never happen print("Unrecognized mode of operation: '%s'" % args.mode) @@ -231,199 +281,240 @@ def main(): check_inputs(test_baseline, test_contender, benchmark_options) if args.display_aggregates_only: - benchmark_options += ['--benchmark_display_aggregates_only=true'] + benchmark_options += ["--benchmark_display_aggregates_only=true"] options_baseline = [] options_contender = [] if filter_baseline and filter_contender: - options_baseline = ['--benchmark_filter=%s' % filter_baseline] - options_contender = ['--benchmark_filter=%s' % filter_contender] + options_baseline = 
["--benchmark_filter=%s" % filter_baseline] + options_contender = ["--benchmark_filter=%s" % filter_contender] # Run the benchmarks and report the results - json1 = json1_orig = gbench.util.sort_benchmark_results(gbench.util.run_or_load_benchmark( - test_baseline, benchmark_options + options_baseline)) - json2 = json2_orig = gbench.util.sort_benchmark_results(gbench.util.run_or_load_benchmark( - test_contender, benchmark_options + options_contender)) + json1 = json1_orig = gbench.util.sort_benchmark_results( + gbench.util.run_or_load_benchmark( + test_baseline, benchmark_options + options_baseline + ) + ) + json2 = json2_orig = gbench.util.sort_benchmark_results( + gbench.util.run_or_load_benchmark( + test_contender, benchmark_options + options_contender + ) + ) # Now, filter the benchmarks so that the difference report can work if filter_baseline and filter_contender: - replacement = '[%s vs. %s]' % (filter_baseline, filter_contender) + replacement = "[%s vs. %s]" % (filter_baseline, filter_contender) json1 = gbench.report.filter_benchmark( - json1_orig, filter_baseline, replacement) + json1_orig, filter_baseline, replacement + ) json2 = gbench.report.filter_benchmark( - json2_orig, filter_contender, replacement) + json2_orig, filter_contender, replacement + ) - diff_report = gbench.report.get_difference_report( - json1, json2, args.utest) + diff_report = gbench.report.get_difference_report(json1, json2, args.utest) output_lines = gbench.report.print_difference_report( diff_report, args.display_aggregates_only, - args.utest, args.utest_alpha, args.color) + args.utest, + args.utest_alpha, + args.color, + ) print(description) for ln in output_lines: print(ln) # Optionally, diff and output to JSON if args.dump_to_json is not None: - with open(args.dump_to_json, 'w') as f_json: - json.dump(diff_report, f_json) + with open(args.dump_to_json, "w") as f_json: + json.dump(diff_report, f_json, indent=1) + class TestParser(unittest.TestCase): def setUp(self): self.parser = 
create_parser() testInputs = os.path.join( - os.path.dirname( - os.path.realpath(__file__)), - 'gbench', - 'Inputs') - self.testInput0 = os.path.join(testInputs, 'test1_run1.json') - self.testInput1 = os.path.join(testInputs, 'test1_run2.json') + os.path.dirname(os.path.realpath(__file__)), "gbench", "Inputs" + ) + self.testInput0 = os.path.join(testInputs, "test1_run1.json") + self.testInput1 = os.path.join(testInputs, "test1_run2.json") def test_benchmarks_basic(self): parsed = self.parser.parse_args( - ['benchmarks', self.testInput0, self.testInput1]) + ["benchmarks", self.testInput0, self.testInput1] + ) self.assertFalse(parsed.display_aggregates_only) self.assertTrue(parsed.utest) - self.assertEqual(parsed.mode, 'benchmarks') + self.assertEqual(parsed.mode, "benchmarks") self.assertEqual(parsed.test_baseline[0].name, self.testInput0) self.assertEqual(parsed.test_contender[0].name, self.testInput1) self.assertFalse(parsed.benchmark_options) def test_benchmarks_basic_without_utest(self): parsed = self.parser.parse_args( - ['--no-utest', 'benchmarks', self.testInput0, self.testInput1]) + ["--no-utest", "benchmarks", self.testInput0, self.testInput1] + ) self.assertFalse(parsed.display_aggregates_only) self.assertFalse(parsed.utest) self.assertEqual(parsed.utest_alpha, 0.05) - self.assertEqual(parsed.mode, 'benchmarks') + self.assertEqual(parsed.mode, "benchmarks") self.assertEqual(parsed.test_baseline[0].name, self.testInput0) self.assertEqual(parsed.test_contender[0].name, self.testInput1) self.assertFalse(parsed.benchmark_options) def test_benchmarks_basic_display_aggregates_only(self): parsed = self.parser.parse_args( - ['-a', 'benchmarks', self.testInput0, self.testInput1]) + ["-a", "benchmarks", self.testInput0, self.testInput1] + ) self.assertTrue(parsed.display_aggregates_only) self.assertTrue(parsed.utest) - self.assertEqual(parsed.mode, 'benchmarks') + self.assertEqual(parsed.mode, "benchmarks") self.assertEqual(parsed.test_baseline[0].name, 
self.testInput0) self.assertEqual(parsed.test_contender[0].name, self.testInput1) self.assertFalse(parsed.benchmark_options) def test_benchmarks_basic_with_utest_alpha(self): parsed = self.parser.parse_args( - ['--alpha=0.314', 'benchmarks', self.testInput0, self.testInput1]) + ["--alpha=0.314", "benchmarks", self.testInput0, self.testInput1] + ) self.assertFalse(parsed.display_aggregates_only) self.assertTrue(parsed.utest) self.assertEqual(parsed.utest_alpha, 0.314) - self.assertEqual(parsed.mode, 'benchmarks') + self.assertEqual(parsed.mode, "benchmarks") self.assertEqual(parsed.test_baseline[0].name, self.testInput0) self.assertEqual(parsed.test_contender[0].name, self.testInput1) self.assertFalse(parsed.benchmark_options) def test_benchmarks_basic_without_utest_with_utest_alpha(self): parsed = self.parser.parse_args( - ['--no-utest', '--alpha=0.314', 'benchmarks', self.testInput0, self.testInput1]) + [ + "--no-utest", + "--alpha=0.314", + "benchmarks", + self.testInput0, + self.testInput1, + ] + ) self.assertFalse(parsed.display_aggregates_only) self.assertFalse(parsed.utest) self.assertEqual(parsed.utest_alpha, 0.314) - self.assertEqual(parsed.mode, 'benchmarks') + self.assertEqual(parsed.mode, "benchmarks") self.assertEqual(parsed.test_baseline[0].name, self.testInput0) self.assertEqual(parsed.test_contender[0].name, self.testInput1) self.assertFalse(parsed.benchmark_options) def test_benchmarks_with_remainder(self): parsed = self.parser.parse_args( - ['benchmarks', self.testInput0, self.testInput1, 'd']) + ["benchmarks", self.testInput0, self.testInput1, "d"] + ) self.assertFalse(parsed.display_aggregates_only) self.assertTrue(parsed.utest) - self.assertEqual(parsed.mode, 'benchmarks') + self.assertEqual(parsed.mode, "benchmarks") self.assertEqual(parsed.test_baseline[0].name, self.testInput0) self.assertEqual(parsed.test_contender[0].name, self.testInput1) - self.assertEqual(parsed.benchmark_options, ['d']) + self.assertEqual(parsed.benchmark_options, 
["d"]) def test_benchmarks_with_remainder_after_doubleminus(self): parsed = self.parser.parse_args( - ['benchmarks', self.testInput0, self.testInput1, '--', 'e']) + ["benchmarks", self.testInput0, self.testInput1, "--", "e"] + ) self.assertFalse(parsed.display_aggregates_only) self.assertTrue(parsed.utest) - self.assertEqual(parsed.mode, 'benchmarks') + self.assertEqual(parsed.mode, "benchmarks") self.assertEqual(parsed.test_baseline[0].name, self.testInput0) self.assertEqual(parsed.test_contender[0].name, self.testInput1) - self.assertEqual(parsed.benchmark_options, ['e']) + self.assertEqual(parsed.benchmark_options, ["e"]) def test_filters_basic(self): - parsed = self.parser.parse_args( - ['filters', self.testInput0, 'c', 'd']) + parsed = self.parser.parse_args(["filters", self.testInput0, "c", "d"]) self.assertFalse(parsed.display_aggregates_only) self.assertTrue(parsed.utest) - self.assertEqual(parsed.mode, 'filters') + self.assertEqual(parsed.mode, "filters") self.assertEqual(parsed.test[0].name, self.testInput0) - self.assertEqual(parsed.filter_baseline[0], 'c') - self.assertEqual(parsed.filter_contender[0], 'd') + self.assertEqual(parsed.filter_baseline[0], "c") + self.assertEqual(parsed.filter_contender[0], "d") self.assertFalse(parsed.benchmark_options) def test_filters_with_remainder(self): parsed = self.parser.parse_args( - ['filters', self.testInput0, 'c', 'd', 'e']) + ["filters", self.testInput0, "c", "d", "e"] + ) self.assertFalse(parsed.display_aggregates_only) self.assertTrue(parsed.utest) - self.assertEqual(parsed.mode, 'filters') + self.assertEqual(parsed.mode, "filters") self.assertEqual(parsed.test[0].name, self.testInput0) - self.assertEqual(parsed.filter_baseline[0], 'c') - self.assertEqual(parsed.filter_contender[0], 'd') - self.assertEqual(parsed.benchmark_options, ['e']) + self.assertEqual(parsed.filter_baseline[0], "c") + self.assertEqual(parsed.filter_contender[0], "d") + self.assertEqual(parsed.benchmark_options, ["e"]) def 
test_filters_with_remainder_after_doubleminus(self): parsed = self.parser.parse_args( - ['filters', self.testInput0, 'c', 'd', '--', 'f']) + ["filters", self.testInput0, "c", "d", "--", "f"] + ) self.assertFalse(parsed.display_aggregates_only) self.assertTrue(parsed.utest) - self.assertEqual(parsed.mode, 'filters') + self.assertEqual(parsed.mode, "filters") self.assertEqual(parsed.test[0].name, self.testInput0) - self.assertEqual(parsed.filter_baseline[0], 'c') - self.assertEqual(parsed.filter_contender[0], 'd') - self.assertEqual(parsed.benchmark_options, ['f']) + self.assertEqual(parsed.filter_baseline[0], "c") + self.assertEqual(parsed.filter_contender[0], "d") + self.assertEqual(parsed.benchmark_options, ["f"]) def test_benchmarksfiltered_basic(self): parsed = self.parser.parse_args( - ['benchmarksfiltered', self.testInput0, 'c', self.testInput1, 'e']) + ["benchmarksfiltered", self.testInput0, "c", self.testInput1, "e"] + ) self.assertFalse(parsed.display_aggregates_only) self.assertTrue(parsed.utest) - self.assertEqual(parsed.mode, 'benchmarksfiltered') + self.assertEqual(parsed.mode, "benchmarksfiltered") self.assertEqual(parsed.test_baseline[0].name, self.testInput0) - self.assertEqual(parsed.filter_baseline[0], 'c') + self.assertEqual(parsed.filter_baseline[0], "c") self.assertEqual(parsed.test_contender[0].name, self.testInput1) - self.assertEqual(parsed.filter_contender[0], 'e') + self.assertEqual(parsed.filter_contender[0], "e") self.assertFalse(parsed.benchmark_options) def test_benchmarksfiltered_with_remainder(self): parsed = self.parser.parse_args( - ['benchmarksfiltered', self.testInput0, 'c', self.testInput1, 'e', 'f']) + [ + "benchmarksfiltered", + self.testInput0, + "c", + self.testInput1, + "e", + "f", + ] + ) self.assertFalse(parsed.display_aggregates_only) self.assertTrue(parsed.utest) - self.assertEqual(parsed.mode, 'benchmarksfiltered') + self.assertEqual(parsed.mode, "benchmarksfiltered") self.assertEqual(parsed.test_baseline[0].name, 
self.testInput0) - self.assertEqual(parsed.filter_baseline[0], 'c') + self.assertEqual(parsed.filter_baseline[0], "c") self.assertEqual(parsed.test_contender[0].name, self.testInput1) - self.assertEqual(parsed.filter_contender[0], 'e') - self.assertEqual(parsed.benchmark_options[0], 'f') + self.assertEqual(parsed.filter_contender[0], "e") + self.assertEqual(parsed.benchmark_options[0], "f") def test_benchmarksfiltered_with_remainder_after_doubleminus(self): parsed = self.parser.parse_args( - ['benchmarksfiltered', self.testInput0, 'c', self.testInput1, 'e', '--', 'g']) + [ + "benchmarksfiltered", + self.testInput0, + "c", + self.testInput1, + "e", + "--", + "g", + ] + ) self.assertFalse(parsed.display_aggregates_only) self.assertTrue(parsed.utest) - self.assertEqual(parsed.mode, 'benchmarksfiltered') + self.assertEqual(parsed.mode, "benchmarksfiltered") self.assertEqual(parsed.test_baseline[0].name, self.testInput0) - self.assertEqual(parsed.filter_baseline[0], 'c') + self.assertEqual(parsed.filter_baseline[0], "c") self.assertEqual(parsed.test_contender[0].name, self.testInput1) - self.assertEqual(parsed.filter_contender[0], 'e') - self.assertEqual(parsed.benchmark_options[0], 'g') + self.assertEqual(parsed.filter_contender[0], "e") + self.assertEqual(parsed.benchmark_options[0], "g") -if __name__ == '__main__': +if __name__ == "__main__": # unittest.main() main() diff --git a/yass/third_party/benchmark/tools/gbench/Inputs/test5_run0.json b/yass/third_party/benchmark/tools/gbench/Inputs/test5_run0.json new file mode 100644 index 0000000000..074103b11d --- /dev/null +++ b/yass/third_party/benchmark/tools/gbench/Inputs/test5_run0.json @@ -0,0 +1,18 @@ +{ + "context": { + "date": "2016-08-02 17:44:46", + "num_cpus": 4, + "mhz_per_cpu": 4228, + "cpu_scaling_enabled": false, + "library_build_type": "release" + }, + "benchmarks": [ + { + "name": "BM_ManyRepetitions", + "iterations": 1000, + "real_time": 1, + "cpu_time": 1000, + "time_unit": "s" + } + ] +} diff --git 
a/yass/third_party/benchmark/tools/gbench/Inputs/test5_run1.json b/yass/third_party/benchmark/tools/gbench/Inputs/test5_run1.json new file mode 100644 index 0000000000..430df9f0da --- /dev/null +++ b/yass/third_party/benchmark/tools/gbench/Inputs/test5_run1.json @@ -0,0 +1,18 @@ +{ + "context": { + "date": "2016-08-02 17:44:46", + "num_cpus": 4, + "mhz_per_cpu": 4228, + "cpu_scaling_enabled": false, + "library_build_type": "release" + }, + "benchmarks": [ + { + "name": "BM_ManyRepetitions", + "iterations": 1000, + "real_time": 1000, + "cpu_time": 1, + "time_unit": "s" + } + ] +} diff --git a/yass/third_party/benchmark/tools/gbench/__init__.py b/yass/third_party/benchmark/tools/gbench/__init__.py index fce1a1acfb..9212568814 100644 --- a/yass/third_party/benchmark/tools/gbench/__init__.py +++ b/yass/third_party/benchmark/tools/gbench/__init__.py @@ -1,8 +1,8 @@ """Google Benchmark tooling""" -__author__ = 'Eric Fiselier' -__email__ = 'eric@efcs.ca' +__author__ = "Eric Fiselier" +__email__ = "eric@efcs.ca" __versioninfo__ = (0, 5, 0) -__version__ = '.'.join(str(v) for v in __versioninfo__) + 'dev' +__version__ = ".".join(str(v) for v in __versioninfo__) + "dev" -__all__ = [] +__all__ = [] # type: ignore diff --git a/yass/third_party/benchmark/tools/gbench/report.py b/yass/third_party/benchmark/tools/gbench/report.py index b2bbfb9f62..7158fd1654 100644 --- a/yass/third_party/benchmark/tools/gbench/report.py +++ b/yass/third_party/benchmark/tools/gbench/report.py @@ -1,14 +1,17 @@ -"""report.py - Utilities for reporting statistics about benchmark results +# type: ignore + +""" +report.py - Utilities for reporting statistics about benchmark results """ -import unittest -import os -import re import copy +import os import random +import re +import unittest -from scipy.stats import mannwhitneyu, gmean from numpy import array +from scipy.stats import gmean, mannwhitneyu class BenchmarkColor(object): @@ -17,26 +20,25 @@ class BenchmarkColor(object): self.code = code def 
__repr__(self): - return '%s%r' % (self.__class__.__name__, - (self.name, self.code)) + return "%s%r" % (self.__class__.__name__, (self.name, self.code)) def __format__(self, format): return self.code # Benchmark Colors Enumeration -BC_NONE = BenchmarkColor('NONE', '') -BC_MAGENTA = BenchmarkColor('MAGENTA', '\033[95m') -BC_CYAN = BenchmarkColor('CYAN', '\033[96m') -BC_OKBLUE = BenchmarkColor('OKBLUE', '\033[94m') -BC_OKGREEN = BenchmarkColor('OKGREEN', '\033[32m') -BC_HEADER = BenchmarkColor('HEADER', '\033[92m') -BC_WARNING = BenchmarkColor('WARNING', '\033[93m') -BC_WHITE = BenchmarkColor('WHITE', '\033[97m') -BC_FAIL = BenchmarkColor('FAIL', '\033[91m') -BC_ENDC = BenchmarkColor('ENDC', '\033[0m') -BC_BOLD = BenchmarkColor('BOLD', '\033[1m') -BC_UNDERLINE = BenchmarkColor('UNDERLINE', '\033[4m') +BC_NONE = BenchmarkColor("NONE", "") +BC_MAGENTA = BenchmarkColor("MAGENTA", "\033[95m") +BC_CYAN = BenchmarkColor("CYAN", "\033[96m") +BC_OKBLUE = BenchmarkColor("OKBLUE", "\033[94m") +BC_OKGREEN = BenchmarkColor("OKGREEN", "\033[32m") +BC_HEADER = BenchmarkColor("HEADER", "\033[92m") +BC_WARNING = BenchmarkColor("WARNING", "\033[93m") +BC_WHITE = BenchmarkColor("WHITE", "\033[97m") +BC_FAIL = BenchmarkColor("FAIL", "\033[91m") +BC_ENDC = BenchmarkColor("ENDC", "\033[0m") +BC_BOLD = BenchmarkColor("BOLD", "\033[1m") +BC_UNDERLINE = BenchmarkColor("UNDERLINE", "\033[4m") UTEST_MIN_REPETITIONS = 2 UTEST_OPTIMAL_REPETITIONS = 9 # Lowest reasonable number, More is better. 
@@ -59,10 +61,14 @@ def color_format(use_color, fmt_str, *args, **kwargs): """ assert use_color is True or use_color is False if not use_color: - args = [arg if not isinstance(arg, BenchmarkColor) else BC_NONE - for arg in args] - kwargs = {key: arg if not isinstance(arg, BenchmarkColor) else BC_NONE - for key, arg in kwargs.items()} + args = [ + arg if not isinstance(arg, BenchmarkColor) else BC_NONE + for arg in args + ] + kwargs = { + key: arg if not isinstance(arg, BenchmarkColor) else BC_NONE + for key, arg in kwargs.items() + } return fmt_str.format(*args, **kwargs) @@ -73,8 +79,8 @@ def find_longest_name(benchmark_list): """ longest_name = 1 for bc in benchmark_list: - if len(bc['name']) > longest_name: - longest_name = len(bc['name']) + if len(bc["name"]) > longest_name: + longest_name = len(bc["name"]) return longest_name @@ -95,13 +101,13 @@ def filter_benchmark(json_orig, family, replacement=""): """ regex = re.compile(family) filtered = {} - filtered['benchmarks'] = [] - for be in json_orig['benchmarks']: - if not regex.search(be['name']): + filtered["benchmarks"] = [] + for be in json_orig["benchmarks"]: + if not regex.search(be["name"]): continue filteredbench = copy.deepcopy(be) # Do NOT modify the old name! - filteredbench['name'] = regex.sub(replacement, filteredbench['name']) - filtered['benchmarks'].append(filteredbench) + filteredbench["name"] = regex.sub(replacement, filteredbench["name"]) + filtered["benchmarks"].append(filteredbench) return filtered @@ -110,9 +116,11 @@ def get_unique_benchmark_names(json): While *keeping* the order, give all the unique 'names' used for benchmarks. 
""" seen = set() - uniqued = [x['name'] for x in json['benchmarks'] - if x['name'] not in seen and - (seen.add(x['name']) or True)] + uniqued = [ + x["name"] + for x in json["benchmarks"] + if x["name"] not in seen and (seen.add(x["name"]) or True) + ] return uniqued @@ -125,7 +133,7 @@ def intersect(list1, list2): def is_potentially_comparable_benchmark(x): - return ('time_unit' in x and 'real_time' in x and 'cpu_time' in x) + return "time_unit" in x and "real_time" in x and "cpu_time" in x def partition_benchmarks(json1, json2): @@ -142,18 +150,24 @@ def partition_benchmarks(json1, json2): time_unit = None # Pick the time unit from the first entry of the lhs benchmark. # We should be careful not to crash with unexpected input. - for x in json1['benchmarks']: - if (x['name'] == name and is_potentially_comparable_benchmark(x)): - time_unit = x['time_unit'] + for x in json1["benchmarks"]: + if x["name"] == name and is_potentially_comparable_benchmark(x): + time_unit = x["time_unit"] break if time_unit is None: continue # Filter by name and time unit. # All the repetitions are assumed to be comparable. - lhs = [x for x in json1['benchmarks'] if x['name'] == name and - x['time_unit'] == time_unit] - rhs = [x for x in json2['benchmarks'] if x['name'] == name and - x['time_unit'] == time_unit] + lhs = [ + x + for x in json1["benchmarks"] + if x["name"] == name and x["time_unit"] == time_unit + ] + rhs = [ + x + for x in json2["benchmarks"] + if x["name"] == name and x["time_unit"] == time_unit + ] partitions.append([lhs, rhs]) return partitions @@ -164,7 +178,7 @@ def get_timedelta_field_as_seconds(benchmark, field_name): time_unit, as time in seconds. """ timedelta = benchmark[field_name] - time_unit = benchmark.get('time_unit', 's') + time_unit = benchmark.get("time_unit", "s") return timedelta * _TIME_UNIT_TO_SECONDS_MULTIPLIER.get(time_unit) @@ -174,11 +188,15 @@ def calculate_geomean(json): and calculate their geomean. 
""" times = [] - for benchmark in json['benchmarks']: - if 'run_type' in benchmark and benchmark['run_type'] == 'aggregate': + for benchmark in json["benchmarks"]: + if "run_type" in benchmark and benchmark["run_type"] == "aggregate": continue - times.append([get_timedelta_field_as_seconds(benchmark, 'real_time'), - get_timedelta_field_as_seconds(benchmark, 'cpu_time')]) + times.append( + [ + get_timedelta_field_as_seconds(benchmark, "real_time"), + get_timedelta_field_as_seconds(benchmark, "cpu_time"), + ] + ) return gmean(times) if times else array([]) @@ -190,19 +208,23 @@ def extract_field(partition, field_name): def calc_utest(timings_cpu, timings_time): - min_rep_cnt = min(len(timings_time[0]), - len(timings_time[1]), - len(timings_cpu[0]), - len(timings_cpu[1])) + min_rep_cnt = min( + len(timings_time[0]), + len(timings_time[1]), + len(timings_cpu[0]), + len(timings_cpu[1]), + ) # Does *everything* has at least UTEST_MIN_REPETITIONS repetitions? if min_rep_cnt < UTEST_MIN_REPETITIONS: return False, None, None time_pvalue = mannwhitneyu( - timings_time[0], timings_time[1], alternative='two-sided').pvalue + timings_time[0], timings_time[1], alternative="two-sided" + ).pvalue cpu_pvalue = mannwhitneyu( - timings_cpu[0], timings_cpu[1], alternative='two-sided').pvalue + timings_cpu[0], timings_cpu[1], alternative="two-sided" + ).pvalue return (min_rep_cnt >= UTEST_OPTIMAL_REPETITIONS), cpu_pvalue, time_pvalue @@ -212,38 +234,46 @@ def print_utest(bc_name, utest, utest_alpha, first_col_width, use_color=True): return BC_FAIL if pval >= utest_alpha else BC_OKGREEN # Check if we failed miserably with minimum required repetitions for utest - if not utest['have_optimal_repetitions'] and utest['cpu_pvalue'] is None and utest['time_pvalue'] is None: + if ( + not utest["have_optimal_repetitions"] + and utest["cpu_pvalue"] is None + and utest["time_pvalue"] is None + ): return [] dsc = "U Test, Repetitions: {} vs {}".format( - utest['nr_of_repetitions'], 
utest['nr_of_repetitions_other']) + utest["nr_of_repetitions"], utest["nr_of_repetitions_other"] + ) dsc_color = BC_OKGREEN # We still got some results to show but issue a warning about it. - if not utest['have_optimal_repetitions']: + if not utest["have_optimal_repetitions"]: dsc_color = BC_WARNING dsc += ". WARNING: Results unreliable! {}+ repetitions recommended.".format( - UTEST_OPTIMAL_REPETITIONS) + UTEST_OPTIMAL_REPETITIONS + ) special_str = "{}{:<{}s}{endc}{}{:16.4f}{endc}{}{:16.4f}{endc}{} {}" - return [color_format(use_color, - special_str, - BC_HEADER, - "{}{}".format(bc_name, UTEST_COL_NAME), - first_col_width, - get_utest_color( - utest['time_pvalue']), utest['time_pvalue'], - get_utest_color( - utest['cpu_pvalue']), utest['cpu_pvalue'], - dsc_color, dsc, - endc=BC_ENDC)] + return [ + color_format( + use_color, + special_str, + BC_HEADER, + "{}{}".format(bc_name, UTEST_COL_NAME), + first_col_width, + get_utest_color(utest["time_pvalue"]), + utest["time_pvalue"], + get_utest_color(utest["cpu_pvalue"]), + utest["cpu_pvalue"], + dsc_color, + dsc, + endc=BC_ENDC, + ) + ] -def get_difference_report( - json1, - json2, - utest=False): +def get_difference_report(json1, json2, utest=False): """ Calculate and report the difference between each test of two benchmarks runs specified as 'json1' and 'json2'. Output is another json containing @@ -254,37 +284,44 @@ def get_difference_report( diff_report = [] partitions = partition_benchmarks(json1, json2) for partition in partitions: - benchmark_name = partition[0][0]['name'] - label = partition[0][0]['label'] if 'label' in partition[0][0] else '' - time_unit = partition[0][0]['time_unit'] + benchmark_name = partition[0][0]["name"] + label = partition[0][0]["label"] if "label" in partition[0][0] else "" + time_unit = partition[0][0]["time_unit"] measurements = [] utest_results = {} # Careful, we may have different repetition count. 
for i in range(min(len(partition[0]), len(partition[1]))): bn = partition[0][i] other_bench = partition[1][i] - measurements.append({ - 'real_time': bn['real_time'], - 'cpu_time': bn['cpu_time'], - 'real_time_other': other_bench['real_time'], - 'cpu_time_other': other_bench['cpu_time'], - 'time': calculate_change(bn['real_time'], other_bench['real_time']), - 'cpu': calculate_change(bn['cpu_time'], other_bench['cpu_time']) - }) + measurements.append( + { + "real_time": bn["real_time"], + "cpu_time": bn["cpu_time"], + "real_time_other": other_bench["real_time"], + "cpu_time_other": other_bench["cpu_time"], + "time": calculate_change( + bn["real_time"], other_bench["real_time"] + ), + "cpu": calculate_change( + bn["cpu_time"], other_bench["cpu_time"] + ), + } + ) # After processing the whole partition, if requested, do the U test. if utest: - timings_cpu = extract_field(partition, 'cpu_time') - timings_time = extract_field(partition, 'real_time') + timings_cpu = extract_field(partition, "cpu_time") + timings_time = extract_field(partition, "real_time") have_optimal_repetitions, cpu_pvalue, time_pvalue = calc_utest( - timings_cpu, timings_time) - if cpu_pvalue and time_pvalue: + timings_cpu, timings_time + ) + if cpu_pvalue is not None and time_pvalue is not None: utest_results = { - 'have_optimal_repetitions': have_optimal_repetitions, - 'cpu_pvalue': cpu_pvalue, - 'time_pvalue': time_pvalue, - 'nr_of_repetitions': len(timings_cpu[0]), - 'nr_of_repetitions_other': len(timings_cpu[1]) + "have_optimal_repetitions": have_optimal_repetitions, + "cpu_pvalue": cpu_pvalue, + "time_pvalue": time_pvalue, + "nr_of_repetitions": len(timings_cpu[0]), + "nr_of_repetitions_other": len(timings_cpu[1]), } # Store only if we had any measurements for given benchmark. @@ -292,47 +329,63 @@ def get_difference_report( # time units which are not compatible with other time units in the # benchmark suite. 
if measurements: - run_type = partition[0][0]['run_type'] if 'run_type' in partition[0][0] else '' - aggregate_name = partition[0][0]['aggregate_name'] if run_type == 'aggregate' and 'aggregate_name' in partition[0][0] else '' - diff_report.append({ - 'name': benchmark_name, - 'label': label, - 'measurements': measurements, - 'time_unit': time_unit, - 'run_type': run_type, - 'aggregate_name': aggregate_name, - 'utest': utest_results - }) + run_type = ( + partition[0][0]["run_type"] + if "run_type" in partition[0][0] + else "" + ) + aggregate_name = ( + partition[0][0]["aggregate_name"] + if run_type == "aggregate" + and "aggregate_name" in partition[0][0] + else "" + ) + diff_report.append( + { + "name": benchmark_name, + "label": label, + "measurements": measurements, + "time_unit": time_unit, + "run_type": run_type, + "aggregate_name": aggregate_name, + "utest": utest_results, + } + ) lhs_gmean = calculate_geomean(json1) rhs_gmean = calculate_geomean(json2) if lhs_gmean.any() and rhs_gmean.any(): - diff_report.append({ - 'name': 'OVERALL_GEOMEAN', - 'label': '', - 'measurements': [{ - 'real_time': lhs_gmean[0], - 'cpu_time': lhs_gmean[1], - 'real_time_other': rhs_gmean[0], - 'cpu_time_other': rhs_gmean[1], - 'time': calculate_change(lhs_gmean[0], rhs_gmean[0]), - 'cpu': calculate_change(lhs_gmean[1], rhs_gmean[1]) - }], - 'time_unit': 's', - 'run_type': 'aggregate', - 'aggregate_name': 'geomean', - 'utest': {} - }) + diff_report.append( + { + "name": "OVERALL_GEOMEAN", + "label": "", + "measurements": [ + { + "real_time": lhs_gmean[0], + "cpu_time": lhs_gmean[1], + "real_time_other": rhs_gmean[0], + "cpu_time_other": rhs_gmean[1], + "time": calculate_change(lhs_gmean[0], rhs_gmean[0]), + "cpu": calculate_change(lhs_gmean[1], rhs_gmean[1]), + } + ], + "time_unit": "s", + "run_type": "aggregate", + "aggregate_name": "geomean", + "utest": {}, + } + ) return diff_report def print_difference_report( - json_diff_report, - include_aggregates_only=False, - utest=False, - 
utest_alpha=0.05, - use_color=True): + json_diff_report, + include_aggregates_only=False, + utest=False, + utest_alpha=0.05, + use_color=True, +): """ Calculate and report the difference between each test of two benchmarks runs specified as 'json1' and 'json2'. @@ -348,44 +401,53 @@ def print_difference_report( return BC_CYAN first_col_width = find_longest_name(json_diff_report) - first_col_width = max( - first_col_width, - len('Benchmark')) + first_col_width = max(first_col_width, len("Benchmark")) first_col_width += len(UTEST_COL_NAME) first_line = "{:<{}s}Time CPU Time Old Time New CPU Old CPU New".format( - 'Benchmark', 12 + first_col_width) - output_strs = [first_line, '-' * len(first_line)] + "Benchmark", 12 + first_col_width + ) + output_strs = [first_line, "-" * len(first_line)] fmt_str = "{}{:<{}s}{endc}{}{:+16.4f}{endc}{}{:+16.4f}{endc}{:14.0f}{:14.0f}{endc}{:14.0f}{:14.0f}" for benchmark in json_diff_report: # *If* we were asked to only include aggregates, # and if it is non-aggregate, then don't print it. 
- if not include_aggregates_only or not 'run_type' in benchmark or benchmark['run_type'] == 'aggregate': - for measurement in benchmark['measurements']: - output_strs += [color_format(use_color, - fmt_str, - BC_HEADER, - benchmark['name'], - first_col_width, - get_color(measurement['time']), - measurement['time'], - get_color(measurement['cpu']), - measurement['cpu'], - measurement['real_time'], - measurement['real_time_other'], - measurement['cpu_time'], - measurement['cpu_time_other'], - endc=BC_ENDC)] + if ( + not include_aggregates_only + or "run_type" not in benchmark + or benchmark["run_type"] == "aggregate" + ): + for measurement in benchmark["measurements"]: + output_strs += [ + color_format( + use_color, + fmt_str, + BC_HEADER, + benchmark["name"], + first_col_width, + get_color(measurement["time"]), + measurement["time"], + get_color(measurement["cpu"]), + measurement["cpu"], + measurement["real_time"], + measurement["real_time_other"], + measurement["cpu_time"], + measurement["cpu_time_other"], + endc=BC_ENDC, + ) + ] # After processing the measurements, if requested and # if applicable (e.g. u-test exists for given benchmark), # print the U test. 
- if utest and benchmark['utest']: - output_strs += print_utest(benchmark['name'], - benchmark['utest'], - utest_alpha=utest_alpha, - first_col_width=first_col_width, - use_color=use_color) + if utest and benchmark["utest"]: + output_strs += print_utest( + benchmark["name"], + benchmark["utest"], + utest_alpha=utest_alpha, + first_col_width=first_col_width, + use_color=use_color, + ) return output_strs @@ -397,21 +459,21 @@ def print_difference_report( class TestGetUniqueBenchmarkNames(unittest.TestCase): def load_results(self): import json + testInputs = os.path.join( - os.path.dirname( - os.path.realpath(__file__)), - 'Inputs') - testOutput = os.path.join(testInputs, 'test3_run0.json') - with open(testOutput, 'r') as f: + os.path.dirname(os.path.realpath(__file__)), "Inputs" + ) + testOutput = os.path.join(testInputs, "test3_run0.json") + with open(testOutput, "r") as f: json = json.load(f) return json def test_basic(self): expect_lines = [ - 'BM_One', - 'BM_Two', - 'short', # These two are not sorted - 'medium', # These two are not sorted + "BM_One", + "BM_Two", + "short", # These two are not sorted + "medium", # These two are not sorted ] json = self.load_results() output_lines = get_unique_benchmark_names(json) @@ -427,15 +489,15 @@ class TestReportDifference(unittest.TestCase): def setUpClass(cls): def load_results(): import json + testInputs = os.path.join( - os.path.dirname( - os.path.realpath(__file__)), - 'Inputs') - testOutput1 = os.path.join(testInputs, 'test1_run1.json') - testOutput2 = os.path.join(testInputs, 'test1_run2.json') - with open(testOutput1, 'r') as f: + os.path.dirname(os.path.realpath(__file__)), "Inputs" + ) + testOutput1 = os.path.join(testInputs, "test1_run1.json") + testOutput2 = os.path.join(testInputs, "test1_run2.json") + with open(testOutput1, "r") as f: json1 = json.load(f) - with open(testOutput2, 'r') as f: + with open(testOutput2, "r") as f: json2 = json.load(f) return json1, json2 @@ -444,171 +506,323 @@ class 
TestReportDifference(unittest.TestCase): def test_json_diff_report_pretty_printing(self): expect_lines = [ - ['BM_SameTimes', '+0.0000', '+0.0000', '10', '10', '10', '10'], - ['BM_2xFaster', '-0.5000', '-0.5000', '50', '25', '50', '25'], - ['BM_2xSlower', '+1.0000', '+1.0000', '50', '100', '50', '100'], - ['BM_1PercentFaster', '-0.0100', '-0.0100', '100', '99', '100', '99'], - ['BM_1PercentSlower', '+0.0100', '+0.0100', '100', '101', '100', '101'], - ['BM_10PercentFaster', '-0.1000', '-0.1000', '100', '90', '100', '90'], - ['BM_10PercentSlower', '+0.1000', '+0.1000', '100', '110', '100', '110'], - ['BM_100xSlower', '+99.0000', '+99.0000', - '100', '10000', '100', '10000'], - ['BM_100xFaster', '-0.9900', '-0.9900', - '10000', '100', '10000', '100'], - ['BM_10PercentCPUToTime', '+0.1000', - '-0.1000', '100', '110', '100', '90'], - ['BM_ThirdFaster', '-0.3333', '-0.3334', '100', '67', '100', '67'], - ['BM_NotBadTimeUnit', '-0.9000', '+0.2000', '0', '0', '0', '1'], - ['BM_hasLabel', '+0.0000', '+0.0000', '1', '1', '1', '1'], - ['OVERALL_GEOMEAN', '-0.8113', '-0.7779', '0', '0', '0', '0'] + ["BM_SameTimes", "+0.0000", "+0.0000", "10", "10", "10", "10"], + ["BM_2xFaster", "-0.5000", "-0.5000", "50", "25", "50", "25"], + ["BM_2xSlower", "+1.0000", "+1.0000", "50", "100", "50", "100"], + [ + "BM_1PercentFaster", + "-0.0100", + "-0.0100", + "100", + "99", + "100", + "99", + ], + [ + "BM_1PercentSlower", + "+0.0100", + "+0.0100", + "100", + "101", + "100", + "101", + ], + [ + "BM_10PercentFaster", + "-0.1000", + "-0.1000", + "100", + "90", + "100", + "90", + ], + [ + "BM_10PercentSlower", + "+0.1000", + "+0.1000", + "100", + "110", + "100", + "110", + ], + [ + "BM_100xSlower", + "+99.0000", + "+99.0000", + "100", + "10000", + "100", + "10000", + ], + [ + "BM_100xFaster", + "-0.9900", + "-0.9900", + "10000", + "100", + "10000", + "100", + ], + [ + "BM_10PercentCPUToTime", + "+0.1000", + "-0.1000", + "100", + "110", + "100", + "90", + ], + ["BM_ThirdFaster", "-0.3333", 
"-0.3334", "100", "67", "100", "67"], + ["BM_NotBadTimeUnit", "-0.9000", "+0.2000", "0", "0", "0", "1"], + ["BM_hasLabel", "+0.0000", "+0.0000", "1", "1", "1", "1"], + ["OVERALL_GEOMEAN", "-0.8113", "-0.7779", "0", "0", "0", "0"], ] output_lines_with_header = print_difference_report( - self.json_diff_report, use_color=False) + self.json_diff_report, use_color=False + ) output_lines = output_lines_with_header[2:] print("\n") print("\n".join(output_lines_with_header)) self.assertEqual(len(output_lines), len(expect_lines)) for i in range(0, len(output_lines)): - parts = [x for x in output_lines[i].split(' ') if x] + parts = [x for x in output_lines[i].split(" ") if x] self.assertEqual(len(parts), 7) self.assertEqual(expect_lines[i], parts) def test_json_diff_report_output(self): expected_output = [ { - 'name': 'BM_SameTimes', - 'label': '', - 'measurements': [{'time': 0.0000, 'cpu': 0.0000, - 'real_time': 10, 'real_time_other': 10, - 'cpu_time': 10, 'cpu_time_other': 10}], - 'time_unit': 'ns', - 'utest': {} + "name": "BM_SameTimes", + "label": "", + "measurements": [ + { + "time": 0.0000, + "cpu": 0.0000, + "real_time": 10, + "real_time_other": 10, + "cpu_time": 10, + "cpu_time_other": 10, + } + ], + "time_unit": "ns", + "utest": {}, }, { - 'name': 'BM_2xFaster', - 'label': '', - 'measurements': [{'time': -0.5000, 'cpu': -0.5000, - 'real_time': 50, 'real_time_other': 25, - 'cpu_time': 50, 'cpu_time_other': 25}], - 'time_unit': 'ns', - 'utest': {} + "name": "BM_2xFaster", + "label": "", + "measurements": [ + { + "time": -0.5000, + "cpu": -0.5000, + "real_time": 50, + "real_time_other": 25, + "cpu_time": 50, + "cpu_time_other": 25, + } + ], + "time_unit": "ns", + "utest": {}, }, { - 'name': 'BM_2xSlower', - 'label': '', - 'measurements': [{'time': 1.0000, 'cpu': 1.0000, - 'real_time': 50, 'real_time_other': 100, - 'cpu_time': 50, 'cpu_time_other': 100}], - 'time_unit': 'ns', - 'utest': {} + "name": "BM_2xSlower", + "label": "", + "measurements": [ + { + "time": 1.0000, 
+ "cpu": 1.0000, + "real_time": 50, + "real_time_other": 100, + "cpu_time": 50, + "cpu_time_other": 100, + } + ], + "time_unit": "ns", + "utest": {}, }, { - 'name': 'BM_1PercentFaster', - 'label': '', - 'measurements': [{'time': -0.0100, 'cpu': -0.0100, - 'real_time': 100, 'real_time_other': 98.9999999, - 'cpu_time': 100, 'cpu_time_other': 98.9999999}], - 'time_unit': 'ns', - 'utest': {} + "name": "BM_1PercentFaster", + "label": "", + "measurements": [ + { + "time": -0.0100, + "cpu": -0.0100, + "real_time": 100, + "real_time_other": 98.9999999, + "cpu_time": 100, + "cpu_time_other": 98.9999999, + } + ], + "time_unit": "ns", + "utest": {}, }, { - 'name': 'BM_1PercentSlower', - 'label': '', - 'measurements': [{'time': 0.0100, 'cpu': 0.0100, - 'real_time': 100, 'real_time_other': 101, - 'cpu_time': 100, 'cpu_time_other': 101}], - 'time_unit': 'ns', - 'utest': {} + "name": "BM_1PercentSlower", + "label": "", + "measurements": [ + { + "time": 0.0100, + "cpu": 0.0100, + "real_time": 100, + "real_time_other": 101, + "cpu_time": 100, + "cpu_time_other": 101, + } + ], + "time_unit": "ns", + "utest": {}, }, { - 'name': 'BM_10PercentFaster', - 'label': '', - 'measurements': [{'time': -0.1000, 'cpu': -0.1000, - 'real_time': 100, 'real_time_other': 90, - 'cpu_time': 100, 'cpu_time_other': 90}], - 'time_unit': 'ns', - 'utest': {} + "name": "BM_10PercentFaster", + "label": "", + "measurements": [ + { + "time": -0.1000, + "cpu": -0.1000, + "real_time": 100, + "real_time_other": 90, + "cpu_time": 100, + "cpu_time_other": 90, + } + ], + "time_unit": "ns", + "utest": {}, }, { - 'name': 'BM_10PercentSlower', - 'label': '', - 'measurements': [{'time': 0.1000, 'cpu': 0.1000, - 'real_time': 100, 'real_time_other': 110, - 'cpu_time': 100, 'cpu_time_other': 110}], - 'time_unit': 'ns', - 'utest': {} + "name": "BM_10PercentSlower", + "label": "", + "measurements": [ + { + "time": 0.1000, + "cpu": 0.1000, + "real_time": 100, + "real_time_other": 110, + "cpu_time": 100, + "cpu_time_other": 
110, + } + ], + "time_unit": "ns", + "utest": {}, }, { - 'name': 'BM_100xSlower', - 'label': '', - 'measurements': [{'time': 99.0000, 'cpu': 99.0000, - 'real_time': 100, 'real_time_other': 10000, - 'cpu_time': 100, 'cpu_time_other': 10000}], - 'time_unit': 'ns', - 'utest': {} + "name": "BM_100xSlower", + "label": "", + "measurements": [ + { + "time": 99.0000, + "cpu": 99.0000, + "real_time": 100, + "real_time_other": 10000, + "cpu_time": 100, + "cpu_time_other": 10000, + } + ], + "time_unit": "ns", + "utest": {}, }, { - 'name': 'BM_100xFaster', - 'label': '', - 'measurements': [{'time': -0.9900, 'cpu': -0.9900, - 'real_time': 10000, 'real_time_other': 100, - 'cpu_time': 10000, 'cpu_time_other': 100}], - 'time_unit': 'ns', - 'utest': {} + "name": "BM_100xFaster", + "label": "", + "measurements": [ + { + "time": -0.9900, + "cpu": -0.9900, + "real_time": 10000, + "real_time_other": 100, + "cpu_time": 10000, + "cpu_time_other": 100, + } + ], + "time_unit": "ns", + "utest": {}, }, { - 'name': 'BM_10PercentCPUToTime', - 'label': '', - 'measurements': [{'time': 0.1000, 'cpu': -0.1000, - 'real_time': 100, 'real_time_other': 110, - 'cpu_time': 100, 'cpu_time_other': 90}], - 'time_unit': 'ns', - 'utest': {} + "name": "BM_10PercentCPUToTime", + "label": "", + "measurements": [ + { + "time": 0.1000, + "cpu": -0.1000, + "real_time": 100, + "real_time_other": 110, + "cpu_time": 100, + "cpu_time_other": 90, + } + ], + "time_unit": "ns", + "utest": {}, }, { - 'name': 'BM_ThirdFaster', - 'label': '', - 'measurements': [{'time': -0.3333, 'cpu': -0.3334, - 'real_time': 100, 'real_time_other': 67, - 'cpu_time': 100, 'cpu_time_other': 67}], - 'time_unit': 'ns', - 'utest': {} + "name": "BM_ThirdFaster", + "label": "", + "measurements": [ + { + "time": -0.3333, + "cpu": -0.3334, + "real_time": 100, + "real_time_other": 67, + "cpu_time": 100, + "cpu_time_other": 67, + } + ], + "time_unit": "ns", + "utest": {}, }, { - 'name': 'BM_NotBadTimeUnit', - 'label': '', - 'measurements': [{'time': 
-0.9000, 'cpu': 0.2000, - 'real_time': 0.4, 'real_time_other': 0.04, - 'cpu_time': 0.5, 'cpu_time_other': 0.6}], - 'time_unit': 's', - 'utest': {} + "name": "BM_NotBadTimeUnit", + "label": "", + "measurements": [ + { + "time": -0.9000, + "cpu": 0.2000, + "real_time": 0.4, + "real_time_other": 0.04, + "cpu_time": 0.5, + "cpu_time_other": 0.6, + } + ], + "time_unit": "s", + "utest": {}, }, { - 'name': 'BM_hasLabel', - 'label': 'a label', - 'measurements': [{'time': 0.0000, 'cpu': 0.0000, - 'real_time': 1, 'real_time_other': 1, - 'cpu_time': 1, 'cpu_time_other': 1}], - 'time_unit': 's', - 'utest': {} + "name": "BM_hasLabel", + "label": "a label", + "measurements": [ + { + "time": 0.0000, + "cpu": 0.0000, + "real_time": 1, + "real_time_other": 1, + "cpu_time": 1, + "cpu_time_other": 1, + } + ], + "time_unit": "s", + "utest": {}, }, { - 'name': 'OVERALL_GEOMEAN', - 'label': '', - 'measurements': [{'real_time': 3.1622776601683826e-06, 'cpu_time': 3.2130844755623912e-06, - 'real_time_other': 1.9768988699420897e-07, 'cpu_time_other': 2.397447755209533e-07, - 'time': -0.8112976497120911, 'cpu': -0.7778551721181174}], - 'time_unit': 's', - 'run_type': 'aggregate', - 'aggregate_name': 'geomean', 'utest': {} + "name": "OVERALL_GEOMEAN", + "label": "", + "measurements": [ + { + "real_time": 3.1622776601683826e-06, + "cpu_time": 3.2130844755623912e-06, + "real_time_other": 1.9768988699420897e-07, + "cpu_time_other": 2.397447755209533e-07, + "time": -0.8112976497120911, + "cpu": -0.7778551721181174, + } + ], + "time_unit": "s", + "run_type": "aggregate", + "aggregate_name": "geomean", + "utest": {}, }, ] self.assertEqual(len(self.json_diff_report), len(expected_output)) - for out, expected in zip( - self.json_diff_report, expected_output): - self.assertEqual(out['name'], expected['name']) - self.assertEqual(out['label'], expected['label']) - self.assertEqual(out['time_unit'], expected['time_unit']) + for out, expected in zip(self.json_diff_report, expected_output): + 
self.assertEqual(out["name"], expected["name"]) + self.assertEqual(out["label"], expected["label"]) + self.assertEqual(out["time_unit"], expected["time_unit"]) assert_utest(self, out, expected) assert_measurements(self, out, expected) @@ -618,12 +832,12 @@ class TestReportDifferenceBetweenFamilies(unittest.TestCase): def setUpClass(cls): def load_result(): import json + testInputs = os.path.join( - os.path.dirname( - os.path.realpath(__file__)), - 'Inputs') - testOutput = os.path.join(testInputs, 'test2_run.json') - with open(testOutput, 'r') as f: + os.path.dirname(os.path.realpath(__file__)), "Inputs" + ) + testOutput = os.path.join(testInputs, "test2_run.json") + with open(testOutput, "r") as f: json = json.load(f) return json @@ -634,65 +848,108 @@ class TestReportDifferenceBetweenFamilies(unittest.TestCase): def test_json_diff_report_pretty_printing(self): expect_lines = [ - ['.', '-0.5000', '-0.5000', '10', '5', '10', '5'], - ['./4', '-0.5000', '-0.5000', '40', '20', '40', '20'], - ['Prefix/.', '-0.5000', '-0.5000', '20', '10', '20', '10'], - ['Prefix/./3', '-0.5000', '-0.5000', '30', '15', '30', '15'], - ['OVERALL_GEOMEAN', '-0.5000', '-0.5000', '0', '0', '0', '0'] + [".", "-0.5000", "-0.5000", "10", "5", "10", "5"], + ["./4", "-0.5000", "-0.5000", "40", "20", "40", "20"], + ["Prefix/.", "-0.5000", "-0.5000", "20", "10", "20", "10"], + ["Prefix/./3", "-0.5000", "-0.5000", "30", "15", "30", "15"], + ["OVERALL_GEOMEAN", "-0.5000", "-0.5000", "0", "0", "0", "0"], ] output_lines_with_header = print_difference_report( - self.json_diff_report, use_color=False) + self.json_diff_report, use_color=False + ) output_lines = output_lines_with_header[2:] print("\n") print("\n".join(output_lines_with_header)) self.assertEqual(len(output_lines), len(expect_lines)) for i in range(0, len(output_lines)): - parts = [x for x in output_lines[i].split(' ') if x] + parts = [x for x in output_lines[i].split(" ") if x] self.assertEqual(len(parts), 7) 
self.assertEqual(expect_lines[i], parts) def test_json_diff_report(self): expected_output = [ { - 'name': u'.', - 'measurements': [{'time': -0.5, 'cpu': -0.5, 'real_time': 10, 'real_time_other': 5, 'cpu_time': 10, 'cpu_time_other': 5}], - 'time_unit': 'ns', - 'utest': {} + "name": ".", + "measurements": [ + { + "time": -0.5, + "cpu": -0.5, + "real_time": 10, + "real_time_other": 5, + "cpu_time": 10, + "cpu_time_other": 5, + } + ], + "time_unit": "ns", + "utest": {}, }, { - 'name': u'./4', - 'measurements': [{'time': -0.5, 'cpu': -0.5, 'real_time': 40, 'real_time_other': 20, 'cpu_time': 40, 'cpu_time_other': 20}], - 'time_unit': 'ns', - 'utest': {}, + "name": "./4", + "measurements": [ + { + "time": -0.5, + "cpu": -0.5, + "real_time": 40, + "real_time_other": 20, + "cpu_time": 40, + "cpu_time_other": 20, + } + ], + "time_unit": "ns", + "utest": {}, }, { - 'name': u'Prefix/.', - 'measurements': [{'time': -0.5, 'cpu': -0.5, 'real_time': 20, 'real_time_other': 10, 'cpu_time': 20, 'cpu_time_other': 10}], - 'time_unit': 'ns', - 'utest': {} + "name": "Prefix/.", + "measurements": [ + { + "time": -0.5, + "cpu": -0.5, + "real_time": 20, + "real_time_other": 10, + "cpu_time": 20, + "cpu_time_other": 10, + } + ], + "time_unit": "ns", + "utest": {}, }, { - 'name': u'Prefix/./3', - 'measurements': [{'time': -0.5, 'cpu': -0.5, 'real_time': 30, 'real_time_other': 15, 'cpu_time': 30, 'cpu_time_other': 15}], - 'time_unit': 'ns', - 'utest': {} + "name": "Prefix/./3", + "measurements": [ + { + "time": -0.5, + "cpu": -0.5, + "real_time": 30, + "real_time_other": 15, + "cpu_time": 30, + "cpu_time_other": 15, + } + ], + "time_unit": "ns", + "utest": {}, }, { - 'name': 'OVERALL_GEOMEAN', - 'measurements': [{'real_time': 2.213363839400641e-08, 'cpu_time': 2.213363839400641e-08, - 'real_time_other': 1.1066819197003185e-08, 'cpu_time_other': 1.1066819197003185e-08, - 'time': -0.5000000000000009, 'cpu': -0.5000000000000009}], - 'time_unit': 's', - 'run_type': 'aggregate', - 'aggregate_name': 
'geomean', - 'utest': {} - } + "name": "OVERALL_GEOMEAN", + "measurements": [ + { + "real_time": 2.213363839400641e-08, + "cpu_time": 2.213363839400641e-08, + "real_time_other": 1.1066819197003185e-08, + "cpu_time_other": 1.1066819197003185e-08, + "time": -0.5000000000000009, + "cpu": -0.5000000000000009, + } + ], + "time_unit": "s", + "run_type": "aggregate", + "aggregate_name": "geomean", + "utest": {}, + }, ] self.assertEqual(len(self.json_diff_report), len(expected_output)) - for out, expected in zip( - self.json_diff_report, expected_output): - self.assertEqual(out['name'], expected['name']) - self.assertEqual(out['time_unit'], expected['time_unit']) + for out, expected in zip(self.json_diff_report, expected_output): + self.assertEqual(out["name"], expected["name"]) + self.assertEqual(out["time_unit"], expected["time_unit"]) assert_utest(self, out, expected) assert_measurements(self, out, expected) @@ -702,424 +959,489 @@ class TestReportDifferenceWithUTest(unittest.TestCase): def setUpClass(cls): def load_results(): import json + testInputs = os.path.join( - os.path.dirname( - os.path.realpath(__file__)), - 'Inputs') - testOutput1 = os.path.join(testInputs, 'test3_run0.json') - testOutput2 = os.path.join(testInputs, 'test3_run1.json') - with open(testOutput1, 'r') as f: + os.path.dirname(os.path.realpath(__file__)), "Inputs" + ) + testOutput1 = os.path.join(testInputs, "test3_run0.json") + testOutput2 = os.path.join(testInputs, "test3_run1.json") + with open(testOutput1, "r") as f: json1 = json.load(f) - with open(testOutput2, 'r') as f: + with open(testOutput2, "r") as f: json2 = json.load(f) return json1, json2 json1, json2 = load_results() - cls.json_diff_report = get_difference_report( - json1, json2, utest=True) + cls.json_diff_report = get_difference_report(json1, json2, utest=True) def test_json_diff_report_pretty_printing(self): expect_lines = [ - ['BM_One', '-0.1000', '+0.1000', '10', '9', '100', '110'], - ['BM_Two', '+0.1111', '-0.0111', '9', '10', 
'90', '89'], - ['BM_Two', '-0.1250', '-0.1628', '8', '7', '86', '72'], - ['BM_Two_pvalue', - '1.0000', - '0.6667', - 'U', - 'Test,', - 'Repetitions:', - '2', - 'vs', - '2.', - 'WARNING:', - 'Results', - 'unreliable!', - '9+', - 'repetitions', - 'recommended.'], - ['short', '-0.1250', '-0.0625', '8', '7', '80', '75'], - ['short', '-0.4325', '-0.1351', '8', '5', '77', '67'], - ['short_pvalue', - '0.7671', - '0.2000', - 'U', - 'Test,', - 'Repetitions:', - '2', - 'vs', - '3.', - 'WARNING:', - 'Results', - 'unreliable!', - '9+', - 'repetitions', - 'recommended.'], - ['medium', '-0.3750', '-0.3375', '8', '5', '80', '53'], - ['OVERALL_GEOMEAN', '+1.6405', '-0.6985', '0', '0', '0', '0'] + ["BM_One", "-0.1000", "+0.1000", "10", "9", "100", "110"], + ["BM_Two", "+0.1111", "-0.0111", "9", "10", "90", "89"], + ["BM_Two", "-0.1250", "-0.1628", "8", "7", "86", "72"], + [ + "BM_Two_pvalue", + "1.0000", + "0.6667", + "U", + "Test,", + "Repetitions:", + "2", + "vs", + "2.", + "WARNING:", + "Results", + "unreliable!", + "9+", + "repetitions", + "recommended.", + ], + ["short", "-0.1250", "-0.0625", "8", "7", "80", "75"], + ["short", "-0.4325", "-0.1351", "8", "5", "77", "67"], + [ + "short_pvalue", + "0.7671", + "0.2000", + "U", + "Test,", + "Repetitions:", + "2", + "vs", + "3.", + "WARNING:", + "Results", + "unreliable!", + "9+", + "repetitions", + "recommended.", + ], + ["medium", "-0.3750", "-0.3375", "8", "5", "80", "53"], + ["OVERALL_GEOMEAN", "+1.6405", "-0.6985", "0", "0", "0", "0"], ] output_lines_with_header = print_difference_report( - self.json_diff_report, utest=True, utest_alpha=0.05, use_color=False) + self.json_diff_report, utest=True, utest_alpha=0.05, use_color=False + ) output_lines = output_lines_with_header[2:] print("\n") print("\n".join(output_lines_with_header)) self.assertEqual(len(output_lines), len(expect_lines)) for i in range(0, len(output_lines)): - parts = [x for x in output_lines[i].split(' ') if x] + parts = [x for x in output_lines[i].split(" ") if 
x] self.assertEqual(expect_lines[i], parts) def test_json_diff_report_pretty_printing_aggregates_only(self): expect_lines = [ - ['BM_One', '-0.1000', '+0.1000', '10', '9', '100', '110'], - ['BM_Two_pvalue', - '1.0000', - '0.6667', - 'U', - 'Test,', - 'Repetitions:', - '2', - 'vs', - '2.', - 'WARNING:', - 'Results', - 'unreliable!', - '9+', - 'repetitions', - 'recommended.'], - ['short', '-0.1250', '-0.0625', '8', '7', '80', '75'], - ['short', '-0.4325', '-0.1351', '8', '5', '77', '67'], - ['short_pvalue', - '0.7671', - '0.2000', - 'U', - 'Test,', - 'Repetitions:', - '2', - 'vs', - '3.', - 'WARNING:', - 'Results', - 'unreliable!', - '9+', - 'repetitions', - 'recommended.'], - ['OVERALL_GEOMEAN', '+1.6405', '-0.6985', '0', '0', '0', '0'] + ["BM_One", "-0.1000", "+0.1000", "10", "9", "100", "110"], + [ + "BM_Two_pvalue", + "1.0000", + "0.6667", + "U", + "Test,", + "Repetitions:", + "2", + "vs", + "2.", + "WARNING:", + "Results", + "unreliable!", + "9+", + "repetitions", + "recommended.", + ], + ["short", "-0.1250", "-0.0625", "8", "7", "80", "75"], + ["short", "-0.4325", "-0.1351", "8", "5", "77", "67"], + [ + "short_pvalue", + "0.7671", + "0.2000", + "U", + "Test,", + "Repetitions:", + "2", + "vs", + "3.", + "WARNING:", + "Results", + "unreliable!", + "9+", + "repetitions", + "recommended.", + ], + ["OVERALL_GEOMEAN", "+1.6405", "-0.6985", "0", "0", "0", "0"], ] output_lines_with_header = print_difference_report( - self.json_diff_report, include_aggregates_only=True, utest=True, utest_alpha=0.05, use_color=False) + self.json_diff_report, + include_aggregates_only=True, + utest=True, + utest_alpha=0.05, + use_color=False, + ) output_lines = output_lines_with_header[2:] print("\n") print("\n".join(output_lines_with_header)) self.assertEqual(len(output_lines), len(expect_lines)) for i in range(0, len(output_lines)): - parts = [x for x in output_lines[i].split(' ') if x] + parts = [x for x in output_lines[i].split(" ") if x] self.assertEqual(expect_lines[i], parts) def 
test_json_diff_report(self): expected_output = [ { - 'name': u'BM_One', - 'measurements': [ - {'time': -0.1, - 'cpu': 0.1, - 'real_time': 10, - 'real_time_other': 9, - 'cpu_time': 100, - 'cpu_time_other': 110} + "name": "BM_One", + "measurements": [ + { + "time": -0.1, + "cpu": 0.1, + "real_time": 10, + "real_time_other": 9, + "cpu_time": 100, + "cpu_time_other": 110, + } ], - 'time_unit': 'ns', - 'utest': {} + "time_unit": "ns", + "utest": {}, }, { - 'name': u'BM_Two', - 'measurements': [ - {'time': 0.1111111111111111, - 'cpu': -0.011111111111111112, - 'real_time': 9, - 'real_time_other': 10, - 'cpu_time': 90, - 'cpu_time_other': 89}, - {'time': -0.125, 'cpu': -0.16279069767441862, 'real_time': 8, - 'real_time_other': 7, 'cpu_time': 86, 'cpu_time_other': 72} + "name": "BM_Two", + "measurements": [ + { + "time": 0.1111111111111111, + "cpu": -0.011111111111111112, + "real_time": 9, + "real_time_other": 10, + "cpu_time": 90, + "cpu_time_other": 89, + }, + { + "time": -0.125, + "cpu": -0.16279069767441862, + "real_time": 8, + "real_time_other": 7, + "cpu_time": 86, + "cpu_time_other": 72, + }, ], - 'time_unit': 'ns', - 'utest': { - 'have_optimal_repetitions': False, 'cpu_pvalue': 0.6666666666666666, 'time_pvalue': 1.0 - } + "time_unit": "ns", + "utest": { + "have_optimal_repetitions": False, + "cpu_pvalue": 0.6666666666666666, + "time_pvalue": 1.0, + }, }, { - 'name': u'short', - 'measurements': [ - {'time': -0.125, - 'cpu': -0.0625, - 'real_time': 8, - 'real_time_other': 7, - 'cpu_time': 80, - 'cpu_time_other': 75}, - {'time': -0.4325, - 'cpu': -0.13506493506493514, - 'real_time': 8, - 'real_time_other': 4.54, - 'cpu_time': 77, - 'cpu_time_other': 66.6} + "name": "short", + "measurements": [ + { + "time": -0.125, + "cpu": -0.0625, + "real_time": 8, + "real_time_other": 7, + "cpu_time": 80, + "cpu_time_other": 75, + }, + { + "time": -0.4325, + "cpu": -0.13506493506493514, + "real_time": 8, + "real_time_other": 4.54, + "cpu_time": 77, + "cpu_time_other": 66.6, + }, ], 
- 'time_unit': 'ns', - 'utest': { - 'have_optimal_repetitions': False, 'cpu_pvalue': 0.2, 'time_pvalue': 0.7670968684102772 - } + "time_unit": "ns", + "utest": { + "have_optimal_repetitions": False, + "cpu_pvalue": 0.2, + "time_pvalue": 0.7670968684102772, + }, }, { - 'name': u'medium', - 'measurements': [ - {'time': -0.375, - 'cpu': -0.3375, - 'real_time': 8, - 'real_time_other': 5, - 'cpu_time': 80, - 'cpu_time_other': 53} + "name": "medium", + "measurements": [ + { + "time": -0.375, + "cpu": -0.3375, + "real_time": 8, + "real_time_other": 5, + "cpu_time": 80, + "cpu_time_other": 53, + } ], - 'time_unit': 'ns', - 'utest': {} + "time_unit": "ns", + "utest": {}, }, { - 'name': 'OVERALL_GEOMEAN', - 'measurements': [{'real_time': 8.48528137423858e-09, 'cpu_time': 8.441336246629233e-08, - 'real_time_other': 2.2405267593145244e-08, 'cpu_time_other': 2.5453661413660466e-08, - 'time': 1.6404861082353634, 'cpu': -0.6984640740519662}], - 'time_unit': 's', - 'run_type': 'aggregate', - 'aggregate_name': 'geomean', - 'utest': {} - } + "name": "OVERALL_GEOMEAN", + "measurements": [ + { + "real_time": 8.48528137423858e-09, + "cpu_time": 8.441336246629233e-08, + "real_time_other": 2.2405267593145244e-08, + "cpu_time_other": 2.5453661413660466e-08, + "time": 1.6404861082353634, + "cpu": -0.6984640740519662, + } + ], + "time_unit": "s", + "run_type": "aggregate", + "aggregate_name": "geomean", + "utest": {}, + }, ] self.assertEqual(len(self.json_diff_report), len(expected_output)) - for out, expected in zip( - self.json_diff_report, expected_output): - self.assertEqual(out['name'], expected['name']) - self.assertEqual(out['time_unit'], expected['time_unit']) + for out, expected in zip(self.json_diff_report, expected_output): + self.assertEqual(out["name"], expected["name"]) + self.assertEqual(out["time_unit"], expected["time_unit"]) assert_utest(self, out, expected) assert_measurements(self, out, expected) class TestReportDifferenceWithUTestWhileDisplayingAggregatesOnly( - 
unittest.TestCase): + unittest.TestCase +): @classmethod def setUpClass(cls): def load_results(): import json + testInputs = os.path.join( - os.path.dirname( - os.path.realpath(__file__)), - 'Inputs') - testOutput1 = os.path.join(testInputs, 'test3_run0.json') - testOutput2 = os.path.join(testInputs, 'test3_run1.json') - with open(testOutput1, 'r') as f: + os.path.dirname(os.path.realpath(__file__)), "Inputs" + ) + testOutput1 = os.path.join(testInputs, "test3_run0.json") + testOutput2 = os.path.join(testInputs, "test3_run1.json") + with open(testOutput1, "r") as f: json1 = json.load(f) - with open(testOutput2, 'r') as f: + with open(testOutput2, "r") as f: json2 = json.load(f) return json1, json2 json1, json2 = load_results() - cls.json_diff_report = get_difference_report( - json1, json2, utest=True) + cls.json_diff_report = get_difference_report(json1, json2, utest=True) def test_json_diff_report_pretty_printing(self): expect_lines = [ - ['BM_One', '-0.1000', '+0.1000', '10', '9', '100', '110'], - ['BM_Two', '+0.1111', '-0.0111', '9', '10', '90', '89'], - ['BM_Two', '-0.1250', '-0.1628', '8', '7', '86', '72'], - ['BM_Two_pvalue', - '1.0000', - '0.6667', - 'U', - 'Test,', - 'Repetitions:', - '2', - 'vs', - '2.', - 'WARNING:', - 'Results', - 'unreliable!', - '9+', - 'repetitions', - 'recommended.'], - ['short', '-0.1250', '-0.0625', '8', '7', '80', '75'], - ['short', '-0.4325', '-0.1351', '8', '5', '77', '67'], - ['short_pvalue', - '0.7671', - '0.2000', - 'U', - 'Test,', - 'Repetitions:', - '2', - 'vs', - '3.', - 'WARNING:', - 'Results', - 'unreliable!', - '9+', - 'repetitions', - 'recommended.'], - ['medium', '-0.3750', '-0.3375', '8', '5', '80', '53'], - ['OVERALL_GEOMEAN', '+1.6405', '-0.6985', '0', '0', '0', '0'] + ["BM_One", "-0.1000", "+0.1000", "10", "9", "100", "110"], + ["BM_Two", "+0.1111", "-0.0111", "9", "10", "90", "89"], + ["BM_Two", "-0.1250", "-0.1628", "8", "7", "86", "72"], + [ + "BM_Two_pvalue", + "1.0000", + "0.6667", + "U", + "Test,", + 
"Repetitions:", + "2", + "vs", + "2.", + "WARNING:", + "Results", + "unreliable!", + "9+", + "repetitions", + "recommended.", + ], + ["short", "-0.1250", "-0.0625", "8", "7", "80", "75"], + ["short", "-0.4325", "-0.1351", "8", "5", "77", "67"], + [ + "short_pvalue", + "0.7671", + "0.2000", + "U", + "Test,", + "Repetitions:", + "2", + "vs", + "3.", + "WARNING:", + "Results", + "unreliable!", + "9+", + "repetitions", + "recommended.", + ], + ["medium", "-0.3750", "-0.3375", "8", "5", "80", "53"], + ["OVERALL_GEOMEAN", "+1.6405", "-0.6985", "0", "0", "0", "0"], ] output_lines_with_header = print_difference_report( - self.json_diff_report, - utest=True, utest_alpha=0.05, use_color=False) + self.json_diff_report, utest=True, utest_alpha=0.05, use_color=False + ) output_lines = output_lines_with_header[2:] print("\n") print("\n".join(output_lines_with_header)) self.assertEqual(len(output_lines), len(expect_lines)) for i in range(0, len(output_lines)): - parts = [x for x in output_lines[i].split(' ') if x] + parts = [x for x in output_lines[i].split(" ") if x] self.assertEqual(expect_lines[i], parts) def test_json_diff_report(self): expected_output = [ { - 'name': u'BM_One', - 'measurements': [ - {'time': -0.1, - 'cpu': 0.1, - 'real_time': 10, - 'real_time_other': 9, - 'cpu_time': 100, - 'cpu_time_other': 110} + "name": "BM_One", + "measurements": [ + { + "time": -0.1, + "cpu": 0.1, + "real_time": 10, + "real_time_other": 9, + "cpu_time": 100, + "cpu_time_other": 110, + } ], - 'time_unit': 'ns', - 'utest': {} + "time_unit": "ns", + "utest": {}, }, { - 'name': u'BM_Two', - 'measurements': [ - {'time': 0.1111111111111111, - 'cpu': -0.011111111111111112, - 'real_time': 9, - 'real_time_other': 10, - 'cpu_time': 90, - 'cpu_time_other': 89}, - {'time': -0.125, 'cpu': -0.16279069767441862, 'real_time': 8, - 'real_time_other': 7, 'cpu_time': 86, 'cpu_time_other': 72} + "name": "BM_Two", + "measurements": [ + { + "time": 0.1111111111111111, + "cpu": -0.011111111111111112, + 
"real_time": 9, + "real_time_other": 10, + "cpu_time": 90, + "cpu_time_other": 89, + }, + { + "time": -0.125, + "cpu": -0.16279069767441862, + "real_time": 8, + "real_time_other": 7, + "cpu_time": 86, + "cpu_time_other": 72, + }, ], - 'time_unit': 'ns', - 'utest': { - 'have_optimal_repetitions': False, 'cpu_pvalue': 0.6666666666666666, 'time_pvalue': 1.0 - } + "time_unit": "ns", + "utest": { + "have_optimal_repetitions": False, + "cpu_pvalue": 0.6666666666666666, + "time_pvalue": 1.0, + }, }, { - 'name': u'short', - 'measurements': [ - {'time': -0.125, - 'cpu': -0.0625, - 'real_time': 8, - 'real_time_other': 7, - 'cpu_time': 80, - 'cpu_time_other': 75}, - {'time': -0.4325, - 'cpu': -0.13506493506493514, - 'real_time': 8, - 'real_time_other': 4.54, - 'cpu_time': 77, - 'cpu_time_other': 66.6} + "name": "short", + "measurements": [ + { + "time": -0.125, + "cpu": -0.0625, + "real_time": 8, + "real_time_other": 7, + "cpu_time": 80, + "cpu_time_other": 75, + }, + { + "time": -0.4325, + "cpu": -0.13506493506493514, + "real_time": 8, + "real_time_other": 4.54, + "cpu_time": 77, + "cpu_time_other": 66.6, + }, ], - 'time_unit': 'ns', - 'utest': { - 'have_optimal_repetitions': False, 'cpu_pvalue': 0.2, 'time_pvalue': 0.7670968684102772 - } + "time_unit": "ns", + "utest": { + "have_optimal_repetitions": False, + "cpu_pvalue": 0.2, + "time_pvalue": 0.7670968684102772, + }, }, { - 'name': u'medium', - 'measurements': [ - {'real_time_other': 5, - 'cpu_time': 80, - 'time': -0.375, - 'real_time': 8, - 'cpu_time_other': 53, - 'cpu': -0.3375 - } + "name": "medium", + "measurements": [ + { + "real_time_other": 5, + "cpu_time": 80, + "time": -0.375, + "real_time": 8, + "cpu_time_other": 53, + "cpu": -0.3375, + } ], - 'utest': {}, - 'time_unit': u'ns', - 'aggregate_name': '' + "utest": {}, + "time_unit": "ns", + "aggregate_name": "", }, { - 'name': 'OVERALL_GEOMEAN', - 'measurements': [{'real_time': 8.48528137423858e-09, 'cpu_time': 8.441336246629233e-08, - 'real_time_other': 
2.2405267593145244e-08, 'cpu_time_other': 2.5453661413660466e-08, - 'time': 1.6404861082353634, 'cpu': -0.6984640740519662}], - 'time_unit': 's', - 'run_type': 'aggregate', - 'aggregate_name': 'geomean', - 'utest': {} - } + "name": "OVERALL_GEOMEAN", + "measurements": [ + { + "real_time": 8.48528137423858e-09, + "cpu_time": 8.441336246629233e-08, + "real_time_other": 2.2405267593145244e-08, + "cpu_time_other": 2.5453661413660466e-08, + "time": 1.6404861082353634, + "cpu": -0.6984640740519662, + } + ], + "time_unit": "s", + "run_type": "aggregate", + "aggregate_name": "geomean", + "utest": {}, + }, ] self.assertEqual(len(self.json_diff_report), len(expected_output)) - for out, expected in zip( - self.json_diff_report, expected_output): - self.assertEqual(out['name'], expected['name']) - self.assertEqual(out['time_unit'], expected['time_unit']) + for out, expected in zip(self.json_diff_report, expected_output): + self.assertEqual(out["name"], expected["name"]) + self.assertEqual(out["time_unit"], expected["time_unit"]) assert_utest(self, out, expected) assert_measurements(self, out, expected) -class TestReportDifferenceForPercentageAggregates( - unittest.TestCase): +class TestReportDifferenceForPercentageAggregates(unittest.TestCase): @classmethod def setUpClass(cls): def load_results(): import json + testInputs = os.path.join( - os.path.dirname( - os.path.realpath(__file__)), - 'Inputs') - testOutput1 = os.path.join(testInputs, 'test4_run0.json') - testOutput2 = os.path.join(testInputs, 'test4_run1.json') - with open(testOutput1, 'r') as f: + os.path.dirname(os.path.realpath(__file__)), "Inputs" + ) + testOutput1 = os.path.join(testInputs, "test4_run0.json") + testOutput2 = os.path.join(testInputs, "test4_run1.json") + with open(testOutput1, "r") as f: json1 = json.load(f) - with open(testOutput2, 'r') as f: + with open(testOutput2, "r") as f: json2 = json.load(f) return json1, json2 json1, json2 = load_results() - cls.json_diff_report = get_difference_report( - 
json1, json2, utest=True) + cls.json_diff_report = get_difference_report(json1, json2, utest=True) def test_json_diff_report_pretty_printing(self): - expect_lines = [ - ['whocares', '-0.5000', '+0.5000', '0', '0', '0', '0'] - ] + expect_lines = [["whocares", "-0.5000", "+0.5000", "0", "0", "0", "0"]] output_lines_with_header = print_difference_report( - self.json_diff_report, - utest=True, utest_alpha=0.05, use_color=False) + self.json_diff_report, utest=True, utest_alpha=0.05, use_color=False + ) output_lines = output_lines_with_header[2:] print("\n") print("\n".join(output_lines_with_header)) self.assertEqual(len(output_lines), len(expect_lines)) for i in range(0, len(output_lines)): - parts = [x for x in output_lines[i].split(' ') if x] + parts = [x for x in output_lines[i].split(" ") if x] self.assertEqual(expect_lines[i], parts) def test_json_diff_report(self): expected_output = [ { - 'name': u'whocares', - 'measurements': [ - {'time': -0.5, - 'cpu': 0.5, - 'real_time': 0.01, - 'real_time_other': 0.005, - 'cpu_time': 0.10, - 'cpu_time_other': 0.15} + "name": "whocares", + "measurements": [ + { + "time": -0.5, + "cpu": 0.5, + "real_time": 0.01, + "real_time_other": 0.005, + "cpu_time": 0.10, + "cpu_time_other": 0.15, + } ], - 'time_unit': 'ns', - 'utest': {} + "time_unit": "ns", + "utest": {}, } ] self.assertEqual(len(self.json_diff_report), len(expected_output)) - for out, expected in zip( - self.json_diff_report, expected_output): - self.assertEqual(out['name'], expected['name']) - self.assertEqual(out['time_unit'], expected['time_unit']) + for out, expected in zip(self.json_diff_report, expected_output): + self.assertEqual(out["name"], expected["name"]) + self.assertEqual(out["time_unit"], expected["time_unit"]) assert_utest(self, out, expected) assert_measurements(self, out, expected) @@ -1129,12 +1451,12 @@ class TestReportSorting(unittest.TestCase): def setUpClass(cls): def load_result(): import json + testInputs = os.path.join( - os.path.dirname( - 
os.path.realpath(__file__)), - 'Inputs') - testOutput = os.path.join(testInputs, 'test4_run.json') - with open(testOutput, 'r') as f: + os.path.dirname(os.path.realpath(__file__)), "Inputs" + ) + testOutput = os.path.join(testInputs, "test4_run.json") + with open(testOutput, "r") as f: json = json.load(f) return json @@ -1155,45 +1477,141 @@ class TestReportSorting(unittest.TestCase): "91 family 1 instance 0 aggregate", "90 family 1 instance 1 repetition 0", "89 family 1 instance 1 repetition 1", - "88 family 1 instance 1 aggregate" + "88 family 1 instance 1 aggregate", ] - for n in range(len(self.json['benchmarks']) ** 2): - random.shuffle(self.json['benchmarks']) + for n in range(len(self.json["benchmarks"]) ** 2): + random.shuffle(self.json["benchmarks"]) sorted_benchmarks = util.sort_benchmark_results(self.json)[ - 'benchmarks'] + "benchmarks" + ] self.assertEqual(len(expected_names), len(sorted_benchmarks)) for out, expected in zip(sorted_benchmarks, expected_names): - self.assertEqual(out['name'], expected) + self.assertEqual(out["name"], expected) + + +class TestReportDifferenceWithUTestWhileDisplayingAggregatesOnly2( + unittest.TestCase +): + @classmethod + def setUpClass(cls): + def load_results(): + import json + + testInputs = os.path.join( + os.path.dirname(os.path.realpath(__file__)), "Inputs" + ) + testOutput1 = os.path.join(testInputs, "test5_run0.json") + testOutput2 = os.path.join(testInputs, "test5_run1.json") + with open(testOutput1, "r") as f: + json1 = json.load(f) + json1["benchmarks"] = [ + json1["benchmarks"][0] for i in range(1000) + ] + with open(testOutput2, "r") as f: + json2 = json.load(f) + json2["benchmarks"] = [ + json2["benchmarks"][0] for i in range(1000) + ] + return json1, json2 + + json1, json2 = load_results() + cls.json_diff_report = get_difference_report(json1, json2, utest=True) + + def test_json_diff_report_pretty_printing(self): + expect_line = [ + "BM_ManyRepetitions_pvalue", + "0.0000", + "0.0000", + "U", + "Test,", + 
"Repetitions:", + "1000", + "vs", + "1000", + ] + output_lines_with_header = print_difference_report( + self.json_diff_report, utest=True, utest_alpha=0.05, use_color=False + ) + output_lines = output_lines_with_header[2:] + found = False + for i in range(0, len(output_lines)): + parts = [x for x in output_lines[i].split(" ") if x] + found = expect_line == parts + if found: + break + self.assertTrue(found) + + def test_json_diff_report(self): + expected_output = [ + { + "name": "BM_ManyRepetitions", + "label": "", + "time_unit": "s", + "run_type": "", + "aggregate_name": "", + "utest": { + "have_optimal_repetitions": True, + "cpu_pvalue": 0.0, + "time_pvalue": 0.0, + "nr_of_repetitions": 1000, + "nr_of_repetitions_other": 1000, + }, + }, + { + "name": "OVERALL_GEOMEAN", + "label": "", + "measurements": [ + { + "real_time": 1.0, + "cpu_time": 1000.000000000069, + "real_time_other": 1000.000000000069, + "cpu_time_other": 1.0, + "time": 999.000000000069, + "cpu": -0.9990000000000001, + } + ], + "time_unit": "s", + "run_type": "aggregate", + "aggregate_name": "geomean", + "utest": {}, + }, + ] + self.assertEqual(len(self.json_diff_report), len(expected_output)) + for out, expected in zip(self.json_diff_report, expected_output): + self.assertEqual(out["name"], expected["name"]) + self.assertEqual(out["time_unit"], expected["time_unit"]) + assert_utest(self, out, expected) def assert_utest(unittest_instance, lhs, rhs): - if lhs['utest']: + if lhs["utest"]: unittest_instance.assertAlmostEqual( - lhs['utest']['cpu_pvalue'], - rhs['utest']['cpu_pvalue']) + lhs["utest"]["cpu_pvalue"], rhs["utest"]["cpu_pvalue"] + ) unittest_instance.assertAlmostEqual( - lhs['utest']['time_pvalue'], - rhs['utest']['time_pvalue']) + lhs["utest"]["time_pvalue"], rhs["utest"]["time_pvalue"] + ) unittest_instance.assertEqual( - lhs['utest']['have_optimal_repetitions'], - rhs['utest']['have_optimal_repetitions']) + lhs["utest"]["have_optimal_repetitions"], + 
rhs["utest"]["have_optimal_repetitions"], + ) else: # lhs is empty. assert if rhs is not. - unittest_instance.assertEqual(lhs['utest'], rhs['utest']) + unittest_instance.assertEqual(lhs["utest"], rhs["utest"]) def assert_measurements(unittest_instance, lhs, rhs): - for m1, m2 in zip(lhs['measurements'], rhs['measurements']): - unittest_instance.assertEqual(m1['real_time'], m2['real_time']) - unittest_instance.assertEqual(m1['cpu_time'], m2['cpu_time']) + for m1, m2 in zip(lhs["measurements"], rhs["measurements"]): + unittest_instance.assertEqual(m1["real_time"], m2["real_time"]) + unittest_instance.assertEqual(m1["cpu_time"], m2["cpu_time"]) # m1['time'] and m1['cpu'] hold values which are being calculated, # and therefore we must use almost-equal pattern. - unittest_instance.assertAlmostEqual(m1['time'], m2['time'], places=4) - unittest_instance.assertAlmostEqual(m1['cpu'], m2['cpu'], places=4) + unittest_instance.assertAlmostEqual(m1["time"], m2["time"], places=4) + unittest_instance.assertAlmostEqual(m1["cpu"], m2["cpu"], places=4) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 diff --git a/yass/third_party/benchmark/tools/gbench/util.py b/yass/third_party/benchmark/tools/gbench/util.py index 5e79da8f01..1119a1a2ca 100644 --- a/yass/third_party/benchmark/tools/gbench/util.py +++ b/yass/third_party/benchmark/tools/gbench/util.py @@ -1,5 +1,5 @@ -"""util.py - General utilities for running, loading, and processing benchmarks -""" +"""util.py - General utilities for running, loading, and processing benchmarks""" + import json import os import re @@ -7,13 +7,12 @@ import subprocess import sys import tempfile - # Input file type enumeration IT_Invalid = 0 IT_JSON = 1 IT_Executable = 2 -_num_magic_bytes = 2 if sys.platform.startswith('win') else 4 +_num_magic_bytes = 2 if sys.platform.startswith("win") else 4 def is_executable_file(filename): @@ -24,21 +23,21 @@ def 
is_executable_file(filename): """ if not os.path.isfile(filename): return False - with open(filename, mode='rb') as f: + with open(filename, mode="rb") as f: magic_bytes = f.read(_num_magic_bytes) - if sys.platform == 'darwin': + if sys.platform == "darwin": return magic_bytes in [ - b'\xfe\xed\xfa\xce', # MH_MAGIC - b'\xce\xfa\xed\xfe', # MH_CIGAM - b'\xfe\xed\xfa\xcf', # MH_MAGIC_64 - b'\xcf\xfa\xed\xfe', # MH_CIGAM_64 - b'\xca\xfe\xba\xbe', # FAT_MAGIC - b'\xbe\xba\xfe\xca' # FAT_CIGAM + b"\xfe\xed\xfa\xce", # MH_MAGIC + b"\xce\xfa\xed\xfe", # MH_CIGAM + b"\xfe\xed\xfa\xcf", # MH_MAGIC_64 + b"\xcf\xfa\xed\xfe", # MH_CIGAM_64 + b"\xca\xfe\xba\xbe", # FAT_MAGIC + b"\xbe\xba\xfe\xca", # FAT_CIGAM ] - elif sys.platform.startswith('win'): - return magic_bytes == b'MZ' + elif sys.platform.startswith("win"): + return magic_bytes == b"MZ" else: - return magic_bytes == b'\x7FELF' + return magic_bytes == b"\x7fELF" def is_json_file(filename): @@ -47,7 +46,7 @@ def is_json_file(filename): 'False' otherwise. """ try: - with open(filename, 'r') as f: + with open(filename, "r") as f: json.load(f) return True except BaseException: @@ -72,7 +71,10 @@ def classify_input_file(filename): elif is_json_file(filename): ftype = IT_JSON else: - err_msg = "'%s' does not name a valid benchmark executable or JSON file" % filename + err_msg = ( + "'%s' does not name a valid benchmark executable or JSON file" + % filename + ) return ftype, err_msg @@ -95,11 +97,11 @@ def find_benchmark_flag(prefix, benchmark_flags): if it is found return the arg it specifies. If specified more than once the last value is returned. If the flag is not found None is returned. 
""" - assert prefix.startswith('--') and prefix.endswith('=') + assert prefix.startswith("--") and prefix.endswith("=") result = None for f in benchmark_flags: if f.startswith(prefix): - result = f[len(prefix):] + result = f[len(prefix) :] return result @@ -108,7 +110,7 @@ def remove_benchmark_flags(prefix, benchmark_flags): Return a new list containing the specified benchmark_flags except those with the specified prefix. """ - assert prefix.startswith('--') and prefix.endswith('=') + assert prefix.startswith("--") and prefix.endswith("=") return [f for f in benchmark_flags if not f.startswith(prefix)] @@ -124,36 +126,61 @@ def load_benchmark_results(fname, benchmark_filter): REQUIRES: 'fname' names a file containing JSON benchmark output. """ + def benchmark_wanted(benchmark): if benchmark_filter is None: return True - name = benchmark.get('run_name', None) or benchmark['name'] - if re.search(benchmark_filter, name): - return True - return False + name = benchmark.get("run_name", None) or benchmark["name"] + return re.search(benchmark_filter, name) is not None - with open(fname, 'r') as f: + with open(fname, "r") as f: results = json.load(f) - if 'benchmarks' in results: - results['benchmarks'] = list(filter(benchmark_wanted, - results['benchmarks'])) + if "context" in results: + if "json_schema_version" in results["context"]: + json_schema_version = results["context"]["json_schema_version"] + if json_schema_version != 1: + print( + "In %s, got unnsupported JSON schema version: %i, expected 1" + % (fname, json_schema_version) + ) + sys.exit(1) + if "benchmarks" in results: + results["benchmarks"] = list( + filter(benchmark_wanted, results["benchmarks"]) + ) return results def sort_benchmark_results(result): - benchmarks = result['benchmarks'] + benchmarks = result["benchmarks"] # From inner key to the outer key! 
benchmarks = sorted( - benchmarks, key=lambda benchmark: benchmark['repetition_index'] if 'repetition_index' in benchmark else -1) + benchmarks, + key=lambda benchmark: benchmark["repetition_index"] + if "repetition_index" in benchmark + else -1, + ) benchmarks = sorted( - benchmarks, key=lambda benchmark: 1 if 'run_type' in benchmark and benchmark['run_type'] == "aggregate" else 0) + benchmarks, + key=lambda benchmark: 1 + if "run_type" in benchmark and benchmark["run_type"] == "aggregate" + else 0, + ) benchmarks = sorted( - benchmarks, key=lambda benchmark: benchmark['per_family_instance_index'] if 'per_family_instance_index' in benchmark else -1) + benchmarks, + key=lambda benchmark: benchmark["per_family_instance_index"] + if "per_family_instance_index" in benchmark + else -1, + ) benchmarks = sorted( - benchmarks, key=lambda benchmark: benchmark['family_index'] if 'family_index' in benchmark else -1) + benchmarks, + key=lambda benchmark: benchmark["family_index"] + if "family_index" in benchmark + else -1, + ) - result['benchmarks'] = benchmarks + result["benchmarks"] = benchmarks return result @@ -164,21 +191,21 @@ def run_benchmark(exe_name, benchmark_flags): real time console output. 
RETURNS: A JSON object representing the benchmark output """ - output_name = find_benchmark_flag('--benchmark_out=', - benchmark_flags) + output_name = find_benchmark_flag("--benchmark_out=", benchmark_flags) is_temp_output = False if output_name is None: is_temp_output = True thandle, output_name = tempfile.mkstemp() os.close(thandle) - benchmark_flags = list(benchmark_flags) + \ - ['--benchmark_out=%s' % output_name] + benchmark_flags = list(benchmark_flags) + [ + "--benchmark_out=%s" % output_name + ] cmd = [exe_name] + benchmark_flags - print("RUNNING: %s" % ' '.join(cmd)) + print("RUNNING: %s" % " ".join(cmd)) exitCode = subprocess.call(cmd) if exitCode != 0: - print('TEST FAILED...') + print("TEST FAILED...") sys.exit(exitCode) json_res = load_benchmark_results(output_name, None) if is_temp_output: @@ -195,9 +222,10 @@ def run_or_load_benchmark(filename, benchmark_flags): """ ftype = check_input_file(filename) if ftype == IT_JSON: - benchmark_filter = find_benchmark_flag('--benchmark_filter=', - benchmark_flags) + benchmark_filter = find_benchmark_flag( + "--benchmark_filter=", benchmark_flags + ) return load_benchmark_results(filename, benchmark_filter) if ftype == IT_Executable: return run_benchmark(filename, benchmark_flags) - raise ValueError('Unknown file type %s' % ftype) + raise ValueError("Unknown file type %s" % ftype) diff --git a/yass/third_party/benchmark/tools/strip_asm.py b/yass/third_party/benchmark/tools/strip_asm.py index d131dc7194..bc3a774a79 100755 --- a/yass/third_party/benchmark/tools/strip_asm.py +++ b/yass/third_party/benchmark/tools/strip_asm.py @@ -4,48 +4,49 @@ strip_asm.py - Cleanup ASM output for the specified file """ -from argparse import ArgumentParser -import sys import os import re +import sys +from argparse import ArgumentParser + def find_used_labels(asm): found = set() - label_re = re.compile("\s*j[a-z]+\s+\.L([a-zA-Z0-9][a-zA-Z0-9_]*)") - for l in asm.splitlines(): - m = label_re.match(l) + label_re = 
re.compile(r"\s*j[a-z]+\s+\.L([a-zA-Z0-9][a-zA-Z0-9_]*)") + for line in asm.splitlines(): + m = label_re.match(line) if m: - found.add('.L%s' % m.group(1)) + found.add(".L%s" % m.group(1)) return found def normalize_labels(asm): decls = set() label_decl = re.compile("^[.]{0,1}L([a-zA-Z0-9][a-zA-Z0-9_]*)(?=:)") - for l in asm.splitlines(): - m = label_decl.match(l) + for line in asm.splitlines(): + m = label_decl.match(line) if m: decls.add(m.group(0)) if len(decls) == 0: return asm - needs_dot = next(iter(decls))[0] != '.' + needs_dot = next(iter(decls))[0] != "." if not needs_dot: return asm for ld in decls: - asm = re.sub("(^|\s+)" + ld + "(?=:|\s)", '\\1.' + ld, asm) + asm = re.sub(r"(^|\s+)" + ld + r"(?=:|\s)", "\\1." + ld, asm) return asm def transform_labels(asm): asm = normalize_labels(asm) used_decls = find_used_labels(asm) - new_asm = '' - label_decl = re.compile("^\.L([a-zA-Z0-9][a-zA-Z0-9_]*)(?=:)") - for l in asm.splitlines(): - m = label_decl.match(l) + new_asm = "" + label_decl = re.compile(r"^\.L([a-zA-Z0-9][a-zA-Z0-9_]*)(?=:)") + for line in asm.splitlines(): + m = label_decl.match(line) if not m or m.group(0) in used_decls: - new_asm += l - new_asm += '\n' + new_asm += line + new_asm += "\n" return new_asm @@ -53,29 +54,34 @@ def is_identifier(tk): if len(tk) == 0: return False first = tk[0] - if not first.isalpha() and first != '_': + if not first.isalpha() and first != "_": return False for i in range(1, len(tk)): c = tk[i] - if not c.isalnum() and c != '_': + if not c.isalnum() and c != "_": return False return True -def process_identifiers(l): + +def process_identifiers(line): """ process_identifiers - process all identifiers and modify them to have consistent names across all platforms; specifically across ELF and MachO. For example, MachO inserts an additional understore at the beginning of names. This function removes that. 
""" - parts = re.split(r'([a-zA-Z0-9_]+)', l) - new_line = '' + parts = re.split(r"([a-zA-Z0-9_]+)", line) + new_line = "" for tk in parts: if is_identifier(tk): - if tk.startswith('__Z'): + if tk.startswith("__Z"): tk = tk[1:] - elif tk.startswith('_') and len(tk) > 1 and \ - tk[1].isalpha() and tk[1] != 'Z': + elif ( + tk.startswith("_") + and len(tk) > 1 + and tk[1].isalpha() + and tk[1] != "Z" + ): tk = tk[1:] new_line += tk return new_line @@ -85,65 +91,71 @@ def process_asm(asm): """ Strip the ASM of unwanted directives and lines """ - new_contents = '' + new_contents = "" asm = transform_labels(asm) # TODO: Add more things we want to remove discard_regexes = [ - re.compile("\s+\..*$"), # directive - re.compile("\s*#(NO_APP|APP)$"), #inline ASM - re.compile("\s*#.*$"), # comment line - re.compile("\s*\.globa?l\s*([.a-zA-Z_][a-zA-Z0-9$_.]*)"), #global directive - re.compile("\s*\.(string|asciz|ascii|[1248]?byte|short|word|long|quad|value|zero)"), - ] - keep_regexes = [ - + re.compile(r"\s+\..*$"), # directive + re.compile(r"\s*#(NO_APP|APP)$"), # inline ASM + re.compile(r"\s*#.*$"), # comment line + re.compile( + r"\s*\.globa?l\s*([.a-zA-Z_][a-zA-Z0-9$_.]*)" + ), # global directive + re.compile( + r"\s*\.(string|asciz|ascii|[1248]?byte|short|word|long|quad|value|zero)" + ), ] + keep_regexes: list[re.Pattern] = [] fn_label_def = re.compile("^[a-zA-Z_][a-zA-Z0-9_.]*:") - for l in asm.splitlines(): + for line in asm.splitlines(): # Remove Mach-O attribute - l = l.replace('@GOTPCREL', '') + line = line.replace("@GOTPCREL", "") add_line = True for reg in discard_regexes: - if reg.match(l) is not None: + if reg.match(line) is not None: add_line = False break for reg in keep_regexes: - if reg.match(l) is not None: + if reg.match(line) is not None: add_line = True break if add_line: - if fn_label_def.match(l) and len(new_contents) != 0: - new_contents += '\n' - l = process_identifiers(l) - new_contents += l - new_contents += '\n' + if fn_label_def.match(line) and 
len(new_contents) != 0: + new_contents += "\n" + line = process_identifiers(line) + new_contents += line + new_contents += "\n" return new_contents + def main(): - parser = ArgumentParser( - description='generate a stripped assembly file') + parser = ArgumentParser(description="generate a stripped assembly file") parser.add_argument( - 'input', metavar='input', type=str, nargs=1, - help='An input assembly file') + "input", + metavar="input", + type=str, + nargs=1, + help="An input assembly file", + ) parser.add_argument( - 'out', metavar='output', type=str, nargs=1, - help='The output file') + "out", metavar="output", type=str, nargs=1, help="The output file" + ) args, unknown_args = parser.parse_known_args() input = args.input[0] output = args.out[0] if not os.path.isfile(input): - print(("ERROR: input file '%s' does not exist") % input) + print("ERROR: input file '%s' does not exist" % input) sys.exit(1) - contents = None - with open(input, 'r') as f: + + with open(input, "r") as f: contents = f.read() new_contents = process_asm(contents) - with open(output, 'w') as f: + with open(output, "w") as f: f.write(new_contents) -if __name__ == '__main__': +if __name__ == "__main__": main() # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 diff --git a/yass/third_party/libc++/CMakeLists.txt b/yass/third_party/libc++/CMakeLists.txt index 4d2dbf3bd7..06fe7a18b5 100644 --- a/yass/third_party/libc++/CMakeLists.txt +++ b/yass/third_party/libc++/CMakeLists.txt @@ -124,22 +124,13 @@ else() ) # Make sure we don't link against the system libstdc++ or libc++. - if (CMAKE_SYSTEM_NAME STREQUAL "FreeBSD" AND COMPILER_CLANG) - # Some bugs about -notstdlib++ on FreeBSD port... 
- # still linking to libgcc - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fno-builtin-abs") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-builtin-abs") - - set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -nodefaultlibs") - set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -nodefaultlibs") - - set(libcxx_PUBLIC_LIBRARIES - ${libcxx_PUBLIC_LIBRARIES} - pthread c gcc_s m rt - ) - elseif (COMPILER_CLANG AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 6.0 AND NOT (MINGW AND MINGW_MSVCRT100)) + if (COMPILER_CLANG AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 6.0 AND NOT (MINGW AND MINGW_MSVCRT100)) set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -nostdlib++") set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -nostdlib++") + if (CMAKE_SYSTEM_NAME STREQUAL "FreeBSD") + set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,-L${CMAKE_CURRENT_SOURCE_DIR}/freebsd") + set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -Wl,-L${CMAKE_CURRENT_SOURCE_DIR}/freebsd") + endif() else() # Gcc has a built-in abs() definition with default visibility. # If it was not disabled, it would conflict with libc++'s abs() @@ -155,19 +146,20 @@ else() # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=83931); -nodefaultlibs # removes all of the default libraries, so add back the ones that we need. 
if (UNIX) - set(libcxx_PUBLIC_LIBRARIES - ${libcxx_PUBLIC_LIBRARIES} - c m - ) if (APPLE) set(libcxx_PUBLIC_LIBRARIES ${libcxx_PUBLIC_LIBRARIES} - gcc_eh gcc System + c m gcc_eh gcc System + ) + elseif(CMAKE_SYSTEM_NAME STREQUAL "FreeBSD") + set(libcxx_PUBLIC_LIBRARIES + ${libcxx_PUBLIC_LIBRARIES} + pthread c gcc_s m rt ) else() set(libcxx_PUBLIC_LIBRARIES ${libcxx_PUBLIC_LIBRARIES} - gcc_s rt + c m gcc_s rt ) endif() elseif (MINGW AND MINGW_MSVCRT100) diff --git a/yass/third_party/libc++/freebsd/libgcc.a b/yass/third_party/libc++/freebsd/libgcc.a new file mode 100644 index 0000000000..9a4cc9cb99 --- /dev/null +++ b/yass/third_party/libc++/freebsd/libgcc.a @@ -0,0 +1 @@ +INPUT(-lgcc_s) diff --git a/yass/third_party/mimalloc/.gitattributes b/yass/third_party/mimalloc/.gitattributes new file mode 100644 index 0000000000..0332e03158 --- /dev/null +++ b/yass/third_party/mimalloc/.gitattributes @@ -0,0 +1,12 @@ +# default behavior is to always use unix style line endings +* text eol=lf +*.png binary +*.pdn binary +*.jpg binary +*.sln binary +*.suo binary +*.vcproj binary +*.patch binary +*.dll binary +*.lib binary +*.exe binary diff --git a/yass/third_party/mimalloc/.gitignore b/yass/third_party/mimalloc/.gitignore new file mode 100644 index 0000000000..df1d58eb2e --- /dev/null +++ b/yass/third_party/mimalloc/.gitignore @@ -0,0 +1,11 @@ +ide/vs20??/*.db +ide/vs20??/*.opendb +ide/vs20??/*.user +ide/vs20??/*.vcxproj.filters +ide/vs20??/.vs +ide/vs20??/VTune* +out/ +docs/ +*.zip +*.tar +*.gz diff --git a/yass/third_party/mimalloc/CMakeLists.txt b/yass/third_party/mimalloc/CMakeLists.txt new file mode 100644 index 0000000000..f41e40616a --- /dev/null +++ b/yass/third_party/mimalloc/CMakeLists.txt @@ -0,0 +1,609 @@ +cmake_minimum_required(VERSION 3.18) +project(libmimalloc C CXX) + +set(CMAKE_C_STANDARD 11) +set(CMAKE_CXX_STANDARD 17) + +option(MI_SECURE "Use full security mitigations (like guard pages, allocation randomization, double-free mitigation, and free-list 
corruption detection)" OFF) +option(MI_DEBUG_FULL "Use full internal heap invariant checking in DEBUG mode (expensive)" OFF) +option(MI_PADDING "Enable padding to detect heap block overflow (always on in DEBUG or SECURE mode, or with Valgrind/ASAN)" OFF) +option(MI_OVERRIDE "Override the standard malloc interface (e.g. define entry points for malloc() etc)" ON) +option(MI_XMALLOC "Enable abort() call on memory allocation failure by default" OFF) +option(MI_SHOW_ERRORS "Show error and warning messages by default (only enabled by default in DEBUG mode)" OFF) +option(MI_TRACK_VALGRIND "Compile with Valgrind support (adds a small overhead)" OFF) +option(MI_TRACK_ASAN "Compile with address sanitizer support (adds a small overhead)" OFF) +option(MI_TRACK_ETW "Compile with Windows event tracing (ETW) support (adds a small overhead)" OFF) +option(MI_USE_CXX "Use the C++ compiler to compile the library (instead of the C compiler)" OFF) +option(MI_SEE_ASM "Generate assembly files" OFF) +option(MI_OSX_INTERPOSE "Use interpose to override standard malloc on macOS" ON) +option(MI_OSX_ZONE "Use malloc zone to override standard malloc on macOS" ON) +option(MI_WIN_REDIRECT "Use redirection module ('mimalloc-redirect') on Windows if compiling mimalloc as a DLL" ON) +option(MI_LOCAL_DYNAMIC_TLS "Use slightly slower, dlopen-compatible TLS mechanism (Unix)" OFF) +option(MI_LIBC_MUSL "Set this when linking with musl libc" OFF) +option(MI_BUILD_SHARED "Build shared library" ON) +option(MI_BUILD_STATIC "Build static library" ON) +option(MI_BUILD_OBJECT "Build object library" ON) +option(MI_BUILD_TESTS "Build test executables" ON) +option(MI_DEBUG_TSAN "Build with thread sanitizer (needs clang)" OFF) +option(MI_DEBUG_UBSAN "Build with undefined-behavior sanitizer (needs clang++)" OFF) +option(MI_SKIP_COLLECT_ON_EXIT "Skip collecting memory on program exit" OFF) +option(MI_NO_PADDING "Force no use of padding even in DEBUG mode etc." 
OFF) +option(MI_ENABLE_INSTALL "Enable Install" OFF) +option(MI_INSTALL_TOPLEVEL "Install directly into $CMAKE_INSTALL_PREFIX instead of PREFIX/lib/mimalloc-version" OFF) +option(MI_NO_THP "Disable transparent huge pages support on Linux/Android for the mimalloc process only" OFF) + +# deprecated options +option(MI_CHECK_FULL "Use full internal invariant checking in DEBUG mode (deprecated, use MI_DEBUG_FULL instead)" OFF) +option(MI_USE_LIBATOMIC "Explicitly link with -latomic (on older systems) (deprecated and detected automatically)" OFF) + +include(CheckLinkerFlag) # requires cmake 3.18 +include(CheckIncludeFiles) +include(GNUInstallDirs) +include("cmake/mimalloc-config-version.cmake") + +set(mi_sources + src/alloc.c + src/alloc-aligned.c + src/alloc-posix.c + src/arena.c + src/bitmap.c + src/heap.c + src/init.c + src/libc.c + src/options.c + src/os.c + src/page.c + src/random.c + src/segment.c + src/segment-map.c + src/stats.c + src/prim/prim.c) + +set(mi_cflags "") +set(mi_cflags_static "") # extra flags for a static library build +set(mi_cflags_dynamic "") # extra flags for a shared-object library build +set(mi_defines "") +set(mi_libraries "") + +# ----------------------------------------------------------------------------- +# Convenience: set default build type depending on the build directory +# ----------------------------------------------------------------------------- + +message(STATUS "") +if (NOT CMAKE_BUILD_TYPE) + if ("${CMAKE_BINARY_DIR}" MATCHES ".*(D|d)ebug$" OR MI_DEBUG_FULL) + message(STATUS "No build type selected, default to: Debug") + set(CMAKE_BUILD_TYPE "Debug") + else() + message(STATUS "No build type selected, default to: Release") + set(CMAKE_BUILD_TYPE "Release") + endif() +endif() + +if("${CMAKE_BINARY_DIR}" MATCHES ".*(S|s)ecure$") + message(STATUS "Default to secure build") + set(MI_SECURE "ON") +endif() + + +# ----------------------------------------------------------------------------- +# Process options +# 
----------------------------------------------------------------------------- + +# put -Wall early so other warnings can be disabled selectively +if(CMAKE_C_COMPILER_ID MATCHES "AppleClang|Clang") + list(APPEND mi_cflags -Wall -Wextra -Wpedantic) +endif() +if(CMAKE_C_COMPILER_ID MATCHES "GNU") + list(APPEND mi_cflags -Wall -Wextra) +endif() +if(CMAKE_C_COMPILER_ID MATCHES "Intel") + list(APPEND mi_cflags -Wall) +endif() + +if(CMAKE_C_COMPILER_ID MATCHES "MSVC|Intel") + set(MI_USE_CXX "ON") +endif() + +if(MI_OVERRIDE) + message(STATUS "Override standard malloc (MI_OVERRIDE=ON)") + if(APPLE) + if(MI_OSX_ZONE) + # use zone's on macOS + message(STATUS " Use malloc zone to override malloc (MI_OSX_ZONE=ON)") + list(APPEND mi_sources src/prim/osx/alloc-override-zone.c) + list(APPEND mi_defines MI_OSX_ZONE=1) + if (NOT MI_OSX_INTERPOSE) + message(STATUS " WARNING: zone overriding usually also needs interpose (use -DMI_OSX_INTERPOSE=ON)") + endif() + endif() + if(MI_OSX_INTERPOSE) + # use interpose on macOS + message(STATUS " Use interpose to override malloc (MI_OSX_INTERPOSE=ON)") + list(APPEND mi_defines MI_OSX_INTERPOSE=1) + if (NOT MI_OSX_ZONE) + message(STATUS " WARNING: interpose usually also needs zone overriding (use -DMI_OSX_INTERPOSE=ON)") + endif() + endif() + if(MI_USE_CXX AND MI_OSX_INTERPOSE) + message(STATUS " WARNING: if dynamically overriding malloc/free, it is more reliable to build mimalloc as C code (use -DMI_USE_CXX=OFF)") + endif() + endif() +endif() + +if(WIN32) + if (MI_WIN_REDIRECT) + if (MSVC_C_ARCHITECTURE_ID MATCHES "ARM") + message(STATUS "Cannot use redirection on Windows ARM (MI_WIN_REDIRECT=OFF)") + set(MI_WIN_REDIRECT OFF) + endif() + endif() + if (NOT MI_WIN_REDIRECT) + # use a negative define for backward compatibility + list(APPEND mi_defines MI_WIN_NOREDIRECT=1) + endif() +endif() + +if(MI_SECURE) + message(STATUS "Set full secure build (MI_SECURE=ON)") + list(APPEND mi_defines MI_SECURE=4) +endif() + +if(MI_TRACK_VALGRIND) + 
CHECK_INCLUDE_FILES("valgrind/valgrind.h;valgrind/memcheck.h" MI_HAS_VALGRINDH) + if (NOT MI_HAS_VALGRINDH) + set(MI_TRACK_VALGRIND OFF) + message(WARNING "Cannot find the 'valgrind/valgrind.h' and 'valgrind/memcheck.h' -- install valgrind first") + message(STATUS "Compile **without** Valgrind support (MI_TRACK_VALGRIND=OFF)") + else() + message(STATUS "Compile with Valgrind support (MI_TRACK_VALGRIND=ON)") + list(APPEND mi_defines MI_TRACK_VALGRIND=1) + endif() +endif() + +if(MI_TRACK_ASAN) + if (APPLE AND MI_OVERRIDE) + set(MI_TRACK_ASAN OFF) + message(WARNING "Cannot enable address sanitizer support on macOS if MI_OVERRIDE is ON (MI_TRACK_ASAN=OFF)") + endif() + if (MI_TRACK_VALGRIND) + set(MI_TRACK_ASAN OFF) + message(WARNING "Cannot enable address sanitizer support with also Valgrind support enabled (MI_TRACK_ASAN=OFF)") + endif() + if(MI_TRACK_ASAN) + CHECK_INCLUDE_FILES("sanitizer/asan_interface.h" MI_HAS_ASANH) + if (NOT MI_HAS_ASANH) + set(MI_TRACK_ASAN OFF) + message(WARNING "Cannot find the 'sanitizer/asan_interface.h' -- install address sanitizer support first") + message(STATUS "Compile **without** address sanitizer support (MI_TRACK_ASAN=OFF)") + else() + message(STATUS "Compile with address sanitizer support (MI_TRACK_ASAN=ON)") + list(APPEND mi_defines MI_TRACK_ASAN=1) + list(APPEND mi_cflags -fsanitize=address) + list(APPEND mi_libraries -fsanitize=address) + endif() + endif() +endif() + +if(MI_TRACK_ETW) + if(NOT WIN32) + set(MI_TRACK_ETW OFF) + message(WARNING "Can only enable ETW support on Windows (MI_TRACK_ETW=OFF)") + endif() + if (MI_TRACK_VALGRIND OR MI_TRACK_ASAN) + set(MI_TRACK_ETW OFF) + message(WARNING "Cannot enable ETW support with also Valgrind or ASAN support enabled (MI_TRACK_ETW=OFF)") + endif() + if(MI_TRACK_ETW) + message(STATUS "Compile with Windows event tracing support (MI_TRACK_ETW=ON)") + list(APPEND mi_defines MI_TRACK_ETW=1) + endif() +endif() + +if(MI_SEE_ASM) + message(STATUS "Generate assembly listings 
(MI_SEE_ASM=ON)") + list(APPEND mi_cflags -save-temps) + if(CMAKE_C_COMPILER_ID MATCHES "AppleClang|Clang") + message(STATUS "No GNU Line marker") + list(APPEND mi_cflags -Wno-gnu-line-marker) + endif() +endif() + +if(MI_CHECK_FULL) + message(STATUS "The MI_CHECK_FULL option is deprecated, use MI_DEBUG_FULL instead") + set(MI_DEBUG_FULL "ON") +endif() + +if (MI_SKIP_COLLECT_ON_EXIT) + message(STATUS "Skip collecting memory on program exit (MI_SKIP_COLLECT_ON_EXIT=ON)") + list(APPEND mi_defines MI_SKIP_COLLECT_ON_EXIT=1) +endif() + +if(MI_DEBUG_FULL) + message(STATUS "Set debug level to full internal invariant checking (MI_DEBUG_FULL=ON)") + list(APPEND mi_defines MI_DEBUG=3) # full invariant checking +endif() + +if(MI_NO_PADDING) + message(STATUS "Suppress any padding of heap blocks (MI_NO_PADDING=ON)") + list(APPEND mi_defines MI_PADDING=0) +else() + if(MI_PADDING) + message(STATUS "Enable explicit padding of heap blocks (MI_PADDING=ON)") + list(APPEND mi_defines MI_PADDING=1) + endif() +endif() + +if(MI_XMALLOC) + message(STATUS "Enable abort() calls on memory allocation failure (MI_XMALLOC=ON)") + list(APPEND mi_defines MI_XMALLOC=1) +endif() + +if(MI_SHOW_ERRORS) + message(STATUS "Enable printing of error and warning messages by default (MI_SHOW_ERRORS=ON)") + list(APPEND mi_defines MI_SHOW_ERRORS=1) +endif() + +if(MI_DEBUG_TSAN) + if(CMAKE_C_COMPILER_ID MATCHES "Clang") + message(STATUS "Build with thread sanitizer (MI_DEBUG_TSAN=ON)") + list(APPEND mi_defines MI_TSAN=1) + list(APPEND mi_cflags -fsanitize=thread -g -O1) + list(APPEND mi_libraries -fsanitize=thread) + else() + message(WARNING "Can only use thread sanitizer with clang (MI_DEBUG_TSAN=ON but ignored)") + endif() +endif() + +if(MI_DEBUG_UBSAN) + if(CMAKE_BUILD_TYPE MATCHES "Debug") + if(CMAKE_CXX_COMPILER_ID MATCHES "Clang") + message(STATUS "Build with undefined-behavior sanitizer (MI_DEBUG_UBSAN=ON)") + list(APPEND mi_cflags -fsanitize=undefined -g -fno-sanitize-recover=undefined) + list(APPEND 
mi_libraries -fsanitize=undefined) + if (NOT MI_USE_CXX) + message(STATUS "(switch to use C++ due to MI_DEBUG_UBSAN)") + set(MI_USE_CXX "ON") + endif() + else() + message(WARNING "Can only use undefined-behavior sanitizer with clang++ (MI_DEBUG_UBSAN=ON but ignored)") + endif() + else() + message(WARNING "Can only use undefined-behavior sanitizer with a debug build (CMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE})") + endif() +endif() + +if(MI_USE_CXX) + message(STATUS "Use the C++ compiler to compile (MI_USE_CXX=ON)") + set_source_files_properties(${mi_sources} PROPERTIES LANGUAGE CXX ) + set_source_files_properties(src/static.c test/test-api.c test/test-api-fill test/test-stress PROPERTIES LANGUAGE CXX ) + if(CMAKE_CXX_COMPILER_ID MATCHES "AppleClang|Clang") + list(APPEND mi_cflags -Wno-deprecated) + endif() + if(CMAKE_CXX_COMPILER_ID MATCHES "Intel" AND NOT CMAKE_CXX_COMPILER_ID MATCHES "IntelLLVM") + list(APPEND mi_cflags -Kc++) + endif() +endif() + +if(CMAKE_SYSTEM_NAME MATCHES "Linux|Android") + if(MI_NO_THP) + message(STATUS "Disable transparent huge pages support (MI_NO_THP=ON)") + list(APPEND mi_defines MI_NO_THP=1) + endif() +endif() + +if(MI_LIBC_MUSL) + message(STATUS "Assume using musl libc (MI_LIBC_MUSL=ON)") + list(APPEND mi_defines MI_LIBC_MUSL=1) +endif() + +# On Haiku use `-DCMAKE_INSTALL_PREFIX` instead, issue #788 +# if(CMAKE_SYSTEM_NAME MATCHES "Haiku") +# SET(CMAKE_INSTALL_LIBDIR ~/config/non-packaged/lib) +# SET(CMAKE_INSTALL_INCLUDEDIR ~/config/non-packaged/headers) +# endif() + +# Compiler flags +if(CMAKE_C_COMPILER_ID MATCHES "AppleClang|Clang|GNU") + list(APPEND mi_cflags -Wno-unknown-pragmas -fvisibility=hidden) + if(NOT MI_USE_CXX) + list(APPEND mi_cflags -Wstrict-prototypes) + endif() + if(CMAKE_C_COMPILER_ID MATCHES "AppleClang|Clang") + list(APPEND mi_cflags -Wno-static-in-inline) + endif() +endif() + +if(CMAKE_C_COMPILER_ID MATCHES "Intel") + list(APPEND mi_cflags -fvisibility=hidden) +endif() + +if(CMAKE_C_COMPILER_ID MATCHES 
"AppleClang|Clang|GNU|Intel" AND NOT CMAKE_SYSTEM_NAME MATCHES "Haiku") + if(MI_LOCAL_DYNAMIC_TLS) + list(APPEND mi_cflags -ftls-model=local-dynamic) + else() + if(MI_LIBC_MUSL) + # with musl we use local-dynamic for the static build, see issue #644 + list(APPEND mi_cflags_static -ftls-model=local-dynamic) + list(APPEND mi_cflags_dynamic -ftls-model=initial-exec) + message(STATUS "Use local dynamic TLS for the static build (since MI_LIBC_MUSL=ON)") + else() + list(APPEND mi_cflags -ftls-model=initial-exec) + endif() + endif() + if(MI_OVERRIDE) + list(APPEND mi_cflags -fno-builtin-malloc) + endif() +endif() + +if (MSVC AND MSVC_VERSION GREATER_EQUAL 1914) + list(APPEND mi_cflags /Zc:__cplusplus) +endif() + +if(MINGW) + add_definitions(-D_WIN32_WINNT=0x600) +endif() + +# extra needed libraries + +# we prefer -l test over `find_library` as sometimes core libraries +# like `libatomic` are not on the system path (see issue #898) +function(find_link_library libname outlibname) + check_linker_flag(C "-l${libname}" mi_has_lib${libname}) + if (mi_has_lib${libname}) + message(VERBOSE "link library: -l${libname}") + set(${outlibname} ${libname} PARENT_SCOPE) + else() + find_library(MI_LIBPATH libname) + if (MI_LIBPATH) + message(VERBOSE "link library ${libname} at ${MI_LIBPATH}") + set(${outlibname} ${MI_LIBPATH} PARENT_SCOPE) + else() + message(VERBOSE "link library not found: ${libname}") + set(${outlibname} "" PARENT_SCOPE) + endif() + endif() +endfunction() + +if(WIN32) + list(APPEND mi_libraries psapi shell32 user32 advapi32 bcrypt) +else() + find_link_library("pthread" MI_LIB_PTHREAD) + if(MI_LIB_PTHREAD) + list(APPEND mi_libraries "${MI_LIB_PTHREAD}") + endif() + find_link_library("rt" MI_LIB_RT) + if(MI_LIB_RT) + list(APPEND mi_libraries "${MI_LIB_RT}") + endif() + find_link_library("atomic" MI_LIB_ATOMIC) + if(MI_LIB_ATOMIC) + list(APPEND mi_libraries "${MI_LIB_ATOMIC}") + endif() +endif() + +# 
----------------------------------------------------------------------------- +# Install and output names +# ----------------------------------------------------------------------------- + +# dynamic/shared library and symlinks always go to /usr/local/lib equivalent +set(mi_install_libdir "${CMAKE_INSTALL_LIBDIR}") +set(mi_install_bindir "${CMAKE_INSTALL_BINDIR}") + +# static libraries and object files, includes, and cmake config files +# are either installed at top level, or use versioned directories for side-by-side installation (default) +if (MI_INSTALL_TOPLEVEL) + set(mi_install_objdir "${CMAKE_INSTALL_LIBDIR}") + set(mi_install_incdir "${CMAKE_INSTALL_INCLUDEDIR}") + set(mi_install_cmakedir "${CMAKE_INSTALL_LIBDIR}/cmake/mimalloc") +else() + set(mi_install_objdir "${CMAKE_INSTALL_LIBDIR}/mimalloc-${mi_version}") # for static library and object files + set(mi_install_incdir "${CMAKE_INSTALL_INCLUDEDIR}/mimalloc-${mi_version}") # for includes + set(mi_install_cmakedir "${CMAKE_INSTALL_LIBDIR}/cmake/mimalloc-${mi_version}") # for cmake package info +endif() + +set(mi_basename "mimalloc") +if(MI_SECURE) + set(mi_basename "${mi_basename}-secure") +endif() +if(MI_TRACK_VALGRIND) + set(mi_basename "${mi_basename}-valgrind") +endif() +if(MI_TRACK_ASAN) + set(mi_basename "${mi_basename}-asan") +endif() +string(TOLOWER "${CMAKE_BUILD_TYPE}" CMAKE_BUILD_TYPE_LC) +if(NOT(CMAKE_BUILD_TYPE_LC MATCHES "^(release|relwithdebinfo|minsizerel|none)$")) + set(mi_basename "${mi_basename}-${CMAKE_BUILD_TYPE_LC}") #append build type (e.g. 
-debug) if not a release version +endif() + +if(MI_BUILD_SHARED) + list(APPEND mi_build_targets "shared") +endif() +if(MI_BUILD_STATIC) + list(APPEND mi_build_targets "static") +endif() +if(MI_BUILD_OBJECT) + list(APPEND mi_build_targets "object") +endif() +if(MI_BUILD_TESTS) + list(APPEND mi_build_targets "tests") +endif() + +message(STATUS "") +message(STATUS "Library base name: ${mi_basename}") +message(STATUS "Version : ${mi_version}") +message(STATUS "Build type : ${CMAKE_BUILD_TYPE_LC}") +if(MI_USE_CXX) + message(STATUS "C++ Compiler : ${CMAKE_CXX_COMPILER}") +else() + message(STATUS "C Compiler : ${CMAKE_C_COMPILER}") +endif() +message(STATUS "Compiler flags : ${mi_cflags}") +message(STATUS "Compiler defines : ${mi_defines}") +message(STATUS "Link libraries : ${mi_libraries}") +message(STATUS "Build targets : ${mi_build_targets}") +message(STATUS "") + +# ----------------------------------------------------------------------------- +# Main targets +# ----------------------------------------------------------------------------- + +# shared library +if(MI_BUILD_SHARED) + add_library(mimalloc SHARED ${mi_sources}) + set_target_properties(mimalloc PROPERTIES VERSION ${mi_version} SOVERSION ${mi_version_major} OUTPUT_NAME ${mi_basename} ) + target_compile_definitions(mimalloc PRIVATE ${mi_defines} MI_SHARED_LIB MI_SHARED_LIB_EXPORT) + target_compile_options(mimalloc PRIVATE ${mi_cflags} ${mi_cflags_dynamic}) + target_link_libraries(mimalloc PRIVATE ${mi_libraries}) + target_include_directories(mimalloc PUBLIC + $ + $ + ) + if(WIN32 AND MI_WIN_REDIRECT) + # On windows, link and copy the mimalloc redirection dll too. 
+ if(CMAKE_SIZEOF_VOID_P EQUAL 4) + set(MIMALLOC_REDIRECT_SUFFIX "32") + else() + set(MIMALLOC_REDIRECT_SUFFIX "") + endif() + + target_link_libraries(mimalloc PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/bin/mimalloc-redirect${MIMALLOC_REDIRECT_SUFFIX}.lib) + add_custom_command(TARGET mimalloc POST_BUILD + COMMAND "${CMAKE_COMMAND}" -E copy "${CMAKE_CURRENT_SOURCE_DIR}/bin/mimalloc-redirect${MIMALLOC_REDIRECT_SUFFIX}.dll" $ + COMMENT "Copy mimalloc-redirect${MIMALLOC_REDIRECT_SUFFIX}.dll to output directory") + if (MI_ENABLE_INSTALL) + install(FILES "$/mimalloc-redirect${MIMALLOC_REDIRECT_SUFFIX}.dll" DESTINATION ${mi_install_bindir}) + endif() + endif() + + if (MI_ENABLE_INSTALL) + install(TARGETS mimalloc EXPORT mimalloc ARCHIVE DESTINATION ${mi_install_libdir} RUNTIME DESTINATION ${mi_install_bindir} LIBRARY DESTINATION ${mi_install_libdir}) + install(EXPORT mimalloc DESTINATION ${mi_install_cmakedir}) + endif() +endif() + +# static library +if (MI_BUILD_STATIC) + add_library(mimalloc-static STATIC ${mi_sources}) + set_property(TARGET mimalloc-static PROPERTY POSITION_INDEPENDENT_CODE ON) + target_compile_definitions(mimalloc-static PRIVATE ${mi_defines} MI_STATIC_LIB) + target_compile_options(mimalloc-static PRIVATE ${mi_cflags} ${mi_cflags_static}) + target_link_libraries(mimalloc-static PRIVATE ${mi_libraries}) + target_include_directories(mimalloc-static PUBLIC + $ + $ + ) + if(WIN32) + # When building both static and shared libraries on Windows, a static library should use a + # different output name to avoid the conflict with the import library of a shared one. 
+ string(REPLACE "mimalloc" "mimalloc-static" mi_output_name ${mi_basename}) + set_target_properties(mimalloc-static PROPERTIES OUTPUT_NAME ${mi_output_name}) + else() + set_target_properties(mimalloc-static PROPERTIES OUTPUT_NAME ${mi_basename}) + endif() + + if (MI_ENABLE_INSTALL) + install(TARGETS mimalloc-static EXPORT mimalloc DESTINATION ${mi_install_objdir} LIBRARY) + install(EXPORT mimalloc DESTINATION ${mi_install_cmakedir}) + endif() +endif() + +# install include files +if (MI_ENABLE_INSTALL) + install(FILES include/mimalloc.h DESTINATION ${mi_install_incdir}) + install(FILES include/mimalloc-override.h DESTINATION ${mi_install_incdir}) + install(FILES include/mimalloc-new-delete.h DESTINATION ${mi_install_incdir}) + install(FILES cmake/mimalloc-config.cmake DESTINATION ${mi_install_cmakedir}) + install(FILES cmake/mimalloc-config-version.cmake DESTINATION ${mi_install_cmakedir}) +endif() + + +# single object file for more predictable static overriding +if (MI_BUILD_OBJECT) + add_library(mimalloc-obj OBJECT src/static.c) + set_property(TARGET mimalloc-obj PROPERTY POSITION_INDEPENDENT_CODE ON) + target_compile_definitions(mimalloc-obj PRIVATE ${mi_defines}) + target_compile_options(mimalloc-obj PRIVATE ${mi_cflags} ${mi_cflags_static}) + target_include_directories(mimalloc-obj PUBLIC + $ + $ + ) + + # Copy the generated object file (`static.o`) to the output directory (as `mimalloc.o`) + if(NOT WIN32) + set(mimalloc-obj-static "${CMAKE_CURRENT_BINARY_DIR}/CMakeFiles/mimalloc-obj.dir/src/static.c${CMAKE_C_OUTPUT_EXTENSION}") + set(mimalloc-obj-out "${CMAKE_CURRENT_BINARY_DIR}/${mi_basename}${CMAKE_C_OUTPUT_EXTENSION}") + add_custom_command(OUTPUT ${mimalloc-obj-out} DEPENDS mimalloc-obj COMMAND "${CMAKE_COMMAND}" -E copy "${mimalloc-obj-static}" "${mimalloc-obj-out}") + add_custom_target(mimalloc-obj-target ALL DEPENDS ${mimalloc-obj-out}) + endif() + + # the following seems to lead to cmake warnings/errors on some systems, disable for now :-( + # 
install(TARGETS mimalloc-obj EXPORT mimalloc DESTINATION ${mi_install_objdir}) + + # the FILES expression can also be: $ + # but that fails cmake versions less than 3.10 so we leave it as is for now + if (MI_ENABLE_INSTALL) + install(FILES ${mimalloc-obj-static} + DESTINATION ${mi_install_objdir} + RENAME ${mi_basename}${CMAKE_C_OUTPUT_EXTENSION} ) + endif() +endif() + +# pkg-config file support +set(pc_libraries "") +foreach(item IN LISTS mi_libraries) + if(item MATCHES " *[-].*") + set(pc_libraries "${pc_libraries} ${item}") + else() + set(pc_libraries "${pc_libraries} -l${item}") + endif() +endforeach() + +include("cmake/JoinPaths.cmake") +join_paths(includedir_for_pc_file "\${prefix}" "${CMAKE_INSTALL_INCLUDEDIR}") +join_paths(libdir_for_pc_file "\${prefix}" "${CMAKE_INSTALL_LIBDIR}") + +configure_file(mimalloc.pc.in mimalloc.pc @ONLY) +if (MI_ENABLE_INSTALL) + install(FILES "${CMAKE_CURRENT_BINARY_DIR}/mimalloc.pc" + DESTINATION "${CMAKE_INSTALL_LIBDIR}/pkgconfig/") +endif() + + + +# ----------------------------------------------------------------------------- +# API surface testing +# ----------------------------------------------------------------------------- + +if (MI_BUILD_TESTS) + enable_testing() + + foreach(TEST_NAME api api-fill stress) + add_executable(mimalloc-test-${TEST_NAME} test/test-${TEST_NAME}.c) + target_compile_definitions(mimalloc-test-${TEST_NAME} PRIVATE ${mi_defines}) + target_compile_options(mimalloc-test-${TEST_NAME} PRIVATE ${mi_cflags}) + target_include_directories(mimalloc-test-${TEST_NAME} PRIVATE include) + target_link_libraries(mimalloc-test-${TEST_NAME} PRIVATE mimalloc ${mi_libraries}) + + add_test(NAME test-${TEST_NAME} COMMAND mimalloc-test-${TEST_NAME}) + endforeach() +endif() + +# ----------------------------------------------------------------------------- +# Set override properties +# ----------------------------------------------------------------------------- +if (MI_OVERRIDE) + if (MI_BUILD_SHARED) + 
target_compile_definitions(mimalloc PRIVATE MI_MALLOC_OVERRIDE) + endif() + if(NOT WIN32) + # It is only possible to override malloc on Windows when building as a DLL. + if (MI_BUILD_STATIC) + target_compile_definitions(mimalloc-static PRIVATE MI_MALLOC_OVERRIDE) + endif() + if (MI_BUILD_OBJECT) + target_compile_definitions(mimalloc-obj PRIVATE MI_MALLOC_OVERRIDE) + endif() + endif() +endif() diff --git a/yass/third_party/mimalloc/LICENSE b/yass/third_party/mimalloc/LICENSE new file mode 100644 index 0000000000..670b668a0c --- /dev/null +++ b/yass/third_party/mimalloc/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2018-2021 Microsoft Corporation, Daan Leijen + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/yass/third_party/mimalloc/SECURITY.md b/yass/third_party/mimalloc/SECURITY.md new file mode 100644 index 0000000000..0ad51aa062 --- /dev/null +++ b/yass/third_party/mimalloc/SECURITY.md @@ -0,0 +1,45 @@ + + +## Security + +Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet) and [Xamarin](https://github.com/xamarin). + +If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/security.md/definition), please report it to us as described below. + +## Reporting Security Issues + +**Please do not report security vulnerabilities through public GitHub issues.** + +Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/security.md/msrc/create-report). + +If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/security.md/msrc/pgp). + +You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://www.microsoft.com/msrc). + +Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue: + + * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.) 
+ * Full paths of source file(s) related to the manifestation of the issue + * The location of the affected source code (tag/branch/commit or direct URL) + * Any special configuration required to reproduce the issue + * Step-by-step instructions to reproduce the issue + * Proof-of-concept or exploit code (if possible) + * Impact of the issue, including how an attacker might exploit the issue + +This information will help us triage your report more quickly. + +<<<<<<< HEAD +If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/opensource/security/bounty) page for more details about our active programs. +======= +If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/security.md/msrc/bounty) page for more details about our active programs. +>>>>>>> dev-slice + +## Preferred Languages + +We prefer all communications to be in English. + +## Policy + +Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/security.md/cvd). + + diff --git a/yass/third_party/mimalloc/azure-pipelines.yml b/yass/third_party/mimalloc/azure-pipelines.yml new file mode 100644 index 0000000000..0247c76fd5 --- /dev/null +++ b/yass/third_party/mimalloc/azure-pipelines.yml @@ -0,0 +1,197 @@ +# Starter pipeline +# Start with a minimal pipeline that you can customize to build and deploy your code. 
+# Add steps that build, run tests, deploy, and more: +# https://aka.ms/yaml + +trigger: + branches: + include: + - master + - dev + - dev-slice + tags: + include: + - v* + +jobs: +- job: + displayName: Windows + pool: + vmImage: + windows-2022 + strategy: + matrix: + Debug: + BuildType: debug + cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Debug -DMI_DEBUG_FULL=ON + MSBuildConfiguration: Debug + Release: + BuildType: release + cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Release + MSBuildConfiguration: Release + Secure: + BuildType: secure + cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Release -DMI_SECURE=ON + MSBuildConfiguration: Release + steps: + - task: CMake@1 + inputs: + workingDirectory: $(BuildType) + cmakeArgs: .. $(cmakeExtraArgs) + - task: MSBuild@1 + inputs: + solution: $(BuildType)/libmimalloc.sln + configuration: '$(MSBuildConfiguration)' + msbuildArguments: -m + - script: ctest --verbose --timeout 120 -C $(MSBuildConfiguration) + workingDirectory: $(BuildType) + displayName: CTest + #- script: $(BuildType)\$(BuildType)\mimalloc-test-stress + # displayName: TestStress + #- upload: $(Build.SourcesDirectory)/$(BuildType) + # artifact: mimalloc-windows-$(BuildType) + +- job: + displayName: Linux + pool: + vmImage: + ubuntu-22.04 + strategy: + matrix: + Debug: + CC: gcc + CXX: g++ + BuildType: debug + cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Debug -DMI_DEBUG_FULL=ON + Release: + CC: gcc + CXX: g++ + BuildType: release + cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Release + Secure: + CC: gcc + CXX: g++ + BuildType: secure + cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Release -DMI_SECURE=ON + Debug++: + CC: gcc + CXX: g++ + BuildType: debug-cxx + cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Debug -DMI_DEBUG_FULL=ON -DMI_USE_CXX=ON + Debug Clang: + CC: clang + CXX: clang++ + BuildType: debug-clang + cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Debug -DMI_DEBUG_FULL=ON + Release Clang: + CC: clang + CXX: clang++ + BuildType: release-clang + cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Release + Secure Clang: + CC: clang + CXX: clang++ + 
BuildType: secure-clang + cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Release -DMI_SECURE=ON + Debug++ Clang: + CC: clang + CXX: clang++ + BuildType: debug-clang-cxx + cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Debug -DMI_DEBUG_FULL=ON -DMI_USE_CXX=ON + Debug ASAN Clang: + CC: clang + CXX: clang++ + BuildType: debug-asan-clang + cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Debug -DMI_DEBUG_FULL=ON -DMI_TRACK_ASAN=ON + Debug UBSAN Clang: + CC: clang + CXX: clang++ + BuildType: debug-ubsan-clang + cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Debug -DMI_DEBUG_FULL=ON -DMI_DEBUG_UBSAN=ON + Debug TSAN Clang++: + CC: clang + CXX: clang++ + BuildType: debug-tsan-clang-cxx + cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Debug -DMI_USE_CXX=ON -DMI_DEBUG_TSAN=ON + + steps: + - task: CMake@1 + inputs: + workingDirectory: $(BuildType) + cmakeArgs: .. $(cmakeExtraArgs) + - script: make -j$(nproc) -C $(BuildType) + displayName: Make + - script: ctest --verbose --timeout 180 + workingDirectory: $(BuildType) + displayName: CTest +# - upload: $(Build.SourcesDirectory)/$(BuildType) +# artifact: mimalloc-ubuntu-$(BuildType) + +- job: + displayName: macOS + pool: + vmImage: + macOS-latest + strategy: + matrix: + Debug: + BuildType: debug + cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Debug -DMI_DEBUG_FULL=ON + Release: + BuildType: release + cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Release + Secure: + BuildType: secure + cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Release -DMI_SECURE=ON + steps: + - task: CMake@1 + inputs: + workingDirectory: $(BuildType) + cmakeArgs: .. 
$(cmakeExtraArgs) + - script: make -j$(sysctl -n hw.ncpu) -C $(BuildType) + displayName: Make + # - script: MIMALLOC_VERBOSE=1 ./mimalloc-test-api + # workingDirectory: $(BuildType) + # displayName: TestAPI + # - script: MIMALLOC_VERBOSE=1 ./mimalloc-test-stress + # workingDirectory: $(BuildType) + # displayName: TestStress + - script: ctest --verbose --timeout 120 + workingDirectory: $(BuildType) + displayName: CTest + +# - upload: $(Build.SourcesDirectory)/$(BuildType) +# artifact: mimalloc-macos-$(BuildType) + +# - job: +# displayName: Windows-2017 +# pool: +# vmImage: +# vs2017-win2016 +# strategy: +# matrix: +# Debug: +# BuildType: debug +# cmakeExtraArgs: -A x64 -DCMAKE_BUILD_TYPE=Debug -DMI_DEBUG_FULL=ON +# MSBuildConfiguration: Debug +# Release: +# BuildType: release +# cmakeExtraArgs: -A x64 -DCMAKE_BUILD_TYPE=Release +# MSBuildConfiguration: Release +# Secure: +# BuildType: secure +# cmakeExtraArgs: -A x64 -DCMAKE_BUILD_TYPE=Release -DMI_SECURE=ON +# MSBuildConfiguration: Release +# steps: +# - task: CMake@1 +# inputs: +# workingDirectory: $(BuildType) +# cmakeArgs: .. 
$(cmakeExtraArgs) +# - task: MSBuild@1 +# inputs: +# solution: $(BuildType)/libmimalloc.sln +# configuration: '$(MSBuildConfiguration)' +# - script: | +# cd $(BuildType) +# ctest --verbose --timeout 120 +# displayName: CTest diff --git a/yass/third_party/mimalloc/bin/mimalloc-redirect.dll b/yass/third_party/mimalloc/bin/mimalloc-redirect.dll new file mode 100644 index 0000000000000000000000000000000000000000..a3a3591ff9b07edada16ad83ca1391963a126b2d GIT binary patch literal 68096 zcmeHw3v^u7b@mw{;qgOAU<@LJ8v-dvxd}p719{0|j4fpi0OBiE?2;<|74IrkEpMOvOHH_lR z-uv`r>~E+4{oDap)!)ypZVbn?NVIi(G}x@w2U}WN2tHXt0Qqja~krU`->^Gq9OSzbrn1@_!4awdZKAe9>sto7njW2|mTv>_M|GPYp`GTISlAv7XbK7N?T5<$k+Tmxl$ zP}q(@6<9ug#!^cng0Px^L|d5~@y{(pNcu@$SkQHnF`_M8xBt#T7mY=!2^>3MC^`tS z<>M!L(NI$>2zQe_V9MoPI)UyM3)>%M@)i1wTsoJrkvFE}>#H~5YZ?rHWZt#-diFYe z4ZaIskJ9DO=HY7uT{!Y0>|mh16#`+R(D zCEms8j*(81w;6pg^4pv6^%Rw^q$*2D=&#UyBX1G!5PD?fnFaW|frRdQ558W39dVt1 zPrvlNRUWs#x7t%$wYSn!R)tGV?>JMgcUO9J9a&mc7rykaYL8#UyoV%(-9VQ<)^-mmp_QCsouYx7R1aNJ>G4v2rN_n7vOwY?n(Qk% zRp==>RpoK5em-7oUMze*U6}r>6)Ci>?A+T%E#&R%gt;jA%k-BEJ!AptnKmGO`i+I2 zSeXJ_(=Qp+yPhXyyq)g!tLIjCrW%1_(MUTPP3}5Hud0fkV|!BZTlHOQ|NB808rWhV zfK{b`MbPaWDZgK(=eXv+SQ&-ewRk^Aul zZRe4;ulmB<@MR*E@`1sVo+Yn7NLN&W$M^z- zW(h!1AbrTDMjNl1>g*~$xC8Ysq^>0`d{lQtzY(d}4Zyw~r5yWfNPeS1%Nru`No)0` zzfC^Gi%w|sIDRlG1$ZFliizl8TVRp}Z>W9LeWDrm6n6aE2axJ6 zdi`yJp?DNmWW~AKVX;c|lDmWtqBVp}sYvqPMXl{Klmyz=S~_RRbQ3dsGbxCY%Ko2;W4m z)hj8ksoZLBIvo=eGs z#57)zo~Ibl6pg1PNPeO1N$!u&qL>{Yp#mI49ZA$hA64@-4H$oXk*F0~1%+0kcBl-0 zXKWC8KvP>iDEiT_v=TySVuaxIXG}TQm~!|e%HcjEei`*lA{Tu=U87wZR$QOos%XXFJ~w;~xJQy=M< zIRTcKwjwohfG)Ys@MLCx35vfMm{qP^6Wlqm*)*0ILJ5-(F0=H~pfKljfaXUcdxJH{$@ zUM*AiJX|bNDGj32@SqTm6s^f75h(0!;`+SgRMF*cGOTbC1!-TN#ri-Li}i(QE*9%A z{t{yd&DVJI^AFF7_LtNw zqP$&e|CH7#?U<^HMqZ{IAPB#{kF%;JA^B52v6M@=%LyJIT30mkDehWu)iE|dUP2~@ z^$#5XNPH^ND#7$prDwTD+6G8lTvdeewTx1Jm;|qCAy!->r6q2nNy7&fmz}rr|A57d zl(RUoiYnl1_45MYlNlL4E@Qcb>O4T)kdd)$=N(d`8y~I|DZ{7lC*u#Ccl+MrgP$Yz zpov&h2C<6smiTB!Vl1&z@7nF5(;U4cgfbs6qRoDV{n 
zO;R`^q!J~24v{n^VG^lz5~N6C9u|CE4#C4V(O4wrrT&(FbSbAL_K zgQ#fN83Q8#rWZ%V;GoA4yGr)FinE$+6$6DXz56qsA?n!f;>)S75B$_zLR%IU#LQR9 zun~|6d*QnZq)8o0=yOh@p8T_p#z_i=XMv87O=%@EE;`{1QtS| zjshA$i%1S1K#PzdyIv#~Kt8(U1wK_3Iq12hD^?XHd;Sl~?fOPY{JM<3D@`FQm5`NI zeXqBuR!zL&bVjvuQ#C&4LA7$B+8R?TKguVK)?}$Rw{W6rMI`{{b?kk_{$jgW_yBFD18=z#khKB^ym8 z8%Z`gyHO|!bCTnuNmbQ^sxXz!Sq>o>{i{jV_ej>uyFKkhjt|XhNuKDg_9XT0eV%q) z8Q&r@Ir~;&no3q-0;LkY>oa0{?k@gy7=*itu)C;)ssR0dBIzzFm6yHZ@)~*BCobp7 z%YJdWLS7Dt%Qf;cMVHWY5HWrDL3=%X5MS_ei7_JUp7TT-V^0yH#t5J{}j(XcTj#^*eRYs~&}ebZPC$KyWI^5!L!7Ba*_fD{1%y&xy_intf(;}GEif!`BjugMC znH_nOae8{)r)fL|ypYxedyBLPG2J|(yvw;|s#E3laiqtkleypM}XCieA zE#k12$EtoEPA+gRiJdr|OZL2mmdcT)!6VNpYN```KGf9nnJlbKlm*US-xpZgMujYqLp5@b2j-bKU@vX zz~^fOHRq?dO=Uhl_2Gz|oCtWS-jf^xmco!v!R z`r4AOpeftNd5>vCg#nfFk}xkXB0=RPUs-?Y>gONgx7fTD16U)|^zcTAlo&L|{+(Ou z2Wcci$x<$bcPMUTU@*#SD4(ShKQYtO(?)iv1FiFfD9az%FZ7pFbmC`%zO52XvffMD zb*2yNOPcfs5Chwiuk7}ukc7?ADd3HO%3M6sEJ$Q8!K7&}rzbziR3pao%jq0P*t;78 zSK7Or_E9G{d8`s5!sI^-dKy%GVNMA9SG;Q8xq(FVBaYgLLZjhR)xFQt1F=+9>D@R+K-EN3L*o{W2v8&HjMK}oo|ACl#B;wD9)a1e7YSVxeS7a*_-lSc-tN>o z&NP~iWxQl%SUhZ8BQF@l#2~&zP})Au5AP z)zKbVy_8M^M&Ao}M)&t1nY$Q1JcY&Q7@h$l4q)KHKmqoEn?&E2!pOmaO;@EWVjDVM zqW5-`vp5_trINE){G&YC<%cF&<4mJ`O(*IbM3QTGqg&i>(Zu~05YWjHDAV8Jf^dB$ zU3=4?;-}4x7q>Ijd|pnb??wADzHlFs!#_Ufj&93r^AMSJkyJ9@4Jx(GZzKywUq&c~ z@%oLDrE0`_%^B)X%o)ljE!|JVUc*?TranvzUNQB3ZeLDi%4vQ=E$4=l#lP=5!`J^% zvX3`gZYH*)YBH)`%A}4Forjd4Ql^`iH&KX08kn>rjAdfQF#@3&P8LL7DLeKEH9*KMOa=m(zIorf_*G&CjI0UH-=T^z`2p zmpx~>O*=uKMru9$A-ZyW&PVoIM_SRyG;++{#bftkeeC@wp|MU3+~V}{IXBoCUIV7o z{Q>UDC{;r#ZuefY+VYa!elAndH3q=~;R@ zPZ9y~a+`=y3SR`O`D{*&STZHe>ES`WTi#N^VNJW_ufYr{_A^5?Sd+#Jn6^k;qnDm{qdPUw5la~Jl)dAOZp7$x9!*6D5rt?pjYBcc2aV6- zK~eCnMAKHXowT*`p(*mv8VNd^p@$16f1F79Gt0x0s0wHDg|4+}2uUngi+R4P;sqV4 z0(w{wk5gLbo{>W!ANeA_6bE`$iNYx`ODs|aLR43wt0Pr}CKS?LqC!qACeQSibd935 z2!X$5iJMh{MW3|zCT?M&CmUVf ztGvQ@WgzF7G?wB;5R<+%{Wm1#tcpk*p15_;`Ksp%2&7SmXAY$=!l(FTA_glPC` z)%HvtjbA=B)JV5NV>Q+!hc%sXTrw!11k}j?aazr2!?6QD_K0H#Pvc!aR3R=pQq%N~ 
z;W6@boT-kRgK;Mooq|a|s55OOB9=O58#I>jYfq~2#1{mZ?8-!oI+EB%NFX|raP$S~ z>t#vGPwM!px+*Zo4T;Py-Dhn3E*wBd-h;Tj`15H7uwN-BR$i-L??}37dZAmtR0n00 zv$mprNC68f`&u!v&N(91QU2bP>{ZfV~l-W;&L(} zpbq$rAED*x>nr%$pPme*yuXH7abn&gWMMr)D~NoDqtGF%ep;_;*w6D*K%nbK@@c-g zfGodp{5yOhKSxdE1Gqb7`~sQq2AISo)_~Scr-+yXAkD(w3`*`X&IhtA{Tj(cBC-o3 z%0#{a@enb~ZU6Pr}K0wDMf!D{!AAY!mCt)D%CEcIGpu#Vt$jGRP8-C62V)#&x z@3NA8U*HAY42;WYJo+;xhg#@hXyI?h$Ccj}LU4Fw=5VL)N6vgj!~MPgWjeRX%skxT zLm=H`+Iyu-(PAY8rNEt5IXK)9Iin*-A%WC%?@WsJb{KA{qG%7Twz!I&cs@Z15Gqcn z$*ts?ggB2CHJO^@p)lk?^EcsjMi!!as+YO}8;OsobQz0(mUB^@8w>d-!!E00Z$?p#eaJz#?oD{ z0N7Eq5hVe@v-0U8-eA6T)!oKPJm!ULejZq~6POCiERx+mEgPLr0%WD4=ysqp0-lu$ z1X!m4L6PCK#b$8AGQs%E^v&`_ID`Mvyft(fMH4D98x0R88vSnpG`j9v+>c(=T}~e7 z`#YGOsEY|2B$8%0s_m8 z|MyMa5tB?7G+$t?&lgOvwaluB7Wuf%Msv_fk^)!j9mRAUok25-xy;vy2_s$;H|6l+g;```nYr{Maeh!! z+x>+P&7;Mv9|v{;;qf|ly8M{bXf*TfpFjPNoUo+(2fQloC*fpE_MC|D7VO>T=7b!>QWaUW8eh>c-}sem_*Q`_qaI(-oGvSBl!Y z{BXcHd)K?B(@H6;wS*0sWp*xMQ^T>5ZG`~pAK6xDLY0bEi++$w%Jy{yFiwge+klpD ze+e!BA}PA-MY7)uw#WP5bZhpZ*>MKo$~z?b-)teY>x{;4h~DA;4eMII-sMBnF9w`? 
z?AcGeeu5J))1dAyex?f0Eg}KkbH0W&N%R>@kwK3RA@gZ7^UW8{TkG7x2v-B7X&mC| zb#hK#y5t33>TL2s^o$;Va885m;b&Un3A){XhGOG4nDN9O>_Jk<2Zin-g+7m}h>}=H ztnEQP{&dbF)8RG>rF~duP^!_`&)HKrKf^9dh@DMhKZpD;xs1;u!tKBwL|AGU@I`dZ z|BSg<_wVHcZigY3 zzCflo(hdni6%1LrZ}u!URO#C&%atf4ZZHY2?-QGAy4K_Xrcn%KHQpMf zp&%QM24yyo7A+S+%l}E+r#^?P_mtv96HwC6?_iS65ApO7@W_~@v`(I00TD`{d*1063LqAhewpP zx9Ho*f~q)W*-C2>9}48sCUcK3jC`@Qp$U=JHE|!UX)Kw1L4y_s==|e{@Lr;cWaQl> z@x;h(M0})DHNv;@6_i)5pu8l}YpbRp{e7CIy{v7utvIW~`^^f)kmmLxe~INyKdbs6 zGrlKXNah>RXUWgG{>-^1wx1NDg*ZoBCkF}Li0D`|$l~}(2Ol+*(RJk@tmOb%%ZHv5 z0|MiRm&_V8-a}a!Kfd&11Q&$S@a!V$`%#2F!Ui%w%(pZn&ZZ?Pzw9pFj(wvs4K!r3 z3a4u+(I{@&6o*L4`$3U5Z@QoTEdDG|@#2Z=0X~XgLF9KHdR8<^qXua#E`E15f*+S- z5kE)2m70LlJL53uFtaW|=4P3x8-8NO^SeA_(0BpI5(JNy-ih2bCT}>yyTarxl)Sf4 z4THv?nY@1RmYKYZGrZTDyn|om^%n4qLE}RvFF|*UXi4JzkTc4q(|pzg7BNh=`eq)&up6s&l;gVM zIp|XJWEo4`FVbBBH+7!h*#8XhLJHGBMVW;T=ZMr=qU$bxk&yUXPRSuimh$&fh&0v` zcoaYoW0oPIw_1d1BRAtj>cYULpz-qK3**^Y!a%-^nBR%9?~b$ZluVtHsXYf%F8Uft zAIL#Jas0?iu9r;L=3uf|L9Y6YnW9m{i0zHCw_lQenc|Kd2gx@2Zb`o;-rl_UeqAy>lY_~EZ@%*tVLpE;LU^(ehs)wzpg~VsifHB>$<-&)3cl=grb&Ji4E=dcvPpV5a;k@0IqZF^7~cfd5oQ1w^luHJG#WJQX0 zShoHVnW8HGooSyave%C#J5@*L5u$#6rtdReL?dtuRovdqz-5svk{Nki&KHHF%P?f> z#{v~Ahz!1VN+HkI9%8cNJ3Tk2;IE{+`&@{1(}Rksm0zP{{cJD6?Tc@c@@o3jM6L`B z=fHntGX5M|UW0X{P0LJunf7JtYX_+zPo?LeZ#WMtS0abLpPP*ST=iX<$KKCD-)r)) z=F|6=la0?D_9txTN%}#4w(U>_`Pyf(^y!n~!`qjmQhR^?Ja_ftd6%60SijnZyq-r0 zM@oF1t3E%@BPu_C4t@SDkC3zL11C+p5?W5v>1_?Ql&@NK>Mg(V4Iz2r2}%yAMF0LM zcQ4uKC-~P~?X?p9{+HX%kGL!K8{fkVB`gz$-}p2#(l?}WIIwowdYlNw%eeKK4u5>k z8#rq_>y*Or&UP}GC~=H;X}>Fddm7tQ>fXD}+U0WlC-|K^2t@t=mk#wN*1tp6pK~2& zF}bW{W_(+f;zvXQ9NWJ{dk&}+Fuo2AI3Ojvzl2QKBc72d#rCxs$r^ePg{X=_CH5m? 
zLwUQPbAt{~+7QKuC54yR_@i~Pc|5rg2NR&EAP#^*nAuZxY(Kgqwp#J`{9-(Tk6U*X?R@$YB&_p|supXwZ1 z2LT5G2LT5G2LT5G2LT5G2LT5G2LT5G2LT5G2LT6xTnOL@X|j*{Z=3=UZAYlwg%B1T zstk95*_8;#QFdIG2@2!b^D(m9Qk11%*8#I0G3-?wwZueE`Ev@`9M@XrnpMDNxi&D@RQp|ixr*XQ}x@gRlbsA=TrIi*iLt~i(O6aQ}R?yI7}r!pPou?u6V1&o9d(fu*Rex z%AJpcz{NrU&v($P3D{Bm>V$4z(VRsgiR*m`y$IKr;{SmVu0i-Pf)^o${8tfvj4*~U zb1#@6p#c9-SjZ;Kol6>;nvh$|XVhT4HCo%y+7imn3Acpfl%wX`+JnR%g> zJXy8zP_&tJa6S$Kf0zjPlA>J`M%OAW9u3xqR%v<5L(yn!bdvH1g3*?6%XUSP8@~J@za0)nNYs*?sc0st*=}s%a^UL-gMWx)gN8A zdG$v8+>e>`>+Z-Uj`>#KR#CNi-Re6wuHJYX9!fXqA@-Jyo0Zs1eqhu3jhoh2ufJzq zZPmt#%G!;aD%WjdEN{Jmit1bS+VvX(8#h&No`mkUjhk+@wAL=~w$N5hTc$1dEnmS~ zx-}7QYFMSs$9rA*=YeobL+b-EtquE&aBB-n9!xBefe2K8awloIUfc?fXc&LOO5QU= zw>TE6PefJjg#5(~p{s#r)1f+)Pbr4^1_^EM9vErc zg5f5xJ`j#KLVPqH#>}xp3q~SMVPx|PLosdfVlB9}6{V;f7->zd!3Ly-IA)b+swf2W z`eN~5OG7Z)poxv1hX2D=ETnbYvL(yv-^VG%R}T7G{A+Hlg=;RUl!%c&(a`opQ!pxp zFWnXmg_h(HYLWlDnpFQs+Y$%H(ntttr7um~=%4U|!8p1O_So8xXbQppwbp1uD7tC_ zT}ZGdS{9qElr^6Znnhq-q;19CdX?r&&WF#l)~jUYieab(JS;61YzyJfjCZuQG>|=_ z50c^LM6>pZL~}%og&%}yj6dxr`erP_*A&FJJrvg-h=$`KErQiVI7Y)&+twP@f(;GP zP%Ng!uyhG&-e6ptAK|7^+f3qwHiDB67|4&HA>l>Tga?yIJ{V?`mA3?~dT?tc0KKSY zwKlne7X72G7zA5`_4nl;Gjkck3ag$k#A*p;%e4g6ELYdOje^+P-r7(yf)OO8K#sUJ z<3xQlPBkb)$ut94wl<-Q=FN}IogbS=gN85csQXMWk>#DKcOubH8_?C{B9i(>_#rvo zh24?k6{xS$>hTZO=ri~a)eoZcd8fcXm|D!!s7s+tgep>wkuJUS!^#IJ`fzguKF3rZ z|CWs!xk(UnhDZLXwqk=CWMA`RQe8Dp0Kzie2>#65GQ70lHms0Q`wUI27DG6C6#6O`#2?MWLCVgENy~Y1c z%ojpT{?={VVxf4gm~OfLB54u?@oIiceUd&J;v-({4Ta-U?cfYq^=9)}T9HexaDx&a zlCR{NQmhoY>yfV1vi+0|tZmpzC-kV7@Rcp4Jos193Q54rZF!teiq4W_$rGKfN@YFV z5JaBx&x(TABYigI*{)5jO#d%iAoa7<%$p_nl_e3alpy8^v-z_9F08_37OX7nXlaDvEO++pGL+Noo46 zLOs@U)gMZStnp{-pGjy;Jj_v%Wxk?+ZTuEk&sMHr&-A}pPqxWr^=lrQ33%k7Qx&%V zoVC-L*M1oj?|FUh7X0VM;}EkxiTA)1agW^P;64>I8kBgSo+9qO4(?Ndof#7Eu_@v{ zpKT#&*^A!?t{QLPr#_2VQw{MEL`yJe;gu7qjT{A`8dmY@TgnOgJyM2nd zvyV9InW+HZBk}H^BJM+uy-x*pHYo8Po+9o($KI!e`EfVoNSwzM-oMP=pE~=di-UWq#95PZxr|da0)OqE?0&d7TKw@z4M?2pWSo@o zsKh?XpqgnY7_PHE9mlxgyXb 
z@$Qpx%7*{&$Fu#-xyp6u&Vs;S3S3KNTqa}1>P`s6}K8f>y4ey?Nob{u^=wif!67SPC+}Xz*+%E>+O6I7-`wQ7PryP4% z5M7kGRN}4KaQ8d*eo^pNLVXf%zl;Mi?w7dNZOI-7iqDDP83K(G=cJ5#WE_#WYYy)3 z4B$N6eG=yZ84t>MRN~(2;GPG$ca}XYaUPSgDeYdzznEBDghc5V1-@Px>oWFB-0Pfu z(?vj?u||n=yNo+!tSWQPffW)LHr_9B?w9ePjGgny3yZT|+OWiVOvYt@CHzkNy)t(E zj2*HIa$PQQu92}4IwWyd*G_z45lBkB_se)d#-$$@_TKB*`-KHuN*k6qv%i+>8X4C~ z+*1zj7X^1m=>HA^^!`=z^|E-Qx6;Cr&9jJ3zNVK`nU|7*%H^9~Rk^sdl&km05&@Sl zUIA^&=db3KI-uNqFRy&Pv63s_NG}9ay$Sx8Lbd#5#FCxlTe8J_ds{*e(0@BLMq3}K zwa{6z1zk9X;@ZYgFal_{Fof zt(SOzzF^Aelfg^jt6t;H4N4T}fA6!jF36i;XX@SCN*;+a2Tw+>>K>j$_?tB9-OZNw zMx%IwoyvD@^J)}W3L4PcvqU|%+`M$Mx5d9iSl-tz^;d(~Ql5o3+QL;)e-MFsR2P`- zRU}zB@xiU6PV(MuIoK5?$Iq5W71{IE`=2vd3kliyE2)xS^%QT6kRY))2|2%b2RP5O zU8`!@rg+n4ybN83(=c(c=Gp)61ZB73_5OBxGcO1Irced_cWE%5;S%kDtO;dye$#5? zs6xL zFJC)R7JF_0xBHjNXbYB(9k#0x)@eSw~qIVk2L-(kiCr1i!}bfj=hc0hcvsJu|){|NaKIL z*mi^gr14*CtOp^5H2#N*y?`)?H2$N9y@@b{H2x=v%|keiH2y<|eFR|`Y5cDV+lOEv zjsMzU-$fWj8vp;mX4ODfr175w>`nx>2lR+PiQq=M7x4f>Dbjea75gDV8Pa&y5_|VY z!G|=Sk7PjvFVg*p{{z8?G~Nfq$`Hzt#=EfCEeL+3@qQ+@6G2BBzfEJO5CTZ!cRTD1 zLJiXR?G#&tP=_>rSHiX+G$M^>?%4r^2-5i71N%Ng5@|dW&Sq@^&Pd~Ve0C>7C(`&$ z1lxzugEXEOXJ19wk2Ib!WN#w$BF*kaT?l*gkjC$)*{cYrkshvPY*ig%qyvbbMldKJ@!LV@i?s7`5O5H&@Yj3m5eW(M!6*or9{0{x{d0zTw6JQU)r;^5O>Yj zw}j&rEwS+Ccr@Iyo&8e*@-o-et#`GA>suQ_vY5S9gz~Js>edW7d^)RYh~6HGS2i{2 z!Ip-m5PQjWM?4U1jfd*vcZZ|#M6hW?sJS(|gRzxVKWOiW-yMp^@Ql$NiRP_HG1f{_ z*0m&>Ls8u7uWAi8gc>%qHYA!tF*cVpxHA!o?pW8-7LK;IG>2N^cL$^4;8xUjKXKlM zr)<`>#G^ZIYmHVkG(e+Ou_Xu>xdZ={q5RdLh;%D2sg8 z#U9EG(QJ0-%0ytz&B^8_txb%%wF{Ol@h#9oEg0o+0BaZARejsy@&#Hfj^P_@YHbOv zU9ck*TX6GD#WU6fW3f>4)}|dA1hmA~E=WXMR>kTYL(Re1;^uIDv^CbcExx$EwRu%A z*1V)`*#fOO*b?3rim4GUYJ(_ETN95aV)6AY+ghcxd1sNfVjkxMXDn2oh=$`kTA(d8<*Zqnzm_#ivvgXs)NCBs*DO`I;lk_%C*dIAAixpm!QKpsz{#4E zJtvXqWenSRHrPAV>ptK=s2^-UIPhrd(XmH;hszH~4);CQ|CsTZ*5~c3>)U^%_sHoZ zrN_#S1&(zd?>RnroSkr=@SjMYY(F_bviczx(P-~jukS$P!N|eBgTs#+k7|c%4%ZzX zdhGOLdS9TgrmwLtb!70!(2?OI<;VQT^kX&0`i~DBPaPjR?mgi 
zSmUwCvE;G#W2cV~A0Iu=o^U@AI8k$=@kH`O`-#DmLnnt%j-DJl$%Yui(F5}z&<_k8 zOdT9Nc>194Q28PMp}?WO!~KT`4i6sI9``=(d%XN{|Kt1ndizR`mL1iO`i^!U>p8an zSl=;{T=oP>?K#0EV)H6=cK0?Oh#Y7?FnrKBIC^mGAbYgtP~D-%Ly<$tLqms8A09qz z93DNaKOT6z?(tOLVBb*R>AvB<@}vHv`q7%B{l^B5rH&0A8#?BF!uLe^6aFXkCwfox poftU5MscP6#nvPUZ@0LD~l-<`hcc2X@i2uZQ6pNuaKte+cY`J@w z59lxO4|Guwp(_zw7;xcI)Qt$DRHdG|cjo3MchV--4ov3Ex!=s0bIx~W(sMUg$*g0HKoYv~4 zP1;ixyQ!MKm0KaA6VkGX2OqUsxZmEoH-5~Nn% zFwR{^8k&wXcc-4Y0Hvcsg?-yIjY|_O-N# z3WaqM1_dx&SQ8qQ1O0zk}L>r^x;uB-k3o+p%V-(x>9GTKi*1PElEuX7S;};)E zG*slP*GIoTl7W-rcS1#u@lY8R%_`E~6eyx1M}?kM{s%GPBg36zoZE?UA04F)x^^0E zi>!2`0<5gH-A7NJe(pCs_cp%J{3UMsr}9ttuFS4CHT#X(bf@17z9LWZ8OIWiEGoBPB#Z*>e2+;h!H-vh0;w K7XRjC=Klf_eUb|~$szu3h-r?m0UK6Y($Z*CuIKN7aP?+-9qY&+fpXa5GqjfKChjq$0wEb>{`#hrwYP2b~*Sd>`dXp zLVc=Gp3Vl*I2yn{`G?iPItXj6DZ*T8UY{ZqlaPnM8dHQY_z>TM;ccT`gYMc<+80UA z?GoC*_SbkLTr}5V{0-o`$W!f*fVG#})}zpbttfG?a+t#+-c5MmXJ}|dDMfh*CGJ%& z2wWW1yghyo-rRTNZ5CA9`xMo^ zHVJQkBFa1Oz}x)0@HXzR@K%W~4DNrXH+!(JvQ^Xt#AwhGtqj_=HkYAM(90SK$A4D0 zd7^wdjlOBliBZy7@VAfsXteo(-gCPbr06?#?tC78?JuDVLHfdr5RS0rbs^)_SEyl6 zyH8_k52d%!#b^{k&)=sDidu$H7qCQvc74De4vKoeoD#r$b6Ak31ANdP*_+yuf^YuQ zDFJrvKa&!$M?ycw@R4vc`dWXpR`A{JZ`KLE3FpKJO*AwfUk30BijmNxtZ953O(JCR zH|vG1B8cwQwpa!rGsZ_76m?W_HE7qd3ZPanW}L7LWSp>HHNPh5GERv2Zn--DbVw8W zfvJ5YU?T{b^rfgGG$ANP{p}C}>CSA?dnW-)nmxK#)U~9B-bqNA#mm~0x@Od@UW)weuNYGZl^|9_^+(968BV;9qVLf(m{#N?kz zh$I~eSsI6T{nl9-r$iWuGzp^8SIRXIJZx~YQC`Qt^^%T)Q;BoH-cQ4~E(b=_8-Moc zTI>TDJ>ZW_cuJRX$}-?TZ46lYLK8Di*$4b*@&ooh=%CLkPvNB0VF6JaI)VQBJ+Bg5 zyWS>$0489|COTj24~XHQC2Y22d1uARCJTKtZSs$J-=G*~DhY?WR708&v_zOm!gRy0 zUoNyqyd?_B$ljKxAW1SBHkbMKNKoug6H!lMY}re#L3@81Y+JnA!dxeG?1qJ#NZ{A0 zgFXi5=1C3BV@UT}v@Hw~he_tm@Ulk5)f0O$=t004r?imjB6vo|sZ^+SR?vdFDexGy zm|yp0#yb&{pDuKs^+{o4CeA;dnEVB1Kkzi2RsQh9fdOcXYYCxDX9H)e{1&U$A>zQb zqs)Wh+ANs)o1?=Vo|^ZHk&z_~ew8pEuv^OGVvDHblY!0Y%$?F*bai5iO`gfyGfuU` zsC4GjzI4C|-G!M#2Fz!*%qkFkfa2vFj0E8SP)wi?#ssu;cL&bdkuk zn0_cUTIEL&PMLlpV2%Kd_+^hW-cd3HaK^PwKFqpNyeGj(_agpUF$dnLXN{yxNHdBE 
zdh$AQBYm-WN6XDjMemp>q8T^J+hQO>GDwcBc|JQnpNAO9=_4)}z2Bf{K+DNQwr2DO z*tK3@tKL)Kf5S@$0vG}pEgeQ$I;`?*JkUV1w2V~BFR9m^=09M0LF9wI(ejc-0S|$E zGU~3v=@41;Ldk3fr`(8UprcXgsxo?%u?@VdGDvZ%3SjQKW_zv|6x9z-A!I zyx*mv%}hQM5p8Df`3ti_75{ZMGng}*uYs>mn2}X}nOiYaZhCJY9OIlA27Q6;aQ3{K zvHJq*;$moe@5`uwC#BiL*r135SVNfOhq-=y>AlaO11ZiQnJwK8tqxSm(nkzv?~~UX z`#3suh*u2f#C}52A%5iU5U=9%FAB-|(getL!2hAaH)pSXpv8U_-j*hY6$=bA4+Iu* zfV`@AGQ&ds7y4#{3I*Vik#{3ta^yzpeZ>e*O|yjePQfA^TMCK&o>xKpUs-Rg!XITf z;LPt9lOda*`RxEYz1FUIOgbw;(yj6NE5M#XF(&B*J!54%VI8BM^h$L1`!G8(Y- z1noUG`M3YU#eG|hNh37D#I~Kd5oVw7Z)r!@jul29wCIDC(SW_1&!W*c-Gx)9UVfI- z@i+GizT2P@97;$#MwJw35~)TBYmqz=jkL{_>HLdz-wM@V!Rlha3fq+t^tvnMON?M< z$j1k|2@DxZweKNd@SoNMfCRHy8V!hj9az@<&3!`iWPDo~wDkS?7qs(10I*+0AQ(q` z2-+<66O$`8#FMCX^F;K6y(xTX=QCiF|MX`rk2zNc2I3d_PZ2pr2`XdQMDb(T`KOqj zf1)G+cK+ciy}^(Hdb|2sU`6M|LwKh}ASGkZI2;#U8eF&T*?x6k!0Kuw9bZ`7n7JkT(!hC2rHdM-E%g>hcaMA;lh&9Y0O9gS1 zO_Wj6Vts%~_7ef3#9|4vV$}AblGXNc`hD21l9dA5Q$wJ&5y21o<7P!iX>Q|us5}U5 zh68B;!g~gt))=L7X5u)j^$X zkiC;mDOk@T_q@Tg@$RWiNA1v&LMiJBP+HF@DZ;s+g1W##U0|TjtMt!v`sZ8h(IEnv zON7?MLprMtIm?Dv&JGG>RI@23NYOWC2fsiT-q_KIy-F=#S%D0Za>ZLa+^@_E*#Kewj|NrPjkIVbLHXCJ#SvX7QN_OVavp|<_g*vA2)4@0By(G$xAuGfOy2;d_>6D{S11#axp3>9Dc%?SPu;&xH(1sO&^Wf!kebZ^tp(C$rB}mKn@If8vvp_K=G!K1K zeRsHyQd8^sG%B;_0W#ci#;9yZWX9DLbyr3WyP9JKt&v z!?`22&BA73V5-iFM3aa_Q@_ykW$ZR_AThmK6R^lH(W+h(uwQQ7(}ReHP1t4u`5uG| z*(TfXBf8PxN)1SbZM+M@Ml?8O;0^39GETHZRO3<6u|!L$)sJuXOg>rc=q1CCu-)sx zDo)&8>%S=IHw&^nQxK37p*(5R6gYi9U+O;-?Qs2^9~iU=;MvtzfSVz7Kthmtfzo)s z@uv3Pw-7Vbl@1c@(C8s3Ed+JZCL4g!)LsLaFKTiAfMEZl_3NG3d*U3qS!fk6T1g%9 zzzhn>%$!~?}tC`P>#b~Q&yVIIl`tY#YQ zuEF*h3VerYwFkO*{cWaL4ulj-447hDEi&+S+vFw?Cz_CW$MTc`fvr`a$=;ah)8KE; z6eOf~GtW_^g`9Mi^5)->`{D2bhwT%XNNFttdYl%J&h)T$9F>`sviALGXQZ@23sP+T z0c70JfwGt|-Jl%_k8s-3;(&;0_xMj|1uWe!(TU~sJ22aB^h&GHuV*Ruh`&gXmLe77 z-ww4F!op$DX_ZL6B6kz@7YLO1f?1pG{X8=RlSFPBXrrNDv;1c!=!8yOCtGv4 zS)#B#cv34eP_iH0T5)n}9TpRqW2rp*uWJJge*tU_%+FWKb3he~HA|O7pjw>85OmTi zivy$_+25W8<-ABb!M&3p=_$f>g0phJ|49S31(aIC;gMQa8lXWXeo=N!Ioq?=uz6*c 
zcRlKuW+Q33{Q=9Rp#2hz2f4Zb*~4NxT-A)@A%ie0V800a2wE=I1T&XFKF~DhGA&@~ z%6NgUyND}M>I{mV$j0saZ1k(c9O!VYs98y*lNwrald2V=RMt%=)5gMu+#;+y8bF!} zmN^;FcfCUo3XnTvrxd=9MwmY6{*qO`hsl6UmSto$Iqji*GV03QUGE42Sm8tvLGWN_ z5O!Q+nJv@~JAjSGl<_mPiakRNm$`Ivdhh=Mb=N)#+oh&=6>pa*;7~e1HFCA@9@4v| zhb6nZ*&VU4S&T|gz-`G7qtYhw0VG=Cs?p$5%nN}bLScn!`U903TFpp)q}70yOapmo zBxl3HN@Pg-X*e|~qbQ>n4J_4=NbF$@XT|}+Uq`~D`!w^%Vo23xplR=c?pToHK`g!Z z9y9@=RenlsVNQ-@QW{#YccmK(bVgl(;dD6c%f*+Vz0c-aP6Y2@!)UWhXBAjV1d)$? zBjP`U=tWKz5HG^WAxA}#INEBljRjG5{3N11dJ?wS6CjI^&bM^I&sfa3pvH`!w6>n5 z_21Ho_21s@KQjwq=ibW347JU^>mmPd?g)OP^-KP_r75Eu^IakGyp-A}V%QtA2nG{FaG_gi!AjcDpn-1r;4T2}tv<(zjW+Li%<f-PFCgqfHrjZW)1D$wUu%@JPR^mB0|K|1+1X&b=vVLFtRM9h1{}$Yy39CJjXRoR%RP zVFsbw!4KyV@U+SI^SVKAi*#rv_9Bjh%%HPH0-A}4mz0{CuDBSC4_);kr) zqL6syB>2w!#YpmGJJ?zl{^|Y?;%=q<6Mp+fi8&d29-xIorXUJc%00hg^28?Qu?}os zJH)pKI>dME9pbzA-h+eDZ}Isau79WNfHpeBUQ35~S=%9o9@|VY6}t;$+`qx7P&Nt% zg8P#o{N)(iUdjZz`dQe9>v&=uH*|=dp!hmCz5xzz;(Hf3{uI|g^L2{Jeg+JYVWkk_r`Ii-ScGJyymxoFyWBH6(zQG6!^<8)rnSgV9(eg z{vK@J2k8&+y&r6GlUDo#$o~q}pmeIR|-4?U>+!&auc{z|!$%LRwLemf3%aH`K{zm}

K+)gTjNQ-T0!chd(Skj4 z6Jq9`_Ke+M<$Z!vKna-)S~e6!OIS2f(<0J(Mkz~>@DvjEn^|;@m=e${z|JjCp)8BA z#SWb0&HaH2y@5w7kfm7P&jJ_0j`4E22(IlS!JXFo6d2xW17-FGd19K~o7b_m;hR5a z-lybLeAqbS{MJva8MlponjG!1@hshO!v8fsz}1cm(u};3S70~exa|EO^KV!`hoFTjmdn##01=Q61d*!QAD0% zn}PGwppQJJ4u{-)8VnOcoscx$)gxk`UO;UA*njASCVIX@(=~Juxkc0?AZT0H zrc6_kG_-9axlrh2!9i=b>sZaN2_=rtzB}QXv}=UOD3<5~m0Aq=H#9?7Na`U{!&x={ z-tjm^!A$W)E;1xYl9ux@gLKM2c9Eq}(r-G80~(SeJldu4!CnrtRvo5Z#~4lqL96_w zKQZeHEk<36?=~7xi5+5C*&*t&Yoa43J?MuME7aK!qSy}H%WloW0g;N(1(s@C*+z#p zKos);W&#Iu6U~sxADdUjT$)LeXSMKbwk6C~bUlwcz%?G{&CHE_nXY0IN$ZdT z38B0B0G4XNHBU8=$-<9s$d$N1JeP|oZWgsnyX`a_iTz1b$zZJq1VG zwWU!|DIY?UoP$VQ=FjXZf$g*LlEF;`mya_4!3dzP$do1b6(Jz7g?+DpXBH_Vq7P>7 zvbk!vL0zxBPU?cj)oS^5R)hRXIP?M=KgG41dLHNX$VNWE?E@L?x1dWJjWyJ@vjCl5 z;N;j%PWA*2EWnb9Ae-SjN<>;Yk&u$qyk0tMaH1o8!~83;n!!D=k1XTNm5djqf}*{B zuohP>43o}s^3$Dcw(#azt|nBvTBwDTm&V6hwF}ixId~e*qb{cq|S_rLz#VTL?V~it-rG%yHa$TK_1CBWwPJ-WE zK@ij<9%C);M!Q_X+H@W1w9iSQaCZG1#R#HISk2?XwT9)kaG$`_me?;!O^T1e5BS-J z9nl?w5wlYM_W$FChU@^&#T1tjpfg$E*4SJJY4vSXx=!=Cvt>w+JR}IWE^E5 zjiaOQ-SR0@MwlNK`x>?x)mY`n_!m>VdOYUsE7bOq#O+^H+wV!-K2B}Fg6skI9mDCr z$E$HZO5EO|w*QcSQPHvU1Mq)WZGSfL_^+z%&HPKwu=&-f?T;lMzd~&z;EBJj&qWSY`%s+8^huZ$b#O>cz+n-I)UP%cVT9!N6246G8L%4loN5PV6bo&K+ z*G;Tr!bI@U{$!H$D--N4zre;&F6)dY?p{Cj$KjtI`$b}=o+IGfPX1KG-Mng0qeI;g z;|GleO*e4O_|Y#MtnyX1 zgYk=_r@H}q+UwEFg4@x-of50cMm5alo0)jNkXH|*nb4kwxB>9q>TBA*$ZL7libn>% zSDC@9LidGc_)ChhybIDD>Q;wrb6_e}DvmtTlNOi{9j1Kaj-`ZFhE_0MGXG%-=Y^yo z_C;UkRp`XHl_%mxmBbB;$4K0id=_r8>5)S^%J%t~+_pG|8M~jRYU~@{(Mx#rgxiuF zW{;s$VwBwy%4Z$4()|qZU-9wCFd;mfEt%e|pgnUwozv(5mj)X0pAcP5Mk7`%*uE@Teft{RVA#+ioZgl79oma{qEr6N+TdtfRxa4A!8*T)5S z)u}$lC-Vp9(gU*v+~gmatzqw4_D+xG49wQE_e}Pl#oni}cLUx(2CHP(qy&-@NJ=0n zfusbI5=crQDS@N}k`hQtASr>Q1dhmDQD4RC9a4j*s=A@x zIkd)I@0O@W9aUBB_BuAyvR<(|XZ;9ORg%+FM<|l78*~B)H!l3LyWW(u&gQIjI=s$n zJ3{e|)yy`yUELV6WUr(Iu9bjsyKrq}DKbbN$3|z7Az_=-<7x1W(6-Itsdv|JqSnMO z%PPx~ii*|Cs+O->vt((7SY;EJud<2D)}s40ZL1#=ZMId*MYLs`*V^o&`4MrAdG&+w zeZ>cdlM#&O6-z4Ch~@`Zn^&(`5l0u~mabkyR;^w2s9068ddc#t)wboL 
zZ4`diC2NLSBzFK}&h z*H<@e^BT4yR=6ALQTwECkx~e^8Z}txkGo#jfDs-y{#=*Vgu#9bz0Qq353g0nDs>C1 zof~|chWC%FjP>{MNTl+tieFBg^^#`?em~=KdYp!B^v3MNYpC{lUiw9o`=5|#Y(0XS35k_2EN!D z@PEc?cS5^0dr|g0!jqt175?qGDz5hog5dKKe~)vMuh!vFh~8i0aXJ^p5hH@1(^tI& z*(r)A2@*`&Qb6YJCmj;h0b6UR_SHIJzlH`+wbN5Hq>r;&@YOGj4Z)=_n;H883|L^; zfEBLDVB9_%aVVbGNaBF8kw9TFUdL7^{#<%TLwz;b6=b{JUFWMaZ1&YP8ocf&K^A^v zD5ZBnsB&&^gk}ZogEq455*Sp~Q8)ZLjqQGjCbI^b$lTeD^UQ|9hD}b%u+8I^oQ6hh zh}>TC&A4%%1{l}|$Hpxq+v5!IQ+Zd>tmCc;i`o{$#XG4OW1oB;3I&W)=cRo5HhLtY z&J+b&gH!N0y$!Vx+05DA8MD1J$vD}zkHi`Cz9G?#NyOva3KushTTF64?%&LhViac@ z91`Rl)#cF%bAya7dfRvVnvBl|K(xhe-|(sbIF3qJd&> zn8-{)oiC~dugh6mtL%#TK$;ut7K@v)-80#=!RvI^c?mK8q&&_<4UQV@d%|a+b{#HoBY}w-BV7 z29F^&qj(+FAo{B!6@-LKz8Y|B{j0MjG=;>sx^1 z;3!!3-$E=$faCZmSdQI7EbYM2ISQ7}TZrWnaP*CW#klm=*-IEWG~XDRpZ*qN(F4b< zQLtEVA(mX=SUC!o{kIUy!@wbpf~D~mV%Y~AM@GSN>=t5a1CGv7uyo!+EIowdnUVdd z{}y7&1POB@2bx`fs6q497AT7>p>z3Kr`a%Ebx{>rh+@ zmhv&g(gX~5x-f$ zk&9BU;2FzUw-y-Iqqr0-V;SqV149eSA(Z`yx$R@<)5n3~EXqZcc3|lrLoEHk5Jk~F zE9e`6ngzaVFgR0g2jbv_6wbx$Ttyb34B`= zEZqv0!?i^7wJ3*4E@OygH!$oc94OtuQa*-QjswGa6cXIdF~mXqyMZHu zlKC8D11z0m80&I@VI_(cg|Lm~o?$&ONGL5R>mir1T&E8MLmNsLN;|N0kD-pjz@Y!O zqMtFgT_bQ*pwy!1A(Q?wl*<8NIF51=rJQgm=Mu8x|H?1^%ou<4rV39vkSy3^WXjVx z)XpEd;;peu^rN=;k+Dmq&EsTGr?7!&Q!e>9dekb8A6+tf%40MP3C1W-&+sum_JDL1 zJshOcjH}{(>Ydx@c`uizVVl|~u8Q|@d(l+oaylA;X%iluQGkHoHBBUsI2qvSs!jFI zYCZt{711cw!{!~|d2s%CDxA^abk#oUV@3)T(!ubiL^vGMAoihHsvMGvUDbn1P#ttF zCO(MftM+ZEsqs3c*i%r9+sMtZ9pxb|^ijVG>VsXVQ@`T5Bi<2p>Q^>D6|4GuJ2MPS z1pQ(*!fZ$BpjrsphuDg86?;xTHUmye6*uDEH3`6y1QVj%la z-x2E+U8*#d;Gr7xHF8m7)Uf`lh|pgR z2Wk%%)P`V*1t=~^HEzW4$J%3}i1*WLS`*XeV`F+y{Iz<+Chi~c*D7LaB)wx2v012f z3c1+$_-oQ_74OK##$PLb7#lb8wF-6c`PF!uF`ta%6JMv=0QW)GiD4P>dYInhQ1dcgBU32PLbgLBhXqJmmIBK1Z#^S=T_ngEkO<^nFlzi2s87 zL0{bld=rExh>BS6t8;pg_Nl;cX`R&;_RCqXpd$p8KBs4gSijZnX{fKmZ!RBlc--u7 zIlMG-1ya=_{nmO#gJ(%~HGcL7n(^78c*!QeCnfMnB#052uh&_(0e^G=0`=aKIX+K)krzKHu5)-7*10!&8oUiP(n9>? 
zxX9tHTeLNM4t^0|@2+ur)kws-ft0~eEO~rhX;pnqgFC@fC~Ji9RJ5a?#dolOz+{?Ki<__de&hzDc zB`=uwblx+0|CINgyg5bXMK2b8uXuOK-V$%=&r5GFTUYkK%HAylQ3KWmd=wbJV?1FL zvR7sQ_w1i!|0a80PEF31obTkkkaH^Mm7EJXZ|3|m=MOo0(+txb(=yW*(+<<$o1O*l zADGUX-ZZ^sx@?m2cIW*|-uT6Tzu25Vr|6qS-Nk<_)|Ff?nOB-qYA*e0>90%wPzpv^ zpvv*_{p|a4zn-hhv*tgMKal@#1s4js3&g@_3STq7Rh(Z^QL?Y}MCq?e|5CcOELgU` z?0=V?Eqk?0F1u1jgF!+_HO@Ewweb;SR(3&wrQo{-FBd#h`29j%(d|W9MbnFB75%p& zW6|QG;v$+H$?jOe@q*5R;==O6b%o~(FBHm!Yl|K(l8U;EdWr;dt+~;>&m1vF%~{2} zi(8716zfYeON=FlN)DHtESX!npmb&F@zS=^uF~?d^P> z^H%0{=5^(*TfBa;ytr?1|KiAEDSvx@Q~vIJp+HlhEzlL{3-%T4FE~(eu;5TZR^hZl zL*cB#xrIjxj}{&)Y%4rjXe`RbJeL=(EIL`#UUa^wv#6_RrMbe~Wxi;>WbQGqFRm_j z6*m_5755j1i=)NcOPWe{mh3KRDbbW_OZS%^C_PwuxO7^Xp=?&!g0iD!&zBu1yV%b+ z61m0#W4W>2c-DBqSdnebemMJL_9fU~Pqv(0o#V=>&1uY$a{6<^Igy-bj$mps?KHKR zw7I%meQsuMR_=k^gSm%tkK`KiX64PzTaahWdp_@2-toM)ypwqai;EX8Uwn4)`NbC& ecP{Q)Y|USr|8V}g{Pp?ZpL``Hkdy$E!2btt@imG7 literal 0 HcmV?d00001 diff --git a/yass/third_party/mimalloc/bin/mimalloc-redirect32.lib b/yass/third_party/mimalloc/bin/mimalloc-redirect32.lib new file mode 100644 index 0000000000000000000000000000000000000000..87f19b8ec0f7ae1024ff508a738b517c71f11aad GIT binary patch literal 2928 zcmcImO-~b16g|^{QYspvM%`#iBvFDu=~n_VrYIN*1)5S4HfAWDDx)n!hN>hi`~!vs z8-IuXf^O8UJ9Xj0jXPNpHj?1IGxOT%%v9O{FL_`0-S^IW_ntd<2JRP(;`)Q|Sfsg( zj5p84ENTzp6DfPN8U}C?kPm@i7UAatZCv-8=-Twa-; zS$dS4&E^+#N=2&_%4NMcVw65*m7!+k;Ig{}!nM{b!!D=`o$yn)oe0W=;7$!C_Ax72G+NiKVMF_(AoLEg)_71M} z6PdwhbGd{8?CxBi*NTR|r9Z1i?&gLfnc0OoYq`1YqbYCX13-l5Q9rfa=BNcC(nY

Y=Y)Dvsj~l!2>A;LV<1T;~ z)Sfyt+ahbYQ)b0dv3efKNCB+8et-PQyfk0kpINH8)My;ciVG<->w}osJu~av8PksM zZ__zFDC1-B>F-|zU=hLJg8zxlq%a!Om1EOc9I=@Ya@PAPCU$>rmTt$o zLv)F@$l9r;Q*oQB5&q}lz|KgCslVTb%~yw9soPA)t-cT)$B7^Im#^2pGqyzEVg8kI z<)^d5PI})xo|>$U5H|k_Ft#TVo*lX)p9!nK(I-FiM^AR6+ZneNc*h=}@V@+|5v+|9 Oc9%c4mDyWeAp8$DwV4nA literal 0 HcmV?d00001 diff --git a/yass/third_party/mimalloc/bin/readme.md b/yass/third_party/mimalloc/bin/readme.md new file mode 100644 index 0000000000..9b121bda59 --- /dev/null +++ b/yass/third_party/mimalloc/bin/readme.md @@ -0,0 +1,71 @@ +# Windows Override + +Dynamically overriding on mimalloc on Windows +is robust and has the particular advantage to be able to redirect all malloc/free calls that go through +the (dynamic) C runtime allocator, including those from other DLL's or libraries. +As it intercepts all allocation calls on a low level, it can be used reliably +on large programs that include other 3rd party components. +There are four requirements to make the overriding work robustly: + +1. Use the C-runtime library as a DLL (using the `/MD` or `/MDd` switch). + +2. Link your program explicitly with `mimalloc-override.dll` library. + To ensure the `mimalloc-override.dll` is loaded at run-time it is easiest to insert some + call to the mimalloc API in the `main` function, like `mi_version()` + (or use the `/INCLUDE:mi_version` switch on the linker). See the `mimalloc-override-test` project + for an example on how to use this. + +3. The `mimalloc-redirect.dll` (or `mimalloc-redirect32.dll`) must be put + in the same folder as the main `mimalloc-override.dll` at runtime (as it is a dependency of that DLL). + The redirection DLL ensures that all calls to the C runtime malloc API get redirected to + mimalloc functions (which reside in `mimalloc-override.dll`). + +4. Ensure the `mimalloc-override.dll` comes as early as possible in the import + list of the final executable (so it can intercept all potential allocations). 
+ +For best performance on Windows with C++, it +is also recommended to also override the `new`/`delete` operations (by including +[`mimalloc-new-delete.h`](../include/mimalloc-new-delete.h) +a single(!) source file in your project). + +The environment variable `MIMALLOC_DISABLE_REDIRECT=1` can be used to disable dynamic +overriding at run-time. Use `MIMALLOC_VERBOSE=1` to check if mimalloc was successfully redirected. + +## Minject + +We cannot always re-link an executable with `mimalloc-override.dll`, and similarly, we cannot always +ensure the the DLL comes first in the import table of the final executable. +In many cases though we can patch existing executables without any recompilation +if they are linked with the dynamic C runtime (`ucrtbase.dll`) -- just put the `mimalloc-override.dll` +into the import table (and put `mimalloc-redirect.dll` in the same folder) +Such patching can be done for example with [CFF Explorer](https://ntcore.com/?page_id=388). + +The `minject` program can also do this from the command line, use `minject --help` for options: + +``` +> minject --help + +minject: + Injects the mimalloc dll into the import table of a 64-bit executable, + and/or ensures that it comes first in het import table. + +usage: + > minject [options] + +options: + -h --help show this help + -v --verbose be verbose + -l --list only list imported modules + -i --inplace update the exe in-place (make sure there is a backup!) + -f --force always overwrite without prompting + --postfix=

use

x9bx{#wOF;}`2-SsF8m`>oKd;RtZjLCwq|vD!E$~=*fz9}wmiBxMJ_y41+%jr zv$Pt4C;H4~3B3)N8A!~oLuZMi7Lg~?rw$*~D#AvsBV`-2a( zG5nlbGlPvMqRvTUcRs04rHvcEK+`s04MkNw*){T5*Hy)Oy)(^U4^(&92lT{O0VdYs zv^wv=O{X2a{pBW`vc>K!lWV$ZQbICg@=Z(Ra$ekZ7R?n@PlR1$o{Uh`05M`haRKf7cMRtl_%8{H?c6Ei{BF?$p z+|Tqz18HhZY6S=hlaqOM-%vqR6q#N}BHnunS5ge@)yD3WgA~-tGD7eXGpp!)Vlj8r zMPj*J(Y0&V0haRxF7<@%nzNVgGdu1vuAu1yX}uu;sV+Er_D!6>@-ndxJbdw0>e+^R zUb1}lIV!+iTwsn?Y%Vs;Vugv8*=j}fP*;-uMv3UrinmD!J`sGPZ+b$26oljq7(+-x zSKVW?`h5D;e?YkS1nKPGkWPPr7|yVga2CWD_~g}BxK&GVesq^USftw!yAFwgK17V& z6-FFN`tA6ly#+5R50gWO~cUC3JVm3y1XKQd0fTi}QKchfUWuR0vJ z6Sl(|lyQwGGZF2>f|+Xa!&h%~S!|(pAKu< z27z=r>Tpz2Cf3jXSYGCV8|Hl)gLuqT>ltjm!f}PiGL8Q`@K9L-A3SFGdw>R9h9n37 zkfZF^2mj!4_Yru97g)G_s9MI%ryR+z!Zeo zM}lNBT~|Vk7^nGU=joWt+`_WB_iO0S zU&No?#{{Jy5)2d$T+Z#ZnpTm-=RI`i5{W5kB7RD2H*00|pro1|Q@V-*p}RVh4z}CX zGa8VA1Dg0DYv*#iZQ#OfwBbW=-|*S)KKl;nsX$7Hc8ykU*jX?bIsX5RO{#TuAbmA=P=th8579do4a-96&`;_(0| z?#65~bk`+^PCpxuc?}rtdSNw;VGg6dVpiXV^n# zM#SnrR%Gts>~rtAK))BC&%I~Lof#P!8L`%H{nl@(cNCc>lg>ig(+6QiC3vhq%Q=)_ zw{q$;C)Z=F$L7+Jxw18+>1HHAQ%78>8%1XvbepCoP-7s3K+{B0&;>Qdpmu^Wq&T$* zI^}j8qSQGshKSMX!{bd>*ZNrFvC`lRhwrab8=*7N_4JB0LX|MiGHw#( z`hJRDk1$x#dPUObsgBe?=a2QHpK-O;|Bx$N=YR_ z>q!~IxM}d-QS>aKju`I--yh|OX__ubo`VLEl6^VcsSaJkg-W$#$}UVe8bO{{r5 zdim9rHIllG$(S!O$74zBw7nO)7_Gi^(J4!6F7~kG${km4hl;@RHXlov!kN%-Zm|Bs@1%0;ZNtWxMpYA>vH=f<@X-fHK9-AghGi6w1ehlWvL?q%eE6*Bp+Iu~l zW?P6SlMA|-UH7`R&pY=cMMNAXO*I>5V@#41(Fma-)(K@2W^X4c^xg>rb+K$ZB1}pO z)fq3isOK}$;cbC4hPnzgsiCw5CUv3~&8TWZVp0nsC46CNlIP^fQ`nXLtayW@A-$C~LOMXK9mFxXF;;3~ zt?sWN`TIzkY!kK)L#(mJkxbLA56PJP7%apI)z*pbskCKnl#62_3UORxivfPHjxG8S zMohLgX~zExl3qJ2km;iO<+f?4Q+ML&YrVJxw{e8ipKs<>>-yWutEOz=Djl?yvdQYh z8A6b(5IX-N%fh+yPc40VrlpzPt_=V?Gc7FG(zg|_mg&ZqkvMhBTjN`2#ZE8N11LVsoP_dmYBva7aKC{+(cSfGv(^KR%VZ%*)&p8IhJR~Xh-`b@-9kAyG|h)V=BA=c0@ zEvDyien{Gm`14lTchTic#tkQq+|SmeVO(3%q$W)^*uOr&CQnt5*<_O<3QP+8{uNRX z7!1G`7?VH(k`v665$YJU+YaVt>AC1jiAk)Mea<=OT-NrjkrIqX+Z;P_f@)Ifyp!cGXqzvg zisB|GuqRdr11>mlE=5@|91n>>*#g$?Pq&5);NI;)6ZBP0PlSsht$bA8 
z+gsQ)HA*^PU=OlbvmQ)p4WilS&?;K*5|5g~>{-w>L2s~5J!xo0oB1NZ>ANw#|I_}V zwekcgF-|=N$Or|Fh*oed>VQ}cQz)(7VU3UoNF;AWR%zeu<{fA9kenmr*D%(hE9GMH zJChs@?X8nEL+JSdjYPJ$H`%vufVZA{oY)LQSnE?J==CaG5XJ%eg~c{CF$hgE zh$#p(R4FnEkx?zwjk4$rhRLwuY(m&3rZG0EW}~rfxG$Fq zPY^>*9O-97Na$+T6V9_xB2zo8o?9>6UL<2UZwoT8z1`^%Cd(bgu3D5*L{L(-}1NIaQzo(nmCtF zwBs~PJqZzb^rOF#XFv0qU|~ESQIrJ+sGESXmQyEA@xm9rh+A&C`3xQG{1rqRuDId~ zUhsk!FenEYX9$fh3dRZB!!17Yv5)dsZ~Plh9NSpRj_wqjb3iCbnkv?`$0R5U$J2iN zhxw{&uHiSI|6-0EJ4ymV-Bxit{bTMRFhGpq{BzIc<-h*|-uRYldB=O+w<9_Fq6;r% zeRUo0T@Eu(V-7ooMW@Z8@*fvobP+%H!#~2^ci*i#Va8&dstnwI#~u9Do37@x5=#KkbjI}UBW;02^B>uy9K8bJnrf=a7Ui4z_ zy7$oBeYwK%@Bh#5qG@XW^4h-v+A6!VHJKJ#?t4es2YUTJ-}xO+;ya%71g^O9N_u5U zRoC2m=sw>3mTUR&M?TK>_V!ZqZnw0C4&~~O17?&kW`kqX1jX6l)sAmEuXAN(z{Y4Z z&qtHXb)-FSO0&&5i*=r&98fp43c?Ctb54x4n8snEdZl7?Gx5&0W@#7ql>{Nhykjg~ zpa1|M07*naRQM-~LaWEd5HYqjVhmzqhkV#HeN64`1OXc%1+XN7ZE#J@-gTmAB7#Bc z2{9(TjMU?IkQ%5X1VF*9Hu|hT^iDPSQ^(blFeYSJ|rnrwI^i6r9~r9_HQtgiqgq&kWaiB&^t zHjri;kp?8N2&z)h%siWy)iR4jWsyyXhI^z2&Pn{V7!TJ zZz4?%g(Jni(igtOq}Q*Ek2GzAUdaoe|2(dI;1#^(+PCtl&whroEP2S4SMYt`{oOqL zs;l{xXZ<>N-gVEO`UYwN-Fx4?{PTZ*2ZBnj|HM$iaRLPZkKfb*NYP#R-MF|xkC z#(5R-^j@mCveyPP4^Pc>gVm~-7Kq8U~SOnXMg6W`RSkeN&fW%@8k8ae?2Ep zo@C#FgFNc%AITrQ=tcbTpZ+H0G361B(7Y)tKnIfpjq)dn?w zT1K@m`Z%u%)pThMXP?w+P*lQML)q^!Sl>r7x$Hp_CZkFhK_8G9FwF!>0c^^#Qay{H zkf^l|mlsX-V6E0zG7!aL+jw2A(5;baHkgsl*;m~=I@>Qx)Wq)RHft)O8;;3eQ={j~ zqF96_tKRHV+g{TEUW8ONtPIvLQKfxOY*eKEC53P37bC1SxW*8JCxsrSfN>KUHi_-+ zicuA`o>7yi>#PBZ)-P(3QT;mu&18u2k<=JMJ;5|v*c5O^wEocy6fMlNNKG#1Zdyb) zvysGFLZk6x7UH}{^`uA%n!3&#SVOgWglZ!(xZnT}did9J?s*q*^Z$4+_kZz@JFhX&);|}?MvV+y!JJ3;KBp%Vr?|uKfkw)-^ z!(bS^;#EB7SAU5YJnz47&pn6u)Mq}cLo%y^$#gU7;%1=b3Uc?$(jg%bL+o~jMbX|C zR-26-sZro4jpxMCV|@KLd@VoylTYPkFMl~it>I!3ya8`D)+Z8hZNm1#9nYS--K?B* z{N&S~!qb2Lr}*)w{1hMf&_^}!p2cKe`^Go(xVQWr`wtu-z4GBkCq*1OTzXA*E7 zjHIIy4G2OPEz1Bz1zK;&Sgb@$0*}!+k@u{Y4(mOU0MalTH#Bu*y&SMwT1uBR9#to- z2+l;d>xNoX=W$>i>%BhxwLY%z7*2*9-`rw6*1S83V8t`349)%?!yv@XAuDAM34w78 
z;0v~!n$67#8yiEmMl}?k5F$;Tv_fmLj2dX7!xaUwiu4OltR2ZjsE0tK@#<(WCLtzi z-7}{Mfk{f5O5_-+F{-mymeipp3bBeTP2|+6Eecm?Kz}@% z8mF_1xS9Dk_ji>Fdf+9O^XSJunwP!e75v+0Kf|{?@!NUKBOl3a_Z(4}%kF*bXB99u zM?+rv%0K4E|KpGIYtQ&Oe&ct3j}sePglfX6QyMq0-NHCyLBSkLZl>2cP9ifNPHSRm z%t)*z)V}VGf6cf5yC?93Cp>{qede>f_IH^=gO1NsD#u#8r}?)y=1GL>Z~OwUf8(3@ z_1}0lAN%-6`M~=>1er}N?{h=1f>n=S@VC%?ge}zH6U|lhk9X1tV&l6up8KV_7k( z!`3fLihU(zufPs0Wxt2@g_f!DP_CAkeh(=NnpkuE&=GFC<32Vvh8PRmnRrkKN8Kwm zo-;}KazKcxbg;2KW>g1aau`&%ubBubs&PTV?XFmJ6X=SC!%*`^D2=L+dPxt$d5(~&ePrb^r>2FdBCNYa>4oM@~{8;LFy*bG=W}Oa>>OP(q9>1 zt;_69;mEP$)OC&XzKij6GF!Q??KNO#VeJ-)Gj?xJO(87RYowWCdkLrsDp2Y@r5~u0 za}yvLoK^5gQoz|hy~43_&bj#WSJ^&sg3<5#$ZbBT#Y2#QeM7ZeDX6lT2|9XQp4v1g2@Q5^*u&61ZW&jFbj%tf60)^m`?1 zEBhGuf`Rv}lqD;Jl77FWTw9~Je+|>KF!1zBL#(wn5mvx?T+yTMm(($_zg)u^!{Nh+ zFs9&WlN8o#8sbEt?}^6K7>Hu1CpF_L(9}@{ovmdus;R1ow-C$#R?fk>nzUU*Q|Y_s z)o@4v!4svyxk7V>(3~V)`*MkQr|+>tjWd0Hm4f3OcwZnPb~$f0p*5R~!HO#Mmj@2; zZNK<)T>N#9;P@@~aO>y)jlNsu&X0eV>)-J%jvPIXTUp1ft;6=-=-qqVjOG`hWeW!E zzA5cU>{|$^4Y!B<(2xEI-}Bwy$Jc)C*Kyh9mr;1n+S(ew`ix)V#v5S|x9qmnR^ker-3%Zl6WUR$LEYYo?2brtXWmw#cny-i&; z6lIU|&p!u%k6-sme(gEWp$ZMom~H`D922_n&?<~6rZoT=Os{ zlL^O5g%jf8Kdx#n5H!hUW#)92P( zU-T-vVu|^`loDmx2P|9L+sgd3j_-W(llb*#J(DZ1d?402R#sOyboV{{$WwoU+wZt% zE?%}whWBuaG>5tDPgo>t~Fd1*-tW$T?6Y~gSMedFo-f`Kd8KUiUX z&}X&ivE~P?t@l}5UuFH^8tdnuLs41?BV0;Emq-OTFL*y78jJ0fqyW*DSYJ?&$26Cf z^!viTwLaU!38oq`v6k^>g%}9I5=&22S2!_L^@OI5G*zOCiMm!@JtKx-3;cx-X6>Sf zaOvuPZu;;$sZMPXMewGCvZpB(enss^sep)xRhDO-JJuN$M$U{mn^ciI#;AocO`h9E z^UqpuO5X**L~xG%mt2lnS>?`;e4O!c!u_}2!Mz{<3`MBf+S+98;!D^XZ9<%3?4F^W zRnn{4Ofw00i^uFCp>OebA_BE=`)&8~l9#;_(sVIdTkUhr!yd}ZU-nX-_Ozei+0TB? 
z86mW}1kbD@&|N19YpbjH!V_9LH?vTV2IZF0yQQVmDvUE6j(Ohi{vNm8emlv6agJYq z*0b2RzRoZG(l2qxq5Eb^W78$%0n;pdnTL^*5peJWKkz;5TO07GM|>UEJmjHZU{Y1Y z(D2JY{|kKh!=J!8Pw7wd_}V~pSH3`Hl6t)!WJ>IZKl)+b_O`e3%fIx?+<4=S_`I2v zWw``C-1S1V`C8rcjR}ciJ{E$~)Gxd2a_&2HFLl#QA;6hUW9b~qV`tipp}lOrW~Vo| ztsNAcP>yq-^e_H!6(`QNU&hM#}>f8HUN)VAH&bTC7tI0I(bCGGCw z-L=|}^A&d&&KVP!ND`y)RmwavYmuI#H(1A{9?@=+CYAa$Rp8j&Uu5;*K~~ldW@#yp zFA5|Yir#>t^w`qtlnf2cq+**yvvEvsOLSpOLRT(4(AHiiUBLVl9iPe_Vw3U=@kr|XRX(xKj=}c6`1o^2z^Hw zkEq5cahVCXGAJm$#}+-DGg$AbM>Sp|CP0*i;1g^6*Ez4MnS}dUp`ac&^j*Q&fHPX3 zNXfB**hI!nqKc6)PBcxUYmqB(2QOvyvWF0)PmG4iaFaxXT{{=Q|2)JNq_I-HO|aPL zP=V7Kd!s`ll%;4fN_9se$|U(`YX~u~S;o*`Sw)QJ;KdiSd2$0Uf$dYr`M0Ll-&*&~}wTGKIc$6a@E!!5V+V?X}Gyz|}fAw{9zAMlyaUe9%(`ZUHk z`bDASih)W;Vn` zs%ylllCyII%xX#9G@6M&++-_MOop3S>v7Jre$Kh{*Z1RmkD}1RZPjSZWVFp>GDMOV zy|YP$Lfd*@T2Co4!@a<{se|Z@!8k2-U@TbYI>f2oo??S54W<^PX>h55 z*pO0WurlDldFQcl{}I@(iS>j@9g&{LUn7mG``RNOCQLI`wqdR z;=Yq7Nt099!ch(mQuNko!URKuHIZln$;M3O+GP$WFjhBeElDpbpJX)1+o{_-OAy1# zzVrCT@BAK)9=o6G-uo}ec$<4a_c^Zo>W6X1O*d0l6AoT}8MlA_X1qXcN(L+IY?lLQ z>g9b{XnWd>bg@aNU7x-bg=W|DJm%5g$e;YtD|p`rKgf$;`Vwxr=~lLfBfNKcTLWxt>-3ihq8aqcBomKQS5CY;KLFbmN?(G=(E|eiaLsnlqV}ct5kb zPP;K67bSv#Oj)+mdzpTSE`v0yMkEB9hFUf-){|1i^m~*m16|BSkPxZ2Hi^mNY>9|r zZQp)|)h6S^TZ~SgB!rr#o?uhL#5xOXCVi7~OgE;8vvh*0*5*7Wt7B%37TYs`^=67< z8gM@6xmgd^V4TOe0^>>$EyGe*YBWu%icOdB-r!AvlL=k|WzVv?wZWvKJui)(|HFDLm!D0~{2`#`c|ze1nOJW>kX_>f{L4Lo(D= zO>7d994q|-xBqe;@HLO;w(G9rjhQkgGxU32ZV*YfR8{5HP%@sH=)x4c!=gO|h{W_#Cuf5}A`^0>!7mN&fN z4Qy{|(68U?8_Anmwz{|D9%4y;ai~53)DBM(6;a7j< zFXh;YlY8jx&L&YIoPK{=RB%Tvsa=}Euy1W2S6}lm?!5huYz*Ytk>Jyx{yeYz!&mWw z=Rc1S!xUaU?KyKFroG|1>psadp7Bhc{p-)-agX~(uDSYZ9&qU;Y@9g3l@GcSXACcY zx)@lc4fECepV2;qfP8M)^wek>z_%aRBKYD zF_`FaMTsrEo+AblRn8p}MjJ!!y6NT|1Bxo#85+FkJEgZGm?(&wj<~S~lQeHBB3g1h zdxWL4oy?VUnVe+pbgYa46Nxb(VyKA3)KE!BNFd~mT#z~;)-f^rSi9ok+;!WXY~A-oOdLP%wicF}RF-7b%I(eKB6Q%FrR|~c_H&yzmZir(A6k@X2 zsv*T1^S?Ujby-`?lb9`+(6-cC4CuTLP2i%7FXkQZd>7+M&8I&339h>8!CZFPrGyZ< 
z{m#32+uQ$%|M8&@apc%B^o7aif-_?0xo4P+Lx18EpX?S=E7KUjVgOx5Qe)ohrb|xB zWyNp3^^3gZB`@XZ(PMi4Y{`Le%PqI?+rRbOY;BElc6@$=TT5GMiL?^-7$vM(V7NWz zPyXc3`OGIjJD1REFo?C3)^qng_whTw`#hfh^rz#j@5aNxt|cGZ`#W;~QJ(Xgzr{U= z4nY_DGdKq~-*g+#dCvdCvwroNYN_uS#k>y95Co=PqXp*~kH);2sUW@!PFw7(uUh^ypFk?cYAjrI%j<*L`xwvpZ88^n2WW*IitH z!wp$Kue~{o2>jjOy`8P?Z4MtkLW*Guw(qIwSSArZ{Na!Ci@*2`9{rd{@hi{z70Q0e z{l`!6(NBDWk6-r*?z#Iuny87K?fHwGL?}_`g^4o^?snf<7^vAeTl*|#-?!Ze4D<6{ zJ7KNH3YgP7OmwBmXRAF*t2eKml8}f&aNc7`BrBNAQ#DDCjn$ae8&*|-ptlth5*s?R zM%rP|t71-0^;QGw#t7CsthIP+(e+l99K7`%WQsAlj|$7iD2k3mQ7;dpMdx}YuI#Bn z86pJDz1gS}$rh~k3RWA#sd5W@j%C$Btj266&sx6(3B;zR8E#^Yp>!TGhV4)hW1wyl zqfumREW>e)h{eSMk%njt!5BgcIYmol!XyC)E`Wm;v zXPZ7mnOQB2X<-rJ_|X%*_r3350(m5wCh-3Ee{gz}`5l%S_*SDs`lB&f-Lg7634?W>stWw|oBoDFhYl%~zVO|#S{O~HDgk=3JRa-lGI^nPQe+6 zm84}?@`CAoK@%HNutWmM7dUGO6G2=gnZR0qh2Hu)#u&z%r%1lQuC7vg%jTvaCJ~#l z<}_N*C^o8-=be_8XdoF-cxMDzYa6fel6eWl_q9ja6TFRZkWIUlh3;o@_#f-WIBCoD3gdt|v z^UHdakUk+ys0o{3@^rR>8%x6B6t*vdM2vyx=>pbJ=GcV9GIhxZpL-*P3(U77j zD1FH|oNmW%J(|{d-x;@9Wc9V4E=<{YVhpi`Te47Nw7PhAXEl~!b?(HdW2RJeyI={M zd<~�+VVCPTlGmsC>z-OtJwgh3r^o;RAOJ~3 zK~$4S)Ls1?lML22kkn%gG4EO?rwv%mfsq_5!kR86!&tTW+mrwYG$Ck7?HCCmU|oSR zSxzXlwD$sEfN_?lnvh~d9ISY+-kLFN4!7v_OAH03iS*ZdOeQrI2^SN_tAHz^L7Jd- zhkULzR$4|?pl%|Yql)d#A>$@8MupT6zcGg>_5GA0j9VVpB<7K!P^+TPk! 
z>qZ3cGVZHd6x}&zuM5lbik4od?PpTczK=F%JxNTPvvBf5~MY%nApctQ*q6FN_)?RiSATEMh2tVmmaU9`xa$w}*?WE}%o z@3GzyVnj$qvEwG7HW5)+%JwxC|c?d!vEMByCHLLaM@ z07#@dVv{9#OA;*>SdSYfbPFFQ}1E*)OOZMuL{X7XY$PD_$&>E z&i<~=A29TKeYPhflGO}f$b+@eX1%OM7-C28R@(9a`R@YNq{eyO<#)eN(=C`{20=8t*}?8=KL%7^tKC@gfh`2nK9jkzU^?0IoJG_U zcar_j441f~z=Q=VrfL7%eJqU0Zh8wDF&l%Q5uo4YG|Li}=PsYy{f7^8&DGb;ZIZJJ zAMMY}Hi@d5u(7p)FUvCkd^7#pMMmdz&49k>Ru^1vT60xT6^(j)#DtxJD%&xG>E`2d zK)-u_r|WB)s&&tPuicrB=-jrI)WDg7uBSP_q&cTL37HI_3tEg5-MmFmDX4%-OB;y# zPD?V>GRcr+hW%RW*<=A>B95#it+L+w=b4yT%KCE7dEHC}Xkjlr))u<(nwa4#x(!M) zOm4kJqD30P+JdSc(?ml|5#xoXi445Og+wzNu^lU;KIj#2b-1f#eexHnp1}mEXfww{u-uS0m@vT ztO66U-ePSZlPs~?22%lbCOfsp!}2w>HF%X#myLl0DZjUxZUTbE6pnJOL?#Nesyu0= zEIndNR@V=(y?GpiqZ*C~b)!ax&C*n7f)!@kS+MDhKIkN!rJH1aGurjzyTy5u?z$VB z8yr7=V$Tai7dT(UzGr`DrjMfiT~$?BV^jol;hfgCp1|})KAV`q&NhYmbOTUggsR%K zui^BLPgkNrm|u`*J&M-zo5mt$@WS2BHAf?yo;)VO;4SxG64)y&)$ zGp|&8%);01vl*srdpmo#rz_z;t4DbHCfN&HxUSCFCGWMx+C+;+wy{McHY-0Th0IJ3 zSMEbPJvvb@R~%x-AT>~sWa|BNRiVV`@w71@!K9R>fu`?yl0*n?iFIu)lqX3@&H@e> zg;Y0c;7NoSRf5YJu!Y8^j3Hu)VkqfRq==6?3dRZ{)Hv%H19cPeMGx-{DFh-adTgDA z5J|>R163nT8lkEhCc_Fb1@&aCNxkUeZ`aPFch#fWJ~igS zwS_*UG{~s{NufCt!56sxIS@`koFGD{P9Zb%q%+#O5l=W*AVFbpYucPk-Sn+f@mK-s z5C|!gtxALSj%rll2NAncV(Nmj=NN8pQq^N@*a-=rsqLM>V3vG8bEUJXQrUhsA@uqs zH{5VT4kpWKKYP00&%$=I-kHo;%du(j-e+2~h0K_1Z=*f?>`T71woJR@=ROO}Y23l= zGMA@>D0Z7tYnQ1ixw6HjWB0Y7Lu*C1W^9}}vC4OdP9#hw2hUv5fSbW}TIi=cVrJp# z))bk>y4>t0Y-TLy#f2$DT^3E0HP&P)63GSq=BG;BtF-}iv937}H!&QK389(mOM73l zb6~fHIEoImWL4|MW308^zrD7lka6zbva@tVC$&qb@M(K5i`Q;{mRSbi-Uf5={!*IO z>+E`LEY@Vj?ksb#yKR|NoWV?A0abJM7-R9izc3P=G-B`GDMfH#KFsH9Fz5JF;9*HleXWrayikOwcNiM~?Zd=)RGjL55 z!Fh+XZn}iWILCzAlSOIr1I)o+WLjQ4pJncT+j=4q2#WZE)eLJRLK}HBOdm-1=`?S@ zw-YSX;B1-k;q2|0b}YcnoTK$lEK~TW^V`~cn7OXWp08HZn2D3j9s?S;0wftq=de*b zW2e;}nCTWvPc%Df3ozqiGIL4_t$^ut0^4bBpo67m$uo1XaS|j(eO9|x^gX+_(<3om zJYZRy_kgCKP1ysQJ*I14;ybZOAnG>i*<-MXSCtGS1~Hh3>im=6$C9>CkQ45Wp|n0Bt!L zbZS)9n4U9CHcl~~G}xjbilZzC*rJb2v}}dxN)n#Aodva9K?3dk*_gP~Hf40^dAuv= 
z^-A>?_PBd4J>M>WYZs$gqJ_0CS05EzUMUwT<7ZR_V2W}{XW`B+BH60XFa{Z^b^=B; z?^&Us)}v|5cgqsbUwOp{)jSpeIdfyL2D(6OfgW?90T|CMy2Ta$5h zS(-q-2pM9aC<-tThy)8_A*2}tKJD~E$P3E!WYgHe`~sEl%NhoQ6*VSI&Tnd6vxR<6 zrI%*UqxYpkqRYLO*#dow?M&X%^zYI=)upFO(8(n^tD1ZNe>JP+){PBloo;JKkB2f9U%q4Sx21|M)bbGIfr*WoGtOLPw5?f3sNR55ibcV2J2go zB3t@d{ayWK9EG#5UM!Gj*l_T9IVHgCte8nLCq zk=AFUq*1R~oWheyNU;F|S^fp*`^0L3jWrS`kTlktvOS^imp3Q9^(``a&2jeoEH}+#84@nny4F`DL}l= z52MMhDG9DC=&kQl5~XJseu_A>#q`u8YFAt43`$9oEqjv-@hL{2;XjKBj$Y4G79~w+ z=nqz~w!~m@HdBnerLam8GC{B=>V_biCznDZ(Wumr(Ve(yA}MJ#*w92x1a5>>)jCg7 zjjZm+Jmhigd&oCYpWJ40{~gF=3vqp>nH-6j!ZgV0JjOW08glBYvfqeF>tJ+*Ll)l6|Rim(yXlb>(wK>?J{W=59&3l?tV_|J&1!G&7A)Rrqw=fN&$_8b> zM{JrdkhUz5`pYA(%v`%e9OvQZjuFOmcFlskiu^9dpmxICJFd5j5w=v#T>k<|7!HTj z)qG-QS}-IqV9OQ!>Up}+)FV<;;f+=KC0o6+l$uOr zuAZ)eIwgFzX{JfN;GN4Q$J>9?jtUy~(sTxu2~r}8B_*LO48CyGb%paKWZJ#NKvgGf zuZKg~aMn6Z0iYfOA_SwcUQEght|Lj*3aL?3K_n*Xsv^`Anr4h~j;enV7d-a+i059; zksH6j@cNI^9KQoAQ4y4w)KiSr%*4@o8ztmgV&Gg)E7rOdBqk&bF)3)g2RfiYqSi?2 zVy{5L;arc1V7$`YQy_~R>+@DmRO51Pu*NwTT}n0DV*BWQIA;klb)ZEQfmq`UOR5sa zMpCS)>jqPRcLPkYG-0@OUM{vhZAJ=+v{9BN*5+Q$F_)JDSY@^KdwqOSEI;;3#L5@* zFJ{>!&grC%Q7E$!erCqe;@{_eMg6#&`>V;ZojKA#i{FYubf?u-+oekeh*-^$aA)fG ze6!QeYCrNlwAy(h`hw9a_LAW{nKBF)nPDxFsVJO!xhXR)_nM>G(sgI(rfqz!z2Dib z;oS4i&n!8#(^ii-Q)wrY@yWHdRjQ^sol^uX-=y~33>}ppfytzr%gdWNjJED#N-b3S zrMKM}rn__zjKbA*U3bZe3CPyc{7dc}wp=CIQWr}Xks#DI;H{3YO#!NHq7|UfG!2RQ zoNdf9LrK}Iu}O1sU^=hb^(7?;P0FzwGrh;8M1YyMJd+r*N<$0B8r8BhIhJIt!Qcr^ zgCwDFj=HHxF%qI+ODzjwY*KH)8;UINqw`BhF%eQkj3r4_9ln%n0yTjUg;>|b&`{Sk zu?D&LDlYhrr?Gz3Be?1B|CZr(?ysaebgc z;s{beSDWl%w_!Gyy_+^%kT)9d3PPCZd~4y*?DQC;+!EsqMZZt*P_C{JCF%x7!g>t_ zn%Om@uCZo9^aZ9Yan?W$gLBRyj3zWEzk-|5&elHe)3;>_h;z-76Z@7g%BO?e_S$A+ z0D3N)sn837+%M}oW{%36vSNV#DW~MMW^lWH@})067S-A-{d5!GQEqy-w%c(=H>dHW zVr6Zm{nb3An9uILVk{vAg^V%`7BE;z6w&BJj}6d(1dK_z zmKc$siJ~m9_MAeumb7Ehaaa;G-NH;Z?vi>22p&}>G30kCk1L|_9g`t63>Yy)5khRR zK(F6pT-79FNM?o+7h_geQL3}YBRQ~-(Ri~2tAva+)w?sI5TVq8rmhvDOqO`@!&raZ z_p@@@)f{@?KeF-Z4-nZvDdln@m*ztBR#(MUuiqIXOPZ$@NPp3P_ 
z*;uQcoXt!;$T>6rl+VyQ3w-$%W{#5`ziXTAC}l|@v3!Vay@Io93QxsUQ^W~ zJ10`m#6YIR4FOfvo_1TuB+dV-y?)yyt3w!LX+pqA>ORxvX)Qu&XX72s9@R~moX=EG zS}b2H3%B>HN*cL;c4nT=Fze>VWkK;|@qKHVgo~*Oi|Q1emqCVLp({TW`5a1u?OMRAb$cLPTODrU|hu@eg=3<%7S8DmiX`+uySN`47Wn3vrHO^-`j7 zBpH#at*(AhYzM2Y+mxrsR4E0ex*NI@RBaQ4{{9QN^x@yY{>!f9?k{|X?Ndjln74yE zRY>-xbV|+`P&ou(5>#6wNCoA3pJH`CgMA0tJaL$!QH*Xw?&-$6{P$mCbKX&uNpV<;d^*KABlnXT0MZF*kr7&=c9+9K}HrN!ea5x-xc;T(R@Sotd*LK)m zcqJ_?MQA}$E_T<5t^^@lg1t+}_L zam{%ed?oi=7&YS#$_@@m$Z%;>b)mf!Q+xCru?iCbyA#@|52w2$fYJu0bE}Zu@ z!vTo4?5!vSjwYW=hPep?Z$rd)Y3GlUOn&b@d9gyaUKwktg9!I6LC}t&IoJDYbzOb& z-+c`Ea=)!(!+k{fyjVX{j4?5WqAF#4;kPc9PAQF1L#!+DhC-SGhcQ^VN9^RyvXDUo~j zfhaV|vH-zeGe`%NRfWx0Xg|RE5;f5HVl9cSW`sr%f7+Fj5&}3Yb6=Gum92zXVYr8V z`RgQa{aq4mc=#v(iuIijsMZg`I*d6K>%v!P>jGOuc$ZaJ?M{QywH4W3hDoBaQg|I$ zH{XxOeEYxuA9(q#Z}Gi<^M^e6`H!*db>rbsnrgkm;hKCi)i@vO)uX{UPqms+tmo*b z8EH1AESDrcltS;U(W0HCO}F|F0;mRxcu*n!B42sh2+R7$w8OhdhPuWE!c3(d!e}osQtzbts>H)% z;M_%zTt^Ly=>*&FUyCA6S$UEK5*-IBCWAM=Hm$XabU!X=`Etd^V=k4Qt3q6LS}E3f zjt}lX?Hx*`pcSt4RMv+0r^Y%d1D;@UWsV}a>6}HXQdyjG6v=?$=YJc2^g8+N54it> zKVkXcJ+XW#i8Kb)d<9WHFQcaC?(_8I>ot$hg7E-Z~W?S@{ND+ zAM@AW`8WLZJAZ&(oYv$yJp%czYv{;?CbhrRwXzLOl z&{kZx$JcMNT+Ff7Gr4li>7DtOr@Ps97DkH36{TzM<|k|pl#!S7(?T%SUH1sBnlU`m zigdn*ez(S^^ZePH)5iX_VI;+V9nQsa0*>fV+I92OTI1|)!uz7uxGxb1D2k#g$O8P6 z7P_rt+sNn0*nq->e2a52sg|MGC(=Fg=0c|79X^|256&o z8%j^~6V(E#yWd6&V_U5AqB0jvsS>v_Pu_MUBaK_qhFFd7nV08jU4>_3?6icxm4t*i z<}4ditxGJqJaZdkB&{29iZWkdost+t5d1pqN^;*Gyh7O~Y1$(K#YQ6HI5yQ4!X1x7 z>uVp}SK2AO7WAEV8kIzW7m{Ej!V+rtk+=^XP9>yfNaBPMVx6z?I8>UTtRv5J(s3$h zS0$r#h|LSU^*Cj5SrK9^CFOD@UausuN|*$%6Py|14qt|&*T^3};PKmkN&fMBD8I&M zL&)~f%pqSwU`$ z-{Ijg;}>3GFdg&pd++e_*S>{4d&KFTTj!r^Pup1-xz_H`??l+%&Tp_G=`}SNqiGnk z{e|c4;)=Vdxd7!hF?T^VeeL3o(OR9`Yg?lWeYN99-Rnn)ZxhSfje2G?KIfg}U}VWB zN9J$j&Hz>bCRX1!n%zTozTPSTH0!`wC-s~{y$ywwm2lQ!sqjiuq9kz?g;Eyf z!716?%f*}|NvNulqR2%J9HgOElEG2psuFx6u6*msi-OcRmWw4S9gyHzE#_#Y84d?H zTTmt`TzdnTj9J^1{HK4x)A!$j^$MRQC^tl>6U<;w_WSY_Jd~m6r>=$R=Ch5l$C_?Z zfgu5{V<5MJG*y!L 
zD$hkaP9{@E<9*69r`oIe<*%IQe3Snc0G#j!Gm1J>jB>4q>JZ#DiDG-Fbu;DoC z2^&Cj5$~GcKdt6Hg5av`h9aRhDyGVsw0J?+N#av~hhbOlvbo!Ke-eNK6>Ip$?_2$b zKAaNE)_Wnx9S%oSWsb{Z7Qb(49*RUm>3u*vgz^Q^1s&dNPov2D_`Q=S+E}Z>5(jEr zJ06q{$(22~9uD(BWo5q7Ktd|Sy!;d(*a|`7x zdNf82u3)PKUKVLob1M1Jll7d< z7Lw;cv+1{~D=zH!Ir5Q)gCV2QNKi2yMK{K+b`DyfbBjFdIPo#Z3G^YJu&vK+HDDW~HCL zdpSRp+HDn)UJ%@N)`#a~Hvtfcg{8xbQU}O&$dB`YQ&+!l4D|gB^HuP@wmXO05-&G

2Dur4Shx&f5LyKb^^y}XwcQE_-WAfP?Gn}IJkZdf7vei5kzXQ}D z!`KN}i?J1MIFMM}>`_B-pbU_qvk?|yb3j|jM!0M$)M-wb&5cTg4a02!w3}uN-1?05 z;tXXH(oBFDLW&_*bXdGMsANDgnvkX$`D#g#2e%xxhg%doC-1yU5=@$*la$Jp9NxIb z?9NA!!7u#!Z}arQW9BDM&yQ(K+tD(T76~Bxa~HO^4YNBI84GQGHp7*6`F;09Ma|IQ#pzwY{095M++Oa@D+#aDTZ^bf-D77eNYgb@&z^j%JjXk&Bx6TE^oXumjjW-?M zop5aZ`-WdEUS0BWocDFUoyTL+jQ#yXR`V5Qp3@c(g%ySdat0DwS$ztXiap~A4_Njd8%s+Cyj&XW&Dq@VjGl9#_kes;#=AN?6+ zu|iLe8BTAKm0)o#3&8@Bk;MK-F#`_ZCrNol6VA7%3suAd-K9E zpLxF>JN{gJ28bnvH(&DedZzca;T>FUl7q8YuCN{VXkTmRH#y}FWFet)_ya(eg9};zmnV*~a41YC{NJ1OkZ4&nEn(IZlc(20`OA7|(pZ z#CG$d;-(thgR0X{y&h`;hRmhy=)S?2(y<&vq_`q{?~%M9WS??M)iF9C6vC5(g?wA1 zlM#BFQ6)Lc^*p$Un#X9%B_^9dI+P7^y}-E=l&f8IeO676&AH{XOwfk>kIPhv0;R&v zXe*CSJQfp_WG6(#PN7N}uti*9O&Ja?bC`tKms4ovsft1bg|!R@L!7T9eBdmY3|EN- zk*XB0hfLr26%M}oYnN$<=G@zxpzd-uWrN{7b*Z zul&xp`S6{evv~ASP_a93LRUT_$wOU9e({K&%wAcmKZ-5WZcNVPI)||(H_Qw5b(f(aTx#c~+NVm$CJAPkQk5|_*LKm$;53m&uo9NOD$E@XtCxR}LFU)c)lN@@~Ym4dC%+ObZj3|@ST@wLzK_($L4%jxOCV*;X7%6Fg?O1lQ2fCo~JzWjC4<7+5m*njCdcYgQ-X77JUbvmc2a=dk< zS_`iunhFo=97@ah$_=jl;xBM^@`$g#`6a&n?SIHW|F_@a-p_x^*@N4-`I24Rjxi%K zSK|V>jOEa5+8v7uaMpAG{(V-fRh_{1X+pgF#+l~@2L}g;dqu|8mn_%^>Fu^%1(Niy z0!7Pa-opmvM=N`NYu_#RncsL>+lTZa!Q^ju&uwc{T4@&ZnUEp2w)l(6D!7n-Jsgb? 
z0Td8ITU?;$oHD*8qI-OL4%xio)$=@OGMN(f?Fi*Qr;U5rSi7S*Jv-en*5Z6`Saj_n zs0Ih8O;aJ*wfs@FFbi{TeSpMk&FO3gCM0@l1wPi;3AbRvKYVMGj;*`rt} z273w0l@yCPzE}ehI^Scxs^%Y3aVRziBp8#`^W%IdT^^2?$W&l-P%=M`5F^6lyusx~ zh*cSEBA^Ik9VAj?sVFUGkYG}Sa~5SSc}UhyOp26KFv6UzH6Fuay&%noxZyPp-uhL{ z=z#m*|6_`~?=%V7CLu|NB;MneXB5kOP%h()Re;eo26SUzdiS(0Lu!xl@_ zV1&*xY;NmyqP!Oi^FgJGV{|D zUjO^Q!|)4VWcug{lK~`?jKOTh-S7N=6w4(;4OKD6tqaaRKIQ4t1(U-Gi+hhS%Qd%F zAM(HcPyZvQkDpMjBw6?}D&A+#+P%&7<$|6t=l3Q)I=q}4(Q5JY?@M>%d)id(d+^SCl#fq1{`{LqPH4f>x!+dRPKiubdMi;%p(=I zM#plwU@#bBjF44!87`V!uFXDT?WWaYS*r$8%cWUE-Ss3%l(KQvP-i?IsSau+xn?u2 zR7crGPhG87;-+hlMh8+&8E`!}@yWU{R;&dh)`RhE%&m`>P4yO>P}!o}+?w=aoSzkG2*k z^;LpPPPZ(mBovWzXkQTl8M4Gx5IVMYMVWw=NkLezAwawYkfOY&T2+!9o+UU}2^hg> zyo0JLQP$zTluOCd46RJq^M$j{S0!E@V$gA5np zYot86r$9L=L!rXL)(Mz2tt-SBqaau0od+W_?6Lcz|v0M+3Of}X|17zsB=C(gSrFFwvyaUousm7{u&a+(3H*&_F z)dZq0>YHpfZHuiv;!t_kd!9Ub!qux+fKW(n2kxr~VpB>{;GG}x>bHKAy=yl~vH@jPKs6MlXMT!ZuQ5hZp~0;KHIkPN zQyUT3I=VQ6Op5HfaZN;}r|r%Tb-_e;X}Io`midn5%&oJ+!jNdOnt-ybuuhRE7-&OL zd7RNwSJf6m2CEfSnPZ)2c=aY%zx3LRz^Qy#JoQ2UozV!I@97173ZAxKPC`FJ<$?m@91^sOuI9snFuRt3} zbb_)Ggd|eiDTu@)qk{v|VamzfTX+v;0W8`AL!*EEW0HZ!u9qakl%M~fa5~S~yLp4^ zXCHFz>nh?%CNHqtU3h|7Y>zZ4*eBY4LJRU%q9f?cTTZcEiHdS_#Cnh8LSl zSY5I$dhe*Jin6pMrklxg!C0ImPA&>z16E4!wpM(Amb(x-34RF?a(x@7VqN51xpGws zeN~&DJs)B#eOLC#bme(Lvn}^;$>;dqphgb}#j2_EblRBXu2Q6QsDsnmNo%Vt#I(+h zLdmtFq843rn~vCB-N#OnLBm+Om+70QIQO2$9c;a8@@kb~uy;)L@+*rGg-$M5%z>3nPp5x&lgD6xd)M zC~ag@_jLhxv=40OCKR?*&0KpW6^y;IVo5kfj>VaPI1B2$bz#npB0YKuv-bjbe)K); zyh3FIs%jywM-R%3Q0X3+FhP+G`n zgBL(6_~89pQ0;EK>`n(1W<~(PpGF)75~K>FwRY#&%+^49%bnK8FBdloYb{BV)bWpa z-5QeH*-*NS441WfHk*X%B}f{_)%+Tn5T4y^_^;9Ew^0!7(&F7J72-l(p^Vv3{I{YC zy(2RT*&xF{b;0Kz*ra=|BYU_f)UMueO|@NBu(?^r&#Tp%beNEhMy&E8#%1fBb6ZIb z72*vJUmJb#j@t3C3l)+e`BWIc?a8Z({c3_MOhw*7iSIZi@ZQi#sz3cFe|fASQ{QUtE4a9C7GkX1f#twpx30O1!`8qwqw<Pzyb}ZMl z5{GhXOhQIgE}>fCt2|g`R$$QCKBy^Wu_iH)C@6~xSBe|3qN*Q+3ra|MK^n`Qi2aZE z+L*OUgRf+XrCCZ+fx|F({S|)izxglu>VNQU-g|Vyd_H3|GK}}99A7!W7A22wJ>+;a 
z!n=}3ckXcKC+{&TE%(3wJ>LJx+nhdnD%40oc4f+M{CEE?)x6+G|Mt5KT!LH7s8(}| zVojCjcq}Ts4+f-M`rZ-BDtNSFmv%`v-KS?60c2UmXf&d#s;$TD%EA!aMFgqdd#b7; zO;d)$Va@yi!5z|yjr8SHeR2qJ8|MA|Jy@hXc-&Qeqq;v=YXca(lU%ISB%$xknozxC zGKTSF!YaQVTxZ*nbkPMB^Ls7}5P93W!DHS5B*cJA57 zzj3V2Zxj*iof{|nR|B>P?k20yCiGePdP%fhbMfB*R>eqT9pZ1oYpu`QqJhS$<_kkt z!t}QG;ql+jgRvrw6`-<&YBZ)?t@!wZ_wZH43vYalbULN*CHwnF*eh2k<_qQz9lT*r7h4&?iFR=Nt&Sx^lh=)Mw082J?0xHhe#T`9?&ZlGX=^zWdk%E5S zLseOfgTyFQDrBgYbzmJD5!NEC6p9qHe-pER#M!MM<7ab}%J87&Fo3s5RB#G)ZutjeIq!8wEP` zWNC`4DoT^_(y#p%H~;o;@bvJQ@BZuWayDBsP8Bzk2BBFrTkjpUoH# z_xL;i>3_k$|L6Y~XY-tFe8kN!zs8l9U&duAzw}@IC*1qV&w2k3{xxZF#$s)8#S+RK zT`n<|K#o@qWq01XihuiJIKK1WwiaA^Ayn-lyogf2{u9Z_cm=Jn-ceQO z>*j2$nFF#}8Dq%vLNvUdRx1ScwRHWD3u^|qUT}wJKA)o;OebR;jub7FFPvWo4WUuf zOw#ZJMos!MO)Rf1*4LKUU-yQTBcR`?CD4USKS-)JZn6uDX?k}!Ohl14_ZPj1Q6qT9 zQWLabBk~$>We|T#lZ{76Sv|{>`*&G9e8{VBeVvnAw^*D!;`l3HCp$V~a_u_K=4c$F zg9EbZVW7yVP{oS;>@meE#}+xZvO$Oo@0<@2eCNb^iI%BD;dQuo9W00-uw>p<1`KX|o-|3BJ@`3(HN%?}qX&3b;C(4W&?zPzq0zW17Z5?5 zpD?$cM6SHJ2g?&G?eXOTnI6e(33NJQ{P{6T(GXq5;Ph;g`3*?4^A zm}D^I@sHo*^yeQ6P0wP<*~w#O`xCTID69nxi}?!gJqO1VK6mYyKl)eyg1`Lkcd-v2 zW3gCeIC|?1e(^v1hunYvLvDTN-{Go)a)|mQX?B#R?@!8Qgq!9v??f8)Zatwlzm@Vu_C%bw?g)?XCGxlJNkJ$>MX_PC=v?;_;g=JDs5 zzI|lttu3*5&1XpzvE>~Kqco`zD5<=XQIBml+UIH3T2^_1(J3P6o>huli#YJGD%MCL z3f~Lqjjk|bq#Ech@-uEG;)+(1b&uxYU>}c1dsWvX+jf5Y^B^0vT(pEZ+p|Za;t9J- z{SYfk(**DBx!=5#9pAMM?~WG`?bmMmv3;0~%TlKwwGy~GR@0@IhK5kHC}P|ciiC{O zAZ2YFlgTk>x8CKQKm9|zuW)!~|LzZ&T)D{$Z+(-ue&cVkSgt7+OIGtU5(^5);9#HW z(J@uArZ_ueadJYwp5w}bloXYw;Ea?m4=k~QY!s8xJBxDiR`|H46oPULp95w>;{DFb z{MYVnFy5iegyYw~04C$%t#@$68D1rVki)fHvSDJaOs=&#PzeuflqU*IIs#W<^970Z zl&%0$l+#eIac+&O8ZeFTybq0N(zupAY)pd1qB4WB7GD;4K z0Cr|`jI%twUBbE`(SkxHH82?P(!m$`qkr;GDVGb%RgM{tIC$ereEuK&E{9)uliPp! 
zBObnc8=WcgVokAJLb;+?Ed^~Duucw*$_9+C-{9)kzDag`#Cm>8wOs7H%KpZMQ5)fA zjHzQrpLL6ybDrnSW@k*NQ)aW-Iq(DYBjmb*DUSIf>r7%yo6eJF<7@g80K*n{Pkn6o z%^_LTN(C2?sAG*lYhqxR7uEuGjxoGlz#1BabQU2U^})yQG4(C>-PiLPa)m8<^5hZI zy}e%H&}Z4krW_$3Di*$@UW*sqYrRoyi$`rSYZT&&rVsHKG~62e(CU8!IY!I58@12g zYpK?I)b2oazp1U#ZR$XTolBgn(^qQ_wAW}Ypc9e|Gnrx+o5Yxl$M<>nPyPia@m&AP zH?h~Qku8_(Ef(ZwOIGuzpmUs>kli?Bc>P7H#gh4>d*q85Wl^EMln;q;c^ONJP7L0b zQh+WBmxy`{uR=T1ptQoufTKnzI&Cn6jKi1SB)6Wk5C0lh72&NaaUQA|r%@>;NO&XV`Dfv-aqf|n*UZboC zLA0=zyX6A4EGW-TSf>Ml(+?7c<1y<;Pf$h0y0Q|h$_5J9q54 z4eS{)sf#wU#@d3m8%`EgTXhw3$Efg~Z!I@oNG7n>%0^fJe!lH2kizNKT5MHrSZ&X2 zN&?MFE6S>*#+J zFN|V)=R5zB`#<_VSKjzM$1lA~HrQi$e1-k1hpZM$iqjeSY(}MW{J=22mN49(v!0() zuGUnQ!`f1muF8ThL@=oE4k=gXl0hAc3FB--MwHg(FbTOW^%yk3SX5{vilkF zjZfO5plV`85d{a%X^e^1hYo_{A`8*gf5l;3_9mU69@3FW>~0s{kGiZuIsxGIUDJbVo7<8B%nH5kqo93CIeeY zS&?i=WfjUhl(L*Xy2tF%J?@UDq~imU(U@%SfZ^dGqwxXLo7b=wiq(qKhxc);Nv2oP z!;*n3gw=tPDlaJ4D|`V}<-vgVCB7BgvYZ!4AW5=vBSWsf`86Dl*`s?b9^J)DMG&>7WfOd&@wO6xPCCXZ2hO1}_-c)g)qp)J6s*q@)6v_%Kg>>YIrFyhdsH&v$g|JDy6?BCgnqMFy|U5>$C1fTJSwMk2bp3&Dg&7E=?&# zNNz_V7V9p8!+RgxbRrPfdESh*!`S`YmPh9_r8KkooWfd+(Loq_wwaaf>qcZ-w8l}Y zW>@I0mu&)K=P?HtU`xjDc51NVq64VAnKq$CFMO9}gHkXMP)(`{Z-kRPyFY^_(R5R= z$L5hu&Kaqpqm)BcYn&c(aN~2V=8uTu9A9aDkRw0<03ZNKL_t&vSK%xqnI=oepbW-V_%g>A zYizj?Gtwxj@k2<(h(lK%l7zwJh~e~*`NySdy znKP}^;N7rTU!c^GG#P@g1a{<0sv?)*XX1&q36+wlYc?G-K0IJPTR@>8+rwo0c)P&n zGpg085D}|7EG5rE$vc53(5i(*@NnDSI;_&@3Q*!1@CIBqVE@_;?)~h?6#0sx1m$yz zyaf_!?Kf@$2YUUSRLV>-e*Wm^*j4|Mm|V&XyFX zCp)dT*e)lGT5Gw_o%JjTaJN}++1r~k8VoKqjLWt*IKWmVr>CbTv2ts7 zhU4_~v_|mZ%L~_KLxk!AZIQuQyBR}iBbI0cQu0a5y1Ov&O9&%L+*OUH-EnyW;u`e` z=_L>;e(37KJ2H6~VA6<68m*%^b;5UtfYHv}~I(zc8~ z4cFb~FvT+R-Ui|U5lBjzjo+j7*}7pI3Y{dBdAVT$?e3DhxLpcRqBY8EtS#&L{)5D#iBM~`^JzZf{sGkuLq}f zQ`V*aebZY0G!=t5y7uKv*g`yx)N6{xie$Ra@wL~$F3F$V%`)sd#pP?!)@>DP4{5wHVUrhx zT_OP$Dpiq957i~sHGcXB8 zWm4+CETvMhR+5bDwA7d~3S&J_?%d_$4?e6*SqMK}C6!v0T&4}}2eFc)U^F_#6=h@# z1Fst1##l$m`I<19yyWE8Z7S!v`qnonpPumW)_Zs>0#8}cCU|ocB#FYL1N>mX;NXbq 
zwUIA70w$fwczyLdn6{K$_tcIq|*bYM@M9PB9w}JjjBok9oUr0W_Vpm`3;+6ox>WK z>|bLzPB?vdmwbMPq6=UNNc#H5ZTVt}jK1lX|RE zXrri3Pe_U!tvxPL%udeadhsG5W+u;dz-a%NY_bowpqL&o`N~%qy?TSmV$RRM|L2tN z-eP_G1H3j2ZoIzw2!Qa;R1k^mieK5(UvlMO8Rw z;az$|_wky|4ieq1TB~!`Y zAsLO?d+AMHO)~C%?@uU}GeB6it*fYV$9OQHunK3ZuyvK#%A(dwOq!9TBhmhsPgvU= zWip&ECC5mC^A?ZBq(gM#13%p{I!#E66?Q%6$=&-|CRnzy8)(NpMb( zv;=D%R0j;2Cr_Vr;#nI;JS~UM)@yH$qXDB_q`hwnapAE;XISf-jpnmbmAu8g;9WZ$+&c7m1yQ@Hc-iJ%X)8-@;nBX{hH|wz{O;{EZrr~a8Pwmr z5+IO+Y%pZKoN;ph4rVZ6l8&(ol7l@)*KSaLxWrYKY?_dcCX|I01_2-I`XDRFmf$K; z@=b>InzQv&lCvjLio#cD4B8|T)OQ}T5$XOR(`zqr{K}gsTkzoz{ynR+hu|w2W6$I4 zm)|8=iA@I5auE=^*|1$KYNey7P~arLPTO2SQSrn+nlbm^n%TF0iCv{&et~DcFl-fm2 z)&G+Z-U&F_mK8-=oXZL9bxXBA_Z50;EVS(|mbl55WmyNOw~@YHB=EXp?HfTAiA4npJmj3=JmPZzp5;`ghl>bEwa3i zMiCwhnHy=OUVUt);c}3%Wc+U9Y3!?GTKPtHr1?wwkM%^q%J6Ajb9&vvJ*q& zt(sr$v3WilSKKZwteDm}O1R0Alwy^0`rsbPaLn}D4VJ5dWH`o*M%eY7aRRtgqot@gXpf`B$3$GOXl$CM`7m|u zJhb@yH_V_Y327`drD!ZQFA1+oIZR>%s1f@MN%7trlvh$t!${Q&Z!J}6rQ}AqCmYUD zSqiR%EW>3ZMu$ffIb8qUzsu;A7qPc*vwZsp-2K7#8O#=#@q`;+{2H^t2%jVz7=yQ# z`O_21b;JMpthSqFF3)G-T;ZE)LY(pbpS?+S?#+7be{(%Z+s{?wJq^3LBO8=iP=o4dF8qP`xhrXtf6RgDe|jy#`-!djA&1Y{}ku zNU>OAbx71K3M%EmhQPm0NV5UXC3u@R6XB_A8;f|YTe#UC{0IFPesygP~vL!d($@aX(8wJQD?ZR2CWG;nAo1j!igzjr&P$X#Yuzn4( zIG~4`3ZiE*M+Nen!gB%UL|AL|M%f(?WLbhv(!il74@K!jn902cGMxA56wsDpu_RUr zhzeyg-eWwOVtq+s4EowN_OHG|?mRF4&aZOii?1_#`)8>8AF%xJZOY{t2M5=fzVr%b zYRKg1h-x|K^zjLc#f)T}QB|L|Fj5dJFUGsy78-~#hd!`D_dAE9tV$t1v`)0t9zoQ# z!gecYCmMGeSLS~-tF%%qd7;G1vv-$c+tB3T?Eb$7GDQQa8$Ic zczpZw_7>;4Ynkm_*24=`OKEM0=zKcTXxo*uBxN|t$cw6G-&O%NwGH6XZd!|GXj`uA zK30uSiV76H!(m;+Xbi!9qN2hP-N(>L?%` zW!NMk8H^~-9<%l%RGN_tr{Y?(B}KWGev$+*B*PJSPg$l^RnBlQVmRG{(o&Qq+0ZZ; zPZ>@Qa7LrOL**5-hmSa$onoCw`&1MSFSu#3E_H~>v|Z*GnbNvB#s{Z(R4lx>{xvSZ ztS!!4sZQ%G+WS^j;Mk(%gWBxH1^1Y+C&)W^!87yJ?|0j45iKmM1R1PIPztk{^I{-asMN(z3>`Lk0~ctFj`}jM-MZKqF}IEU{g;z98j*7+wZH@E)g>d-b=@Z zyRL7;ipaROR^N@y#O&^}tOVNKu~7Op=_oX|x#-#&kM~gna^Z6vuZ$dJAzllP$nMZ+)0_Khdu$hPPSFwrv5v~J?9H|5)r_4W 
z6#Kv=@wxZq|7=Yb-ZC-HJF+Z6DaFHw52-@XR-px*bV0w~Hj!a1Wikc&dcm|@7CG4n zi`JOy_?xfemth!N*T?1Aojs#D+n&Wkf z!f|r<7PeTUh6B`KkAqiUq$<}ePM?sv6f+t#n4}c*In&7=$`+6py&(I2OQj*D-!)sTWua;m^4!-SOUJmk-{I~Le~AC! zeeB%_+<4(llAEu<(J?+T3|zt4{kv3UPI)rJovx@9xN>);Xm?@cT(9}K%>wFfN_9-4 z72x(BY*jJH23t!=?~*&y|NVJaRoh>`Ty9G1%epn%_t+mxYMbHb&h8xRH-tgE&u!!C zlFT;u77;lvj3FujZK1SXU`7E_mgR;}>;~MsmaDi5H?7MylSMqPeU0uKspm6%*C&)> zeD3214?_G$(}ulvkFDnualb1fvFo!Hj$PY1*D^r~j$j2XRebB9Gi}%Hv)5fjSUPuc z{?o1~&+gcFZ5N?W+ny2yQt{PWY_TF6?USSlcD<%ttVz-#+9VttzskY!4Q5YINrnT8 z^(m<_l(Q$ypFSj03Nsp$?2X0!S6NI|adzi6)uNJkhj+AwAuhT0pxQpuK)V&+2`MD$ zf)}lAH7Ohxt;*VZiJlgvJsxp|X{SNO6oYEJUZcZWh=M4F5!xrX@bAt-R!U(^3f`fW zs^g?m@=22<1H2g^uy}_z8l{El+YLsT(U9!Q0i&a1m{&Z#{Q+gZ0_#v&%C*-%N1;+y zt0mJbH%Ja%Wc>ASF+4nCbTDA;7R>+bhb(^d9)$w)G!(;T&!1@%* zlB6ujyl3VshKGAB*6X_d)GqCBzMhc+6XQB;TbxJ(;1MLUj+@@?4ZsqBS~$ zQVCiMKUg17V<)!qEu^dvH)BCaIS-kKROebI~?5Ysq8!xR5X)?s<44))eC3ldd31&FPCmCim zg3$=Qzeic*6b~P>e)1R>Km|q_a-iE}70inn!@-cgiqG5H&cet8bV?$( z+qT5ezKx7}Qbju%4ToE=+eL3ZPg5;b$r!_AGGVn^^^UC#l|JWw>Dj1(-p#f6w1%7diU%Z;@TS$?D^KFfP#d z?vkhls+e)-FaC;ZzQSY!_Fn%2tNlX`4v#2{B@Tlg9FQFkaElcvP32)cny_Bya3T5g zW_zxr*KIM3zGE-?9fY`am^IvVp9D&~?fNlx;+$hV9tT*okod*TsLOEO*6O=L=~*vx zzw5H?#9-rd8&>_S{<~J22n{)}9RO`xL(OIt^B6Xq&f(hfdzy+c>bmB(x#&Q0cZ@L? 
z#@wv6q-nbGdsq0^ziEBWMHgm@qvKzS5&)mhhjaX}eHCHoo2X<-OxoGac~Su+ zu=#D<^k|0)+DFCb>zaSon1|+JHmM^QR99BQauBcGH%3wJ+gFdZnuL5f9SqPqp_s3r zv;;f`$|%y|h$I`LRLa_}F-d~co^&+C491KPk4cY?N%m9n`}g_idq2RQoz{1o9UroH z^(K{bqz4Bae)DVWf9-2L{mGBHGBw=1F=74D9e(;x{~3?&ypNu((R#@KYhPe+{W>Iu z!WAqQGfZi*t2NG6oIQO^s$3}cmol6V(8)vCG4J&f!l-}dKT!bH#~M1_+901^>uQi$ zmXRb0^Z9(^xVT|;H;Jww>bA?d4{gwJLP_G*wJ&QWw2^>)t|sTYFPkWcUG|5~<5h6| zod=0@uib7{RgxtMNs?SBNV^+%Hy(K5T!wB(!;zF7ap!r(`cQH0@tRPIIv}byfH>_I zEyMWCyF#|_dVQ`krDG!F#cQO`%IeOIJZo{qM*5nrfNZ<%c4=*8ahG-6*Kc3?MeIpX z(IrO5%~pfafw&Zsbii;l1*Nh1oGM?Vk+R?bP7#PdYjvydE+7aO`6PgabLkyU{D&H` zXcQK_ZgKyGSdkOO%10Xejz!eWb$24_|4FWq_e8+gMlj9O2)d2CXb#e&5h@f8Or2^x zgGP~zC+Kv5OAKjhNJ@n=3ZEL%@c^GCFdm@BBUTUZ@%XQQ0B5K5JCR-{#h@rudb zA*b)Zk6+EHRu!Ys9!fdPBxQcKW|)moNd|oCXu2?ymoQqky%ddJ?y=reeukA^8;w}5 znx-jLRZ$d0J;pvXT^CT$7f0$^G#9sc9f{LKOCiQr`eH*{dkULXRaIMrMx8OYwU0JO zn|Mw|o6oY0JYQc3Gwlj!8<1)ySU&Bsw=K_IEz0)!G3pk!O-6$O^ZA^zJg<>olzbdP z9NxOE)6!@0$8qoSjO+E|!4uGh_StC(+rB>!@Q4=dd5ft1H;(mL%1s~8sC(V+Z`=Cm z`z$Qd9&AC9rBr$dc@8eeD6wAhz$CyzpO};r+ z4GbhO{B>2$h^kn@x+(_6xgP&oNro7OcIb{bg(IiXnn4iV03M^JR0K>$#?|C zivPd8Gy9PoITQQuiy)atWgUHYtF>c|W@ja>7AwOZ*lWvx;e}x={ODKzg?_dD=;OtZ zv9_SKu(EYnnw4gHrf0gl?#v{UWJLHOB9crdnU&SuAbclZ1nV3Dv4ZWf>|R;ZhB0Mlv~IJex7GHThRxBFp)D4$}t@7~g+_sVmZx zM~r{;4^Th&A75d~&~^K6py80UR+ME$Q`SOS_je_& z;A&sjgoawE;`mUT=nI}>$bP+tPk0f93a+`0+k3#jd03h@p;g& ztH$dvsHvKLkX{L~JwLy%EtQ-oug+4e%V{`|Vp?r@f}@lW^IOC)5T5}-VV&~<)jd=0 zE`zsaiI>KF4{W?E65d2<-oI@XqL&yY(TrzP>T-oy7KjzDi;{)0jAn;e3zhY1%GK2Z zL@^#8phjcrL^C-!rq&5{k@NYdzvghhz^r_@Zg%7g8ln>(e*aHNzyGJC&z^Jk=?fk| zxyOluuYdA0u3mmghVb?8K4tQ~AMorieoS)wfXUGetqkV!oWz*@6m=ZYM89~fgO>y?k%9#A)Sx%W7hC^fY~20BwB zXq8fy1`&-?+h=AM5_K3XRq$+!0gU~7yT7+vkJ?VaDlp=F^4Nj)+n$Gg#G^lCz-~B0 zM3?ctd%NqfuTw^!&9i-dmz`;YK>)yeQzgzj2IzE(Srr&sS+1Q!oVT-VX}{W%R^I{a z@JJd@0F{dl3HadG)-XLmQZ1?5pw{^@8Qim8h)|iihAa&$C%mMNQfNJ5G?`$< zV)C3iU*Ie_5p*KxGyzFK3_6g|M5UzT2~KHWuSjcbnlVaJu0Q|3RIgs}^1AYlA=!xI z557ya$jOe5$UgcQ{rDMYzxX9nosrv$uYUClvelBSmoJ%r`a2GO@O}R1-~3x9kM5IF 
zQ=eZ@EpryH&&j_!h1Hc$&y|fKNw9g18>J+N6GR}DG1G%%7Rgt8FXi6ENHK)o-U1ri z-dbg)WcwiY!P_A4Hitf&%{V_l-%z`C+Z^hnkNbREh-$F6l?0Vi>%iT&f$sbzq%E=5 zc~{%uy|b&n+}bnGIgG0}92DBW@47a~!FN(PziA^ClwnHg4WGjd49cf{juo=u~4@HR&j0 zwM?+KW-|oE5)=Byb!)%3ZDz4yY{Z*o!uGay&ocz=B8m=gTWP7Et+i`YPvhL9TA3DZ zn6%&v)wOM}af}2fJUhr1hH)lHqJ8FZTT!i+gmPO*nxe-eP@1x?Ns<(6OGJFNR;LB0 z73p-0&L(7u=K9OubNSofL2W&0)ENg4o?@@UgE5&5rwL+(oI*I!|m*POk64aSigi>@ka98zme9-c6-%OI9! z%xmZO^A@%0ZkK^U4X*6%z=1zSj;bsvbqbZ6eVi@ilG_83F3Db-02ob4L_t*k40(vP_jf}&3OV%rwf^FY-SPD~UbF*xV>xp# zwFnBIycXyCi_neFihXo#Kke&%grd3R5pvhnyU*d`wyx@NZTp-e+Jti#5%ofgx7KUX z7@bfV!}#cs#k>fqkpzsxX^jdJxG3>lcaPNDZ@a}ODUF^f(N3{7l{1=OFu}3}M(K#r(+@Fq$-&e6Oh5i}ra$@-7yt98y!wy- z4s&)%-8aVK4KMz~zvspEg1Wc>Yq6S;h*gn}$4IKsq!{hs)Utf>IYw!}7b2)sqb2d~ z-h0A+S-Y*lYgpeiI53sLWkHsf%_@>>8|6)+dbinud$iVTPr7f)HnlIb5#2ud@M3zU zi2R1U=-zQu0U<_PktaZ2YTC5M#w~HqU<~B>iY!f9zi-A%Md-5)_1Tl!f7{T{)?+t0TNMAxAq{*A=fTIW@O0?L63&N*G?rcGqp$hCcsitfoB_3yrS7)_nW?y7eC~C3p!y5K^tlR|V6PWAfEClq<+G4Bml31X2X& zymwrmBg|I+(IMJa-dj(6zHPTf34+qF$F3I(&&B+6zQ5RhcoVeGz^+B%oWa@PHMq6| zHS=CK;Mx}gs35&28R!h91#K(pqHMJVq+=$JpOK756c-j#*4}&WZ4M zWQWJ7dP()>6{j!0^g+hJ@rNH_$QX@}$iDM2$q#=>efkxrKmBj4&M!9v3k@V%Q5j2J zuCQNyPW1V8g!tat;H=LRC=%>s3ZmnogqjyNco90KsEYS&Gx8akZ&uXp8?yi*NKI_; zZ1Wt6wNZ<^t;)GcH8O;_+Bx#Z<1wq%iagH;#}|>!H-qdqq0e==4a4f0;W7{JIv%AI zWm&e2<8eQ>xypu^ILcMYY&Jv1m#w2>iJ!au4eeWOzpEmPHkLkj=m@>UU7ei;TN4f! z#s4Ts=#XxtyQD@+!|3jw2qPp$H%fPjgdn54yAf%SAtECtAcGN7QX3__e1`Wv_dDGC zJm;L>iTM%A5|KM?3P@0d9n;YQ)>h> zvIC;Z9CQc%k;dsUeSpEh2Hc-F43+(TqhG6J*crG0TvdRYU9!_Wc6KCu`V0}5Tob;r zh_737Oc`lvZ*t76@3$JO?~bs}^W176>$T!RzQ4P#?_T|NJCdK>@kvcf zV+oW1q}0v63b)I*x#>9ery?(q&`BGOTyTU0nFq%)GVnX>y z+%8TmyXVNTw4tq|<6m76C(L3vIGe!|To#`BKE$qf@CL|A>WNWCVDU4_WzTU7>JByN zeS83luJvCLJ-hmac;%C100dZT)1jGFk$hH6#_D8ybVuDLQ`;#E{?$3Uoz4isuJ__) zcfg3R2pZMem`A6|LViB!P@z{?R4JO>4)*nCQcei=(Y-@G)xF0=1oj%|x}5&yNxM54 zRA6FkxxUugnvVV{ee~-7KL}-fae-aS?P*M>!H;pV_RS_sGtY!VOb4xnn|?`1g0@*b z^aHe8OYDtJ;p?Tx2VqOjne4^fi2?~FI$g2pM>o;&w;sVKH@^4oTOs5I{QwmgH8-6? 
ziQu)cZI9$)ey!s#MSVAI2L$I2UA-*F^_Ny=<>C}tKTQHuVGYhqhRy{z`xHoa2Xdal ze(+!dS1ZCGU`h~MLt;fzewC}E#600uxQEqjym%m!MM&&VbNCX=<(PNR%AbW#R#T)YPyJ5Q72rk$YQ>UaO!5h97nG;eO$#H zF6YxMm}pc;%wtq&L|6j=u(0$Bm){TWivHE`n-Hc)`@zf4QuwyLYxl3ma(t_1kiqkQ zXOm{La5!c_zG2)IJ~YhUI1Ez8Ezvvy#t=O=BB760HXa*1*DW@A`4MT}ruR}nCJscb zNZM_RK9t*cXT2K$=W`SCUT|poW|oXQ{OMuMAcY6RLj>B}K8socCoTrR-}Ss`P$2*B z>D=B_d+x6;kGpOvpT%%aV0$<>Sx>?Ar%yV_Y;VQ-&=EdY=;{DfN&Cf5@ zCtZhP2X;>-I(Qn$;rx`LXo@38{+`FOQd|BuWJ?uq8P`|oGyUoO)9uEAFM>Pl4X10Y z^t`C;R;&BjVSyh%a5|aQR0~U6-wxkYrQnuaJLiPBGBxjNX|KeF{i^QW++E2UPhlWGvCns3kiBTp2k6+Qx>tww7>P9-fnSm4kIb5p|FLF~M7MTIDS&VVgw2iFP$bpMehe$AoXUR(#K@s@;zVRI(Ac?2!lY@V zKCb*6uafCbx9*McmZxy>cnbV>s3PBvC|@*JRIgu1M9H9)@`P7Iw<@6=ec@#tX)&WE z%(_6icCRB?u;sbnZVVes=9_4_+;<3lE9*jsKP6yp66=z@f?pt~xPb@OF?60V84}NZ z8k$-UhK9%RJ?jJ7Q)T6Na#Q;lOTWe(Jf2D_IM`6GX<>g=Sdx}HF&H>>dcl`j@@0v$ zHoAs?@Som2o{Fe6P3z^%Z1oh;D>}tYdrjqjYx^k<)>OKx`ep5!CdGF8J6)Hd9kC@C zDKekN#3Z#sKgg9Kl$+F%B~L4vXVX+>=trHuB$aKv=hb_nQClKG}3&*zR4R zC-sV@#P3!C+UxLqxTT~-c1BpR4YoN5;X4(NJz*@yZvoLG9kYJ% zhZ4NOpegB_N4O>A77(N{v;PnEdZ-PYP9S&g(eG; zf~Zcx?)L{#67n}zQ&*2BDKt%J-1IkTN0+70i;Z4>F+=Z-CRP%-h}ZOIsg3z>v8v1i zC*0jL#|Gm3uNs5e#-#&#TR2aa`-^##F0`nM^&YUzZO17S zKq?5{9vdtGI+gL?s#@nj+7{)4$%$tOLl|!*<`dw89L8u_YeTl|YVy|FO|}(fwRD?w zCX|@j1EfdOu2EhS9JscxOvuieB1cXFBHX^bW{q2&2hpz#HLd>JwbHt8?ALu!LV^GG z07^}Pv&VyO#fcBRNS)`d*Vn{%0ky;J)8A#O;jy+CMg+>OaS8ZiID1V3bmA6$XjYCj z8~@RKpJE+LPe&Du1xb-a>2D^a4f`z3H4o*azT3?zQANJ2|*@8FG(H zt2ddW)w0Pp^?>UrCI5C2iH5+MEv;D*Pi}H;8f~Hh`?+tmpD$)AZDKLQ#9o^35d>49 zNeE9ErKm@wRrzB}!3K>M!UWw#Yr2pZaJl$`mLY@yi>U)4$8GQYJ)@Zt!F;u zG)5uORJJwlLf?Qpzn=FZ+71l`zS@f*>s9(5vaQ^4T?*;GS@B9108f?E!??RF)88YD zC!}e{WN41KimdpjI4uS(1K?tRg+%5P-L z3+#&a$qqwNFT`cOND8ec`)P8}anV^(+KTgdUH&E9>&M?RdunZv&M@R!|zWqM}Pu)g<-cO)QU34XWnY z{2&^MuJF;fw(|+}CL@kDOK!Mm?2L{#Mgrdf`;rvfwW?6lqHoSUk=`itwODn(otI*% zEJr>?CMcKqnwbVM+K3S9&SOdVE)J*LNaaRsblez?XBM&RVm<>7uz2U^xSjM3iJO2s zPi=sLjDwQPGhD0(q3N+s^;BnXwI))~c!^~AP*}nN(hqWP*AMYPm3-2`1Efp1@)-=A 
zN8-{NZwy+!e58xZH(;7dl|+Va+-PED*WC0~s@B4_i~XVna#CJhsccw<9QXVg13LWd z00sBvfUc2{A6kr-il363I>we((q^wFd(Exkk2ITx8XWrW+-vVKYv)IVfrMj0k+Gr! z;fCP}6O@PrN%B^YxOg}t^P{ThgTJ^E(kgslzl$gCl)SX>*?)F7^6E&BNv?CiiwlOI zR6fpk+#SvI^-q?ji83rBRv({`1dH3fK4tI=8oHPk(w8ph7nHn_mmd|FrDbALlxCBm z8Gc}{o`_Gk=5Py?sr9Wiw7(9Pc0Aps-TVNOgYTUK7M9(#P2Tw+#cV$fn(}dCUqFx3 zBHqn#dR-IAz}PmQm6Io;(QtA4NGmQ)f!cz7t#M!fEk0UHdRx{7n#*8PaA;8Hf`^~Y z^OZdRAwdY?`XcB1r1j2jaKtQ0XJ?~FSk4ytg6Iu-QWa{+7QUnw!B{$$S2mawMjB{E zSshQQ7Wail*=F&$VEHw24A15(Ik}}YHTKkLgz!%FE%{&0d6lzInCh5->UJO&G9%B1 zT5Z^TA%5twqR#ORe4ds^`c*6DIiSAD3ueah=(aWk^ z<$um(>gNTB{au0eTznO-vmkv3+Mt2(bWhSh4*4yGz$7-=nvS0RE-z>gmd~b4lk&5- z^7hX5ukZvhdXRc?mU()&5it(MGTHc0`58wz@xUp5YDJMJX^DL_Df>W(;!nW)NMJJX z<>M29Y~H{^-Q)G~>a5@a6~meHA#&dO<_ogsPf;Nt8UqSy>ZMw(12kT_N%H*TDRkn& z@66ElH{SQc3=HY;RC60U{lf5+>TF3Xwu+JESd#9!+-3ZA2h1)q-SLfwYV1bOw~dwG ze_9r|_yRe>x0k&Ip{LKg?^n~LZqPz`gEbk$A?zKj@*z-#uhU9#Sc) zjz9m?(u}ldhO;}iwYc79b~FUtq{iOd2#%ym5Z)K}j97co=!uky)S1D~lax98xXF25 zI18vY|MB|Oi)khw7xv9@9Tkvd>fJ1F+|{3>sqa1e^TBonBNd@13gc`E?WFseHi zU(~3_POj)oPDsNHIYj`1b?*#q0Xrk0?Z(n;Wz(aH-5{`T=?He`>CpR8(WgWyritux zk`N7Tf(BnQ+4I{F!^J`p)rwK_*?|W@f{XK+)Eb`^_8|7f*M|T2m$^rK2>GlO&&!^6 ze10&7FWc*68Lr8RZMCBn_Ws^UE#fYaiOH1h`bKjOFBAD1Z{jn8?V{p^=!?bnoRdev zg79VnP8|hiMhJU#Q(vFL6i0=jzFQmA+)*F#@Vn*81!)F*y|Y{}=P;(>9LH8)keWDJ zxWrg&HSuSt>UMvdbb8M6irGi1!Zw{zmsUP3wXn=MXR)hRqHEtrjIx0@)aCT$^&1h# z>$&H7;kS^N7+ShNi?9zcTNc&5JPLs!^__d(Q1fJI>5U;Sc_$3Dywml$@WldUo^y*> zxh>kdKU}KG>1IggV|rC0o=G$F9H9%4Lqmi~py6Uge@V{AT^jw|LFGFzG&OS)Ny4TS z@p(tplVs!xsjA5=kb03O&r>wDqepv*Uv%x0q?`bti%Jj47;)RTngr8QK3|!Tw)LFv zDxO6isy&potp>5yWHHqmR^+JP2nR=-W~-_Pkb%`l=e#zMYNs2**YvE|wE`jPm+eIA z^};l5)vCVNRnDRScr~Ep807R=_=ri#I(FmYEv(wPGrCR0+&|nTLgmjiT}5yzhb)JlxJ=MRktQJoDby$!s4{3Y)?Y>C zTi^o|jLrsD>u8C;*>%m*Z64Yn>_?01S!YDveg>OP+i!ZQ?0jPZgNl_5`gPQP%=J+H zt!=D#PH*C})zo~CJ5*xKc;Hg8unByxhWpWR_pN50onpUI?PC=SYZS-=E|ejHzvLmQ#S|bz^8g!DC87Z-#+DUdY zO4iB!6@ja%Y+?9$0DLk^ZEjRr5ZG?7BL9JK{`p<>=J73UvfczIt$+B~)%w3DqJv)z 
zBB|=;3wg-&3FE0M^zL&*?$2RoyLSIL%+8)}!|t-rVE2H3hsfw3R3{M zYpT7w1KL$ZWGRL`e(gkV*_Qy_7!`>A$Nm2fk6K0Gxvzq>yZPtFWJ;)~Ie2w)x LD_PRELOAD=/usr/bin/libmimalloc.so myprogram +``` + +Notable aspects of the design include: + +- __small and consistent__: the library is about 8k LOC using simple and + consistent data structures. This makes it very suitable + to integrate and adapt in other projects. For runtime systems it + provides hooks for a monotonic _heartbeat_ and deferred freeing (for + bounded worst-case times with reference counting). +- __free list sharding__: instead of one big free list (per size class) we have + many smaller lists per "mimalloc page" which reduces fragmentation and + increases locality -- + things that are allocated close in time get allocated close in memory. + (A mimalloc page contains blocks of one size class and is usually 64KiB on a 64-bit system). +- __free list multi-sharding__: the big idea! Not only do we shard the free list + per mimalloc page, but for each page we have multiple free lists. In particular, there + is one list for thread-local `free` operations, and another one for concurrent `free` + operations. Free-ing from another thread can now be a single CAS without needing + sophisticated coordination between threads. Since there will be + thousands of separate free lists, contention is naturally distributed over the heap, + and the chance of contending on a single location will be low -- this is quite + similar to randomized algorithms like skip lists where adding + a random oracle removes the need for a more complex algorithm. +- __eager page reset__: when a "page" becomes empty (with increased chance + due to free list sharding) the memory is marked to the OS as unused ("reset" or "purged") + reducing (real) memory pressure and fragmentation, especially in long running + programs. +- __secure__: _mimalloc_ can be build in secure mode, adding guard pages, + randomized allocation, encrypted free lists, etc. 
to protect against various + heap vulnerabilities. The performance penalty is only around 5% on average + over our benchmarks. +- __first-class heaps__: efficiently create and use multiple heaps to allocate across different regions. + A heap can be destroyed at once instead of deallocating each object separately. +- __bounded__: it does not suffer from _blowup_ \[1\], has bounded worst-case allocation + times (_wcat_), bounded space overhead (~0.2% meta-data, with low internal fragmentation), + and has no internal points of contention using only atomic operations. +- __fast__: In our benchmarks (see [below](#performance)), + _mimalloc_ outperforms all other leading allocators (_jemalloc_, _tcmalloc_, _Hoard_, etc), + and usually uses less memory (up to 25% more in the worst case). A nice property + is that it does consistently well over a wide range of benchmarks. + +You can read more on the design of _mimalloc_ in the +[technical report](https://www.microsoft.com/en-us/research/publication/mimalloc-free-list-sharding-in-action) +which also has detailed benchmark results. + + +Further information: + +- \ref build +- \ref using +- \ref environment +- \ref overrides +- \ref bench +- \ref malloc +- \ref extended +- \ref aligned +- \ref heap +- \ref typed +- \ref analysis +- \ref options +- \ref posix +- \ref cpp + +*/ + + +/// \defgroup malloc Basic Allocation +/// The basic allocation interface. +/// \{ + + +/// Free previously allocated memory. +/// The pointer `p` must have been allocated before (or be \a NULL). +/// @param p pointer to free, or \a NULL. +void mi_free(void* p); + +/// Allocate \a size bytes. +/// @param size number of bytes to allocate. +/// @returns pointer to the allocated memory or \a NULL if out of memory. +/// Returns a unique pointer if called with \a size 0. +void* mi_malloc(size_t size); + +/// Allocate zero-initialized `size` bytes. +/// @param size The size in bytes. 
+/// @returns Pointer to newly allocated zero initialized memory, +/// or \a NULL if out of memory. +void* mi_zalloc(size_t size); + +/// Allocate zero-initialized \a count elements of \a size bytes. +/// @param count number of elements. +/// @param size size of each element. +/// @returns pointer to the allocated memory +/// of \a size*\a count bytes, or \a NULL if either out of memory +/// or when `count*size` overflows. +/// +/// Returns a unique pointer if called with either \a size or \a count of 0. +/// @see mi_zalloc() +void* mi_calloc(size_t count, size_t size); + +/// Re-allocate memory to \a newsize bytes. +/// @param p pointer to previously allocated memory (or \a NULL). +/// @param newsize the new required size in bytes. +/// @returns pointer to the re-allocated memory +/// of \a newsize bytes, or \a NULL if out of memory. +/// If \a NULL is returned, the pointer \a p is not freed. +/// Otherwise the original pointer is either freed or returned +/// as the reallocated result (in case it fits in-place with the +/// new size). If the pointer \a p is \a NULL, it behaves as +/// \a mi_malloc(\a newsize). If \a newsize is larger than the +/// original \a size allocated for \a p, the bytes after \a size +/// are uninitialized. +void* mi_realloc(void* p, size_t newsize); + +/// Re-allocate memory to \a count elements of \a size bytes, with extra memory initialized to zero. +/// @param p Pointer to a previously allocated block (or \a NULL). +/// @param count The number of elements. +/// @param size The size of each element. +/// @returns A pointer to a re-allocated block of \a count * \a size bytes, or \a NULL +/// if out of memory or if \a count * \a size overflows. +/// +/// If there is no overflow, it behaves exactly like `mi_rezalloc(p,count*size)`. +/// @see mi_reallocn() +/// @see [recallocarray()](http://man.openbsd.org/reallocarray) (on BSD). 
+void* mi_recalloc(void* p, size_t count, size_t size); + +/// Try to re-allocate memory to \a newsize bytes _in place_. +/// @param p pointer to previously allocated memory (or \a NULL). +/// @param newsize the new required size in bytes. +/// @returns pointer to the re-allocated memory +/// of \a newsize bytes (always equal to \a p), +/// or \a NULL if either out of memory or if +/// the memory could not be expanded in place. +/// If \a NULL is returned, the pointer \a p is not freed. +/// Otherwise the original pointer is returned +/// as the reallocated result since it fits in-place with the +/// new size. If \a newsize is larger than the +/// original \a size allocated for \a p, the bytes after \a size +/// are uninitialized. +void* mi_expand(void* p, size_t newsize); + +/// Allocate \a count elements of \a size bytes. +/// @param count The number of elements. +/// @param size The size of each element. +/// @returns A pointer to a block of \a count * \a size bytes, or \a NULL +/// if out of memory or if \a count * \a size overflows. +/// +/// If there is no overflow, it behaves exactly like `mi_malloc(count*size)`. +/// @see mi_calloc() +/// @see mi_zallocn() +void* mi_mallocn(size_t count, size_t size); + +/// Re-allocate memory to \a count elements of \a size bytes. +/// @param p Pointer to a previously allocated block (or \a NULL). +/// @param count The number of elements. +/// @param size The size of each element. +/// @returns A pointer to a re-allocated block of \a count * \a size bytes, or \a NULL +/// if out of memory or if \a count * \a size overflows. +/// +/// If there is no overflow, it behaves exactly like `mi_realloc(p,count*size)`. +/// @see [reallocarray()]() (on BSD) +void* mi_reallocn(void* p, size_t count, size_t size); + +/// Re-allocate memory to \a newsize bytes, +/// @param p pointer to previously allocated memory (or \a NULL). +/// @param newsize the new required size in bytes. 
+/// @returns pointer to the re-allocated memory +/// of \a newsize bytes, or \a NULL if out of memory. +/// +/// In contrast to mi_realloc(), if \a NULL is returned, the original pointer +/// \a p is freed (if it was not \a NULL itself). +/// Otherwise the original pointer is either freed or returned +/// as the reallocated result (in case it fits in-place with the +/// new size). If the pointer \a p is \a NULL, it behaves as +/// \a mi_malloc(\a newsize). If \a newsize is larger than the +/// original \a size allocated for \a p, the bytes after \a size +/// are uninitialized. +/// +/// @see [reallocf](https://www.freebsd.org/cgi/man.cgi?query=reallocf) (on BSD) +void* mi_reallocf(void* p, size_t newsize); + + +/// Allocate and duplicate a string. +/// @param s string to duplicate (or \a NULL). +/// @returns a pointer to newly allocated memory initialized +/// to string \a s, or \a NULL if either out of memory or if +/// \a s is \a NULL. +/// +/// Replacement for the standard [strdup()](http://pubs.opengroup.org/onlinepubs/9699919799/functions/strdup.html) +/// such that mi_free() can be used on the returned result. +char* mi_strdup(const char* s); + +/// Allocate and duplicate a string up to \a n bytes. +/// @param s string to duplicate (or \a NULL). +/// @param n maximum number of bytes to copy (excluding the terminating zero). +/// @returns a pointer to newly allocated memory initialized +/// to string \a s up to the first \a n bytes (and always zero terminated), +/// or \a NULL if either out of memory or if \a s is \a NULL. +/// +/// Replacement for the standard [strndup()](http://pubs.opengroup.org/onlinepubs/9699919799/functions/strndup.html) +/// such that mi_free() can be used on the returned result. +char* mi_strndup(const char* s, size_t n); + +/// Resolve a file path name. +/// @param fname File name. +/// @param resolved_name Should be \a NULL (but can also point to a buffer +/// of at least \a PATH_MAX bytes). 
+/// @returns If successful a pointer to the resolved absolute file name, or +/// \a NULL on failure (with \a errno set to the error code). +/// +/// If \a resolved_name was \a NULL, the returned result should be freed with +/// mi_free(). +/// +/// Replacement for the standard [realpath()](http://pubs.opengroup.org/onlinepubs/9699919799/functions/realpath.html) +/// such that mi_free() can be used on the returned result (if \a resolved_name was \a NULL). +char* mi_realpath(const char* fname, char* resolved_name); + +/// \} + +// ------------------------------------------------------ +// Extended functionality +// ------------------------------------------------------ + +/// \defgroup extended Extended Functions +/// Extended functionality. +/// \{ + +/// Maximum size allowed for small allocations in +/// #mi_malloc_small and #mi_zalloc_small (usually `128*sizeof(void*)` (= 1KB on 64-bit systems)) +#define MI_SMALL_SIZE_MAX (128*sizeof(void*)) + +/// Allocate a small object. +/// @param size The size in bytes, can be at most #MI_SMALL_SIZE_MAX. +/// @returns a pointer to newly allocated memory of at least \a size +/// bytes, or \a NULL if out of memory. +/// This function is meant for use in run-time systems for best +/// performance and does not check if \a size was indeed small -- use +/// with care! +void* mi_malloc_small(size_t size); + +/// Allocate a zero initialized small object. +/// @param size The size in bytes, can be at most #MI_SMALL_SIZE_MAX. +/// @returns a pointer to newly allocated zero-initialized memory of at +/// least \a size bytes, or \a NULL if out of memory. +/// This function is meant for use in run-time systems for best +/// performance and does not check if \a size was indeed small -- use +/// with care! +void* mi_zalloc_small(size_t size); + +/// Return the available bytes in a memory block. 
+/// @param p Pointer to previously allocated memory (or \a NULL) +/// @returns Returns the available bytes in the memory block, or +/// 0 if \a p was \a NULL. +/// +/// The returned size can be +/// used to call \a mi_expand successfully. +/// The returned size is always at least equal to the +/// allocated size of \a p, and, in the current design, +/// should be less than 16.7% more. +/// +/// @see [_msize](https://docs.microsoft.com/en-us/cpp/c-runtime-library/reference/msize?view=vs-2017) (Windows) +/// @see [malloc_usable_size](http://man7.org/linux/man-pages/man3/malloc_usable_size.3.html) (Linux) +/// @see mi_good_size() +size_t mi_usable_size(void* p); + +/// Return the used allocation size. +/// @param size The minimal required size in bytes. +/// @returns the size `n` that will be allocated, where `n >= size`. +/// +/// Generally, `mi_usable_size(mi_malloc(size)) == mi_good_size(size)`. +/// This can be used to reduce internal wasted space when +/// allocating buffers for example. +/// +/// @see mi_usable_size() +size_t mi_good_size(size_t size); + +/// Eagerly free memory. +/// @param force If \a true, aggressively return memory to the OS (can be expensive!) +/// +/// Regular code should not have to call this function. It can be beneficial +/// in very narrow circumstances; in particular, when a long running thread +/// allocates a lot of blocks that are freed by other threads it may improve +/// resource usage by calling this every once in a while. +void mi_collect(bool force); + +/// Deprecated +/// @param out Ignored, outputs to the registered output function or stderr by default. +/// +/// Most detailed when using a debug build. +void mi_stats_print(void* out); + +/// Print the main statistics. +/// @param out An output function or \a NULL for the default. +/// @param arg Optional argument passed to \a out (if not \a NULL) +/// +/// Most detailed when using a debug build. 
+void mi_stats_print_out(mi_output_fun* out, void* arg); + +/// Reset statistics. +void mi_stats_reset(void); + +/// Merge thread local statistics with the main statistics and reset. +void mi_stats_merge(void); + +/// Initialize mimalloc on a thread. +/// Should not be used as on most systems (pthreads, windows) this is done +/// automatically. +void mi_thread_init(void); + +/// Uninitialize mimalloc on a thread. +/// Should not be used as on most systems (pthreads, windows) this is done +/// automatically. Ensures that any memory that is not freed yet (but will +/// be freed by other threads in the future) is properly handled. +void mi_thread_done(void); + +/// Print out heap statistics for this thread. +/// @param out An output function or \a NULL for the default. +/// @param arg Optional argument passed to \a out (if not \a NULL) +/// +/// Most detailed when using a debug build. +void mi_thread_stats_print_out(mi_output_fun* out, void* arg); + +/// Type of deferred free functions. +/// @param force If \a true all outstanding items should be freed. +/// @param heartbeat A monotonically increasing count. +/// @param arg Argument that was passed at registration to hold extra state. +/// +/// @see mi_register_deferred_free +typedef void (mi_deferred_free_fun)(bool force, unsigned long long heartbeat, void* arg); + +/// Register a deferred free function. +/// @param deferred_free Address of a deferred free-ing function or \a NULL to unregister. +/// @param arg Argument that will be passed on to the deferred free function. +/// +/// Some runtime systems use deferred free-ing, for example when using +/// reference counting to limit the worst case free time. +/// Such systems can register (re-entrant) deferred free function +/// to free more memory on demand. When the \a force parameter is +/// \a true all possible memory should be freed. 
+/// The per-thread \a heartbeat parameter is monotonically increasing +/// and guaranteed to be deterministic if the program allocates +/// deterministically. The \a deferred_free function is guaranteed +/// to be called deterministically after some number of allocations +/// (regardless of freeing or available free memory). +/// At most one \a deferred_free function can be active. +void mi_register_deferred_free(mi_deferred_free_fun* deferred_free, void* arg); + +/// Type of output functions. +/// @param msg Message to output. +/// @param arg Argument that was passed at registration to hold extra state. +/// +/// @see mi_register_output() +typedef void (mi_output_fun)(const char* msg, void* arg); + +/// Register an output function. +/// @param out The output function, use `NULL` to output to stderr. +/// @param arg Argument that will be passed on to the output function. +/// +/// The `out` function is called to output any information from mimalloc, +/// like verbose or warning messages. +void mi_register_output(mi_output_fun* out, void* arg); + +/// Type of error callback functions. +/// @param err Error code (see mi_register_error() for a complete list). +/// @param arg Argument that was passed at registration to hold extra state. +/// +/// @see mi_register_error() +typedef void (mi_error_fun)(int err, void* arg); + +/// Register an error callback function. +/// @param errfun The error function that is called on an error (use \a NULL for default) +/// @param arg Extra argument that will be passed on to the error function. +/// +/// The \a errfun function is called on an error in mimalloc after emitting +/// an error message (through the output function). It as always legal to just +/// return from the \a errfun function in which case allocation functions generally +/// return \a NULL or ignore the condition. The default function only calls abort() +/// when compiled in secure mode with an \a EFAULT error. 
The possible error +/// codes are: +/// * \a EAGAIN: Double free was detected (only in debug and secure mode). +/// * \a EFAULT: Corrupted free list or meta-data was detected (only in debug and secure mode). +/// * \a ENOMEM: Not enough memory available to satisfy the request. +/// * \a EOVERFLOW: Too large a request, for example in mi_calloc(), the \a count and \a size parameters are too large. +/// * \a EINVAL: Trying to free or re-allocate an invalid pointer. +void mi_register_error(mi_error_fun* errfun, void* arg); + +/// Is a pointer part of our heap? +/// @param p The pointer to check. +/// @returns \a true if this is a pointer into our heap. +/// This function is relatively fast. +bool mi_is_in_heap_region(const void* p); + +/// Reserve OS memory for use by mimalloc. Reserved areas are used +/// before allocating from the OS again. By reserving a large area upfront, +/// allocation can be more efficient, and can be better managed on systems +/// without `mmap`/`VirtualAlloc` (like WASM for example). +/// @param size The size to reserve. +/// @param commit Commit the memory upfront. +/// @param allow_large Allow large OS pages (2MiB) to be used? +/// @return \a 0 if successful, and an error code otherwise (e.g. `ENOMEM`). +int mi_reserve_os_memory(size_t size, bool commit, bool allow_large); + +/// Manage a particular memory area for use by mimalloc. +/// This is just like `mi_reserve_os_memory` except that the area should already be +/// allocated in some manner and available for use my mimalloc. +/// @param start Start of the memory area +/// @param size The size of the memory area. +/// @param commit Is the area already committed? +/// @param is_large Does it consist of large OS pages? Set this to \a true as well for memory +/// that should not be decommitted or protected (like rdma etc.) +/// @param is_zero Does the area consists of zero's? +/// @param numa_node Possible associated numa node or `-1`. 
+/// @return \a true if successful, and \a false on error. +bool mi_manage_os_memory(void* start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node); + +/// Reserve \a pages of huge OS pages (1GiB) evenly divided over \a numa_nodes nodes, +/// but stops after at most `timeout_msecs` seconds. +/// @param pages The number of 1GiB pages to reserve. +/// @param numa_nodes The number of nodes do evenly divide the pages over, or 0 for using the actual number of NUMA nodes. +/// @param timeout_msecs Maximum number of milli-seconds to try reserving, or 0 for no timeout. +/// @returns 0 if successful, \a ENOMEM if running out of memory, or \a ETIMEDOUT if timed out. +/// +/// The reserved memory is used by mimalloc to satisfy allocations. +/// May quit before \a timeout_msecs are expired if it estimates it will take more than +/// 1.5 times \a timeout_msecs. The time limit is needed because on some operating systems +/// it can take a long time to reserve contiguous memory if the physical memory is +/// fragmented. +int mi_reserve_huge_os_pages_interleave(size_t pages, size_t numa_nodes, size_t timeout_msecs); + +/// Reserve \a pages of huge OS pages (1GiB) at a specific \a numa_node, +/// but stops after at most `timeout_msecs` seconds. +/// @param pages The number of 1GiB pages to reserve. +/// @param numa_node The NUMA node where the memory is reserved (start at 0). +/// @param timeout_msecs Maximum number of milli-seconds to try reserving, or 0 for no timeout. +/// @returns 0 if successful, \a ENOMEM if running out of memory, or \a ETIMEDOUT if timed out. +/// +/// The reserved memory is used by mimalloc to satisfy allocations. +/// May quit before \a timeout_msecs are expired if it estimates it will take more than +/// 1.5 times \a timeout_msecs. The time limit is needed because on some operating systems +/// it can take a long time to reserve contiguous memory if the physical memory is +/// fragmented. 
+int mi_reserve_huge_os_pages_at(size_t pages, int numa_node, size_t timeout_msecs); + + +/// Is the C runtime \a malloc API redirected? +/// @returns \a true if all malloc API calls are redirected to mimalloc. +/// +/// Currently only used on Windows. +bool mi_is_redirected(); + +/// Return process information (time and memory usage). +/// @param elapsed_msecs Optional. Elapsed wall-clock time of the process in milli-seconds. +/// @param user_msecs Optional. User time in milli-seconds (as the sum over all threads). +/// @param system_msecs Optional. System time in milli-seconds. +/// @param current_rss Optional. Current working set size (touched pages). +/// @param peak_rss Optional. Peak working set size (touched pages). +/// @param current_commit Optional. Current committed memory (backed by the page file). +/// @param peak_commit Optional. Peak committed memory (backed by the page file). +/// @param page_faults Optional. Count of hard page faults. +/// +/// The \a current_rss is precise on Windows and MacOSX; other systems estimate +/// this using \a current_commit. The \a commit is precise on Windows but estimated +/// on other systems as the amount of read/write accessible memory reserved by mimalloc. +void mi_process_info(size_t* elapsed_msecs, size_t* user_msecs, size_t* system_msecs, size_t* current_rss, size_t* peak_rss, size_t* current_commit, size_t* peak_commit, size_t* page_faults); + +/// \} + +// ------------------------------------------------------ +// Aligned allocation +// ------------------------------------------------------ + +/// \defgroup aligned Aligned Allocation +/// +/// Allocating aligned memory blocks. +/// +/// \{ + +/// The maximum supported alignment size (currently 1MiB). +#define MI_BLOCK_ALIGNMENT_MAX (1024*1024UL) + +/// Allocate \a size bytes aligned by \a alignment. +/// @param size number of bytes to allocate. +/// @param alignment the minimal alignment of the allocated memory. Must be less than #MI_BLOCK_ALIGNMENT_MAX. 
+/// @returns pointer to the allocated memory or \a NULL if out of memory. +/// The returned pointer is aligned by \a alignment, i.e. +/// `(uintptr_t)p % alignment == 0`. +/// +/// Returns a unique pointer if called with \a size 0. +/// @see [_aligned_malloc](https://docs.microsoft.com/en-us/cpp/c-runtime-library/reference/aligned-malloc?view=vs-2017) (on Windows) +/// @see [aligned_alloc](http://man.openbsd.org/reallocarray) (on BSD, with switched arguments!) +/// @see [posix_memalign](https://linux.die.net/man/3/posix_memalign) (on Posix, with switched arguments!) +/// @see [memalign](https://linux.die.net/man/3/posix_memalign) (on Linux, with switched arguments!) +void* mi_malloc_aligned(size_t size, size_t alignment); +void* mi_zalloc_aligned(size_t size, size_t alignment); +void* mi_calloc_aligned(size_t count, size_t size, size_t alignment); +void* mi_realloc_aligned(void* p, size_t newsize, size_t alignment); + +/// Allocate \a size bytes aligned by \a alignment at a specified \a offset. +/// @param size number of bytes to allocate. +/// @param alignment the minimal alignment of the allocated memory at \a offset. +/// @param offset the offset that should be aligned. +/// @returns pointer to the allocated memory or \a NULL if out of memory. +/// The returned pointer is aligned by \a alignment at \a offset, i.e. +/// `((uintptr_t)p + offset) % alignment == 0`. +/// +/// Returns a unique pointer if called with \a size 0. 
+/// @see [_aligned_offset_malloc](https://docs.microsoft.com/en-us/cpp/c-runtime-library/reference/aligned-offset-malloc?view=vs-2017) (on Windows) +void* mi_malloc_aligned_at(size_t size, size_t alignment, size_t offset); +void* mi_zalloc_aligned_at(size_t size, size_t alignment, size_t offset); +void* mi_calloc_aligned_at(size_t count, size_t size, size_t alignment, size_t offset); +void* mi_realloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset); + +/// \} + +/// \defgroup heap Heap Allocation +/// +/// First-class heaps that can be destroyed in one go. +/// +/// \{ + +/// Type of first-class heaps. +/// A heap can only be used for allocation in +/// the thread that created this heap! Any allocated +/// blocks can be freed or reallocated by any other thread though. +struct mi_heap_s; + +/// Type of first-class heaps. +/// A heap can only be used for (re)allocation in +/// the thread that created this heap! Any allocated +/// blocks can be freed by any other thread though. +typedef struct mi_heap_s mi_heap_t; + +/// Create a new heap that can be used for allocation. +mi_heap_t* mi_heap_new(); + +/// Delete a previously allocated heap. +/// This will release resources and migrate any +/// still allocated blocks in this heap (efficiently) +/// to the default heap. +/// +/// If \a heap is the default heap, the default +/// heap is set to the backing heap. +void mi_heap_delete(mi_heap_t* heap); + +/// Destroy a heap, freeing all its still allocated blocks. +/// Use with care as this will free all blocks still +/// allocated in the heap. However, this can be a very +/// efficient way to free all heap memory in one go. +/// +/// If \a heap is the default heap, the default +/// heap is set to the backing heap. +void mi_heap_destroy(mi_heap_t* heap); + +/// Set the default heap to use for mi_malloc() et al. +/// @param heap The new default heap. +/// @returns The previous default heap. 
+mi_heap_t* mi_heap_set_default(mi_heap_t* heap); + +/// Get the default heap that is used for mi_malloc() et al. +/// @returns The current default heap. +mi_heap_t* mi_heap_get_default(); + +/// Get the backing heap. +/// The _backing_ heap is the initial default heap for +/// a thread and always available for allocations. +/// It cannot be destroyed or deleted +/// except by exiting the thread. +mi_heap_t* mi_heap_get_backing(); + +/// Release outstanding resources in a specific heap. +void mi_heap_collect(mi_heap_t* heap, bool force); + +/// Allocate in a specific heap. +/// @see mi_malloc() +void* mi_heap_malloc(mi_heap_t* heap, size_t size); + +/// Allocate a small object in a specific heap. +/// \a size must be smaller or equal to MI_SMALL_SIZE_MAX(). +/// @see mi_malloc() +void* mi_heap_malloc_small(mi_heap_t* heap, size_t size); + +/// Allocate zero-initialized in a specific heap. +/// @see mi_zalloc() +void* mi_heap_zalloc(mi_heap_t* heap, size_t size); + +/// Allocate \a count zero-initialized elements in a specific heap. +/// @see mi_calloc() +void* mi_heap_calloc(mi_heap_t* heap, size_t count, size_t size); + +/// Allocate \a count elements in a specific heap. +/// @see mi_mallocn() +void* mi_heap_mallocn(mi_heap_t* heap, size_t count, size_t size); + +/// Duplicate a string in a specific heap. +/// @see mi_strdup() +char* mi_heap_strdup(mi_heap_t* heap, const char* s); + +/// Duplicate a string of at most length \a n in a specific heap. +/// @see mi_strndup() +char* mi_heap_strndup(mi_heap_t* heap, const char* s, size_t n); + +/// Resolve a file path name using a specific \a heap to allocate the result. 
+/// @see mi_realpath() +char* mi_heap_realpath(mi_heap_t* heap, const char* fname, char* resolved_name); + +void* mi_heap_realloc(mi_heap_t* heap, void* p, size_t newsize); +void* mi_heap_reallocn(mi_heap_t* heap, void* p, size_t count, size_t size); +void* mi_heap_reallocf(mi_heap_t* heap, void* p, size_t newsize); + +void* mi_heap_malloc_aligned(mi_heap_t* heap, size_t size, size_t alignment); +void* mi_heap_malloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset); +void* mi_heap_zalloc_aligned(mi_heap_t* heap, size_t size, size_t alignment); +void* mi_heap_zalloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset); +void* mi_heap_calloc_aligned(mi_heap_t* heap, size_t count, size_t size, size_t alignment); +void* mi_heap_calloc_aligned_at(mi_heap_t* heap, size_t count, size_t size, size_t alignment, size_t offset); +void* mi_heap_realloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment); +void* mi_heap_realloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset); + +/// \} + + +/// \defgroup zeroinit Zero initialized re-allocation +/// +/// The zero-initialized re-allocations are only valid on memory that was +/// originally allocated with zero initialization too. +/// e.g. `mi_calloc`, `mi_zalloc`, `mi_zalloc_aligned` etc. 
+/// see +/// +/// \{ + +void* mi_rezalloc(void* p, size_t newsize); +void* mi_recalloc(void* p, size_t newcount, size_t size) ; + +void* mi_rezalloc_aligned(void* p, size_t newsize, size_t alignment); +void* mi_rezalloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset); +void* mi_recalloc_aligned(void* p, size_t newcount, size_t size, size_t alignment); +void* mi_recalloc_aligned_at(void* p, size_t newcount, size_t size, size_t alignment, size_t offset); + +void* mi_heap_rezalloc(mi_heap_t* heap, void* p, size_t newsize); +void* mi_heap_recalloc(mi_heap_t* heap, void* p, size_t newcount, size_t size); + +void* mi_heap_rezalloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment); +void* mi_heap_rezalloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset); +void* mi_heap_recalloc_aligned(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment); +void* mi_heap_recalloc_aligned_at(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment, size_t offset); + +/// \} + +/// \defgroup typed Typed Macros +/// +/// Typed allocation macros. For example: +/// ``` +/// int* p = mi_malloc_tp(int) +/// ``` +/// +/// \{ + +/// Allocate a block of type \a tp. +/// @param tp The type of the block to allocate. +/// @returns A pointer to an object of type \a tp, or +/// \a NULL if out of memory. +/// +/// **Example:** +/// ``` +/// int* p = mi_malloc_tp(int) +/// ``` +/// +/// @see mi_malloc() +#define mi_malloc_tp(tp) ((tp*)mi_malloc(sizeof(tp))) + +/// Allocate a zero-initialized block of type \a tp. +#define mi_zalloc_tp(tp) ((tp*)mi_zalloc(sizeof(tp))) + +/// Allocate \a count zero-initialized blocks of type \a tp. +#define mi_calloc_tp(tp,count) ((tp*)mi_calloc(count,sizeof(tp))) + +/// Allocate \a count blocks of type \a tp. +#define mi_mallocn_tp(tp,count) ((tp*)mi_mallocn(count,sizeof(tp))) + +/// Re-allocate to \a count blocks of type \a tp. 
+#define mi_reallocn_tp(p,tp,count) ((tp*)mi_reallocn(p,count,sizeof(tp))) + +/// Allocate a block of type \a tp in a heap \a hp. +#define mi_heap_malloc_tp(hp,tp) ((tp*)mi_heap_malloc(hp,sizeof(tp))) + +/// Allocate a zero-initialized block of type \a tp in a heap \a hp. +#define mi_heap_zalloc_tp(hp,tp) ((tp*)mi_heap_zalloc(hp,sizeof(tp))) + +/// Allocate \a count zero-initialized blocks of type \a tp in a heap \a hp. +#define mi_heap_calloc_tp(hp,tp,count) ((tp*)mi_heap_calloc(hp,count,sizeof(tp))) + +/// Allocate \a count blocks of type \a tp in a heap \a hp. +#define mi_heap_mallocn_tp(hp,tp,count) ((tp*)mi_heap_mallocn(hp,count,sizeof(tp))) + +/// Re-allocate to \a count blocks of type \a tp in a heap \a hp. +#define mi_heap_reallocn_tp(hp,p,tp,count) ((tp*)mi_heap_reallocn(p,count,sizeof(tp))) + +/// Re-allocate to \a count zero initialized blocks of type \a tp in a heap \a hp. +#define mi_heap_recalloc_tp(hp,p,tp,count) ((tp*)mi_heap_recalloc(p,count,sizeof(tp))) + +/// \} + +/// \defgroup analysis Heap Introspection +/// +/// Inspect the heap at runtime. +/// +/// \{ + +/// Does a heap contain a pointer to a previously allocated block? +/// @param heap The heap. +/// @param p Pointer to a previously allocated block (in any heap)-- cannot be some +/// random pointer! +/// @returns \a true if the block pointed to by \a p is in the \a heap. +/// @see mi_heap_check_owned() +bool mi_heap_contains_block(mi_heap_t* heap, const void* p); + +/// Check safely if any pointer is part of a heap. +/// @param heap The heap. +/// @param p Any pointer -- not required to be previously allocated by us. +/// @returns \a true if \a p points to a block in \a heap. +/// +/// Note: expensive function, linear in the pages in the heap. +/// @see mi_heap_contains_block() +/// @see mi_heap_get_default() +bool mi_heap_check_owned(mi_heap_t* heap, const void* p); + +/// Check safely if any pointer is part of the default heap of this thread. 
+/// @param p Any pointer -- not required to be previously allocated by us. +/// @returns \a true if \a p points to a block in default heap of this thread. +/// +/// Note: expensive function, linear in the pages in the heap. +/// @see mi_heap_contains_block() +/// @see mi_heap_get_default() +bool mi_check_owned(const void* p); + +/// An area of heap space contains blocks of a single size. +/// The bytes in freed blocks are `committed - used`. +typedef struct mi_heap_area_s { + void* blocks; ///< start of the area containing heap blocks + size_t reserved; ///< bytes reserved for this area + size_t committed; ///< current committed bytes of this area + size_t used; ///< bytes in use by allocated blocks + size_t block_size; ///< size in bytes of one block +} mi_heap_area_t; + +/// Visitor function passed to mi_heap_visit_blocks() +/// @returns \a true if ok, \a false to stop visiting (i.e. break) +/// +/// This function is always first called for every \a area +/// with \a block as a \a NULL pointer. If \a visit_all_blocks +/// was \a true, the function is then called for every allocated +/// block in that area. +typedef bool (mi_block_visit_fun)(const mi_heap_t* heap, const mi_heap_area_t* area, void* block, size_t block_size, void* arg); + +/// Visit all areas and blocks in a heap. +/// @param heap The heap to visit. +/// @param visit_all_blocks If \a true visits all allocated blocks, otherwise +/// \a visitor is only called for every heap area. +/// @param visitor This function is called for every area in the heap +/// (with \a block as \a NULL). If \a visit_all_blocks is +/// \a true, \a visitor is also called for every allocated +/// block in every area (with `block!=NULL`). +/// return \a false from this function to stop visiting early. +/// @param arg Extra argument passed to \a visitor. +/// @returns \a true if all areas and blocks were visited. 
+bool mi_heap_visit_blocks(const mi_heap_t* heap, bool visit_all_blocks, mi_block_visit_fun* visitor, void* arg); + +/// \} + +/// \defgroup options Runtime Options +/// +/// Set runtime behavior. +/// +/// \{ + +/// Runtime options. +typedef enum mi_option_e { + // stable options + mi_option_show_errors, ///< Print error messages to `stderr`. + mi_option_show_stats, ///< Print statistics to `stderr` when the program is done. + mi_option_verbose, ///< Print verbose messages to `stderr`. + + // the following options are experimental + mi_option_eager_commit, ///< Eagerly commit segments (4MiB) (enabled by default). + mi_option_large_os_pages, ///< Use large OS pages (2MiB in size) if possible + mi_option_reserve_huge_os_pages, ///< The number of huge OS pages (1GiB in size) to reserve at the start of the program. + mi_option_reserve_huge_os_pages_at, ///< Reserve huge OS pages at node N. + mi_option_reserve_os_memory, ///< Reserve specified amount of OS memory at startup, e.g. "1g" or "512m". + mi_option_segment_cache, ///< The number of segments per thread to keep cached (0). + mi_option_page_reset, ///< Reset page memory after \a mi_option_reset_delay milliseconds when it becomes free. + mi_option_abandoned_page_reset, //< Reset free page memory when a thread terminates. + mi_option_use_numa_nodes, ///< Pretend there are at most N NUMA nodes; Use 0 to use the actual detected NUMA nodes at runtime. + mi_option_eager_commit_delay, ///< the first N segments per thread are not eagerly committed (=1). 
+ mi_option_os_tag, ///< OS tag to assign to mimalloc'd memory + mi_option_limit_os_alloc, ///< If set to 1, do not use OS memory for allocation (but only pre-reserved arenas) + + // v1.x specific options + mi_option_eager_region_commit, ///< Eagerly commit large (256MiB) memory regions (enabled by default, except on Windows) + mi_option_segment_reset, ///< Experimental + mi_option_reset_delay, ///< Delay in milli-seconds before resetting a page (100ms by default) + mi_option_purge_decommits, ///< Experimental + + // v2.x specific options + mi_option_allow_purge, ///< Enable decommitting memory (=on) + mi_option_purge_delay, ///< Decommit page memory after N milli-seconds delay (25ms). + mi_option_segment_purge_delay, ///< Decommit large segment memory after N milli-seconds delay (500ms). + + _mi_option_last +} mi_option_t; + + +bool mi_option_is_enabled(mi_option_t option); +void mi_option_enable(mi_option_t option); +void mi_option_disable(mi_option_t option); +void mi_option_set_enabled(mi_option_t option, bool enable); +void mi_option_set_enabled_default(mi_option_t option, bool enable); + +long mi_option_get(mi_option_t option); +void mi_option_set(mi_option_t option, long value); +void mi_option_set_default(mi_option_t option, long value); + + +/// \} + +/// \defgroup posix Posix +/// +/// `mi_` prefixed implementations of various Posix, Unix, and C++ allocation functions. +/// Defined for convenience as all redirect to the regular mimalloc API. +/// +/// \{ + +void* mi_recalloc(void* p, size_t count, size_t size); +size_t mi_malloc_size(const void* p); +size_t mi_malloc_usable_size(const void *p); + +/// Just as `free` but also checks if the pointer `p` belongs to our heap. 
+void mi_cfree(void* p);
+
+int mi_posix_memalign(void** p, size_t alignment, size_t size);
+int mi__posix_memalign(void** p, size_t alignment, size_t size);
+void* mi_memalign(size_t alignment, size_t size);
+void* mi_valloc(size_t size);
+
+void* mi_pvalloc(size_t size);
+void* mi_aligned_alloc(size_t alignment, size_t size);
+
+/// Corresponds to [reallocarray](https://www.freebsd.org/cgi/man.cgi?query=reallocarray&sektion=3&manpath=freebsd-release-ports)
+/// in FreeBSD.
+void* mi_reallocarray(void* p, size_t count, size_t size);
+
+/// Corresponds to [reallocarr](https://man.netbsd.org/reallocarr.3) in NetBSD.
+int mi_reallocarr(void* p, size_t count, size_t size);
+
+void mi_free_size(void* p, size_t size);
+void mi_free_size_aligned(void* p, size_t size, size_t alignment);
+void mi_free_aligned(void* p, size_t alignment);
+
+/// \}
+
+/// \defgroup cpp C++ wrappers
+///
+/// `mi_` prefixed implementations of various allocation functions
+/// that use C++ semantics on out-of-memory, generally calling
+/// `std::get_new_handler` and raising a `std::bad_alloc` exception on failure.
+///
+/// Note: use the `mimalloc-new-delete.h` header to override the \a new
+/// and \a delete operators globally. The wrappers here are mostly
+/// for convenience for library writers that need to interface with
+/// mimalloc from C++.
+///
+/// \{
+
+/// like mi_malloc(), but when out of memory, use `std::get_new_handler` and raise `std::bad_alloc` exception on failure.
+void* mi_new(std::size_t n) noexcept(false);
+
+/// like mi_mallocn(), but when out of memory, use `std::get_new_handler` and raise `std::bad_alloc` exception on failure.
+void* mi_new_n(size_t count, size_t size) noexcept(false);
+
+/// like mi_malloc_aligned(), but when out of memory, use `std::get_new_handler` and raise `std::bad_alloc` exception on failure.
+void* mi_new_aligned(std::size_t n, std::align_val_t alignment) noexcept(false);
+
+/// like `mi_malloc`, but when out of memory, use `std::get_new_handler` but return \a NULL on failure.
+void* mi_new_nothrow(size_t n);
+
+/// like `mi_malloc_aligned`, but when out of memory, use `std::get_new_handler` but return \a NULL on failure.
+void* mi_new_aligned_nothrow(size_t n, size_t alignment);
+
+/// like mi_realloc(), but when out of memory, use `std::get_new_handler` and raise `std::bad_alloc` exception on failure.
+void* mi_new_realloc(void* p, size_t newsize);
+
+/// like mi_reallocn(), but when out of memory, use `std::get_new_handler` and raise `std::bad_alloc` exception on failure.
+void* mi_new_reallocn(void* p, size_t newcount, size_t size);
+
+/// \a std::allocator implementation for mimalloc for use in STL containers.
+/// For example:
+/// ```
+/// std::vector<int, mi_stl_allocator<int> > vec;
+/// vec.push_back(1);
+/// vec.pop_back();
+/// ```
+template<class T> struct mi_stl_allocator { }
+
+/// \}
+
+/*! \page build Building
+
+Checkout the sources from GitHub:
+```
+git clone https://github.com/microsoft/mimalloc
+```
+
+## Windows
+
+Open `ide/vs2019/mimalloc.sln` in Visual Studio 2019 and build (or `ide/vs2017/mimalloc.sln`).
+The `mimalloc` project builds a static library (in `out/msvc-x64`), while the
+`mimalloc-override` project builds a DLL for overriding malloc
+in the entire program.
+
+## macOS, Linux, BSD, etc.
+
+We use [`cmake`](https://cmake.org)<sup>1</sup> as the build system:
+
+```
+> mkdir -p out/release
+> cd out/release
+> cmake ../..
+> make
+```
+This builds the library as a shared (dynamic)
+library (`.so` or `.dylib`), a static library (`.a`), and
+as a single object file (`.o`).
+
+`> sudo make install` (install the library and header files in `/usr/local/lib` and `/usr/local/include`)
+
+You can build the debug version which does many internal checks and
+maintains detailed statistics as:
+
+```
+> mkdir -p out/debug
+> cd out/debug
+> cmake -DCMAKE_BUILD_TYPE=Debug ../..
+> make
+```
+This will name the shared library as `libmimalloc-debug.so`.
+
+Finally, you can build a _secure_ version that uses guard pages, encrypted
+free lists, etc, as:
+```
+> mkdir -p out/secure
+> cd out/secure
+> cmake -DMI_SECURE=ON ../..
+> make
+```
+This will name the shared library as `libmimalloc-secure.so`.
+Use `ccmake`<sup>2</sup> instead of `cmake`
+to see and customize all the available build options.
+
+Notes:
+1. Install CMake: `sudo apt-get install cmake`
+2. Install CCMake: `sudo apt-get install cmake-curses-gui`
+
+*/
+
+/*! \page using Using the library
+
+### Build
+
+The preferred usage is including `<mimalloc.h>`, linking with
+the shared- or static library, and using the `mi_malloc` API exclusively for allocation. For example,
+```
+gcc -o myprogram -lmimalloc myfile.c
+```
+
+mimalloc uses only safe OS calls (`mmap` and `VirtualAlloc`) and can co-exist
+with other allocators linked to the same program.
+If you use `cmake`, you can simply use:
+```
+find_package(mimalloc 1.0 REQUIRED)
+```
+in your `CMakeLists.txt` to find a locally installed mimalloc. Then use either:
+```
+target_link_libraries(myapp PUBLIC mimalloc)
+```
+to link with the shared (dynamic) library, or:
+```
+target_link_libraries(myapp PUBLIC mimalloc-static)
+```
+to link with the static library. See `test\CMakeLists.txt` for an example.
+
+### C++
+For best performance in C++ programs, it is also recommended to override the
+global `new` and `delete` operators. For convenience, mimalloc provides
+[`mimalloc-new-delete.h`](https://github.com/microsoft/mimalloc/blob/master/include/mimalloc-new-delete.h) which does this for you -- just include it in a single(!) source file in your project.
+
+In C++, mimalloc also provides the `mi_stl_allocator` struct which implements the `std::allocator`
+interface. For example:
+```
+std::vector<some_struct, mi_stl_allocator<some_struct>> vec;
+vec.push_back(some_struct());
+```
+
+### Statistics
+
+You can pass environment variables to print verbose messages (`MIMALLOC_VERBOSE=1`)
+and statistics (`MIMALLOC_SHOW_STATS=1`) (in the debug version):
+```
+> env MIMALLOC_SHOW_STATS=1 ./cfrac 175451865205073170563711388363
+
+175451865205073170563711388363 = 374456281610909315237213 * 468551
+
+heap stats: peak total freed unit
+normal 2: 16.4 kb 17.5 mb 17.5 mb 16 b ok
+normal 3: 16.3 kb 15.2 mb 15.2 mb 24 b ok
+normal 4: 64 b 4.6 kb 4.6 kb 32 b ok
+normal 5: 80 b 118.4 kb 118.4 kb 40 b ok
+normal 6: 48 b 48 b 48 b 48 b ok
+normal 17: 960 b 960 b 960 b 320 b ok
+
+heap stats: peak total freed unit
+ normal: 33.9 kb 32.8 mb 32.8 mb 1 b ok
+ huge: 0 b 0 b 0 b 1 b ok
+ total: 33.9 kb 32.8 mb 32.8 mb 1 b ok
+malloc requested: 32.8 mb
+
+ committed: 58.2 kb 58.2 kb 58.2 kb 1 b ok
+ reserved: 2.0 mb 2.0 mb 2.0 mb 1 b ok
+ reset: 0 b 0 b 0 b 1 b ok
+ segments: 1 1 1
+-abandoned: 0
+ pages: 6 6 6
+-abandoned: 0
+ mmaps: 3
+ mmap fast: 0
+ mmap slow: 1
+ threads: 0
+ elapsed: 2.022s
+ process: user: 1.781s, system: 0.016s, faults: 756, reclaims: 0, rss: 2.7 mb
+```
+
+The above model of using the `mi_` prefixed API is not always possible
+though in existing programs that already use the standard malloc interface,
+and another option is to override the standard malloc interface
+completely and redirect all calls to the _mimalloc_ library instead.
+
+See \ref overrides for more info.
+
+*/
+
+/*! \page environment Environment Options
+
+You can set further options either programmatically (using [`mi_option_set`](https://microsoft.github.io/mimalloc/group__options.html)),
+or via environment variables.
+
+- `MIMALLOC_SHOW_STATS=1`: show statistics when the program terminates.
+- `MIMALLOC_VERBOSE=1`: show verbose messages.
+- `MIMALLOC_SHOW_ERRORS=1`: show error and warning messages. +- `MIMALLOC_PAGE_RESET=0`: by default, mimalloc will reset (or purge) OS pages when not in use to signal to the OS + that the underlying physical memory can be reused. This can reduce memory fragmentation in long running (server) + programs. By setting it to `0` no such page resets will be done which can improve performance for programs that are not long + running. As an alternative, the `MIMALLOC_DECOMMIT_DELAY=` can be set higher (100ms by default) to make the page + reset occur less frequently instead of turning it off completely. +- `MIMALLOC_LARGE_OS_PAGES=1`: use large OS pages (2MiB) when available; for some workloads this can significantly + improve performance. Use `MIMALLOC_VERBOSE` to check if the large OS pages are enabled -- usually one needs + to explicitly allow large OS pages (as on [Windows][windows-huge] and [Linux][linux-huge]). However, sometimes + the OS is very slow to reserve contiguous physical memory for large OS pages so use with care on systems that + can have fragmented memory (for that reason, we generally recommend to use `MIMALLOC_RESERVE_HUGE_OS_PAGES` instead when possible). +- `MIMALLOC_RESERVE_HUGE_OS_PAGES=N`: where N is the number of 1GiB _huge_ OS pages. This reserves the huge pages at + startup and sometimes this can give a large (latency) performance improvement on big workloads. + Usually it is better to not use + `MIMALLOC_LARGE_OS_PAGES` in combination with this setting. Just like large OS pages, use with care as reserving + contiguous physical memory can take a long time when memory is fragmented (but reserving the huge pages is done at + startup only once). + Note that we usually need to explicitly enable huge OS pages (as on [Windows][windows-huge] and [Linux][linux-huge])). 
With huge OS pages, it may be beneficial to set the setting + `MIMALLOC_EAGER_COMMIT_DELAY=N` (`N` is 1 by default) to delay the initial `N` segments (of 4MiB) + of a thread to not allocate in the huge OS pages; this prevents threads that are short lived + and allocate just a little to take up space in the huge OS page area (which cannot be reset). +- `MIMALLOC_RESERVE_HUGE_OS_PAGES_AT=N`: where N is the numa node. This reserves the huge pages at a specific numa node. + (`N` is -1 by default to reserve huge pages evenly among the given number of numa nodes (or use the available ones as detected)) + +Use caution when using `fork` in combination with either large or huge OS pages: on a fork, the OS uses copy-on-write +for all pages in the original process including the huge OS pages. When any memory is now written in that area, the +OS will copy the entire 1GiB huge page (or 2MiB large page) which can cause the memory usage to grow in big increments. + +[linux-huge]: https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/5/html/tuning_and_optimizing_red_hat_enterprise_linux_for_oracle_9i_and_10g_databases/sect-oracle_9i_and_10g_tuning_guide-large_memory_optimization_big_pages_and_huge_pages-configuring_huge_pages_in_red_hat_enterprise_linux_4_or_5 +[windows-huge]: https://docs.microsoft.com/en-us/sql/database-engine/configure-windows/enable-the-lock-pages-in-memory-option-windows?view=sql-server-2017 + +*/ + +/*! \page overrides Overriding Malloc + +Overriding the standard `malloc` can be done either _dynamically_ or _statically_. + +## Dynamic override + +This is the recommended way to override the standard malloc interface. + + +### Linux, BSD + +On these systems we preload the mimalloc shared +library so all calls to the standard `malloc` interface are +resolved to the _mimalloc_ library. 
+ +- `env LD_PRELOAD=/usr/lib/libmimalloc.so myprogram` + +You can set extra environment variables to check that mimalloc is running, +like: +``` +env MIMALLOC_VERBOSE=1 LD_PRELOAD=/usr/lib/libmimalloc.so myprogram +``` +or run with the debug version to get detailed statistics: +``` +env MIMALLOC_SHOW_STATS=1 LD_PRELOAD=/usr/lib/libmimalloc-debug.so myprogram +``` + +### MacOS + +On macOS we can also preload the mimalloc shared +library so all calls to the standard `malloc` interface are +resolved to the _mimalloc_ library. + +- `env DYLD_FORCE_FLAT_NAMESPACE=1 DYLD_INSERT_LIBRARIES=/usr/lib/libmimalloc.dylib myprogram` + +Note that certain security restrictions may apply when doing this from +the [shell](https://stackoverflow.com/questions/43941322/dyld-insert-libraries-ignored-when-calling-application-through-bash). + +(Note: macOS support for dynamic overriding is recent, please report any issues.) + + +### Windows + +Overriding on Windows is robust and has the +particular advantage to be able to redirect all malloc/free calls that go through +the (dynamic) C runtime allocator, including those from other DLL's or libraries. + +The overriding on Windows requires that you link your program explicitly with +the mimalloc DLL and use the C-runtime library as a DLL (using the `/MD` or `/MDd` switch). +Also, the `mimalloc-redirect.dll` (or `mimalloc-redirect32.dll`) must be available +in the same folder as the main `mimalloc-override.dll` at runtime (as it is a dependency). +The redirection DLL ensures that all calls to the C runtime malloc API get redirected to +mimalloc (in `mimalloc-override.dll`). + +To ensure the mimalloc DLL is loaded at run-time it is easiest to insert some +call to the mimalloc API in the `main` function, like `mi_version()` +(or use the `/INCLUDE:mi_version` switch on the linker). See the `mimalloc-override-test` project +for an example on how to use this. 
For best performance on Windows with C++, it +is also recommended to also override the `new`/`delete` operations (by including +[`mimalloc-new-delete.h`](https://github.com/microsoft/mimalloc/blob/master/include/mimalloc-new-delete.h) a single(!) source file in your project). + +The environment variable `MIMALLOC_DISABLE_REDIRECT=1` can be used to disable dynamic +overriding at run-time. Use `MIMALLOC_VERBOSE=1` to check if mimalloc was successfully redirected. + +(Note: in principle, it is possible to even patch existing executables without any recompilation +if they are linked with the dynamic C runtime (`ucrtbase.dll`) -- just put the `mimalloc-override.dll` +into the import table (and put `mimalloc-redirect.dll` in the same folder) +Such patching can be done for example with [CFF Explorer](https://ntcore.com/?page_id=388)). + + +## Static override + +On Unix systems, you can also statically link with _mimalloc_ to override the standard +malloc interface. The recommended way is to link the final program with the +_mimalloc_ single object file (`mimalloc-override.o`). We use +an object file instead of a library file as linkers give preference to +that over archives to resolve symbols. To ensure that the standard +malloc interface resolves to the _mimalloc_ library, link it as the first +object file. For example: + +``` +gcc -o myprogram mimalloc-override.o myfile1.c ... 
+``` + +## List of Overrides: + +The specific functions that get redirected to the _mimalloc_ library are: + +``` +// C +void* malloc(size_t size); +void* calloc(size_t size, size_t n); +void* realloc(void* p, size_t newsize); +void free(void* p); + +void* aligned_alloc(size_t alignment, size_t size); +char* strdup(const char* s); +char* strndup(const char* s, size_t n); +char* realpath(const char* fname, char* resolved_name); + + +// C++ +void operator delete(void* p); +void operator delete[](void* p); + +void* operator new(std::size_t n) noexcept(false); +void* operator new[](std::size_t n) noexcept(false); +void* operator new( std::size_t n, std::align_val_t align) noexcept(false); +void* operator new[]( std::size_t n, std::align_val_t align) noexcept(false); + +void* operator new ( std::size_t count, const std::nothrow_t& tag); +void* operator new[]( std::size_t count, const std::nothrow_t& tag); +void* operator new ( std::size_t count, std::align_val_t al, const std::nothrow_t&); +void* operator new[]( std::size_t count, std::align_val_t al, const std::nothrow_t&); + +// Posix +int posix_memalign(void** p, size_t alignment, size_t size); + +// Linux +void* memalign(size_t alignment, size_t size); +void* valloc(size_t size); +void* pvalloc(size_t size); +size_t malloc_usable_size(void *p); +void* reallocf(void* p, size_t newsize); + +// macOS +void vfree(void* p); +size_t malloc_size(const void* p); +size_t malloc_good_size(size_t size); + +// BSD +void* reallocarray( void* p, size_t count, size_t size ); +void* reallocf(void* p, size_t newsize); +void cfree(void* p); + +// NetBSD +int reallocarr(void* p, size_t count, size_t size); + +// Windows +void* _expand(void* p, size_t newsize); +size_t _msize(void* p); + +void* _malloc_dbg(size_t size, int block_type, const char* fname, int line); +void* _realloc_dbg(void* p, size_t newsize, int block_type, const char* fname, int line); +void* _calloc_dbg(size_t count, size_t size, int block_type, const char* fname, 
int line); +void* _expand_dbg(void* p, size_t size, int block_type, const char* fname, int line); +size_t _msize_dbg(void* p, int block_type); +void _free_dbg(void* p, int block_type); +``` + +*/ + +/*! \page bench Performance + +We tested _mimalloc_ against many other top allocators over a wide +range of benchmarks, ranging from various real world programs to +synthetic benchmarks that see how the allocator behaves under more +extreme circumstances. + +In our benchmarks, _mimalloc_ always outperforms all other leading +allocators (_jemalloc_, _tcmalloc_, _Hoard_, etc) (Jan 2021), +and usually uses less memory (up to 25% more in the worst case). +A nice property is that it does *consistently* well over the wide +range of benchmarks. + +See the [Performance](https://github.com/microsoft/mimalloc#Performance) +section in the _mimalloc_ repository for benchmark results, +or the the technical report for detailed benchmark results. + +*/ diff --git a/yass/third_party/mimalloc/doc/mimalloc-doxygen.css b/yass/third_party/mimalloc/doc/mimalloc-doxygen.css new file mode 100644 index 0000000000..b24f564326 --- /dev/null +++ b/yass/third_party/mimalloc/doc/mimalloc-doxygen.css @@ -0,0 +1,49 @@ +#projectlogo img { + padding: 1ex; +} +tt, code, kbd, samp, div.memproto, div.fragment, div.line, table.memname { + font-family: Consolas, Monaco, Inconsolata, "Courier New", monospace; +} +.image img, .textblock img { + max-width: 99%; + max-height: 350px; +} +table.memname, .memname{ + font-weight: bold; +} +code { + background-color: #EEE; + padding: 0ex 0.25ex; +} +body { + margin: 1ex 1ex 0ex 1ex; + border: 1px solid black; +} +.contents table, .contents div, .contents p, .contents dl { + font-size: 16px; + line-height: 1.44; +} +body #nav-tree .label { + font-size: 14px; +} +a{ + text-decoration: underline; +} +#side-nav { + margin-left: 1ex; + border-left: 1px solid black; +} +#nav-tree { + padding-left: 1ex; +} +#nav-path { + display: none; +} +div.fragment { + 
background-color: #EEE; + padding: 0.25ex 0.5ex; + border-color: black; +} +#nav-sync img { + display: none; +} diff --git a/yass/third_party/mimalloc/doc/mimalloc-logo-100.png b/yass/third_party/mimalloc/doc/mimalloc-logo-100.png new file mode 100644 index 0000000000000000000000000000000000000000..96f08259d182513d88708361f3026a76ba074a7c GIT binary patch literal 3532 zcmd^>_dDAQ7r?cI*h1{2;w!O3?Y)x-VnwJCiB+qFM$D>O6^R<5N>!~|MN6$#*Hz;R zI&QUfb$*4g@aB+m&%Ik-FU@!cpD-3tZr@$vI{5lC2GobM1%4j(#v_%Oe~VF4Z>enCM2 z0Ra%t3V{W{V34qgpoplTsF5dlj`iiwFyNJxMsrNEL9VJRNcBGNJ< zGEh;dtSD4YR90S0R$g3QQCvYuLQz>l5hkGolY||SgsDhNOUug2%E`$|!Bi9!6qJ>f zVKA7Aii#8*A$0@+Q2`(bHEC6(jG8(WsR2c5%BpM2Y3j&n>dI^DDd^}cq79Vvjg<6_ zm5v%K8=Arl&EUosa1%?ospSzf>m%kiDi*c~1Ofm6Boc{2q14sYwY0RfwY7D0baZuf z_4M=*mUadP28M=)=H})W78aJ4mNqsvwzjr*c6JC$dxVvPsx=Qsz}5+{#i-e1)f{k0 z2WO-s4?GIvg2K3};|Lng9vXNLO}wX;tGAXLQJdhS?M~A6AnADe>3AK}{SSXMF#zoo zs7DIY^Cj#11?&4!jvk{P^$#%!3^OE07?O`0Q6i10QO4A0FMq5O(YUYB$BVMub-cvzrTM#KtNz%AcaB+2?+@g508k5h>D7eiHV7g zjg5%<~pY3Ii}}3 zX5>3%oOL=?fXQTHP8VXcSlBacTy_yI`pIqO-JeHePe4;oz?I&B=H9@T{-D-@pteCWkM^77tG9wXhJrh9Q@ZX@y6#f1 z-=ki?AJTn4r00*2p5f5m2cf+qVf~|F{bS(+x?mhg(M|(3XEJ@06(|0f=)9yHp51_-E>EAVU(P=)B-?b<*mc@<6lAzz zIwDPIZ@D91g!G;=m6NNvki)K4YS+XA1f^>^Pub_LGE5LsZxr6s@3hh0uKmb-JoGjx z>)>ZGFmi>L-1n54?rS;~GP!hKs@5s@J$C?VP569==nQ1ZpF}r6=PR2rbxnHTo$^0ADj#0} zO_TAhc=u{8$ZYq!TW``X5)+um)$hBetPIWp$9yJG0i}MH%39l2J%m0Ze94ohe)I+~ zB$}e8Yqsw?S=@vCrcog@5YKT~8qhyc@&-tn_>zaQsk9OwSKTnTaIUu}Tt^P#E?5aR z&7nw%CdZiIV!=N>@|O-}{p0arMR!ACC=y}+blh`z_fZjTcNRV^S>|xJPt`hgH5fe+ z`iqD~)i-SD5}RQ;_m-=qU*H-BC*m&UV;w^*x5ulBLzxN+tLl;!XtA_0dh~m z#Cqzt=OYdTq6y;J$3(aYRB(Zc2L@MfP@;#S~mSF90OJ!0|6#5`+Ob$I2OW@zJftf>`x14ZoI zD}Xj;s|<^7prjim+?Jx&b2C}SL?&u6u;!y%gmSV!a3)87$!#flr3cd=;K9-7n6-~e zyST4m?qe33OC_bK)ovnAR~;(?XYzRYUl;A6dw?kXVj(^_8rOTB~q z5mSZy+m@f^L;)Aimd%GtDUO$uK{4> zcs2|@$oO|;H2TC}3r?&3E!=^Qmv*oF+6L8H)5pH6o&MYyw3{yd?#Hm)W<*F^b$HF& z;d@+rBUy?Ykm*Bq6SfN+*L_xt+6Nn6{i$Q;&j4ispl__R%U-(g2>sv}iw&Ug- 
zn{C%tHHTh$w5thCI=VNXZrC7{y?#0dd!cj=&UrLZS@os!fPnJ3bFmVblM6zQeK>XJ zx=d=P-AZk+O)&^4m;Sup4o>}cjdZC^5E8dWiOW1ZnO!QnEXBm^v_)Gzb5*^mJ~3wh zlYHRtCa%W8=^Ntd0FKqIYHNjGgLKM4#{TY9`hz zy=nzW8dLvp2}ohE`+TQC38p(%0!3YFPj&Z+MPAU$8wLi` z>Zba@kIl`15wx_5m*(3f3up}Rw(XrEMX!mK1M1jbE0OPKi7fWZ08c8qA1YlY19TND zqez^cBQPU2`CT>8Ylf!2GT!E%RM*bBlpwL(7H=i_<7UYg84gVT;A%0&M$q)7@WrpJ zjd^zUx3H#|1#ODqBh=z~&tPrA>0vboZ`&Jq-H+s4YKrBLGJjw%ThaW&t)#fByInHR zIUbwDb42!%WE4S373u{6kN6 zk6%&bMoAm$}QU@g45^o;QDmdR2*~9cyqmR4gsCT?&PupcP&HX?wBN1qq zzjV-8eKu(9zJ3nq7+X;3oBY1sZ;R2Ah_2ITrXL2dl&0@ydc0Z#K`R!nO*pqv^`8$Oug{q*L1M2iA4Ox2`X1%0>3y+MgFWlEepk^C;#pp zEcqQgQJT8C79HeA4K86`7T)|NQV$g44S%*fsl#*LBE~EFfyt$3PF^UqQ&v%;ieVw1 z#voTN2z7x?@Q0{{TTehi3o) literal 0 HcmV?d00001 diff --git a/yass/third_party/mimalloc/doc/mimalloc-logo.png b/yass/third_party/mimalloc/doc/mimalloc-logo.png new file mode 100644 index 0000000000000000000000000000000000000000..e0a5a8ce258c29f6dfb5d0658880a2d339b5a47b GIT binary patch literal 73097 zcmd>Fg@Uww6wHz*8(D?0!qVzgdn9fQj3B}H`0i}(nw0b z_xb%B@8)M`&iS60GiT?_y%Vpmt3gK0NDKe~nU>}QLjZt4|9eC6z!o+g`#SIo-%&+J z1psPONUm%M000lrfAmBRYzJ^~!6yzbE)E_(E)qPItdz{qwIjQb*Q*-dpaPiY{^U?AM(D4h=^9s`Q3(@fj z)A0+_^9j=nh%g9=GYE?@ibybvN-~Q}F^NhuiAyt!Ni$2xut>_YO3SfIE5PA!4h{|; z9v%S!0d^Th5fKpy2?-e)83hFeHd$qMS!K99;;!O@yUObKRn+gPXx>L?ay-!HRDZ;& zsmraY%cZ5yt*y`V$dFgZh+of`Pv4kdAN(}sH+U-W*j(TVQqagk$ka;M*izWkQpm(w z$kbZc#9H{Njfk0@h`Ft(g}u0?gP4_*xTTYXwWFA|lemqu2cPR%?Y4GLb zDf`?<*40rfK;At-!81tlMTnA5n2L(Zg9i`P)z!7Mv~+ZI^!4=(4GoQqj7&{U zkw~P4g@v+rxUx@}wYBxLXV2{I?VX*SU0q$>-QB&synK9o{QUd^0|P@sLc+qrlzbzU z{UTHXq7Xqbsv)rtf@2?q#Hxn8d=UKdL1?^sc%oWFf_h|<1}a4}Dp?biq7nUC1NB-n z`n6`v8?CrB?U$+A@#&9Vy?d1S{$WDKqgNR^NtwFIIl8ZN_21^{y)DpxTVRk@X!!1f zVMg(j%#z2MWlyrojB?72^FEp8SDF-5nHE%;pleJEYo31ij4ZA*FKsX@YcMZsF#FhK z{;}EOQ;TI~n^jGRbyd4{O}lkbjrRf46Jsv1|Nc*VJp*)aTGT;PAEI z;p?DN+mKVoh;!#?WMm`?g^G=hjgOB{N=kbD`t{qlZ`0G$Gcq!=va)h>bJ1vYadB~3 zSy_2`d1Yl~ZEbCReSLFtb6ZOTmCue46=I7^^mzTlV zr~c7=ADjVzkD>14+yCDW;PDxR19S3!5peI9!_`1sqr 
zum?PCoZWo{-R*tY#RWwKCG0dz!Hje1r}aSPNr2_fb+EblM()OCAwSLNqMA)i0@G`| zkCg6Gwz6%Gyh)`gqbhXUV~7tp&6K<1wzAh*oH{+h!mRK>{I>$XKKr^pY;gUR3K zoA{+vYh=e-9M>7oY*zV`MY-meyXl6kCo+c|e$PtfHgzw2hJS6?(H*3ID}IGE8a}$3 z`Z9IVerxdTRSSRlRO!@Opm9$%sJuKW93KGmRG~OP=Qs=j00-hInDWQ)f7$sVhbv6g z?N?*o!}7k%oecunQXY$SUZDe9=_36}i5WXevGbi zxrbeCZYY%o;{p;OqI1~!uefTK;JxLkqzV%UT>@Rvd%=?zgOEI#!=FrSw*m@xftR5C zv9o@*Tq&E}VE4GK34C2vi=@XQJ+g_HS;{IOI>gc=+LB>am@Nnu-T zQU8vk#F^>Rz#O!8Z%3BE6dwVt84*&F_jBrh4mtBc?~pQR6DFPV;aQvp#e8td{ zUxR!+EQ0kf#>IMT$EmGpQ84SDAeede!ln?feYMg{R@_Ib(5^Yf@YNs2{=0ZOI;XMG zy^1#s0;GTk0@_uf7`!_YL_z0SHE;s4x+D^y2dY(|UBQZSH+89c_(r{^*Arf(s`&e0 z<3mM-2Df_ESlroO)B4?+6z52s02~B#@t`ju+vIK}kFYVD^vz>r0I(Mei`oti2`DNZ zf~4~VcZ;#zCTY|z?lP@Eq4!IV zl7pj`W%Csqaas{HPutgKmPVctdXk}F^M6XKt>yIF*32xsVj{bAK;HQLCHa?cC_@q_ z^_IRp6<@vsg*SCN3jO}7FK6c-u$O8zwghxGgMV!(g+5XWbfBq3sZdqq-C)_U)p*O?P`LEaQtr&F$Ep(PZM%5P;`qIw zD-3S-j1p5uEKb)=yhdQm;BfT*JMWhk>fsTUYI_te0bGM<(D1WE!W_C);hqcI<7vDR zCIoc$@;j4R4F^{LSN2jt$K8l_7-D2jkD~H}FR%2(As*W}8MKm53faAL{fBz`L`gu$3;tSpkIUHXgjRoWx8>|7VKPOG0HC6fA(b$5Tbq;B%R7X-70o(m%%0hNB9|dq|@A znUcq|iIo*mFzP@uv9ib~+oK45d^kSM;>@#=SrTPNTaqeQ3SM>;`;rYQJ-(V~{tabY zKkz{Xs`A-ZT7fVPLCxfr z?XfYwCKUpDH_QiMineYY5MUc)y2H)_g@4$muj0eWGWAVcOanfuLg6|y^!Sx_?$<9D zvj83h6fRJ0Z42|UU3P+hfWT1`p;1XHBacswa~=@?M{QJXEow0GzINR{s0i0I28Pg9 z3}j@AE@<9yo8d>Lv7=r`t%87DKjtHN9N7t=aMRNZr7M!kGDUHwa zxuuD9b8sqPh??Kuf{@LCH01Pklmrxh|LpE*e3TeNg~%%D{WuunjuJ-Ez*87+<2Aw% z3g_SYMu^M@q>a{lq_DfBFggS@q0?xiizZQ`dc=&8og4+ba~{A1BDnA?{j&|S#Tx)I zLfG6zoyc&PiGP`S1`@xdfTIE|yFH0K^9Pcu$C;R|B~dV1jKsUnV$taib&nSjaxg>& zh2xKi1cr*_E3&q694G?n9dbcE^cv^A+4+DTc9&e3432s#jOGr&9O5YzHp(lLBA|_- zHSxw&fP36yO&eMi47xr^sMj|ilDc@Y90-L!3T6Nyr>(^aU?|wLsG@os#WD+-Gn!Y0cmM0X}*$P8cUy?H5>xmh&sSvl3!;c$=$>GtP z8Nx?lbH@9HA&w-*Ns`o5v@Zd`-e)%ssZ zSU5YEl#q~6hqbni!N9BEb0s$(1}hVKhqjGhY!&ORgT}A4z*8DZCVu?ECWzhv}L~5m@!6-vw%! 
z&1%I~A7@V-^-jqxzG^2OyiHJT3pQh2yh$IsyCYEVu%>d67+JxN+B+)3`(bU^K2Kv$ zBj;D|bDT?NKqJrR(@ix&iC_GZMBbA4`YtRsw@=UdbOiwZhKdxJKyR~h?B{7UnEfqGs&ZL znbU=;sg|GCT8e(@oDQx5>53lU(gj@+Qo|aowtBQiywJJx~J&Y_O5faOp9EA*TQR z9ha7FiV&*)5Tdb~*FMTu7X+y&Nny?^3f(0$>bk=$We)WoY=R=Nv_dP(%jSF<0l$_?hbKe$7xN{g!s#o0De z9r%0oiAzYm96j{LEPqHWzy_ACv$I|{#^*5*+%Q%(FR?z$eF%u9xsWO)gqM$nc6C3^WA zcC=}`|AM$;HL={{Gryh~881(&#`eTcV^@jDqhYeC8l}^xYwwID_BMOf(!?bsls4^d z^J0BDSXsO~t{5&NqQ0=BbpH%))`t>wwQyYsvPaE|X)L;aB%%#^(G4@%^}UZ82~4j1 zFq{)dANlsD(yUy>H9DD+afWG5_;{IsOiU_VneB-uIcmhKA;8^xx#bSC0;XL@5TG|Uc#;r^j&wbQyB7U4OHRm(6a z`pc)Y2`$m~ub;P+b{TuPi8M$k{3YCk+7fO`7GEc+#0*8jWcwtAc4dO?w^^=f-sjtGIJ zXHqx{owYLm`y(|P{ZYkv`R<#M)=R&@vKT&!aSghMqT9rQ&@sB= z+s%iSu28b=S8ZdMlj@OPI&qMyo6*CS+#%i7V^%5A>rRzJ-tO~q-oHKZAAU6%qQyYh z9;DGpLXukds@;2>RP=;czwKV$l)cEe7fnL*oESdw z)LQu5sywn%$TTC3XgQl>aQJE2Q?LN;KC=O`vq04o}B2o zl190{zc8zKg%wyQcd;y?0cJ?x6Hyf?YE-Vs0%{ zHRGa$8gUn@`Y;@QbgsQ6=NZXi?b+V17OW{IW$V+n07GQE`3^o6m08nKnBgLRglHSw zHvbYwjmNVS3!CkiaO|Vraq3nyShmY^u70ZGp;X!}BPOW|-Tm{9Pv_At)%U={&Ozqw zS);9Fg%3T1=5H{%P;`=Ht8Jsxa^!lR7E%JICaC&MuYW=0d{+CB;`4i`5lIb`=4Xvg zdwx4q1WYj5uJ(CzW~;G8j{j8@KXIDi zC`qqwf`Iqb`#bd4IF(6&t8KF19=|2R;~}K`HKLUr)gb*lsw=vEtS5D~?k;Z4dzrA$ zqXhRz#JsvJgV|AjGh0^+g*WUDhIo~zi<>`uVL`#Nbg|U4xbQ#zAxW#lZ~H9HJT5SG zH#j2?nmUSq3kD^~8lN`kIjiA>zz}|M%t`^m9p#}Ti6cKD%4{o<4O@tyY+0jqby6O? 
zIGE^V?J9(#s%%awkxEVF@w%o)()i=1Ee@m^K!nt|j_A*+Pak zSxzUj($kK2iw((kpD@3Qa$pF>>Pr8^l`f&QD@svlvQ4u8bhYo{+0ag@Vi66bEDrWL zFpQ?HJNHLZ<6i+gSkC=-nAkfIJ*nuflSCLoaE@92rQ*A;yo9ZhvIOdNZ!dYCnUAyc_*}*V!kiyDJI+4t#7XHH!D&Q@AHQFE3pM}YyOdow#M@|@ znfo)Z1;$pmW(7>282-nzvo^KwU({ge4|ZuJe@yZMulN*mX*#qF1uH+>`h%~L5w-F-xR$`s?4jI z5oNO~rMjGt$7YULE7gFUA?E$X&5`~YNB2(t`Eoew`L$hwsuYA|J6PBqHvf%z&T3;| zvq@wy@dXrZsGIw!q$cc^02vB2#mLM)-5#J8>=hS#4oC5}VFwJT0_!h@%!_>?5eXSZ zEOLifw>|F6Y{kNK;<5-Bug}PvBF+~sr%Atwsg@1j$DO$j7dN(~+Ew*NBETBlUU6>g zn~M&9c^TbHsv7bMTnfnP5{K;B98M(ZChEcO4lmY)e2}Q%Z%T0lO_I`?+ zG-E+<7ozMX6_?Qw0haFx;&sic`V3P=%BU>i2LzNxdgW_(bh88(mlFZbGTFM5 zWBRHtyG7W6H*zrI0TdmgF)i)pEnE{Sq)S7vPeid>1s#d&a;}79YjHQR#?tH%1oxy!91AOd*?#6X8c5e_j{nr7(zElT`W$vPHXth%n-rw{vumD z=U770`7Y~1n4g%uVD`+2y+$(74V zRi-GT%Lf1%&yp24%M4=p@~>qoLzJlz+$6?KcBtkTW$7P6fng*jh{uF=)SO{)>ks zh=if^qw-lzb@v}$pYzG}V((D^Bo++MNn{D`S-L5!`#LgUK=$6WneJ(b3UB z0ze^#Xb4OLI4whEU4I(B`5Hsyuk@iK?Bi_nX zb^$0lg3ZnE2k2)VdKHS-Eq#k|N1Li2nvI5U6)NAclR?2!>ILPZvhNvwXcZ1S&H9g2 z@@B>HpQ?41QR3_17Auh@&or2Ur(qFOk6z27ge}M!B;QPX@;=CS z0yJ}F7Izc1raw21wGbJ`!b&z@-iy`w$9H>mC6U8V%#L9Wnx~{I{xSc+g-eqLk&)M( zoMGSkPlS{OALd`%xJX2|J21Oqr-^x}N zv2#Fe7{OVKdfM=CwnhfZ{;=uRCFXc^?AMlU-MgqK?5K~I0SS1r8AjuAgDODaKe>5s`9Y-N3Femp)gbE-=L9$QSEK_h|cvPW}S0h%U1Iey4OsSR0 zynox-(}qser8j3znOK9SyxlM!lhZM4x84Z6_<*gB*`u)uAPb<#j zy(?12T1ddHM1hMZXPb5}Jbhphqjvu3>l(x+Ea-Z=`_VAu(WpaP+$fVhi?l4XbMvS3 z0$0}4z>F!Y*Z2shKApi*qyBP*nA&LI>%d>ZK}R-=z?W0jHPWb&S5!=Odj$q#iU>9x zV3~^5iQgQ-rYay1o;77t!;Koj9W13gF*&pGhINv!$0Qd_;qiPH`CCQY)D0Mw5UZYYOnkw6X+#tW;4faA40do%qMiuO3J*XW#&=L&yV_}@$ zPqv*KNRXZkK|pZK#dGgXypQ)Db>%W6GQK6q`j425ifQGm0W}GMr(U@mD??!HiD}}) z&F3j|OMeZy$wq_YO!ny1bvl5ZKs$pJ(ZroA=QKYpqmqvRYE1qecT5Ga$JDOeLl&!; zLs#zJ+&oPZl3JOI+}_8<+L~`iQoDGHy!y_Liod=p=owF-l~;U}uLO*A6}Kaksx@oZ z@hhjnja6ZIt{j%c&+3C78-VSSpDzDP!=sN;1KF;2ctYhe>bJ#*-r|f5*`3LT4ri(m zQK>?y{>|ZC_<4S3HFw5U)>!dpI>g$R=TBCT&62IFLRZI|qB9;6Dx0mF27hc&B{pFz zJz*M%hV`602I1!9?VhJ z9&Jl!;K37Dz1`%n6%0*@pgY=hcBcI_SIWW36#WZ^iBTMHh$e8`z@`!c7sO}Ngjm%I 
zrZizzlxO5{!}D2{)yBndPelP;>pYjI=*_)RBMoOAh(9~(==#Al`NbBYZ~&|m&RM^p z&;qWkrn}srEAmQglwHKDFWHjg~n#d-Bq9)7f;5?1$lNc=@6WpZ_Mt3 z1;f=2#;oWQ78uGiS+Buv%uG$FJ?p0bcoP?hqrW{X4r1kgKm=C9j+Z`P@>oc(e#tPi z;(O-kd!*-irxc>V%Sd0aK_NBxCd&k#ANW*L*2jBIg}7Ikp(`v|$=SkFCo9BXfgwA~ znW7C)4SF>Z5=20D$x$DEyCQ0#5J;xk`Z)4}nLvGNI12Wk;wC#xrNdN!v_U`({Jr83 zQiW(=85&=5oXr!I|fos9cHJo(@M; zs16?nL9L!@c@TvDX~@-p;F$As+Kx%4%b#;#t==VR@#Nb1@iCu;UuR)$e0=LT~0=iPf@J zM(rVeCA_UC8-Ddma!^n?V_Gqp95>)mfO6yZRP`a2V_Sg%(-DsRLQZQd$pBD@#g z6OAE32g}9W@^qN#lPss!0`EiKP-dVrwJPW1U?EKut$Y9vadphWI^%`eHX8p+IZi-c zV3k2ds)ktO=*5_rJS`?g%Bf!A<+vISyThmA2xDU84|z?ON7V<$(C|;w_(kd0?>B7$ zAWyR0tRhpRO>(!a6jAo;+jBXQR&HP*V}~0}6HWV$t+>8KDu4&2q*O4D5>;P)>H`Bb zu>zHXW*vvVNRObXIK;nxRaxJW+d&bq%bn!RSpsslmEt_ONw7$?5*}<>N@V}S&-R@g zkRc#nV^J#8G##S8txIvB3$>bbMr1}MYX$A&0RzHuTex+vMk3=%GX6e-dQ_DtRgv#? zAE*GPoy?=o%IMnksl2_7bXeF|%c9khE`Q+=7(m++WLb(6lL>LU-V9d1X%^iT&IbH@zQ zwfNl-9BWE&CcYcl!i4pwj3Xx&zQQn8%oC;2?bRuT(Ll{h&uVoOZu4_^rG~{I9{j4b z`@&~rDaz&;sFALWovOYOsnl8+z=KaX0X79>-B=vy-(+;FLV7fXOQYvh|79xTK@(bq zb4dYHTXKa@gFY^q!($f{_2#UYj)n3;0CTzc0D{w5Ua`Sgm+3BWX^$*wN$*!J8}5w% z+$i!Nn`5%1i`c^K&lIQhEyF$Xm7`gony(Zr_z`J|jn5xrCY6fQ%jKG7QCxKl(gyLH zV7wTuwk9~yJZnv=Gxp^&iSs18CUP@Th#<#R{>gc^sLDs#$}s0tzoqR?j+W#~oFaQh zuK;|o`TY2Ofyi;I z3+<*uJY;iAii>JlKT%Q7^bi{CXc);Z&N6@JJn2{$yiRr$BJM z3-HwOp!xJzd~KjhBdaI_5cdz!ZErz}vEb>8)sIDjq-a*I#0xOHy~e)C=j8BN`Bbz= zvcf5F%J$D}2sD#@bglh`fHoFGGSw=G3d-S5os>T1>4lUPD zd}QxtE(yS%@XHFVMr6{mi(+^mb5H{Ifa$Ry>ep=nEr-t>k(eL}8f^P#2HLLQHSs5r z;eD>~Y8C9cS7G8U6P6t)Lu(O<J!lGn^2H=CFtU4rf>Qss4&t-pi0lF z35#MlL+OL0i5i~QDu4!266tBj`1s`HLh#nHQ^rlv9>!y_qU&NvjccHZ6D6C&ZM!3z zK#nW@z{%QU+$7p1ftZB&@aXVxUyz%hmuKnRw2`WPFt+ufQ>87}S&s1PJD6g1SG=gY z0+^+t=>3LWZk``x{Mn5*Ye$g7Jsyvpshf%YhZf(D({}%4Q8}33!BF^08G)8m5OO}2!AuhL%KUzfa+H&nXD;lhNGn3kLQ0=H~J4v z(C;0A;e6m9q4Mc}MGC(xHHXL5E1VT}oZO*kLI{-azd)+Y+@D%62_v&9Lfl5d#n=YP(2HE09yyKiT{h6OPRS+e0#xbF<{1r}aJLbt0=*2ri@Ye0O zB}_4@^*BLqPra^e5T$V=Hy;~F&cIwLo9#-o6pH1W@;k#?r~jC$qVD-phw$T#i#sUT 
zg+RT{xHfm8fGnfOo7$t1A0G6hUr;6x-gl#^bvmayNDjHQFRB3prym>e1hR%Tq03&j zIxqZK1kcCzDQ+V69t$~G>3a@WHyJ4vrGFNakwxWyY6$Kv>)4E{a!ds?g{duVC^F`} zkzy?OdC!i(mo^lWbLGyXWO6hpJ2WM6OpX%p5nsIf7eE^C$|!XF%ko%oJo`Bk5Nq6@ z{O1o$(d`Teu}U#ySuAxIaB}*rv&EAhsIzL&;@F02eYGoyE~*~eUWT#OUjsISFj;6p z?TBS_^iE>1U)=FL=hwR7Q)dm68?wqQmLFYkbNe%?0+tLXCS}JJ!C&K5^u=5fZOm#; zjl1K??gd6)@MNS=OuKu_vThq$Km@&a0ZQ@H;XhtN?4qyOuPnXcQNbQKQbsPtR@Lbe z2V*V7X^c1WOS3-8qd(GOqk8<>#oKrS9nHNZzkz%x5-`;q~OaJKb5_AL=Ab6fWU760ZD|+wBFf3;gq-i;8Y@kPyha=!)1{ zwNr1ddYr7wVCuNvd9lVezBMbtOos^8(ETH>H)2Kx_|QDJLgbs;6VLhNI=N|kj4ZC8 zww--<>2*m~2+jNxFGM0>%Hj9YkK>Jc%NQqp_I7z3{NpQKv{!-3np}(xT;6*72T{>F zf7>tsmX~-nmu9^mLEUl<9d=1!HfCw^`+v=U!t{l$Y+AKoKC%vk=sLXc*x`sOA$_vF!3@yJTFyUOf<3l z$ROxZs9MlZS9#wF(`)}a;^`b!MAKpqOHq3kWsn|D#A(m%YsKcDMmdvV2?w+F6)b$N z=D`l`Xrrd}97;^-&b$ax|7Nkq37*l;N;JW>(=vS{3N0pQY>YyoCPQ5<} z%-Wfx%9J*GAe}Ah8pz9Or)DTA~g|zv%A9lm)y{d8TDrPNDr06XRAV*<{@Sn##Yi2 zHDZFdc&!3Qw$`JuMHvgoss}<&( zW+qq>U&n@xaqou6+IWy`TnRPj3Yss}?TEYPcYk<4hzy!rVrDibUn5!31G zaQWv;`WGBDj#zNbHze}SPXg(v(_k|z(>+k0Kf-JAI4jOmK|q};?e_x)0D^$7CVZWC zS|SqbaX=QkXz#;j1A3ecHaLQAfr#iFo`T_;7v$MtA}^|D+o>GH*^H}w!Gha)PIuQl zhZ}EMGZF4HV_CEvsxqgWAu@H9X1t9YlSQ3cq%T^@Y45WDo@rvPw!JJ;L(*eUUFT$t z_GL7D$ObQj}Iwz7fF0QiA2zW9M!DKy#4UTt3L2aL&^H$EE6uE)*S%XXTh>36Vq8P9& z9qlT28&!&hv)JpQ-7aURZljF%XGbMb*=|gTmv4>%@*a8&4p$Yg;4i5v#Up2An~TOY zjJ096kV-B0{U#Y6knHXIdY9ew98f4JGH0v&yeqs?PT@pQ&7`CH?~NN>?hog@mm^Gs z05b&sh;!Cu&IVz-sx-oDDV7!MnP4HO9xA7fUBCs%eNHSI(lyRok2A=rYWIb0*5SzS zF6zUucBi@IQBg=9gb--72z~G7T{{2q$Q`xYYI$AGHmYqY6cjts&^-CdQzr<}XqAnC zaZ)kN*-V3C_$vRL6xu`GTCoq@kM5JhaHW4u;xtr`u9a?zuy9?lxye1s2t^qmAxA|} zy*n+8)zyp;C{9k22#SJy)K*r{&f|?m>%tT^Ajy(N)oE`2x&4ql5h>s<2End=vi$zq z+n?Q{c2^kvPEJFD6z%3+LF2muz$3=tDB}k`Zmrt;hbB%&)s)wW0B;EaLHA|>X=UiL zPmUSNk>zX8%hPL;R&px$5swmyYlPD@3?pQm0+tKOy>uAgd()dDx!kk0n38CHMsm&x zL6p!h-x5;LgeG8B8uPQ3TcPY;W&;bSdF6sNVi(aANlVjMnED@nNaEv9MoBV(2_rq- zs8~M}&Ith*`wC*&R!$2@X(i>iH#BpJ-yPWcu^MDN*(Y1#$FNmhumhtmGK`5)SN z08P6-tfA#bN7G4iA(fO17S`i7m=mQUqjpY^&LR7TpQDKo9wA3cGg^wQSB_q-enPcq 
zKRz@%5MB3k8f6I&SuE*mI6953QGqTKJN9F3E0JOSYNgeZRrzL)<3k36RgcPyph{2m zNa8a)!oW(+Gp=^`xY@X}Y4z>uG}CA*$LUUtnB?uf%G3G+BPhj;7-|G6>w6~<$nf|Y zsW?-fe%iDWq3tc$#_aHGc=WjyiEPZlq*2|=44T}phU=v_G3c4Pj|W;Io1M?9*rm}sTf#p$o`<)C7a5)i>8oRrV) z>fP78?hvgYr;(&{@u2RhmVSiq28HP?bHT&KBt!r zYj+*a6upT(nh_yh=1d(t-Zbv-Gr5<;cR?qagf>0msl0J3)qzTl7p}y-0dL+tx@Nry z;9FY??2=eQ-xD}ZL<`qtTB*MWwRZfxd$)0C0RUe33g;^>@m9{P7H*ayCi}7Wf05^m z1fA|#sDss$2G^{OI^8iQ9AN%;h(Ktk5pVc=)jJ7ysU0zR3`OghSRoTYrS@#eX~#6obL(?$OmZC@r{+Xs zF*65J)HuPLlo)~29eUq>X?7rE7o&TTSaDR~GQ^NeDZ^CfxPcV|X`CrBbS7g6oBB@< z_+Tu+AD;&n`G=4HeQ%w`HEUqkfBIu9g&VJ7>@Mn*G?)8avJDUDv=~xU&VOMP<(}ag zTvb8#^!T^wrU(iV2UE1|ABmq=c#kCmfW13mk&5fD{z&MxxceLW)Xl%kE#k96!)COi zVPSUtYXR53MgMu-uv_J#pn_6fEoGH+w2G!-hk4=ZEDe{*8HNbvWDvNv!d6oNRaA?5 zY(5dv+SOJ6go?<{+assUzotux8FMU15A8U>Wb4!x{RTJwI6 z5Rkz!p`P!zX&w@$Qnx7>^yZUsn~F?L(gOVIFCmip)1j=;I2$WqzPaVe5jt7z!^u=wM&aGi>43!pu@_Lb;j$>KxKs;h%)yuFJ5n#@6N+p{j$wS}i9236i1lmLjF; zy*#9TH6kdb&$j%bx{%m0ynvTq@6hhHYo2Ysc+{b?YU_{OrbTssT~7l&*_$I`0j<$vr-&Gk7tkO7|&Va>h%g zL`SJe`rwNYO7B;V%D-Yi^AMs=m;0_%7g24NvUBpHtaUaUPU{KOkg$fB0|_QURcys~ zqfMNzz04kzX4Q_@?Q+%18T!XDcbWW>XZ>vubBKTlOW9JZ@VJ|w0Jt-rjEf}sP*md^ zwbv_lSMK`wHv>b1Ma5zjY*wThX`Jmy?uXaOY;w`>tngaYs-`zM?#bPvv$+~A@KOXZ zzEms1shbd8{haRS`X5&tZ&?~J_>q~%MOpBR&_Qn>M@O*LZkpI z^`73UVcSjl4d3x4tF7@|jkiy+&UASr7b@SQ@crPArQbkg9R{`>kisXr?`7e#J?b?o#4 z;pum~`rrM}kT5AhS)Ap-_3s{&rY!@n4QeWsLej(fvj=u1Njc{)&eDjC8&C(ZThpNM z)QzEV!U*YseyW8@Eu_;QBY(Y&#i8YTmubCUo(#mnoX9WtP;8${-w#0Oa)Ai5Fwelk z+uENYNVx&qKPC;QdRa5kKD)@{6t@g#x1%LPZs}i?nCPn|QCz*>g;ap&!*Al%sG*ZO zC8UvYTDC+R#PjJ;^@7=XoSTB9s)@eJi`txUa6}(c6YEN%4)AaPJa3<+bcNw|y1sF7 z7+CXsV5vw6yj#PRhCy&;$@A&0?JEMd8yM>cy8d2nwr|M>Kt!6F*$XFypS`g< zh*KErM!uU|DOKw=4lo95SP98I$u|_7sfjy)wLMX6zYqac0zSZJA8Wc8s_$f@=HVeR z;n6kpCg6cYO}&X&KB}r4N`hz-Z|JQ%Q=7ixjj?$Q3}Dx>z1rxfpC9iRjyssq4nn(z zq^p7%72F_plDqvgSCLsg($9p+_b7hf$3>Cqh6j8Y{$F!@wtPsKhH1CiL78~(pyLA+VaQD*wPmG+#CIxsH z{@GoRS_<^?-2Feq@LNhuA?w8@4iHiH1H4(A&qNXuriEjW#E04`O%!y?`k=%H08pl9 
zY=}44gai*HN?@N;;O2(0TIx8!pLj_Er|X}wF8uJv(3BS*u!}kHPlEyg?q^)-?7^w~ zGaZ=Mh``tx7JdGNY&4%70PJ4w*|QHKtgI*4T0O$bxl zeZh7Q6n2;p0LBt&N0ILaS8j!39&jXsak}_6>&VkW&_V znb5(KnurTI+%O6O0VK&w^aWz9ucAq;wp5VmJk16t*wShf{|v~Sy_y?1!0c6 z$+;&d;rdGxW2bf-R;Lt6tA%|DL@5;Ejg76|dS?C89mZ^rO&&LXLDy~$BhBXI7I)aP zI)}n*9{v$((wGD&L3Xy3YM00f~?v{)eL2 z@JD7X6B|~PpWI!yS-<_kHll`4c0F9md4&4TF5d|jb4b`~_1w9z8V_j~qDeQJXv_;a z3)Z~rSz*U&te|sW_RY7s;Hy_5(|89w&my7s|=-g29N-4KGqWT#xTCNIS z3OqJ!qVCWM=U;{?9^E>X^JiH`kF&80)LPv>8%>_FtPYJQqPRY1GfB785l}+j*?f8J z=ioId6)b4I4Ktkpu}s1uEhph$o3kv5s8+X^iaKbxC4&z2c=t0cV}#H#LJrTDK2}@z zQsKSF;+PLI@4Hp<{1u|P#1-kMdlnx5)wdE)`jq%*E?Jqwa-*{<%kLV*EsC?+xTW=u|4(8fj4%lAFWQtE}uO;TBZql0Gx9s zY!3F=-=k5Z`3?`!+T;pDLqwXh5GKPrRX>gXkFBo`h_ZPCeqZwd!J|dW1L+cw2GK(r z=}rLw>F)Xo51=$kN=cW5bfZX#GziktU2-(yx8SE9@Av(CH@iDCJ3BKwGtX=dUPI>{ z4?ls)1c(0FrFnIewLl}m&)s)0mnWM15%hSj39hMJzJaNMBXFB%;CORI{*M3&-Ztvg!ALf+eyyFKVvp^~{5tm?B{i?lc)T@g+ z`H!##@)((%1_pm_z1m1H8ox|-qWWEh8n9r?S|$jy9y1cX4cM$J)M5 zwEs`v_Gvtxn3alL+dGzPJA{L4`%V5er>1PHGDzc49HGJKvo=iR1ll zsj+Q8G#^$bsjkHqN$M$;y}2sZb-_POd^{OvL*Z53Gz({iJ^9vYu(otjbt( z7u!k+3$OIIy$RdKtpcX3n{R&}?C&$WhaxfiUmO*Jdy;UoPV)!iC_SmR-HZx;3)|4$B4dd!Jn*O0UIvR>ISmt7Y@U}*W#I!E%&R55$+O};cu4v9ftYUJks@*{FI>iGIF0o$dxu@GF3RVe}^Dbi)u0y3+eh?W}S7g0jzAE+{5M< z)1dU(b7}qVT=_S%wp$;DVizVd%jw^&76^~u?@kQpT@59_XI#>zwURV}T7mQFB+rCj zliiA1t;$xx6k$8QUQIy~OkHtVKq4!a_;8n-XFyfuk}si(BQ{Io#r?o=AFKwk4LaK8 zc?~6^Ml~0;T-K`NJ{P~QFVy++9mI1u+JA z?e6ORpTbvu*F@bwmX4Gw0SO3vx%q-^6o0LuM@4hpS7|%v>hIG%ihhAlP1#-jF{$HI z;Yr^Tt&W&}piFFwaWsaw7}kUObZ8wtQ~Bf;6s(E%>yM*7uhw*Y5Voe~H>8qqL!C+gcIoIRE#Hbu z%NDPF?v``W8&OZFirIwghF%S>n9tg++rWC3>t#X6O6zc)g^`v`!SJ>ce&cnu-jeyP z_y7D*AbYDeZ%Zpp6AYc`Jj3F+QIhTwhTUHfcaf`Oh*qU(<7JAtq@VleMVc(Ctdj)Q z83%{{mKw?Ir%7B~2tir7*8zoJ4(NQYw+S8P?rTX(yO>%>2ypN=>oxz-X(P*xslc%2 zOlx8(QDB~5mhg0=hJvTZZ+0)4)NE=__%aRL_!K#|W>V|B#59p1{K6jlNA)ay)HT+V zUM@ZLm85I38ogcB3NG~rR&I~gHrEo~5YKB)t)t{tHh(5z?ea17cL7w4szWr zcPjY2=uV#GplFDW^_J>qmakVc`Ldrd)*I?sg2Kq%v+fbmHtXE z^XDCJV*P^~!}-(c 
zScWFYb9k;B>;cJJ$?|WU)PJ29?!8j{y6lU5{z`+`lPCM}+U4$dJrt+) z^4}0rW6od@_FtGzd3?$582&vpaesWHFS#*?N(ED{u5F{T#Qv^Z!N^6D1Vt1SD5Q=b zmUGmSimw-y$NK0<&n-Rars~+U8E9)$mM!gDG1MorZ$J1B|6Z1!6O|i>oKcdGVa2`j z7^j*Y?Os2-n^p}L8XZX-VICo|uY;fImSV+DzC7N^P~KTm?`rbE4HQu(bxjZ0qy_AJ z(=TUw@@!-$uh7=`)#hQ(hMTbv#f-Pa_v&Md#H+i36VaoRY@=HpUQ0)-KC+WnGn!N; zg5=vMA0Pyv1`q!vMy#~qDN<<2D6uD6!gfO$<0HigSps4w$i4E_9{l;*v7UWvb3j2- z{u{O1VGV9ggIJHvmZ>(L`5F$8nm!5G6wh0VD~9d%CV$9DCCt5QG9+V(qv~R6Z{y#@ z_pWR3dlG)uzlMoSI0UjDiMnAj2Q0k$#t0<~%o=6MGaTp^UliYYP3H9@Rw8*UNVZAg ztL42Vl=(&`O`%5D;qv#BGKa7vyPq0vKX8cF(}}6oBrCQHov0@-Z7`z%2Dc|cMc1Nh zF}-`h2~3voN3M^g*aE}&Iw)T-edh1_k-p!q%RSzgQ#HZv&?#)V(0KP^+L%ZTM|p>Z z!H+i<8Pjc4d=MS4TD6|d;gzYfEkcyy{eQhfHU&Sa>B5uK`*Oa^^&xF?BLWtq+M@fP z+wNe6Fx?y1XC0=dpY`9|DDUlmqA;?u$r+b+RO^I;#WuOiS(@JOP|%eY@7sUDvh+p* z!`OpDk^0F+r`Aso72`raKiw*F@qbPIVhrxp#_yEcyF>~pMENf1o6!xvzy4Gs>w`y0 z+D z-0oiP<7#VTQ<5_wC@N)ovsTn7RxEa0sf#*U)?yoi3j#s%?g&xe{_f3Cr^nsdc1BC< z!jx~D=H2|a+-1jC40aiRY=g@@ub9S=NqjcHodot2DT+WN(m}w}_s+<-Icj-rDcMX; zf%F3(x~q?vm8W_N zHgzB#sYCkN^kfdgAKEu=m}ZOndOO%~kyVqQIX#J$nhW~8L}Ew3>~OjV>|u|_jAEue z8Du%|U59*?wr=13nHwE?C*?aRv-N4UjkNjq)y}GLPS)qPYlxK`j(A@ndQ7h z^Yq(gmW`l~5^LoXmAH$-W*6J9t3~D2{8MTe=lNDZQ8#0=?G`7Lkkvk0F5m^IYOJRI z@L8%_k$!4_sMa9^gQ>l%H`O*+o>a)u(aV#K8-sYCO;1BAD8RR)-{^^dcoId>nn>wl zB9msAy-{V8l!UZUT-(wbubqHiPCqS$kd|z>Ho0EsWo?Y2;0XIElFA8o zC-HXzRan#dte9x7RS-{y;dKjBrqRs8h<}Q^jW80H>NeH)9&|W$jcen{#Y7M{Qln;h z_Y=GxOfXDptu*IR4`&WEHrvxuH0zIEYEQh|jT*isWS+q&j@x({`Ihc=o!AyG9oy+z z$Ga5?4G(URv}Dk!(wEn=tZl6D+68pPbYV+5?`)18 zM%g2tQMqxwryj0Nt26q_QvE!Dwa>0kw=zDdSW|S3AyjS$vK@3Cq`xG!$3hRv%GUByVux@Ts_``y{q~{-l0-ZaRH6NL zJp3gS0qxQsY#+R0fBf|!rq3Z|OsqxbLhfq?Pm2DK7To%nt&22E4kUaCK`A*PT$7Hi zxMolf?T4zFH=PA#i;ppLODR9P*M}zM+VNW6WwmEppq(NmK(N~(D_>Yg;eG!98S1wB4cN@%PccsFCy@fErkOaLhD~|J%m}# zjJleTaJpv_R|lWZ^bI44=aKM&6qBYmHpyBOZm<;3 zbyqv%pKdPB6hDY19xQneRznNzi>{1nyOi+8%EpBLA?r8SFe31fZb&jG-Zjl(4Ddng z>p1*NqX6Y)Rz0;uW=4th zlcF9W{K+hQ+DJL2s=kUb_DJb^_<~BS(KVV#Tx5%Mf6QKu!JQujP>*bV;!H|3*`aWF 
z?wHWc4Kh*Kn#sC8*@Jg4k++dZm|kYq$M-fL{a_f#r+N21FE0G+S;4S0Q~`lPKU%&p ze)$&#Br5g^ulc@+_p_3QL9C^=@#_u z$qwq?;B{oSFQSEu>cDOO(3JbIo%j4cK6a7Zy#imQx_uw4 zxoW;&2i>S*hK2R@KbGGrs4;dEhkZC`7vCA>3xu0Bt{MpH%wM8mc;tf^ljueYB?o1aB!a$b zSb~9YB-N|SrEhLnWZuq@yaRbqi%Lz}4QDg}^qvt4jjVx9bi}o?>saD2Xz1dt(_Fi( zS!wkQ?nA0<#&g7t%*O8OH)_h%zz0=YZaB;1b)!02?OWRj=0p*quV{~7tqBXuK zgOC9tJ?s4&gZD=6Vz@|~fHKe5QDASbtSoU5WT0R_cJ0fT?&vq zWkS|xvNYxlQF_RHktADfWUbm6w7XjWHu>XTUDnS3;LE<-4sqC#8Kp3wfBv}`)5*vQ z`lrRfa7@ciS<^<+(r-jBZHjv^XR@^QrRE5uBJor%+!wh{<)Td#{2i(o+lRnw{DRtF zuG%Z_5>6stmQo&)r<)1R`YaE|Zq@`W z_I|0U81*EDz~AClSz_@(re%5MVsI-33-80!deidMIK{BOoHm&Y!wjLF6oZv}SpvZWku)$pka2cF5NE z8zrUjX0ZN%7o@kUb`50@cM#eEzSBNJ)t7^Y5HVODS)bpgoy7P>)1+2E`8Z;WA!cK?8K{m);NG)ij+-2J~t7MF@t zCQxWpB}5mI?gdjp-&mXLUR&T%L9gSO4FzDa^}PXOy%4@Y<+H_0z1F80zs9Q9YpXBTmMFPdxQ7U$B@pN!3{a)XI|b9iVAjS z-yc+hJLZ*LuE@FN%6-i0vah!v!W<3j-}6L=R>%GC?rab3?O>C-P9U>sIxNSn1 zf8;hK-)GIH=08GNJ=*Q8TZVt*o_ALrmu20|r-mcH!`Hr5UiD04>Lr}(#FU`!*{NmU zAMc*@3n_)yh$p0v^ol1l+{JWmG^$?;TG6Y#U)b#KJTe zddx_7QoOopp_|E?M8ea4`LL<}aj5L%?VAp~q0iuEt;aZ%@<=J=z`i4{a;7`uUsfBL zaJy_2V1|Uu<|MNAmq9lTiQS=wxMsI{L!8RwWFOA`@$FnkQPXoPOs+qNnk@U?KRQf9D z(Lp!XzWcA-hNjGtUpR##UmC3+wJgp|DE#okhh6q9Hl`_SH8bb=0JnQTN5ro-s)k|E z5>z>vZq&U_FOu;MlD`Gryg5xOB@rR@NVCB)mO`P*hkFR@wFoTc5bI_@-*^^}+U+s5 zl%W@txhy-xgS%4h?g6`zn~5p)u|!7;A`f9)cQrokitf=HG18d&*|B#;d*hYa7fpph%>%2Iup z2%$H2(kofrw9a%U0X8ohiX>Oxf4dN2{_}`ueYYtEsrT(2pyWJBla%F)P27J9w1^0|uPVlbZG zGzK-tXrdG*K6Xg#O|9Gy*V-wb4g0Be1#0-}li`oJRwrEU4HBgzftR?vd6nb+1Stn`^Z$Ww1X_V5ry*dT2$5 zgkkBJ86SD={@dJMJs?u)MV3u&NEcsO-VA>6s#$HI=~y>uOn-s+ zg`2zc95C;Pau(u+*X7^7l@@}AQR=yK)q9Qj{bYnlYWYqNp||oT5zFYOx1L+_5nsdASHIBW=nhi@+#HhJZ~+H~P7Dx3M3z_KL6{G}gsrWC?5E^`fh% zBjb!R+^cYKx{SRzC+pep@ju^A1@qHE z8cvTqt#)Gs>Y)0UyWu*%7*t0VqgC2p4bR-9oHiG}AptwWx%T2n5sB;R+8TvI=6c*X z7%3Z8N34EENx3#o7$uAcVK@`GOU3yR3yQ$v8GM;>tYz0VQ(Gf?na$==rKQJ?Ad6sy zofzz5lrPF-?b|BeO;_yQ^@)l?D9QXR2P*LsColT8g zWO`b$*Znxjt>+yLim)QUOQP^2ZG>!mDE_=7HB@x}$?=1Ny%}T7Xq6oQ@D;~n?vKj4 
zyLR_s8@V^Djkg^%<|`825^OZ&=f-A4$v-*b8RIaVhFdWGn#LQ4bT5Po zI>g+VbK@yUf1(j-09$&XtfiUvdC%0b6i5G!!T*vnudv?bTh%0}XWM&vSMS)bI0IG7 zE5vItdTKNaS3X)Ci7{vpWjJkrMBT_G9N}>a328yx{jOPWvM_LKJJwf#%&{?8@1QJ5 zsb8-YN+L<~IBFT7h7JQ|lx;m(1f+V4Y0KaRS*`LcPZxNf&>s5RXqblS22HLqWMs`> z;&c3nsøj5eMx%A-x0o}wB{#0h|KrQCGXO2$SXqtSipCd2t;k6#Hxc9>f|{jF#7y564~bPNY}76`)DtCnWVACOnjB} zXW8@ToMQNg>uY_FaeQ2E9yo^%j@pv6Fu+o}s4i-!r9>;0GfOf+sDE`+)n1oLtoytdvNt}Dthc*_txbQch{FDgN(8;IG@dZG?Wm?(XuNC>NlrOZTk}d3 z(`QK2gLyWOAFjXTKB6quhu@d-_^CMaD$)I`9MAU+abQB=qmIjA@|ABTXrR2M6GG11 zu3jp8t~KQ1&vphkGyXtmluOutK#%Xu%+wbx3ghI*Ed@cq!P5*v*{_l6$W0YfMZx>V z^PJTB)34&-nh`hNU(1dTxN1$BKjfzF`E73}Hl8b@w`@#6JkRpxB|o^?+9Ca@l^bRZ zA;Z|NQML0Isp@*UppGwhvGirr)=Nf~gx=nh&l|mScra3xWx?`p=@t;MGKs~os`}pg z2TR4b{NQ%jN!bHO@ny_ze)2Q@Ppskx_C4b6uW7hNk6@N0rr$r-fgzfDTOA)nUW{bL zQiLJ;zrA={=xWfNt&6DD3Xr}|Yq)!PX~~SvWa0UV^~kF+&U_)U>mAn+16mI0&=Qjd z=0$oiYP`y>wYYsU?Xhy}tvrnF2H%Gy`2rI*-$7Bp zR@%y-`$^;a6m7M2^YIRT(pS!0IT&`9!{*3!k$isLG@gnN2u?(tN^LyEO@8V!5R)^c zUd|pN3RJTj6luGA&9r$!y0lVTHU@sPVd7D@gtwaG*zFEW!Hvj5E6)ybj5R)q=WsJt zj)Ob>JjF6+qa>X3HLG-qxm3hs;6!{`zC%)C!5`4%@t%B zB_U>OyjtM?rRxxBPWeM0Q=LOqq|E^%^CfXDd%jHY55(PeS-yNF9 z(jtpNrew|nSVi{;dHnv+L@t7&CvW}3*Hz1|3UZu{I^L-utK1>)HBKsfTZkiIa}Y{!`l{D`Dfx56dSI95Y-KN3M zCl|xcpLR_76q5JuE%7V8rgHQ@ylAKyZk6V)^7d(k+7&ra5u3mruE5R?dV3^g@;jQ zt0zkzYP51?@qjl(qqWMqbV|qq7fBYg$5Dr^rr7wh=mxS{sX2Omv*Q~^jmTo5{lNXr zVTVfQ6t=H&-7U6WJ)L~E2fMf@r}BUC;nm9euG2wIC58^kfpc8$#+y(l0+nw+_Wp&y z>Lia@XN8{MQkiM0>H+OHxUAl=M=35E&YHEUoY#xo2~+%~Qy6>&k& zjKw=k>)uOn)xq6oxx@PLZ8NtMtZSVBD+GI|3^W6C6~IbMsxkpEwbiYzZy4aRm*?0K z^<*Ycd-ioSRROb!j*S7LBw?vzhrzqO0iOin;L~<#=!+{0`R!7IMiz&#z{?HxQA96^ zzY-y-dB2Tagp^hjtp_T)mP#n&6707JzHiKdPMIH8A7f5&w|`A#_6s*5Na+?(ONuqn zf(6O)+y65~KJQ(;^}RNuE|hX*q?`ADsR=((PoN&$R9~5eT8*2MPZ=PGfWWy5m#3SM zC!)i3Y>Vl8+kvJV`|!^iQT8Y{)^NGTAf^CYzmiQUMT;+VG?8`H2sAGhad^J8vTBAoZ15HeUzdH?$jyeG zRKx&1BXaIA^Xf{<+cxInbH6mjvOtnGZ<^LRdphPxt!A<>L}aziKOS$eC-BpvHu5Vj za#D@_-lb6OWQ!uhYK4Wcgj+G*zSdh@@2&&QOHMV`;t;-RC^byQ3~k;TkJl8N(_l;F 
zG?RmEIBpW8m53tq`A{!;37u7yx1CtbI zAj*0pISTItez-DTOx0k#yVYH@cJxEv&7qakeSdx&r#Lw?7|wrblGz*#x!v$7z(FrC zYRA>otL(-uob`a5v;aG+U7n_n@m@yVx@$nv%xH;r&VPfXFioww1zV|o%xTHWO=X#|h%!bDmPq9mXiC-{L{QT75`6D99;3+@ z(v`ewsJ z|MjHl6psD@$({vV_EC2eFRfq;Y`0&e#;3}6tyEz$9+-3DI1gySGggvan%Nb$R6t-q zaIHis#ut*bwuROuS@gbS z2bHTaPhn3LABM*}35hrASeVx^Sj8#M6O|-dA7463F@<{AQ!mQclaZ$-&_v#aNSBfZ z+Rb+dXks%r{fkhMHT6$pe{wGDV|-)*+^~zXJ#Nwk4d$PR9!n>QSZmRnV9JFf`AyY&VSZp8sbOp}Jc^0h-H3^dxVtl6GdEE#x2`czH5| zW`;Zgfl^6!9xWC67<>`q2kR*NP0kjc@FKq>0aoPVt#O&Fc?@8Ynt!`RT|K#0x9<#x zazf2QR$8?Y@66mRmr3TVDq&OFi24;d!FHh&JcL8&;;nv3>rbp*@yyWGMX!N&>#Z>a zvI~9~ukEZKws>js`Qez*`lz+H-$r}OYKav!)P+kv+GQWU3BSw|@dPm&XInFyUcvg| zt{*$JUI~Kty`TIuP|J}_D`{69aa(YA%En5TPDfP@G}?#TV4sajuTfZau62G6M-bR~ zepe{}_n!xQYb15X(IuW?+4J{SpTYSh!ziv|l)+L8N9@yJxJdR0dCpPSFsp>s4<64g z%}91v?g076BY&cat=NMn`F%Uhfz52r-I@S_q^HGm3tn@$uI679%?6E4+TkLvz6e#_ zTum32m6X0E39IO-%Pw&BSo3AV`{-aIOXk4yQ@i_n&De>I0S^-QM^mG4W_4P8lc-k@UjTfMs*>ee(NlZ7# z_9_mD#X<|315YO0CJ-W><&)TF5M`-v7IBGP7s;RYq!4`g9FF+{5(r%S!T``c(W>vI z1F%GK$J9xlX>1Au(^`U%U`&$D%>U+QPf@U;L(;#uFv6N@;0=Kz4UDhK!FYZ_F~=mS zw|i;E5uIU*1xxv7d-9!Mkp&z9HKqO##DCA`^>1Lah=g;B>KlDBgmQZnVz!2(kV&M? z*=VBA8BoK_uiauF9-c}B3fVkE)*s}jI;}4)mHiJMcpnBDgK^w~@E+1GyJj98g%=YI zr6L3+_TH$PZm4M9PV)H+!x6hCcoic#<85=I^mxzwW!^V7ohh9cebBQ{vqIZL-EZw8WcRLSI zLIp>%!&0unQV4n}l6Yp62+CcEu%;LfJu{BEC~D7RLdE~yo+j?g83^KY=%^r@Zlp6N zlKujMz!%>BKe!Yw`BW9}hJndvW+LRFV_Bd;E$@ueHFN;i4ZSV0+vxz2wr<^wN!8Es zoK~%9Sdh)%OM*|Nmo~qXfN9J}v41Vp%f~`vhOfEA4}B>uvLMJt=tsGt#?AsR@(Ub^ zfTe&pBwjvGnm>4D`D--iQbiQwQ_t@B1(0Miz|W2*QxqxD@K*JcWMO?2 z8ZSfJGKgxrqa}~i;!!nbQP@4jn`CI~-|lP1Z>N&t~0@V3Hwz^QMyJi9O8sVu6;Zmfv5g zD}GqxIuS-t;fHqu%^5)T

!%kaPR9P3?F>D)&Dt=g&k|T*mVc2Ow3CW+rNH=%0tt z>aq*pG;JTffFaUm$KFWj&erR*NrA=-8jj(jNTOUt3^vKwI}j?!ENZvUZu1rzM9bJm zyK-09;G?-He0P_7ct)TlkuH%#EJm%Ay=ffQHcmY05^^(g;t&6GU zF9Z~UseTFPN5}FG+B1OIM-5rZInE0puLMqj0USvJN4j>5pUOqn`_3482xV?|S+2(F zvL*RnOxy82U(HEvV)%;x?AmrlX-OTFCyT)N&SVP#UUh-5Eoe|GPR&biPU@viDl%4l zC>eSTND%$ALaNAv=n^Bm1n$?jpzc0v29 z9%G-<|BID0f%W#pEUIPnw%biVyO}vdt~iAvAm-o4=k0Tx@5kfWXMbAvY|uwda=g$< zL;E{a^<4tXHm;~u!9Kzx)Jr^PI`j_91v%tj8edSQh(${dhAPg^2U0e6q6DO(c6;$Q zY0+fDjP3I zUbWza`M`Z>Hm<*YGncD;95m31J;k=D7TIq3{re>l`2!sKb$FCetG-|>kExBULMsu+ z|ML>bN!=}qk-I4h^ZUIQaa8y_%!p%%gGYP}UN6y*qWmI;P3#6>)Nl39A=;q|b}Lk2 ze*QUWV?K&hD#~-n(R=~4@BDKLm3vvicD?E7#N~nV7I~C8C{U<#sHl|k+{aSQ`4U;h zH|-y8D3{;hM1XkW1IK)Dipt|h4v$$_)H663?CqPdrr4H}!X#A`;7lI~PIhC@cKQ8-OmAil%*9tYWHA(B_yMd>x{9>t{U&AN6 z@(xmh>fvd>%{$||Iepgd?%ni%)mA&!GDJb=#Xz7!vKprnDT^JvZ`aHLrdnQOypB=wb zE62pI-G!U%GDA1*7j9Lc{9%Sd*p%R}+|C7t4vlbmQFVDAxmTAdHnX`Lr-R{O@?S=B zRgv?v=(4wDPCSh)Z1Rm3lgh}#7AQmb3lXwFwL__}Z2SVhk=nsLD)`%*mBCK%c$4UF z8a_>UznZgjja1~3P5@y6t-Ml;Rt*S_kfHp&2=j=ZM38Du7K39+6UNld>M|)lY+zlW zJD$JoZ1JLe_>4u|PGq&lG<4^cRleebl|7r_bCd9^cH%Z#NR2bwA|-fKb1SK3*v3oH zwzFLR)uS0YqLVDW9?Pie!Wi93XT)ELa#&-|3OS)}UJvy{96z_pQy%``ifU&L8Xd!_ zIzVtE4V+sW;S_13xuvLQ2tWn_@J!CsrBO{U=^LZG0o%ZFax9VkyW^@f-kmTcaIJnz zcCl!Kmdt0>91(&V0V02!i{q*p3F54`Kt$r7-$zKxVbPhlD|-0hyBb4%Sjkd3<6MP| zn(V5UI8x3czg!hBaoatJ*koh^&=dkfpSYsjvvzj}}p>sDHDWOa&ShG&5mdlt@LROW)#%ZzTNkR$H;RKoE8 z8me~ZwD`G1$*LO56Yp;~ebCYpFxj;pNL4(Fcgx8*-G{q@v*d z09>Ho0PpJnz-!@DFE1EhOaB6Q2sPN;KhA}oL#5A5X>SaJOGU@^sRz}GQkV0I+0i#| zuEwk0pBn0x-?WOOM22pmMBUkel>f0Kb3vQh&rPv@BcL}Hq7N4Mt$?%t_lYCi=H&U| z8Ft=m{|{!0b)2HnjdMB|b8NBL@ZpczpFgiVFhxa2CySlli+<9fW+2#qEC=SpqoAi-j9k`W3V8j43Ppw@{Fxp=G2X2OG4qq{>K`Tr0&k4Bphl{f+L5pE-3>hG(9@>Gi?X=XI|uR20X z`t#NQ{Q0B8@`ge;B{DcG83_CD!_N5dNUi6)O5jWJ`IAYYa+Dig4oT3Ez5tN+PG9Vd z1&vadxEbf}@p~oezRN;8jhMam%+$-mS!Xcu4x~}vDL=X#f<(<9!W(z6n1`~H1zJad zEtRQI$SES9Ip^bsyCn9AcT1U+->DB?z+`tfVS5U_C9Phj2rq^d$@ zU{h!pJY_2W4vy9lHU{_pd1%uRKpe2Vid8p8XAdrU{xobVE3(ei9_iv^m7@4S3x-HQ 
zyW&Ybq@|dLO+9L#C_Sz0tdP%nN;x5bj$%kp8rTEBVjfFb7+>a1mV1(HgDYqZ#4hpk zEbaw^CBWeHAxEY;6xy-9+3OUVW0aJ#`Y0a1G2a0zN~*yZE-UiazH*&Z`%*(!=}(EU zp&mzO6hEVvjgCV@(QJ&jDmc1GD*lYnaQ!YzJ%C3dny{LUEq+R&dE4i9MuoO)g?w8B zxl%~ieY9m#Jw;0)6-~+Ecy2oATGsNDFQK=fb9C-q}o~T=|P8quq>T=x4cFiL*7xVyJ5iH$2_) zbp!q?lEAUW+4@^kt8hE*&?1X}v>;A2*@+;f<21$1yL;pVm7HFB3wru)v5HHagAn>x zvx%|uY>I-EWTx4?{qC<+323_?SUdUoon3cw@uOg#dAG_J>zLKRrdUy|zR|LRgg{>))X{8Z<2? z;gFqRXhwoLNdTg%+*8%HXktP5BJ{1jGi>`c9lM(WI^Xcf^X$OyS`#8L6SUJp)r&Qz zHgANZ|A49BvotyzR`4BJuTQN@!=|pGY4+P38XN0yqOzF_JLgt~ZmxoR!D1F8;aZK) zd1|Sq#QBi>KT}Z69hg%KD5WS>hu0`Yq3eHTS*5D43AeE!;|wBf2~6VLiv`{j`D0I- zEv7)H+f){+`bWtq5PNqXmN(zhojMb9I8u( zQhFV4fx#74$w?txShV$NXd zLZavz*^9v#~nQ;~ihYHkL|4|GoRa?Y* zsYY-65NE_vya6D2H-ZfcdeMzcdq|QXphoD?8LpNnB|nc9{~}TJgAldnj`frz&*6W4 z#GFi!)>0Pc0eAw~8tpu*q^ zhE9SlRXsz|>XeE?vGY?~q3j_WtTQTkdgTbet0jw`V*}pW!alWC=9$Qd9p)f4^hD23 zFd;m*4j;S)fd=@0uEC7G8x!K6)iWNQ$;#M$VHNtkU2s7}M}6>A@Yn#_3ePwIU-Pq2&36K97U<8x>_2eR71==0i9BGk4rKJk2)D{8Uu) zrvII+e-X849)Ze>BnA^0aM(`epEWC)5rR*kDYV>&0qxjeFbhDt15=;-hjL1S|Mq`2 zOyv4kqDC3O>Z%ap|1hLm^$v6=zK{4Ld7~|KPMif19qFfYB7VrzK$|#dC&7;nx{|@> z&j&q%O@Sw3{`hE8C<_{(qlUsxLiAl8Ti~9<^J|spb3^DY`hG1IdA7p%{N?T%>^Gs$ zgDD4_N#<(j2!&rG#V~4GFAT1hZGt|{sWvCcBlwBAb1E&Dov8jJk^h3**%{kGQ|cV# zvuY4SIxkO0KpdZF!Ai_<&!4fy`wU=3ZpBkwVnYqK?Cop&^IJASx&H_V<1i7@yHn#o z(|UypPt*Q7eyPcfkt!kQ1+umqud(yjDUlIYX=^aNaZX)Im3p&)#mnUnLEHsYod1=7 zp2I+ZFM%+m1*kpvuU=SmUQYq4>HnVl3~=rz!9(15XksxZHi-#vwoE%a+1!97pBeu{ z_}(N#I+)~NDZOjfFi%3?^F&+KcD{4+2s}HVa;KUj%HEqEg}x)ecgq0dAN+piBoe#!|Izh2WE57B5g)LKB-{B@PwLK#ba{0)o(GK2 zU$Y%&)~f%y&m`JKN`5xhJ7$1hV!%_jpP#XL`a8_A<{P=fef?zjsN&Wtn0vi!5TrHcSmLqXJwrO?lkOBA$igCCGdiV zss$mI4a9lpZ}EV99YkJ-B#sQm#hgDLd?vE0<_|g39FVD5zBB`Nd#r@0iB$9H`vl z4ym!qNI**=DO0;eAK5{xh}ygi4c(GJ6v?m1Y=uiNF64^$tBfNXA}`FOTzdsdo_;0{7IH3qDM7X6b;Rl-EoXcEh1 zi|hX0*mo@vb9YCGUgp3|lO1R1V=&AK^vJ*5HD{_P_)RYzlmBJBMg{;0ny zOARTZowpaKhv|PR9B0uT8$J--DgGC-K@V@(0`I$tgXvf3MSpNDxbK7%l8u{~W7H3o?Vh@BJ#zp_{{5Hxt40Ig8xjgK0nb`N-Sh>_;+0t>js7i4siTZl2WS 
z7YnTPALra}@hPD=OgQ5#OxArGe+n%_*GotT`nJ>C*y;bs=o}-nE(C-gJw%@7g7h-z zT_Is0xtuWcWNfa@OriQ#$F&skL5Bh)HO z@t)W~D;lXDGDBI(JV{;;U>afrKc`%W@?(|I0xaJ}yNk4wPaH8PShhvlI8y5mPlMZk za(LfGK_ItJ#caam@G{~FiG+ISGc>yBCqt5$HLIl*)~!HyNs^38F?8IW4I9Wm!qzbK zKaKL5qov&lACrj13PNPm%W4kVV&D^>Khj*?mO&e8Z!{m4rFR1vin{|c1m|)rto*v> ztuBXtApf9{d{o%-pFcseuQ7}MG{J%Z@$|Qv+7W0;>I}o`n^QrNHKRmOxZ%UP!V7Ak zy3p@;o^HwNScR2EJLAvNBt|!c(Q`B7i^hDWsMcTOjgFh@>R_NE>URuX%GtA!C5_Gk zQ-2GKw$5q;O}P(KO+{yv+C-&(9oLtGVU5}%&|+kfAxl&;s^HN9t6W9h?c3+`W#WuL zx4b?}xml%*<`QI6+K=b*sr+0Z1AP21|j*1zi3>wI2Nc{!P za;eS+W^0h05UWu61WP*2Z~Xf^KTu98#7q}5k^Xi3-5n^o&oYW}P5j*dPwAv#-cn~N zIak)EfwhAze6;!Fht0b0AZw87rF5%1{op&|Kbu+HHp}WRCI?c?;N!#7Y!_fyEO^lr zWIjNx2}%Fri>R}VCJ5VVX51go{%__qwm|4z+*#g~B2_C7T9HGmr+LETrcoed8+-OQ z2>z>L4ke?7Yd=j0f3hp+Fh83=d=4o?BZTS`!uqRtX*fqgq02Wa8Cy_fGO`#*SLZbX z^3uPzvz^d&12&+4fktk@;$sFDjf`RjhmvNj zDYxo6srq((C_LH^yqyV>U@#<%=*Q5Bpk}d@<(r;PA~sMJxGVMJx~{M#I2ykV!koct zyQ5oYDRqwstHJ@#I@EL>+SQb1^dlZ{%FYt%v>>7G&M$ZlaVbI!E|v_Myn)tVF!t4T ziq68j6v#FZR5~jF`0#pBy-bHLqy-!CdH+St%`UyHp^Zmq8>-?!pREQGcyi2n{2<$5 zFNJ1pa-Vwt`@3=gykd2To#B&(i=_Qc;NUS=_o=P|H3QgF55T`tM>fIXAl`MhxR29l)(XCiX+1+tA}%}Fv$Qu_TXG}38HqG+J4%rRL45bYL_A}kB2^(aS6;z>Dp zrY6n7r)&eqw6{d|*?)v8R)vL9pDS~TDS1>7|28p`WC$U)&?e!3rQh3%=t-=fhRc; z(LpZ&z8CuQ&#mg@Iqf)Qk;<1|k8)nV%5=VthnCeAoccdZePvvf&-b=pa{;BKOX*x1 zX^^G6B$w`9B&8J=kp}6OZjf$}MY_9L=}wof=f>aj;{T4%4)>XJ&YYRKXYO;Ziy$7m z3?P#SQlOw)=dn!QH&l?GNB=)yDI2x|bph*YRsR6!!T^Shm)yD#r*B$(!1@2#<+w*P z-^zm`)&BiYBp~F(UUajOr`r7gI7zWO!_qbJPtGWu(k$7`9-GokWg_4qK_M{qqhAk@ z%xcm|WW2I8+J^DdE-;->^gb92?`%21SYAKy0un^EMBZcVP+7cyf&whDAu=1B z!<`^5Mj(moB`_NlbJ6FUy=oPz>psW(JJBND3_<0n*s)@DhsC$T1)+)8Ra=vc=S>UQ zfjE%O;Pg#?Eeq;(`!8@xA&Iua4@T38mc#K4OIkAWVhv;eO$Qz&&qAfjl^XWNQHiW3 zZdHLlzhB$fTF$mo#kK4&3hO#a1|3)euqFRe5Duh{DaQqeTjc`&h6b( zZtaSj<^WO{LXPXlK+19PIt4Y4no?wm-p@CbvD9r7qx z^-(XF0sE%em9VzyEN37WJO_UtNilCn$1%GvkD5XyAbv&mAf2+o6y(YxGe;7wycJ`C zrefq4UGyjukf>V&-{x2};|wxLb1GecDw+yzMaLBro$SrYmu=0s#f!s>8ULj}{&%IH 
zd*iw(>=ng+1!eKOPW(!Wiekkj2a@IUlgra6#xxLif*XPjNFTeyu&_Ry!=W%*4VNg( z^O^C7+{BcZ;ha#tj2n{imHyqVAeXqKgrwH5m$R16 z0ESJF=Af1j1wisGh2+%DvuTn0mq00+iPR`Ai6jU-muTy9Nu+~iV6@puK)~*!yXqx5 z34j)@Z2{X}62^Y99{0vpY{JzxU&%aDSIH#wksBdK^>1N5MrRn!B_5XQVO@;^xU4sqEwq&05 zM_jGP*76acEn8gK=f-P9dY-*qHsct!&gMA~isBe>j&E-FouZCZ%^R1>_yg;-&eI2* zw-8A&NXA+_MVJ2X(Z8s9o4Be&{!yvwRCB-2ZIf~Gd;%3Q20wJ4l?iEW%+;_DPyhV< zN=sy!MAh&k0V!Nf0y&f;S)`d2iqpjcFyd|1O)~8t@6{DqQTJCvm>1k}@))C4F#6vq zUb=7pr@wKcrPKPg!Vu1A(Hu>o2fkP0Rr+8Y#B47V1QN~sm#OP96whdb<#D=*x8Ii? z_~XN+hZgG}nTu!f71F1hfQYZ#A@Fx(RLyP;F%){G{@ub&JKvc(@7Ra6DbS%pp|@e{ zz3bdg%8~_#e9TZ=(91>WOyD{M}sUIzT^0*=fPGFJ3T8~Ig8mNpr+24V3zn%R} zt&cl0fpdfgMy-E`6Sx02+?tk^_VQYanQoEq6BQlqf!R#f-P}{*)ZIjT;Ac|%Sueh0 zY19$Z`a5I37P9H4Pnb9&^2VW-71)(}-`P#~eGf6aF4M1g8p>x|NEgI+wV#>FDn zA=EpT?@nB2PYz6p)gt@H<#Ew?j^Rh*5B3H|ZQ3O@TO1mErqV6Gk-wZ1nf`iowU*sS zXxQ0=GGfImQ)#`cuyw@iZdDzq2g|y|TJqhllxF*_ns|5`P^i7(*LW{4mLx#v*Uyer z6>d{Om#X@?lBn!jZf~B|73Y^b*ksax(j9G_Fh8RlyIsdZIdB=nc@X1gI zq&tl}%qsOx>7!6zzR@3_jvHLDm%~(&R>~X2NT1^C2w8ykcpem6use82~G0bu0_kv>pUJXu5;$4nguDW-^!QfDb6Io zP*clSIx-JaA|yBBWPxbgrkG$lIw3p2sTnV6aM4VHQW5UN`u=r4oCzk*tdS!`CLn9} z7u_XKw>iy`i07IAGPb+_aEkmdiyjEI;}eY?(-gRzw91~#-{;FHy3D>Rs+-cXrMF@B zxNb~~Gz*GYra{g&mQWBk00V{HTVJM>LF0;od9!GloSZbzSGal%5?XUGsCag*%b#6+ z%0_5hMpU8L%kY^D<;pd_6mgKAqu_==EHkBa-n;|R2H)Wi$8iugJXarBprK|vY=_~y=uZPGGXI$V+GvfOv&2ifYOtKY3eSE!S~_ z27%Y;eZwQ!p9^+ZYk(PNm|M_U_X)UYxK`<3y`J~`VFQx3$${5QZ|iB zR`hBQ(?ZN5esI=B4zJq`Rv|x>`5xSa^x05}N)NTWYvf2}q?Shs_T>j9ivd+8y^&g@ zX$2Zn?s4%Uyr;zgjetwObVRekwkk&=`%!)WFi}hAo@ND=?wrMa)9{|E%OmcsV4r6S z11*3R0O$*@3@##n+h9VsVtQ4peow{Gy56Z=PejnEP`v$e+M$Kw5VA~<7wb4J`KmSv zvX8P`W77jrnRjNEAV3O*#$u`)%%mLq{T;q!F^o(nX zKZc*iFRQVyN_S6C2q`lxo##I#tNf-nn%PrxQf$Y=LG3{e2vCmPC2(kvnouprYCJkm zab<&(WY>~nP)DR(P7AFA^8` z%t>DR=<#AH?~S9{!8_xmZmC*8*0#HgY@o-Ln5Rg%ezy4GE{K=LxDEulnAxxq>|0YWUNykuGSig*hj}N#l zJ4?5%EXK@H#n?Zq#v5BNCOpnNNo3Z$K+V_wDvEZ~eXh>rjw8QW}Fw__CCu{*Q#oGJWTn%v!az zzE3`Vp8gF?wwhI3JYZOqI&*~!EMS*>xn?AjK?8Ce9KKIuO^L{Jj 
zd{xyVqlc%|MNrYA_OGHpl;RS+>Z0{__9NJ8JtRMz&D3;qyDZ83G~4BkZeu-fLb{beSY;i1TY-1&T6KPAvL6sCGJ0=LW9A0$EYvvv}k~K3wx$!-?)uafU zT#a4dR|v{z5loAmHtdPpTD`?qzLyP0PG$)2qSbmh&<7Iw}>xBcCh9C{|A?yPt{Xy%q}*71+R~ z4lHDwk3O)aEIB4^mmnhqrgNG8cLQtdj6KLYVqbm?k=i$eQDF3VL%WjTIa3oIdELc8 z?`@mCeNd|EgbTqTX55m6fW!rEt{n~mK#}j<0d1Z7AW~#ktz5b-JFzXrSzjN09bGyg5ShU{zAh^;D%za|( zDJ0zVI6$SU21bMMg)l0eV$kLivcke z_r9;>Sqf)N(9`x8=RXiaz0KQHNxXj+CZWi0% zPEDbzQNL%Q1I5*UEJpmE6}sDae5s#M0?#wc-QDEU1HP(A}fl0Q&len?a?ocENb&BKuH3i#5yRXlvE7# z-yn?o;-4F*-8@9JuH^Ofu`0#!TC)eO1z^8a34LYPnaEa?mZ7% z|5r@E{H{^CGenU6^W#Uq_Nij54ahmoUoe`4RuKT-A4KpmxJ@2Cd-Qo;@_t$^4HHKf zI2(#VUjBIW$gypUF|Yl#CWiDqT}8;j2M)bFibuar5v2-VQs8i^YBB)Jp*-YPcURI5 z_wl28-8FcSONBZMPionSF7eFaSS?!H#}E{|EX2RBLKglsx~}@nF`bTp}h8Meb)> zQy%!6@!KLz4+4O{$FV|2box*5Fr!hg_N(Rc#K#76&VljxfEPw`yA#jCNMg%#Sb~7- z#wwFb@S$-G_CiC$elaNpcGkt3q#KiJ14R!LXn;$a} zk`E-`aTlDUuQN0DQ?8M`$Jy(chC0Sz3|>07-CZ$zmVe6~*=uwkD$8XfVxFVlwtUoh zeP@s-oXxlvey7Nqa-a(o)$64ZE_Cc+u-=SVPDYMIj^iX1KraG4WoH?^1D;BM3p+Fd zQhYj@`FAN7Am^ot20NiSS1$+NVVIK;9 z5$k0pp6cDkAHv55)o{o|oO_OI$;jxR?n@36j9qKg#QnpH0ixf15+GcD{KT=cN5#=% zK*F0JY@JGAF$ET&m0l-W#C)%1G0f@NROI$;%v=l|z*;}huUWy|BYn0Rs%O`mU+3c8 zK2e9vq7Nk`54LkL-DKDTJ4M?f_QN6>^$BvYA!Pw@UxnsZtd2f>GKX7F9}hCZaHR;4 zZwGp`6Nc_IABl`cOJWlBNIoFz%$MaiY3<*ONQA!p)xg>owfDsEV}yf^25PZ00$Ztr zfBHv*NEuokui8KS9_g(+4d!jc9l2v;kC}Sqs2Xh{X6C21Y^zb>Y>s^*$bY&RJRkRo zeUcF7l1!9qbG#&<@UErWx62~6=&v`RBk3!nM{N$6Bjt1-M3w5Ejf|d-j+q&5m}LT( zLSx}c&sEGmq~``c|MOvV^u+AV#jiG{c)KKRp_a9G}0k);I#%e62K>m7^3Ai~OPMu#-Rj z-&Q9C#6`+#kPn5vR-{01!m+st8nD;qxRMcng7L(AT%*{tZC+A7wXCX?ypRc@^nL^U z9s1H~pSZ^RlSy-O#yQ}G}IMmxBV~{9xgZglP0}?HP-Ps7ZB*9t ziFi7GLh~Q&*b#$pgp)=h7f$NShrV9^+$RALg6%6^@0I~ZU zmbb)nHC;K0dSA3|1n5otp!YXDqo&8>BzD!-fC3VDa<4N5S>Zh9!vs{Z;JBUvhA|t!-wVfXg+%5p z%qanrE+0Wa`HLmIgnq)BPah4BEsV{<2X2YK^{v;f zxDY$i0b#K4PG`t9!n}+5Un8Uy2)HgQ{>OY6fPD_6Ci8C2>R{D3cuEQXy?8JuxlD`LC;Oz}-Og53M+(3d3K(^j zuc#E-EK#t&nX~Zc$sDoRsV@Kp$LZu}pW~F}2<5WuV^^l}Cadzu^}qhb{}3RZd&*a3 zaU1Las;qTT6bxx4OZXMnf7J*G$iCd?wPNtc{Mml$-S6_2rauEA$M>Lu6J@0RuvK#Q 
zw&@raC9-w&MQiA|`7dig9Cio)w2%={hzA#74L7RGy<@uk+kC}$nI7ftgdwoH8?Cr|#%81_34L4q{j zRi8&#wRB&dX!}V6NVp!d#$pC`I6*g8m zVYT*@^ej!YrE0Mg3g58O8gCoOf1%f7sCNCE(O$GXVc6|mJbk^t*8+>D^{JU;gw}e-5a1+1pG4$6`Y~%Nef%0{r!RWK1->;fi+nBwom7C;k z3@JRDImMnF_AuG*_(b~t_dAhI`+}wqqAC|T1B3m@|FN{#r0?4}V5Ht#TFgf{c?MRy zbyT)5JB8^qG%e*Klmkx}ukUs!z^w&|)Lu-FQK>8p1%_tkKHGwk@goH8KbnQt z11206s9v7mkD_~$F+YwYe~n5Z0iEtMWzolnMMEcrgV7>7120$=Gb)0!Vks(s^7+kQ zKQzyb*-BB*HxJhIMv48XX;2W2z84wtXL<^!Ts}S}Ei98h*Y5k3w@x~%cm;b949PTOA&m{5 z5H-)=kPGHkbyNAK#U_|brH!#`-w?~EmW^LS?{&QvoZpZuu2cKu=C6dJQVI(N>{pi& z%(pfCC?Pe|*aPB>`{u#)%4de=&QP_v=8v-k%U;S$DH}n;b;~S^LH2ZKOVY}8`0^Mj zUl;w}OXWJM7a8V{TdI5?qai|sqgW=KkGDB*ZC6W89+NwZ!U@k~^X|*gz3m4~(8~?2 z8d7{2U#;K1%VkF)n(cRD*LYtJ>CBimtvu^4Ao`I3T`mG6M!WPH?_F3*we1YsY&Lqb zWg{`@R?7Dp;Dd;2MS>%HM*GZv*yqyQI_w3BO=V8uJ$FxciB?B-uJ`DY)%j5P%1WVe z#R5_%d)KnN$=)(_w;+( zUr^abh){XQ_Gz6xmimxG`=Pzgk~_semUR@{A$PO=$5eP#zr=I&t@X}|YL6oL#>MoS zcLir_!B{rol_8@B2AdG0DFIUUuWr45 zUyz5cF`@7tWO{h2Z$9#)m%1juv&zc&5QAL```}ckNNelHT8EHjy-6th4TN&B{AH^Y zqqC4U{sB1m1wnkxlkvNJmKzZAdFH2XU>PjUp-22!rUDzE*DqbsdVV}!6rS@cABg5_ z{rWT??pMy=qR#qSTTtS>c*e|Y#}dU>R8rUEGTIRysEPHu5y)c9(LG`Rnbl^t+l$4@ z-7BO{U5++Ro^PeM_OuwUG>a@(kqYjDk8?BEP}V+Pk!^ANjCd4LV3Ir4cttCli-Hp0k3#Q#D_B=RHAcO zUU^Z!X_Hmbx%}6!cF9;~k?8}yg?8V*%QIcC*8<+5M{?x@*#T9=@mN)PgJf6wp6rF* z__wUk{}|0$J1>s$QVi@vK7okyb(pQ(XI<5@DwRGd1h<)=#h-3PNF>B#Yh*dGCC+Lo zVRZ~lTiN>)Tf%U@>f)zw{nS6!9(Rn4vu+<(ZN{QzWI^v0wTqr9#>++p2LYi$Ise~t z)(u@x<7RI~_o3&0@@J#%I_N>$Vrv+#Ox{bOoUJzYUF^~@9Hj|H{m@%aQQ-(&!ud&I zk3EVSo2ZX_@U6E9A~fL8Is8?uSJr($88ln-4wi%aH6`w#tw+&Zsq%w@cg%5EbwgMm zrTh!Dv}Q@Z4|x|yO7E8go4QE1#bS@NxYDrq9RUik^ejBIC~Sn*)~utI<8*qkY_Dal z&RJlRJz|s`b+6GYfBgLwK{yX5gkk!*`gAO>(w^j(rNaltUG-ht=Eot%SS)~rr3Y=9!&8Z@PoEo^!3#;OsQzkDUeF5z8N&1uL+e06 ze)^MkUTDN@J;^*AsHcmx3E!1s*+r_tonKrlhC~tZQ{GHEd8)l!;PyvQ1NK5&z9Cm2 zUZ0d)^kcp1K;8cD_S}Z9;!_W6mL1OM?@8%7(%D%7rZdjxI9)R6%VM6IWK1vgt7Nz0 zur2WVFMzLwtqBK1N@a^wsUhVeWIR@Hu$(#|)a{Q5(k2jxZ9>swyfv3xf z+8(coEGOHle4>N5mX9+N7@f_#v;;RSG)fX((XBGVs)XMz>*8(vBp;t9#ec%E49R@E 
z%FDzU05UZLYJP;-Fa^IZo}9(tK{AF9htU;|uYGn}zZ12Zqxf|0%o$s!pI8F?f`@kC zMJ=1K8AOurl#Z`o9z1t-42<9odQaLqkkm;EZQO1>r@(m=%x)lg9`}8lw_23h_NpPj z>cl{^_`+XJ$?WdM(349j!}~0Nhll~_H1HZ5G4Fl%epFoXdWnLpg`HpIBtsy@pWpWx zI80c<_h)9Q?Vr7^i?zVax_R5sN;Mr^$H)h+_4Zp4XO-k84pQD={YM^k5p0p3+asQZh4Z_wB-8(=xBn5c8z7b#l@dDgt<`*zXVaX){WI{lgxS7H_zfQ(rfrf z$5n8T&{D~_hro#AlMhce`k9ta&pJIq5o9*Z!-|)o+L}NG!}+*rYud_bX~M-phKm`xX~Ia+udirnE2k9iS8No^B>MEa#imNJ^Myt=*}3-{n0& zo)Z`a(5-|$PZ)sFU;|tQ%%N#5!rGLU(zlfhJ4=!jT?_g~r}l5QswN_zO1X_KYOgYv zh`J{E@}vg?r~=%`pTJ8lH?ra;&?01+&AR1%8;SJqJ+G~@iK(yma%O#jy|I`S-Ra)_ z+AEz7m=o*yCkwZyt5k=!=ltFL{L!g8=6O9Bl$+a}0Nc_9NxU1#r(sgojb7h(%qa`^ zFChnr$dE)P}Ly zKj1`};XM6k7w^r-6G)F|qr%8T@93F042Sth!w)F$lgSN}N;h3W-WyL?X6|`wxyU6G z!!%psA zf1A0|J1Md!$>b_mlK9!-C|eHGgu|u${r6uHcvJG3Ewqd*FE5vyzrq;u!Up-`GwDVT>K* z&H_0U5NftS+j^i|g*y0MA7yBY21hideEz=qzC0|3J&@5n43YPDD#Lx?^Q3rcU4ug? z`-0tNz%H|*_1MgoHu^P<+&F29h{d-244$E?EjluNu-ztfxaG&-4h86(OK?noIY)e{ zkkd)L$D?r|VNQq`;`cMsck=s|wxU2#J16IX&8?`f%(<~-&TyYXWUK3TpyP_+C^^kK zvoNueeJ&oc4^5!Bg8D+Oq*bf!rBKemj#<9apk=KtQaE|a?8>-*+TdgnnKm~Mt8Sl* z_?wz{jt#-Ra`N0wLG=h~NI8HaNB){tm?p-z#N-k|sLS~~PrXqtIy=(soMBlX&ebKs z`k!doO2^Sz-JxmHV}uH(1Z%xhhQR5!!9JAzgSqQsoJPh~o^o||&(7GOoGF2bbI%Sz zPo$3uToM%*yd=MWp=J763cN6IL~BgG%~EGg z*L|nzIYYvB0{xT9CRC(zon&yyJYI2FWhMkg-;E~_TI)krQRaM z-)F24Ajo7rHSM7+Cd@s*KX~7F*dkLf*e6ImVzs^iAm?g1I$+BQX-GMN_$?E93!K}% z>lgz^?o~(tyer^}?`jF9m8Q5FaNDL+&rnlN7moe)e0{u}K3XFUVLUj?Dw-sRre7<` zr(^eqqc&cQ}LIbX*ooWfivZQyuEe(y>@|u-KY?c6O@?+hh<_ z1Be?H0fwprkM>KzQ22;p!qg^MKn3D%6y0Vll(t{HW!kIdZm1nCRrnR0PH|XXNl?It z@KR^UY^nu;GZv%fA#Kq`GS%f1D`WZzJJ=ziai|p83+Caxzv=07g1S+thJ>@6s@y#( zts;J2ka;>2H9tJVIh{2|HDf2*>lXMDa9@Uc9wsqR8@rUI13t{Hb!uIA!ddK~a#9`- zFU1hG-ZvxgGrw9rEe~{uww?I{{nCS_nDUc+DP5yUVbB(n%lSf|I-WMgwuIl6FNSg^ z+h-nU*vU5LXOF$IWS05()!B`wFuzX*bGh6gXR`-LY+2<+D-9Y!Mjia>-FnY7?TVGt zl0iS0EL8tx;)Tjft?@(Fq4UqVvd?iFE?)Hqsp;29S{+o?iVrg7sf{(aHsi?lxHam7z&*Hj?CPB|``91#hClJ`2^El9-!a52ht z0n@%j4z+i81G6bB)`dU@LETy?qcZF3C0pwN$8DHTs4u0WX%ou7C^m4s%r8 
z_X}!wKD!bxKoZcBG%dsEzA25Fu~E}(t8$qa`p>FAf8jmIcV+WLs3zOb5=2o)!q zXfnvuxH%_@+I&pbn@|9@_gm@@a$%_PF}C6l@wF7m9Qw-_#*!hsIsN*Y8znkwGXnSM z6@|MMpQ;BGq3}#6=3Un8i8*Ei2^zbPbvic~c3tM_RW1qqiXY2`taR4o&Z!PxQm17W zA8@Cx4B(km7V=d-jAZS|U$c*gP)xtqx)_-s+eYv&sgbbS+K_Y6_&eWG#^p6=`ZLCZ z!PXI(w)%q9uN;|dx8nng1kXSJuD1#uxv9qN z3mHw7xrjFWB-H0w2at?r@nlzOWE_u72jH)LHI;k(ts_z$ieWtZL;Dc6SLdQioUuhK zm3^Ojz*w>HC6)r=J>NGIIS?XtjA_vHP7`J=ni*OP>WHWN(|`5VNH$OMn&s81j(|j+ zvIC-zkkHUk=As3M7^dKOG2a72@Fezpt2vDR-a{^k1B|Aqf%0%{;5b`|H6z?`2iH4# zR0M;Xlr!v)b-RXCHRE=?rIDXpB+eDx?UG5@cUJYTB7rf&>dN)KEHFZqS+FV$J-WCX zNzh=t&u&~bsjfW0dKKPD^vr81e;(r-O`_XGUDh*l8jI{xc|N(H(Vo=dd)Rk7{GNF% zVR z9VW=EvF$G1ia7?a&eiN@?rYYHhL%KNVrq@RSh@_EH&afuotORTVP9jW?)eu(M53+CYP&&JgiosLp~`S zrb_z7&)kH*SOvVtTN1RsxlCDeVXu9E#iGyAyHfaPgC9eA$-OP-yz5k6f1TfXiA?^6 zc91o3O_f0X&se^7l#m;hlqbl8p6rx||4O?i5PKu7wzZh~i%bE>wp@o*6ua|L=NhG* zf_W#m!}^CLuQSha5|8FEH?mFT9X@!CggM*B*?tv!I*DrATJc|=#=R8 zCtx+a>vVSc+o{i#JI{vD5}`btHEbdfsj4&9w>}o`%#91zH|_buVo?=$$T!wQ0fdt% zyqTK8dr)bKrWzYjov)d)?s>8;h0#h&4VOJPyDC!WbEj-(yQJ&?zQWpA^D=sAEdWg) z@Np&kVUjBAF@yG6*Q(}MTOn(6Ok3w~4lRi{i?~Cv!QxZzD!sy>{N&0Mv!?VWc<`gA zyPy!GzG;WiZ*kbZ84k>E=Z5eQIoU{`I7XqcIN@vnhQWgrUcpCmZ?PK>hExqh;!YaP z%23%cSlWC)wH(Y9VrL7w*u5skwLgKq@r@u(_FH&HWyFu8+gaKxTR!Y@f@Q-) zQ*`68DoXEl-mThRomnoMlDG!3Py2INNXBe0r5-SFpI~~zRw5O+Fir*At6;BdfWZO1 z9mm?#OIG%6-hqr;gT1Vx|3--b5g`WhbDcp(aQGd0@wck6aNfcfv;4z*$?e*7;#WAbR2UDL_?mg0ryrUXe#I z>kqdPYmqhw`SYw)BWAZP&#CBG49w+mF9GjLbW(5WJkeFv;vUqC1*al;uTnJ?>?#x1SmZzy7O{8NiZgNjx|s z5gx6e2=Y5+*)=fDP-laD0ptv{uH|c##I;&ZZ~e3r4D-vZn6CA#`!iF)w@j{-DM%^nMfBcUpQPbjOHw;3RQxm);ngeQFj%j;{| zpeOc~ymk2!;SIV!>>z7nPlKc>6=O4XJcJ>6&gNi;mCHu% z4lvXz^=ZDe6$^Q?mO&$3U_|eldNW>YF%^9J4oNUXx|n2ETd?*z{K_9!gg0tIXQ^Mw zj4bn4PaYE-#*$AzwAxxn-XQznA36X*g0><{>C)CF(hf(r;i-C3a^%Ey+P#*SJ1Ks5 z+y>fRpIDAY3QeAj;}eKf;L%5Cg=v&dW8pr&E=tYQnz9J{mD@y{dc6m%aXX`X=95Cv^hFytUSwS zv25{G%phvpZ8rC>Ikc2ah)+XU_CMYPmtVZv^^*0>p_-`Oy`yps06$xvK%p%b!*l7E z)cjhb4G0a-Ec)p&j4gvXWzr+!gp2rK-uBgiTSa0T`>8HZFrFTMgah#0$C6-WZf#yPk!s)|) 
z8j26_W2?o+Hf9xGCJ$_<5>T;A`L{g>&(g`eT&Y8_1oBz*SQKS{)T|(Q-Bu_3XMz?xeSLvN*w_OZK z`3E@VE$oE0EpJ-azER#U(|D!WH_QMZc|%eX_wjxCotSmJuKcLU6ViaAXO{Aa8_Y{N z2w)HVj_X#!TT=hsOE-!_kn+ydyc~YZgwyzwMy?b)eFUBDEvo3^u_6d^9f+cz0h_D4 z*ATyZp4s6urM(aG5jlAeIMz3Rw0aD1P8}BkyxY}Xc|iwupv>jqx#D{T`|a+Bc)ORS z)tOM$`PKG`!)SIJ@uZqts)Ao!xEPU#oV#1UT7#5QEdd5F*h(T2qBsfVS%znC*EDf# z^cy3$$5LM1GRGV%`zhUrUpOg^+iq6J8Q@PhH!D5O+=s+r62jlPYNZ%kVjHmJ_{xh7t#Z%>gnj!Us;EBpSqlg<@5---&6=%O z5tt@MG`nvnOSF*eSeo=%1*hwy!Tl#ssGIVqT&fAt^rhOx*Xru`dikQPG(vt^UkvaP z)NX3+Pg$aRoq*w4*9pOg&XB-^Ft(6?qWXNV!py#euw(AmA5^%uccGD{n|J`hD=N=3 z-RiZy*fw=rUS;gLpZabrbVW^$4jb+^UxdsZR>NS_*fX}p7Fe(h&lPnLv*AO-HzsR4 zm{g43+mI!FE*SX%{;H9=@$Vkg(``BbO56$&kmzEG5vzuAKq6zMcYlLxyPvI65!52K z<(R>vIVGR*odz%^^E7r$`FEWCZDF%2Fv`3;2H2Npt~QN7CH<~-va)N3cZUcVa#3nO z6+?Y>V2tx~_QRFDAo7E%t_@y#JvT7f2lz)KzIfhz4ROg&f~H~xek`z7AN@VH>fIN{ zSx=-+Q{tG+3G@b|dK{xWo-I3s;HAuv!G{I)aN~PiA#6+tzG_|U|1h$Gf5vcx2bf~HDI z;SJEOn)nkL$pR#9PERW=wvE3CX$n@|G*WaIZ@nS}LA*{kN1^+%X?P576@nlvfqBbt zaD37EyNCV{sIG$Ro$BV;xpv}09MgQU$K>tVZGl!|1ntW-Z@60U>@w{g&x8!- zF(C6m|KC++8Pv0R9&ATLv}I1~_EFn5*l`jx&}yKY_zVAw@RTHX@O)ZJYnUSFL` z6ctM$bEo~u(?4EZ(AMr?guI@gp@2uXU?#xZOV3K(t4IKw!|XDCi69z?Fd$Fn>Gz-*gTkJvA9m}Zi>x-K@;`g6TCPZU-c&7r_5_xd z6jZ=S*^`}Yv6(z%X#=kqwu!i~rTv5E%%b`16_K}M7ftzOtZ3uVoLP-XAm~e#&!K)l zWv>WqZNmXTgVNn?x&C(GmzD-_V;4p|OhT$jH!Sftm7jhskYaSb5?It5bpQPQw-V;l%+(jDzVU|v73#nmQU1MNP7);y zy3~Lx10tGgXH;Oro?7R`Z~oH1L=bpt==J$i0IWmbMt4xV9=727SpVNjN>7)0G2e{z z$10*(YW@Zmg>;U^4z0-(k=cMKs3hN{GhvGmt6z-0SV7zE7cieqbF)S^@#!sf;0KSE&{zY{@A0CfvA`K=l8ba`4?Kp&tn{V}X)2RH|ooi-H-9F z?L$DU!Vi>0auH?ucH=&c`+sM!hDSm-x!(hVM!u1s)`DQJo@_0LI&wH({J8Mw4y6S6 z7fr@m6(C-vxsK@PM_uPh@s~fyw9Q*q=nG;4(_6WWhwdg)dCRvv-O#kd?mq>^G$z;j zJmY?mOzesTF0qSA)_kN0|NlhnPLPA`XJxjcLyi{QU12?XO}h9bv%(cT%h|6K-QQ%X zNwGbXPrI)e_u7uTjir3&Ob4Q71Lyc9s82`{yWwV1g>}D=ta)Gh>Y83i4Tof_f?zLa zT6Z6Ue@lqFB})cxoc^jNWY^*HWi#;l@MB|^4vIta+aMV_f^Ayh<~frjCz-;x5OxY+ z(I)Xa^?JubdMYDnWhC9wHk;0Xro7`eY$~@>HBo(vYOIgFnJ#I2_x3}!;Q&;ARvOKO 
zH^`sespnzMU{}aU{{<|%Jk8O*G-ZMzP4}cNGXe&MAVC+uXkBw{oz}|pG?-ris_|mtaL)bbPsI{GobEDY;@)q-EHJo(j@fPxN`)%;wA^4q}=TzojZ_%9j zv&)DqgZ}bYkr-Hun<(r9J+Uo4Y$;5B1Kg$i#;slR2N8cHAcD0MUck<}av{HeKU-HzA+BnCCYGqmKh+nb%Rii z&*$w#kTM}Ilz7L7e_M9^hX=^We~!}XH>=Ufpk0<-bPudJ^&IA|rbhK$Onk?o$&8~b z{wL%G282FfSnmVMyP7 z^Ywace=k!WVJmU&Z^3V$NLr&(FPyTZ-F=asx8~>{04#gG+Y#9;GZtcDGa$gWx!tQ` zCjW5F47J0gkfT&z7;*f(rH`ulAJO7w$@1{S)I07pU0N~eSJrUlxNSd20_cW{dJ?hH z^JOhTa*?8R#&wfAGD%4y3 zhf%3S=SoDae+@%XFsV?m`x+UdnCuk-mhRF>V|FPm(WmzOBZ|3|7GF*6kiawn8(wIs$K8@UYQKV~e+D?YEJ8IdZl{e}Acy#i^zX9-UCJ79UawE+05m7G3)4;UEEv0 z&^Jx>@kxAH#>)>ldeSYA5Ck9w1-*bRE;Cr_36K2J2A5{ITTCR0w(NFt$dPwObjMRU zu;3I|iEz&Shwl}-rjeXs1L2ja-P@CeB(@>Q$Ic za{VSpCY6HBF1sUkf$owxo3f63;g>(IijsV;Quj|oth9H@gUx1K5~3~Q?j z3+gyZ;Ur9>ebV5|^h_QS-H)R-C{NzX{B%Z-ebfmaoc{lj^wa@SK20Bel$>;XC=$|0 zO5P#e(kV!tbW7(^($dY*NQ!g{NH-|mlF}U#-}8IFKksIDpV^t6*_oZ$S$aj$*Xom- z>ly)4ZZSFSKY8EhRXfIX6^|=oA}F6CK)})IO9qOL;nzm^B^`wU+?JY3KueL>T-KewwPU=Gurp8 z-P3B?Lj5<;sJr2lFL4$QWM`fNui$iM(K4ZbTmG^OS(sPP_xle{LN|<_W=mKia?Y1j z3&c^p!0Vag9?b*Sl`AgF=dW?GtTl{<-{_X0Er0*s`lPSB~*0`uCacJ9{n1h2eW`n-Rb5Iq^8UXZhe!A$zqHuCA!Til{TjXxRxGIV9*h-oTb}_*R&h zl}Hk^4$KV$>)gRTM&Is^wbccf9QCAxCui^biL9>u#AzdY#1?0-2PAn4aP_pUYM$Rw z+(X#~E*drWxV1~Ms8tHNCv$v@!u?`n+&c1dxS2HNB~ns>Q~vls9D-IUJ>_B}Dx}7O zOBNRxey~(bbaJUb38TWw1a89%gCPjDf zUb)}7wAn*A>G>`M?rIiiE3OZEH!@R1SF# zCy|+Y$$H6>f2SHQJ$!tLs3=u_RXP>7gFCVK*xE;%~THzPWlEc>DiKhRBzz=i&S z>7o{DI^4TXq3pbZXDB)zZQ`2pf|hp(S&~|drU9Z6KOHf%az^uq4q7?R5|uj2c?11l z_MJwrvenu;BfAwM#S7vb$#bB0&c$a-d%p;i-IQwyUcrslzSD*F<{`w?FJ#_z$4|G6 z%E_jcMd6_Q(waJLAp$%0_M^|ZuiRE$Lpr(c=SBwX%8A;n&rfXXceO3%X?kV;irnob z%&&4!fAtO?Wg)pnW^9CroMz@UogL&yIa1~TdlB%GW?K~20#YOmLWV+n5nAsndy_(| z{9*4?N#D;_MB*A$^u~86_;^WX2|E1R6H+&Ahv0vwnJXT#l)g4VC$k5eHhFjuHQUGK z&DefFV z%xBGYdLAVu?4u^2KHKI=hb_jh?FGPYY`KOBz&@V#CcoRrYsBFEb!Z5)pLGdJNvVAR zxG54t6MZ5w^j@$jU4iB!&vnDfZ=v|b;j-~sY>d7@eU3j+Eaa327V4~zN1844O=R~5%(^ct>S`iyIe}f z(GHI|`LLc}IZQ5vVbr^QZl#5?N)?C+Z0cx^sSo8FCHPh{#VrKyDaf7M7X;P-)3$9? 
zP~S{=fnAP_4fUK4?mac$gvFy^ER_>6rt96}893Ln(n|n)~ zO@9PWtEL?5lHX^)!Em+Mhfjevg9&fCU^D`vXJ0w(=}5wMLa{u6JDr{V)^ zr10dM=RLuH8nGC{WNg?g4675m)t*v2TeeIbZAD?dilsDO7MjzrX%9@CfD**+HUyS( z3rnBaM;?B7@Z7o@j;30KKP9o{rmtmgpcanLHfL@s`L{^Y`(5B#GN*G_-Oe`b1jqcU z`NZ6~f_2B|yB+7qZ%ZfquKux1huYpd|0HJ|V?|(+mW|8T%_?qA8aXKb*Yl1BybA>c zIBSg$>t`L@!m*wxW&fg+j`Sy_6^adMTW){9SVjGoDE9t4>@$Q=a%W!t_`*v2%Z-1% zS)w01YWze-O3dC0lSaedl`VT$Q0QFS4BwMaC!>c8Tvfhttg?Lo`8!1O3T{?QzvONq z2==M6le{vT>TR|r%xZIF>xKzUl+a*W^zHUw8zIJK+CD1TE5zdRr{2IJKDmni8$@&t z0{mZ+nc5ED?M<~sRBAZ?ZFZxmM@Sj4MdfsED_V+u-RUy3te@5oQFW=)y9?qz%{+^q zPSt1l#Y0*GCQM)akcTj3w^_OjL~5Y~F7>725B;u$sh)IAjWojytew9;2+E#QaJw>{ zjC}n0a|5C2XZC5GUq0d9ZnG$xd(-vhGbWV4YqNU&yM$$=%Qgx{$8XurKo)tiG*6xW zK-)@4Zj^^u4KRcF)u(q)&7RA|Gs&tyrRFbR(b$wxxNM$Y{^k+9w-FH&_BkPyEXz~~ z%xvJ6W1D{9&bQ>7WAD|#{4M3(p1^Ev01N)#>~I5D8|L>8H`z@ZBfpC@>#rYb(*2@& z9=<$Y&&y?A8D;D&=-YXqY@$S%WxNTmavLDUTh`x&uuV5YLe8HEmC6<5|F@L(Wdmi^|#V!LP56E@W#z zMm_{t(Xe+Yb^gE@gKkV9qD%Lbj_~S;yW+8K2avxX3 ztUK>nGjp7UT!Sn`m79KG0SHbaqOsC@K4JuZ9&hBim$i418+uhadu*cNviFBW;$yji zPI}6zj!`$P#BH~ot~2@dR*<8dzq)^2T9)4o;=XVo%GcU{x4AjAD0zB%t2dNT@pF{B zc%yu^*l+pt0a>L!k8=m2WXxsSwTRR)OTXi=*KjIXb2h|Xph1BL<@h4#_-1R~4 z0*NXCBd^<3r|sR{SMCI=5^%JLM2SCT4oMV_X{^1CP{)8W#!;sJ*MIU4p3W^Tbt>I$ zY+xm>iX8487sctTWX(!INEqLqvRfd`(sqyKOHCuuT!iR3=#>-<$3 zc*#SH)bPEUiv8b?zISQGpMNK^j)BMT1TRRmzTaO`MB5-lh zyoXZMsZFIoqyt1Id#k!E8!Nq6;~vlYsV34A*{mSU*V34@2F;|OT)Asp-bkWc&NUz_ zCb~;Eww*M`NcsFv^@}*)l?-Y|Ndm5-_sR+uX>&`N5}_XBy=&$81kTy=H!1H$P&wdZ zA_h0yw`E%vD2ckBF1`4x7t&9WyaK9OTi2lcchVfs3g0s4Fa{0HkZz2TlX1<_6Wr$JjFInE- z3>ApHw`M*c`_-WT@S5BQsLfmI&l^@a`MIIV5jC7Ro!&K_x8d|A<>AdT?7D~gi2;kQ zWSrea30Rrhj<;@M?n!U3;~m4&8UATQ#)5>CY2?FMMp40~>Q6-9AT2$nOMR+=Kt_LR zL}+cP#^?DjemPhIO9owLANYc&n$BlV0N+WP-hgEmork(N|2dycW8l=67@H`ZHS>#fRhRwP^n~I@JDbkwTlMHRNsXT?*<{0|Hqs?M_ybE z!esm1-=VLvZvQ0RZ57npd{*&}8v2RoJR`-vJ0|LiO+NG3o1#(C^CV)zB}0AG_s^QZ z&^Ry^1s8jrkNsg2Y)G>5Du-X?XQmA5_xihX;|52QS1nK9ro1`pW5d@sObeb`x_a}F zD5S9ViC~TYE||L}r<;}Z^-{&o)Haw5<+;7W>o%IT2~1lg`1T+3aAk7Z*9m_}@?`B} 
z(+icCrqvUt+L?aE1IlfxCc)-&dg}DHB=OgtN<(In zd*WX|H+xSH@)%d;1E5i~oCTkLsz2OrP>|_jk(u(=cFHVa#O#}if}?3iyss=1hGZJ{ zo{jvIw8p{6I^g~E#?<-rS=~#eS|E+zMojvl#{JWlNn>PR>3I}KG3}}X5*l?Z6!QGd zr9++LfW9-6t6;dnyQjw75TuwkChE{h$Xfa@U?}~nl1=yI?~TG*GmzoRP-{7)01oa3 zEb_6Rd{C%7xOCI6@{^s?By;$-yETPvw+x{#*;iWsDT}MrF;X02+Ur7!VTk(_nUXx= zz0Oh>^=|_qCkUQ;=oa>}!{gi^Z|jQmssO{6w$lH;!GCzIZkCZCt}Cde@H=3Qp;hNo zozfw>3)z~4@=mPwMXC6Y$y*!SCntKOX{PWO@4e} z8^ZVeM^QUP+9yw4#sHo2?AJ(1@-LC>D1rI8F01x7ZK#zr3_26f$ z>tBMg!dNh`iC+Mo-s&mUMy*peZPv6AL6!c*=MzD(=I;4hb+({t*pV4QtsW`h;bl{c zN6Jp6`$_u;abghR4&5sGwcirqCh2gbDoZR`uB=aFDl>_cr}H=cX}P>Rd8py2Toic9 zFP7WE)A!`LZC3cx7vL9TPIK^hZEN+u6?5giOWEQ2^&j7N1CMqI2}a0=f2f3rTpSke zix0JXA~2K<+qvx@Y=)v5S)NgTU0-;Ar{GocyfGNP6#?R5ZS= z(8VjYi)JmmdXn)_>Wh}!)BjiX9;hhC3+#eZG;FK_Ah#W8Zf@DbWBAs|L zE%w;JCH=uQMP*exT9+6(#ee>9U9Q2_(zSQajEGEc5%)#)`M-!1{cCFFU$2j%rWyUd z=$(+aO*Zas{q)c6SopjyzUAs&_}lGw>%a8S-G&9*ytc0Su>Q`AtiwhAr?2lVsQ{JF zQ?K$f960D__N=9~IQx9)%9DxO`LnLk&QL+>3$Zx#wWn|_2O*|7<(|7b` z`oYY$-lfw9?ys~o@?dYS!V9rXqo!9ht$z#pzig=A)6vl1_Qkct{BEE+}e$P;t`tAlWvM*5*>xQ?-&f$;lBR=;zh==3(ZlJSQFBT5SHY z#V(7#%;ooFkzZJci*>hz(TC2B$}bvqzM9BxYuhtxo>=3BC^Ecp^TcoxWX_2mSGTj^ z*^-n~Iq!0Z$n1xulz+$O28$0?o4)7I*?vlpa*mr%rTje^ZC+2T%37VAE>mRaP5<-K z)T?5yM{Di%!Bwxf)aQEUtI$>#_QOA1{!-QlzqfJ;>-E)iOJ5R-(bB{3x=)HuNw-In zOnU0ry0bhEGELf-_%87!Goc%G#xhMgXY`-{IC;T0`3hA(Nf#P&DreNzh>Sgr7RYr$h8^>buT>Nn)Br1BFwr>kAFO>NLax)v=cP< zc`N1@A7m1G7X}?X|K_tVpW_d8EZF#7G}yz4G;8(5Qv}GG?dIBQR?!xQ4hPyFTTwsn z<^}nS@76{q3?3WR#)_~6RVi47wZ<5mo*L31SX*ug)q^ae`!qbIy39nr);K({-}msl zbMB<>SbuHZC=7smMtO!jWqZKV9q zbH4B5GC$${Ez^=dlY*(_lc6xgTD3AMXyudXl~;MLA5n^_U|r9s^#^Nu7k! 
zK!x>z^356D{5u2cR&8?Z!Sp;Q`)w`SCxqftYVi)%;{?gm^{xv7FYJQ^CZ^tLe0YwZ zw3D+uZ*u@#B~y;oEIw6ZzzRu0%u)oY!6@~*t=giRHz<#|{N{he_x%;roDz-$B3f8leSWd1XGc({w^$6`?WLZ%?}(-zcEYcy=y&fp3df3 zHVY!@vU+&a_ft?7-(tTI_MYGa)>@DBIj1nYFUgD;skyW5#@ol$e&mQ?uDh4(&d-Q7 zo)+2B@!5XK$5q66&L2cmMf8=MsD48&qu@haalv+q_H+Y1-FSwAsEB~;`gKGDhgTV# zIltrJI$}b8Pu#@L_WJGP$%AQ2RoY%{{9#2j|7K4oW+6)A_R`*3c+z28W6c~v;wzH= ztc0aqoYk3zzpSLS{$n2xvn4ljymL?VI~m^=KD->d;Yv?p)>M%>h61sDBw=n|c;=0e zvd|k4Yl3yCM*Hn*Y5Lz92}{59*VdaFugUjTt5w&3KEIQZWv86+l}#b?>}d3~?7Kr1 zmsqNf=DYSvf!oV=ahAXL1Wxz8|8Yp{CAA*Vk`w zYjwB%(KM(JK8l*LI-zp(4a#hHjWz)`+xRIjszm8Dj6r*ymbFO4C7&0nTAN;+;a zjh=%*hwk>Tt8kF*S$xy@Z(9M`-KsSX#X3%ffLhn;{t1gg&4&;L6cGV{J33H&BVcx8 z@E>f^;r8cyXD>;X@o(pX5FW5Fo%Wcn{;rLO5*TEEHL5EH3@PpuQ`iw04tQXil1FJ% zcUmgX@(4kzwQ6j|-%~Ib;X+dV(2e@EIN;oPG6@~D`p&4;9e9`Hp07VrdKl{C|Fo)# z&C~9GBML0N7tX;JEyisOaSToU&!@9-=Bub3I$(Z>5s;#s^HtDS>6;ebM*rLTf54ps5KN~vZaa91Mrn&r?C(PE}0G# z(TD3#1;96M`eqLbqD+_|3Ov<%M{h|Ma|izC_7E9&mke6MUv0`MgrER#1l=7ygq$`H zWh9A?v)0Gvp&V1Qzva|y57b0^es{IJooHvLLr$xpaEi%de=B-`92I-{H+w^Z;25_H23HRW{Z@?0UqX!yiI&`uCmfpy* z+EW9TKL*PA&8bMh!^CfWQ^7-C@(eYE1CsQcf9i*Cfjp>?`S}Fy*Qdsve3rwsbBhLw zGito@Udihm@uV7?jN@eK431l%YEzd!U|>&01L>sFf0Zvrjd~Ld%PA82w8`VNBL{+;R#|yw+vDX*07VO*V zv;&x+$-kRTYP}7iGEU#*f%u3NEwk*L*aFjT5IQJsNCbm6*7%G02hC)_eA^Nd{<^qA zm1lzol4P37zktP%SW|ILKsbfugN{$}VsZ-g0}Jy(P}J^8&W01Mh_A>;Oo}Wb4$cWr&Qn zvyW20b9&#-Ab`qSRbW}?4>x3BJDd!VAj3RWVruRx{_Dhxo1;7C(#c{VW7dGV0mMLD zM44rih}r3|KoLr@zpR`LSzb*1EYSo6r^m%v(tuk(i^Z@qpbiup|F2J&3!NcFR1G?H-6m8EA~SuONoyfI zvhyDbLMs}hZnfv~j^H)I4Lo?ZEQ!pfL+7MADDKC}t4e&Cq&hxJMR+B2!;6o>?CkR= zlU){2IOp(%+%1c|m)-jpdVorE;kcSoXc?VI!UjQ$b|)VBGkV#@c@eG2p|<~_?&eH- z-+(6RcoZeLeX`Fz=;QsdF*(!~cCEnBHsf<$7W)DhP15jNGzoy?V4cec07NPL!;J{5 zrOf_##Uj2@5N_NO}>1WB6Ky5yiPzdoibKJNk1>Kb0#sQ!&$6N-HjrIj8&H(!I zfaoP0UD1=5&5{K%Hyx!Uke&ta2pE)fpDHdx`;CJ=z}|657tK zN$9oO?WYEkdW4TqCc-1LDqv9F_=Ej`4|S%8o=pCCp8`Vop*M@JG27@a7KjJY_e$6S zYM#a?peP3v%r4)&OO4xXP#)(8gMN@*MV~8qW`|nh0%=jtPW+VmKhHaT5CeftazoE# 
zyo&bo;`L;RaZEP~L(Xv>H5LO&_Rs=aP|fVriH-4BXr_t7z$Te|W!6-oudXh zlF~bjZf6DC3Puln`62=&bDbFdG7Q^qS zVi_?(;(#1}78Xp5zXwp+2R6T<`xdoadk~;Q43(T6xCc|=`QKU#h`PqK$E`Y5r=<`g zcpy>a8&5&X=y|FC2lbmCw&3a=$3Ja;g$BxWIjnPDCY0Iqm!y2@55YSINX3_vw?_>6 z!K$x{b<}Hpnho=KK>;W^E?;_D6q+DNNO|vG{E;^C5kP?^NG+fbiTFVXdNOr8eKrQG zkL@)rZDIgsbmp(rf7$>AHalT`f44V;60{Ws>bmxofQCbRh{e@=sGoB6MF-6)d+*_( zZ>++pgI{jP0P5;bdq&(hZ$}mqkdTE5#!+xeo(~`T(CACp_2#QvLyxW#VC#ts)JJoU zqbqo->&^*i4xFK#;I+~nDnXlvmxEvIq5Z%%Q-*KkW{?m5Y-9(A=yH=N!iQtf12%ipPv7>#F4?0S z?bm9cQHI2(odg;~fY6!nt2>Wt;ahxI1dKH)taEjz5Mb?;;qr3*tqI}fslBy`9M#AgbSXH35Sn5dv z1cx(0oUc9c1Hh*Od)@FerabZq$~|5n+}|($%-7b3wI@rclmhgQvG*oeJ&mucIa)DT zEv_&>ibI@NK7|(~`7Tt@Vj`d`jfNs>)X@g=dBTR{YY7Ixj)AGrznB9s9Xgl?Ndq>D zGK{2mF_;lEpx~nv4F+H8aebJK&XtoLkc~$%8P>NTrV6Y}sPMc=1&eTFdapKxGY~O& z68_CK@f#LUPn(|*WD+V+4NIhT-3^+c;`}d%h@m+~=07E(1;8?=bvP8BG*@@;xY+|R zdsuQzO>44Kb0r!mYSL?_cnaE;Ve=n~Zh);dVbyP>GDT@L2LkmXmK1@<5*@EZ3BMk0 z976BRg?+;pEd-+`>wr;zHsx;u#4@JVFdAF}h-LBAH);{<86|MZRuB|Wsn*v)k}swG zi<(+}X>b$RBv^h1Uho7yBd~T2!~pGp>rzViJRG;%U+g-pT>|?v$Ka&;)_cJKfb&vJ z4W#%9!T2Bf+_+|NOwD7tzvDlA_5;{J$vvmfsPx2gT=(yXLn6TTP!~|I78!cv0$@Ao z*Ak~8R1Kum@=L=Qu+1Jjw`Gp9WBEDl1!}av?%em}6Ad>JjLyhjEd-{eEBEXh zq#6YQRSK)Kjo0H|d;i2;eRz>D}NrlVSkoNdTIrDh0 zS8q`CA6E)cK+4A@GIYBP;1%oWRO7X8fLeTtF!SZZ6^xnm1%Z|e=~y#VgMl};5Uu9~ zRamCxDUvjMREky9T>^NZSds4s;(_(b9_x(D1|FQ?7b4R7xx-s2|B0ih-TI05BtjAr z6eckFezOhO9m!td{{B}Tjs9auhMs)qypx5?uApNu7~nVFZEH{NhC?SX<`}W<0;bv6 zgxu~+dO%t&;`N8$Zt?*%4iNk(wuhV-=$sX&W)|y-{4a+uO57QSDX_|i2jY6;{kC9= z>BHqX%PvPY(2?yqzd_`w&J#fVM6H}%zNrmKQ+a>sm5V*G1e7)L*EwplzxO+xgsc2P zZgVlG+GcfoqQJ%d#wXO~# z+BM*sbyly>l#x9;u?2#F;;M0jYTa1njZSzf9!e#*AMPe6iQqMDs-K6=tl z1NJH*>XNuKd7!#07J)d25&z!EdGXR^(&%`ce!SING(0=AIn7ycny_zQ6N05=^O%CUSR_mjO*d zJ<*>0&18vki5;{mqraUKQLhww!#NqZqfSo*M@txg?}QFuYPjc`fmkxn6TZeH3^(4q z0GL$72BWo>r}zT-Q7A({N_My)xS-mNvhoxMOOWsPJ(YI`f#~ey-@-ImKC`^6!ZNZ1 z8Os0Y3!b386C(`E-URa7d>D<1YfL!kNzof)16p?SFYjZprH-20WizH^p< zAtk(P5AO8fM+GTV7z4ORB%51?Tu01SJC|r6 
z3L)bJ{#bj5!2O%_P?)L|q_>sJgt&%Xlm*j~utM=?A7JG;b7RAV{^Xg+PLCaPbq2$M zqO6P0*Z;uk_Y|qo#h$=~_NxDbsVN!*22kSf=7d7Az$A0p}H zD>0nstwjx=y0)Dj=j@3ih#;P&|DJTRrC$3q=n3Y9? zG9MtC0!F0swT)4xM0o)0jhZAygzlY1-*GNP5>Ap40>lbNI#m(l!IYS5%GqSEaewC> zc&LG)S%J5-48_uOgRvj`gQ>t>8Dh+q#5I}9e_U8A2QdHAspLaO5cTL>>F#?W9wDN=b%DCetS64xrUfYjP-x99YoA{fF#%$RGL zNYTg-GF~?st_^4b;Yl4ARr~Ong zXcq}!lwpnXlp^U7_QD{@(A?;$YqNS!@EOh@7^L^Crcr)}OHAPt9SF=QdZ`;AifZ_(9y?HrGe5U;E#1?a*U_D!WK3*6r!dhZ}OR`BDMfwhu2bLCt8~ zIrLBpv|7?3V6Ynh{Esm8C=|FLlbs~qocfmZ`oXFBDOx~6^=z5>e?I>(lpYgRsr8&0 zrGKtbo`$w+T`jJ&s>RDrB3yzGD{-jnmoG8bBAPe=gGTY)l5uwIN#Avz@x{S>z;KLd z%kh^l3vPN0)JV}Cc~AfN#1Fyhk0FF+aiE~_@GLkpinyGnFoPJBN&5FI7~HB5`mL** zFC6w943~1p&q5BrKP6a70j;JU>ka|;f?@Uh&k0TAAVK4)zG<$e5%MFY64F;pAR(!} zQsTxs#PF#iUjXa_7#@5{F<$txNaW7qk$EW#v>I^y)*#}C_l}vik6bs!4sc1$O>&>n z+p7X%q9))B|arCykF1!(q6NcJMuLIhslL`DFV&y33GLtZ)s%TbXC8OWA6dy&Zp z2>vMslT!vl`7F{_iLg@Bum0c;$D+^5i5Ro}2b7f}rwoGf$!?gXSr+EAU{v>7IBi|ewS`< z0ID9>wfT0u2{i~8Sg>ROD|^4YV^kWFxl#uWeH7Eu%afopw|zPvnH~z|bEujEHi3QDH7VI_YU0Gkc16bIr(NkN#_ggY~ae->cbgjM^H{&=%Z=s7Su&Q zFfmPeImsubnQLx(76AV0n%6eT?FTbiR@_8kvvOm=QZuEAefTwNtg+Qe(F-_f-@LkR z3jXfnjR-(@fgt7eIXk=21v++4&T)2b_I2r*^rR9#n|N7pzYdzbNMf(Ez`&BN*Bwp!P%@x8h zh!G0bIL4t8Anh%=84*fA|G|Fn*0+)%(mYRlo$po?fI91*fA^|V;M(gdf`c@A7k)}u zeMoj-j+Tp~#Wb~Erg%WY^DG_S7E^)#e2A8Q+0^pdeaZt<9u6tmsUclp`LM-hpHW~y z3vwlpaE@hfaA#6^rbHIRt(8*|zv-@yo}|l%Ve)e$hWAml+3Bs$HF~sk93Q6XVEgim zK7r3H{KfW&^g~xrxRi)CNuqf?L2{%%It&q*Vy?$avY^U^e@n$Wi3rXK>AaZY--ua_=*_r$OV--yeLj93U_| zn^b%=I$1#kGA}E9A zT}lo@kki7oYDFz_Bc6_!v3O|V(;e#B93<-P{CtN2SP&$6GmBzJo!-Ox!K?xrBtM#u zBUM!}l$1BSKpm$%09rd4#>S}6OJc*XF%eur3eq4sDEyLH|CaY^A{8SUnBHIfkGB;g z?}D{_`A9(?S|u&-zJ>)yafr&jc%q8|Lsa-D6|c;*^B0?9p5?EazPr%KsM{xw7BqQ^ ziTZ6EXFZ^lDAD0bpbn-lB;?6vJal92CcH1Wkg!c z3sVMn2H*D?<2jZNj5-g@myQh1%ID+FqQMZ%tj|b!iA<)|2f+B@VT?o?3Jt@+lA*9#z09CMuC_)euHIkih0y9la z*AkKI#tEQ7dX?2|M8U9PaSlSEwHZ+&cvg#4<#c%rn3C@L{uKp0;jcaD0P-$vmu+Oy zM{9@75$5wnf%=tbN`{j8u=SWm>1&?_jiaerO}^yFx@l52<4XkfE6|jWrKzUW+9q!D 
zeGbTiAXPj{)h2h0@IIeb!+D$#b?a((kikCe`MklS@qAJq50&af zKiZ`fpBY2V=Qcpqm$%usOFd1mcD7RwSn{t`SGw*F(N zgCbopg6R)CCMsY2OWg>8`jP%hEDL65*4{r$FZ;VK#lVjA<_Q zK_ko@mHleKERL-8?H?>5OxR9s&VZO0b!3-w<_Ds@H&c3?-UNARQGi7n0~EIwc+JSs z=8BUC!F2Ufl=q@GDyr$KWx)Z%w|zPOog->rom12)jn860!y~49{N_3=KIkXnV=@ro zyc8=q#y$fG{lY^R+%+Cb~|8d3MuWCKDpkS;AatBQK2RY0Os`G z`I(#(2LJu?NCKZYnh^At@%m@kU>+UCa&9*9lc z*3X$eWleQIb!y-s$qGqedK(NS9qF?dj$daQymQbI4K7+Vf$uoX;P@P}Vdo2WC%9Gx z19^VybGyBHjzybFzR#kaKyPlTPYv}5cW%nOSy zU6U2le$oxXU`!Yb;|#5a2b(mZcH8I5#W=~5{H`pGJRW?BTmxEU5R^2*CbBDybyB&F zZwqZPuY+MmS**_C+m>Q;CyX5oS3hvJQKu{{qG*Uhe%!!l{Htm6?R{HtCGi#OTVa-0 zn5b$)L();Dcx`bWSobr@U$d7T;1c^R;^=qo{>lvkU>q0=x6UDJtIP=woBQ1tY}j$* z6rGm+qL6>Ebjof$HW~w1+Px>q8?Q>^zKP$+3c-Roh6h}+&OVyf$^)#cc2oxSTO^cp zVXBf7FF#MO5k)g3C&qsM$*qZViN|B9CU!Rzn}-36b(MzS61xn)px~$No(RbsR?7y!w(cEHleHQ!!g5>q+#E>?%Rh0H}dhr*|1#X+Goh4!IpB+@) zWx|eU0B=?g;!B&dED!6|_J=iafuF!wqL=7(wIwzEjW0;db~?9s*G@*$Gj9&<7`@866kYd%jv>g? zYjcJr6XV?Bu~OS-(&Wv3vm}p-dN$BcpB}oiaKS|7QS{Zq3KB!KH`3) zu=4PQS4)!4P7xLr6ueo}tD){8Ys>J__X-nFDoy#4&!_LGg47&~Yl;xxSrVeK^^R%D zIvaKCGGphNfvpucbkoqFg>AGf~KQWN!xg!7!F$<~BizH!r5Wikf{X73D(;2^(~_N3Rfg zHE`E2@gzZN?{x5XrqAf52}x(3^{|%)$b9-m=2=G?b$W37*g?wa%W`g2wQXP z;7j8`7sYNpCN2Zm!hKFrd1i~)+gx`l=J2&>NQ{K>)AN0)&N3g>589fRV0aVV>G2&) zgk5=w^?``gA6_ArUo-k?e{LXA`X^79;-G*|+rMPMv+CmZ9x!Y3u-wU=@}JRkPic6D zXRH+-YF#4*8FYBr9@oe6_s&G`kIf%Gm0;i6Zm>wfzgBv-N2pHv zJEx_f)C&D$DzRN_ODQc4qy4tQKj9@Gs0b#?+k~x$mQ+ib>#O}<-`kJx;P)9b{x~74 znbj6u(!dZkH0nad2x{yhdfTJEOT0g|5!ut}>$=#>u%qXTCCA)LuB+H!_$R&SszWE1 z2|fb$O8Z^<9;MPJcIP{0za#Uh!o`A4NKJ;2El}{1B~~7HAcFe#ELp&fz~rl|$~N|! z^j9_8W}JKsE2CUH_82LWkmgZShwOQqpf5#Vg*ORBxV5}6?8=MXx_!U#La0(xYyRRb zS+7z9MD#+7#k=S!Qn>eA@kT6sbBY~Hl6m2{5WLyMf)TBEDZXU63M^3_FFbWbKizAm zR1NK!=|4U8eDiXu21a`06w#9AxjeJNJ1Ss!ihgkXNcEi1Fh`Zcp73V85CNg9z)WI6 zS-7M|wM=gX1X&rP^_riNrC1dl6p(MR+BX;VWF=PN^@t_{A|r#b1K-`dE(QnTpSpg#Vy z!I)G^%ck0&n>asWi^TZWK|ci3&~S^ZT%`=^aq_)&(8j-b3#z zxVT17M|z)$JF;1bD+>}gQUs0TYnz(a_WV}5yZkgbEjE6QFzB=C!&aH=__4M4Q_f#? 
z`*OPIHvQ#G;fFK{hIGH{xP!Q+So`OWuQ5@A<+%&gw#4{(X>Z^8KJ4dBe!g&v<}jES zY4N(f4U=Jp%ZHf(w{^@{Jshr9#J^S4?RF9-S@bjorZscbKcczhxDWk|!F7;?c=b3p z*LC84Gc0g+N|s2~aee>M(?aXw_Svs#yRT*5)hF??)EWBv1NjnujJFaqGt!T$G+)cg zX0#HMNViw$QaP?HNA6U5t52tRs9JWn&>Ey5UVXgHZGkBwTO&w4rFp`~Bf?4h>y>dL zH0E}#S)m;&Fq}v@Q@%k6#8_nVCpQHO7Tbj6K_qi(Xswhy@Pfw8#%-}B$MU*eTLNSO zpp`@(1R4muGpLO=2H-ocp&Nzz{@;8hJhID-D+p?%;9CnU)Z zg`U^@dp}hN8yB;Cryb%PvtXiT?iV7l_-H{M@iqHo%@jWX8y3=}DJO*lZH@YG7MjX!}tPcuBXR;6B&m=F;C}SQPYE3p=R3@uDik84B)kN zu9-YsR*$#6hgCZ@IFgXA==SB{QJYmXtbgq&MS3V63@q45@9ij3cpM@zzWD?=a{)Ja zJ!@h8(UvG3_84913JQWE7ff+k8xtKinO);C48x$vCKKSdTJPxCE;>%*(;&#!%e&^Lw07R|P3uE0M?BAsz= z3@Gn#L=Y<&&b_%;{B4LHGv$VKlp0e57-Je|=wmAyB(~;`*r)^~1N3OE8uuamnHcDo z7l(<-z_>}QVvRn*GF1H$@&zCVqi-F>KUPU7m8u^sg(*ND43}H}$AS+d7|e@wRYy++ zD5U8VAye=p$6?!HHwaS>I|%aboHxcAJv-6jXsQq#$B4;LaHz-r97~1w(qs%;KmY@4 zH`ZvN0&#!v41OJQMMEz@2htrqu`~)+OQgegCozE25=i-(ArqlGNML9-a)j7~3X?&1 zcZ!)u8t>BJ{;828NE--Y^>BW0VVK3!$TQ&wz(=4WxlSgkf8$D|?7;F6@d?r@FcGt0 zj-kR7fr2}kQf9_?uuLMfo{bk`nebpT%x>Q&@gdO)5HD4zF#iE9w2#(E-^g+h9 zS7QZ1_T4FmY3;ASB)c=-bmKgZ#z zSz~ah#bNOf_J6Lc<0RT}d=MJGf2cl$7T|YSTe!w6C<=JKv=jt5k>F8p2P{HQP=oFz zuMs0?6b3=3c$RxauxPpY&(#wo1T5B8L*r<_0>>q5S2YY{2=d2-m547NUQjWs&A2j+ z5FCR3zGEqvFqFRG7-WPG%7lUgJ9Ne~7rtv77DR#qo?!waF|{;ncO*1Z6+xMhC{}^%hV~G z-}R~_20@{~fPdFt?en;jS-fm64$Orhq1D??jctnTT|O*efUQB^)XakNl_rAj@&U5| zwDs{QnBT*kr8J6v2a2~Y1qmYc^Y_4_r&5s7J0bS<0(}7f;`96DLH0^Y-n{&cQVw;% zHPA0>v$vVoE5DfkX+|mpJ&wLV9T}a*xhcl zIQ$^U#>Hl!h0umEc(5jTWjqWW3Xq`xV2?uQ4~jtttA+DETbuwS46Jx8afwFaJ$d2d z3ZZK3XLh!f`4(wdy;qowJzU@ zE*?H~VE^9TyLRndvnp!kvZzH13knJ{W9x8~jOhQbpfL8oToL;}ui3e4=dL|ZKYQ@S zSKj~dlh1Gh5`euBga82GX$V390Pr*fApih)8iEi206Yyr2mk<{h9Cq008c{@0sw%A bMI!${0bO36huu=@00000NkvXXu0mjfmW0V> literal 0 HcmV?d00001 diff --git a/yass/third_party/mimalloc/doc/mimalloc-logo.svg b/yass/third_party/mimalloc/doc/mimalloc-logo.svg new file mode 100644 index 0000000000..672c7e41ac --- /dev/null +++ b/yass/third_party/mimalloc/doc/mimalloc-logo.svg @@ -0,0 +1,161 @@ + + + + + + + + + + + + 
+ + + + image/svg+xml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/yass/third_party/mimalloc/doc/spades-logo.png b/yass/third_party/mimalloc/doc/spades-logo.png new file mode 100644 index 0000000000000000000000000000000000000000..d8c73fef436b101d2a89b70466a00ad18633bf50 GIT binary patch literal 34583 zcmcF~g;$hs(DuS29U|Q#B`w{dNJ&dK(%s#tfOI!3sWeD;NaxbsAS~St3*Y1Kd*46d zBKNz%L}153*{g zz?Uzo=?@U-HAwd3do|C@!xaxt{F!@{)04EcBgTR_2#ByuOZj&M{$5eeQ_pIz^U-Rl zXZc*k+^WxsYXK21!EkSgF#>H{0)%x6dJ^+eA$i3V&)q!!@WBi6pqG|*=&$O&CSsw8 zfJ*)S<%j=+AD6l}zS?d{GbI0Ht3F;#4LNOzF|8B%es)fq(MAoU`#OVw8m3OidhYiE zi_`jXQAERnnl@Q^gQf=&$Dyow9ahr$C9m^$+k4qICxzY%4U0Fl8vp(G0KHX-Sr7%B z;}l)V+%~-;`$M+q6xX`@AuT*+{)5u zc-5qsz9~fv2B>}pCA!_-u2(V{iH0`hpWu&%$==Tsv^K@L&==gtx8;2SO$%wLVLWQL zs2qc4!=IE&&h`qegfG3YbNULgIxMyK#1yckWFm9Bq<_ekJnnzv5hW-~ut_JN=v6pp z{T2L%7Fh)li!@ip=HqUYC9H-o$m~-N*8h|~jR0e1(>D9e{lOTUK=cFV_5Dpc+5(ODA0+bA((&D-OyFrwzwGmdopm8v>}yBO^>w6GWd z^zR(C8KzuorE%0cb%F+UbLnTx#=i?iFfy`?y($Nt4)tW3IOEw0d5*tM6+76b)+}T4 z8BSdP-U9;k{kXtIR$Az6>4JXwa|3O$da0>~m??=edo{)CVsVhyW zNv^knkY-%09r9brK2(}uTB7c1=^pw`oSy!V0u#j;;X8>u#Z7-Al)>Wud3F~r3_Byp zAmn6}T6?*~U3|o&O5JxN0iQH7|IRGcJ$yt~<7N(dt>5z4;>Mlx@0}_$TO1~r>$eCLbXq%gd<3_SK8$NAuHSDIU-WJF zci;&hv`J2AtK@$Qj)`3n2Y$TexBD^?6CCyFoNc(q(P&=0U+IP-gQN6l9M-EaRhKpT zp$`<=Gp!tRcZx=0fum3lpMRp(KG1>47P7v56y0A#qUQLDPDS0a*Gl)(C144cn9$cy z2_|mvwfyX8& z2s_NVp5u9_O@~5YgH-}C*m5quGf%|e*9Ur92Gh>ncx%&<3sDZ=VpAmm15xT2Zg$jA z=g=Wh%?ZSA#95E6irqn!4PiK^9nFF-`o{D2zqAN0wekquG>W`BTKcA&tWXW9Eh*S; zKQ3gTbq9jI00pY2_9$yr<1KUQlm#AC#KlAIABD7QJ~k^G>MrK>AP7nOq6bPWx(Q0W z#jyB`GZ>aaTIW8av?cD8&$-!UM(1#OC`3(r4fsq!Munm7^`p|vldR6^cad7c>a7cr zu@y(pMU0L1c8;9qna!FAOz^UA${ywhCdGOq=L{X3r^CS_g`vIjGTR3RBd86aqQ=eX zl9u5UK2@I?Ek$ol30{=X0uw`a?vL9|CM$OjIgk*b_0RPfm`?Gmo3McUp0WlMx_QHz zh77(xASAWu?H8;!qR)Ou1wW`FV~HgJGoh~BNp6Q-G;qVY)Z}nL=bLssD5mq3dxsp5 z=L+-4UlNEQNAXw0!p5m6m|I810z^B#a%4+XCYVJyyQvL7&qj}h38|}o0G@<^K6n)Fgw;FQIs*JrpCUv(Pk&@iLGSyF4&;jH)86q}AmFCkz>*tMJs8#0H 
zF3VhUsG^J*YkYq^52W+Yv-ip!zCoMN0`{=0e!LNp5Q};FF8P|~12ny@;yh%<%l5-P zGmy1hH}}=ID+6>UIn|fVaG|raGX~&8b#{e;1)<6Q(o<`vAt*;CX5dhxhsU{yqh8;z zM8Z)e5{~;B6ZwH$233&KB5y2&Xf*J(8t=@AX=V2`m(zgfWLjx!s3wW5 z=EXA>65p9H;fej>9s@WZlGowVzKWQd*P1ov?r|*9yY(L|HzlFcW~(bJ{ns|jEA0q- zk&ZLTGAyZ(Y!3bjY*tGoMW=*n*hnzlbHwb$%gH$)TxbW*za}Sbvz83FA8jCkFy)+V z{mpGPVw!PuOBJqonN2qZ7m^Dm;TE}#an?RE7vD9$o~EV;{8!hyt&<5`+}zm zKLZwfhd&vH?_A(c5&_zn7M@YrJ-0W?4x$Nst#MOb2r;P^A-cIdbI{TBdSl{PCq`d2 z-corIUD$6X=BD-Qp!ZrqqYtFLb(=dBa0UPb_XV2RSgjAiF)u+}I|P2JlLeII$G&+I zr%?lrdLCOFjLbgwwuDOI}26XHQ1w${&@{#vfZFtH3~5el!``U zvjsOPVRmXi1Om}l7f{2`)f*`kL9uw#0zi_!1Qc^v^|RfvX}J()MgOdW|1>vGh}>Ce z+A! za_OYzB!_H39)P^Rl3q#lDLeuz;i^N|S4h_=s^y_E%gaLN_{j(7_w#J~jfRo9R|Gv= zkHQnS%3i`icK7)GQcCSsW<=18PX*+Q`cN@7sbFgtH>h=rfx$64xC`l#-wv9oh3}a z93qa{`Mx;%#2_u!#kv+!Gg(Ui&XwG;D`$AZ`w?0u?032vhMijhZ(x3xQ!S~Gb|c-5 zF&1Db28{%~9{HbUO}5&5HXYLgQ(qap8&a~3n0;+Nt)5znI(#d$Xn1bu4D@WluR}Z3 zZ!*$4zvL%V{n~NFl%0Mzn0>{bp7iZyDc~9$+si-c#(dQ`L=3T}rljhnRQIvXv2I}4PQs7M2(;w5%}>%9d%F63aQSLLju)KtGTrepG@HP5Wi zISr;_hHJhR{JrGo{ANDF{79o9PvXOplwd&pN5_1w?=S~&v!_#>_8 zG|vAdZ=3FU;WM-0&t2ieHJ=i>&H>E1I=(1*pSdFPI)ALALV|aJLf>~;Kd(@X#SHtq zeGIwaMW9HLp8_yrQDi1ZrDGgH6DkLoKy zxWT{dMHjB{Zd7je|Ep=NZb2AO9lgioq&rj&L_j9{C{VR;*@ga=`jJZi;3@ngupUp` z_ty*Jz0*@h774AXQ&BX1Kcl&|ytb=39OZvp(aKMKL)HD6qEetB+65Dy9&KoiXp!Ai zG#~2&`K}d=PPOO3rla9IcTyK*V;U8zllTH;L`2fJ)8F>IUKCk6I-vHM9fo4DLB7bG z;F6Md_NedW_8A(C@>3w?cl6Ec++FgHX{29S7qWw7$-y+VL;vn&JKn6f;aQoWQDj*} z1oRs%GG@S$&CgP;Prk-kz;=^b*z!OGJ5@BVhO1I(D9v|39Ht{{L=j(-iIHLR2!pgW z{xulx!hIzP#6*YFPF~)#za~K=GaxH^R(2~C)3w6S=>wG3W$NtjI;({?JF_q7boTG^ zL^rbFPRm#1)?2`ES3wIzER5~L&92`+qWm*lrfA3RouSS4fv_VZ#v zCWkr926H!h-I@iJrKF`&dRRBXS!sc<_sr>3q~lj$v;t4ik~u3okL@KhwK)J$?ptdJ ziA3+Dzxk$>ohf69fXXEuY$33V!ag$!3-fy3B!d|Oy;w=YW#mJVRY<(gV*6k;55*1z zXJ?`FwYt|Zfo2(t-#nn#AtjSH2|c$?CV2+ zu0vqGeigKI?XkvUBA#W4*HL;c<_a&|ZqO!;jkV7M44278L{Rb6 zrU)rGKV=nx236f1N3lnb33N?6Qrng!<4L?QTTd>VTEMGR7!cix4XWs^cjtp6A#)Gv z>FCZoKa3s>o&0`BNxcFTu1iKDR@S@6P-`z&i;MwuOyabo$NS={h0wGVmR94+bi953 
zZ%w+djw(_ZOqD3&_H;;m$CPFpYnumz9u{YQKK}-c_OIEvBmpYS{r)iL+C-MyOLWY6 zvEKHLI<%(bhRa1yq>WDHn_GU@VZx&0;*>s%Y){u;HWfm*knFsrj&e+4-NXlKIV_e&GKN2P)|EWbvw8xTYB1mfJSyt zX0g-G3$fYiA54+2so!U}?oDx$o__G=0i0qkUs^Ma zF)!DbzZdykgXVzQiK|@^#h#fu#eBT?qV(MAcw91rnO-`q1lDh1=;fJwA*muBnzals zvDa;0@#v2#9j>SY4E_ZuENzon2PM%NoDDgy7tx)uzjax1>XND)!nc;@7_OmtC?JLE zV{A7)Bfo~;xt(AqffS0gJ^yJ&j`FQVNb}J!`ZHLw{B4?H7|N`x-^2om7nc%mpPZ42ZM+rh8onETP`$w6Vb{MY zXSO88`W;)z6*@15P6cWeWxV+VS3Rm(sqg4u2tsf}7(Mf{8iqgX={m&#+14rs2q>PX z3(1+{1G(0dncLdf{MSp4*pVZS_TFa3#|Y+T2gM3*qJ>b24yZ8pfH{m~k{QpLN|3Yv zE{AsgU;e&TV=HH|eww?o89J#4tQY`{U%Z14=92a5O?@t;XGB>KUzv77eY^*+PgXNC zg+^LkMH-rPWTyp)B(hwtC+))A`&IK@>?x99UgHF=|J?Udve@Ig42bz!AV|xB=5#o# zp?0_Zsy7IB045bYSp7>r?-B@a5h(nN!^ggI9Onk3Vf_uBM=378oSPx1I-H` zj^n#JIPHy;Hc>!X@=1rO_TJI#Pnz%OR8{l6a&l{{YIM(2rTf%;v3B|UXrG7|r0NYq|=zQSQ+w#Su>Ov@1k z$Qwy2{Y2!iek{VA6MT$rTjL^`Y`B_{KF83^x1`G5$p4}8r(Mdup{9ioyZnIn=C0n@ zO0;{vriZ;bv+dg}1NXuBYFOEbv@sEZsBsuDJ}T4EU(Z^c5fTIc)~r5ix%stobsxl% z%U#l}k8`20`0;WN?J&;MRc$9gk$lsCoDrR3mO#_(2boQ44HRB*OFi5bmkPv1>hWK% z7d9O%H&M7Pw+CAwWV*$6UC*H$(V4b|RqV>0iEi84ii;=1ThRKSt!jG_`hKqiP-s!@ z^UHNje*f|$2SFRB2@}9(|Jp;2ndumefCZh($o+jBr7iM@U-JRl!ucnJA|TY=#Xv_^ zQAbY&ER$S@K2tHzQ}O;QS!@-njx@W7ysbHk^EwN^&%*tafwaGXR{B>OZ$plg?zdLM zROE3(S11R|=-ESN!uf;gjkyo@dwJ}eqx!e4J=5<9ecbh~r)apc8d^SH&$-KEii>Z} zt8I&!?R3Qx5cbh<0GG+pTOjb_{G`s=*TUs8u-?ZzdX!3Wc&$It2pNxAtyIchYW%9`aa&7!e7MT&TK6uO0lIQqotx zgt+a=umA-V(`6V&FeS=<-KMgdNAIS7p$4tejQ4wp2ij(ChAaWZ^kdYUa+ivm!6?i8 zjR_U)8yq<|gOtCf)(^quV5a_A!MjM+9fFE#?$>*BKnzF-U)x=cxCOIP9iCcQ>Fbv| zM+0S#?m}sh%2jn4sY5!BHjGZudN2dv-FXnXqrp|Y{Nvnx;Om(S3= z%}TnHc?MkLN%yX7yz0PTe;UIf_4K2vz4>k)Up7ol!K-@6Ffl{+j18pn=C=+zQ-;e zz}w)sK+D0ySHalg;Ab_guTahAu71f4t&+}b^Yw+m5M3tI*gI1dFjTLQ`d%`i!_CFo zSFZl19tz>eyDcZK|2y&cB3AoPMS+dKO~GqgDbbgF0LW5XzB8LqwTWnZwoY{O(Zmvw zgz>-b-P7l%5j)t&wK|YTvqb19S;UVD)hkLska0z`{-MfokQ62mbCj}~eq`N{#wu|T zT2PL4zJVvkwru-Fcrpg?nwBF7J9Bo%gHN}*Xq1P*m-i^Y1H$vqwh}Ic%}r}Q=Tj*P z=q1D;fNc@o+>pK8jIXumMCz-8F7=4KW95_b5s`~H5*eBoT*sJjsucJNk0fpa5RM|uUe`dN-D){JfdgJwt 
z4Q_pW#lI>P5E>GXMGrSW{+7Qn3ByJ>ipcH@D!&TKSAuBX1_MrQC=C?a0fmF1?1z$( zfGo|K7V_5OeR3a$1wV$9ILG@AY$a{0Pn`yxCJD}hLMQmer*1pJcs2I?+@p7qoGr|eXYTt-n+7;64xi2mcYq5gER`m~z;PS&I(|Ja-06~z z#h4yAA{#k=EjRCioH3AQmGW7C_M8Wrt-K%pZD60;R&I`m!?-l4q;bdYxF5@>R(D)$8n>0##jV4~ z0_reg&Q8kITIpr&267(9G?F{-d_qZ&?*}8MksPE`3nBnq6!ZfI!b_AHr<$rwc48d> z(pOc~2k4U_-+~h$yEMDbr4X=J&aXNL8*b}kn@+e!=MG}~okjY>ctU^mU6RG=8|`Je znWJZ_MpR}PkYu|+{Rgk<>fO1dbWvIzAM3lt?jwVHZ2(wzj}G7p)fzG8fb0CVbhgQs zMP9$c_hHw4!BZE-9Nfguz##SmJc2$Vcp=+G>K`*pNaQWhb3Fy+0E1G@H7F;xTXxe~kS{?DwUXj|? zJmBF{ljqTN{i`p?Hs(8t#1_MvyIP;ID&jS6`0O65@Glijn7vPc&T8cQuu0D16{UEN z&Qj<9YIAq`d1#rOv^OwDEcoWrUe@jobr`+5=V=>Wm?0N$)>Zq3?XJ}Ln4`AH#rk7J zD0{uqhC4`vbSF#L(pd59rNToDE@oh!%e9s4up3_mTgOnfV}T~nC_FijE>_nNJuNtP zuzw}c1{noPSn>;Z`yJQOOr?-fR^Y^Umo1Ni92HkGCu&Xm*5kgtIub*AZuDbH;Z?h( zY<`+LpJG9i@&=|DNFc}V#p;9()(eq^H!c>mtRgWA%F!d|%Vz^zI&u!QvWYkO!N6yIc<4;SjHV5v?GzRARI%UaFbqH8MY=4g zsMrSCBEB;lyM;aZeCWfY?>VSRRBRR$07cd3Mm6IoLWLZ&P5ZC0dt(r0L`Q21a?BuR ze`G3w?#1XSIoVBJw0z2qZb=5waR`Fuu zX!HG6SuEB=0_#V4?1`5nKSSxug}h=KBc}8=qpv#<`vQjud8{`3Ut|D|?hR)0<*@bo zGHla38m=2@q}J9G=of!@A|l57fQXlO<>ldT;3jVY69`1s&R5AoZgTcq%;RXPaN!a* zHaSI!ouoHg+q;P1)xwXuL{z&qo=$9fA(qI^_>IO3woN|IH`BBSxALlGlKZ|%UY4*u zq!aK<0X=^%JIG#$-evIp9`vGm)3)Vsv;8NVV^6T0oUwI)%sHR;MsJRbDC51iO_sAV z?c6^3Ji~&)!=LHpFH*35C0f*DMGUIIX7d$u4T#p&Et>WPj-tzI((dl9Mxo34f%Dpg zW(Cv3K%QmK2SB9}V1PR7uX;>1F(cB(+jRDMVOjNDHBw>b8TBihn>*xNzK({aV+0x{ zMS>S>93M999(Q!EZR%HbGi{@pBxWheW=sJX>2E7tFO~hUZ!y&q+E>V(DDjkaU* zTM1WUy}b}H&G?LFDV#?bGLS=+vE_aHv^8b;PEa?X_w{#!$})<$ zKPI2atEVuwYC^E&T=0>`xQoxNkFnI-Q&;%~03Q1QcKlF%e_J`+<}mZ(jjpSXac7)1 zL=u?4HP1x)!gXQ8PK!uV!DEP2kVHOyA;mJQ0C}U%Y^-XN`^P-%QfJ(Ye==tA`8qTw zK{b0S|GNu-zkuW>Ycwx5Q?(g67W`Lz={ zJudig|IgNBwh+@4Ja9lO)V--QP;CEi{unSM`(?KAW25NwN_A?7uvK@l?2u|tqj1l@ z*xlBC*{<(bI7M?GnHEvjFhbIluqcKIr{Kl4ti+}Uvct=t`{cU&e_hygRpbvQu2t1; z?{no+QdU(K%b?S5md{4bOrG*<^q!l)d}LZ#j)^CXQg`!;Dw{eOGsdCJcG`RordUI! 
za?o&n)X5k1f3TQIC?AlFlw@!9-?`^88E2$DBymGve4a zmfcMk3-#fT2LvnjpNJ=mZwhy!2BI30>Yn}3FpJKa=9MSTtIYoV_yQb@rt(ou=qv;M znHiXK;x5mALtPJ4>t|Mq9&MjIb>t{2U3+g5h;Hb>A>8eiKOB;L-TyHtw^r6E`nhOb z6Sy)H;L4;|?|wdo33enC%T7!QIEtrl&GFI03~YU?=ci|K-bkMKYI7Mwd}2mD9UgOT z8@|PGAt&-NKV%A52J%)NNdsEo6|%DEXl>11hhN@D@>e~!6?;99q%qFQ8WVBd@fY>Z zMJ&76_Lb4ptUO~-9_BgalxYjy<(d6_uB9gl<7M)NcLLE~!EUMis{|L@+-V8vZk-#} z-S_KFJ)4d%CQF|FNtl-bWwzg~foy%(dXj#+DaF!07-zY$-nootP=;G&<6w*OHt(>p z{OTM$>O99>Q!C(^126BFvJf%1e1{l1AZT~afvi(nLnQc?<~Xm6pK-kX7BHrz%EB+w zHMW)9f|J~?x&Cri3}3aQnPzJ}5J(N`WEpa?l+rilC&D%VV%AEtuw=?LWOesA|Fb3z zrBTP4$-(UbE{TL}LcFM-!G;H+B#p`k_Eyou%V0_7aMGEeIBfF;t_ey{o6pCnTM4+J z-w|0It)?QA+b@1B{5y@N!Pi>qX$MQ)HsA3(xrl6DF@i8cCSSklx>C85M>lCBJzB{a zu#79}5P(c-Fma@wCMCb&ghThP<|0N#1fGl3Qnc`VJCwFxJ{2X6wS7@A>sj)RTMahw z^c(M(O`^E5lUxs9Co<)a2Z_>7Tz7Tn*X$5yMAr6$enwnOjs(BRA03itc33tV!vc8U zKb3VMft1fS^1W~^hr$$bzLi`nOYNmH}jN=znN9tD7yfAq@$~?mZSO@Tp813?RH0@YrRcFV|D0M zdtA_NB<0_8@gQ?~t5#lH)8Z)~C}APP!;ZTb2J46Ebr^=5*f^Fd!8&i$pGa!oSOW6+>;9ICsntWO`j#677*EsVg!y!y8qv}=(B2BQ3>P{@TUSa6abF@>DL!B^ zvQ1F=>do?N5b~`!NYYQb01+J9`(m_m_V#LEcEhyuz16j314WX5%h{mJTQuImr3DLo z1(Xt9z@c9#Wv5m-j?wPH*9luY14%2*vw0gV|5F4A7#gUS)~$wigkV8w)^yP9tidno z#p}L;R;v&cnlE1Dwkd=kd<0>rmd#6L!lpN^q8uk*sl)2`mA?&r%Gl0l)6#K{xGR=X z0wjSebW{ZGe7*^?dSd-?6n=HFO+SCk$xE;o4v;!EhW~b58X9A)f(Gm}IBB2A8)^@P z39^TRH|e*{mF!I>)oK0cfX-1w6i|%yfbB)b&4H~q{r7(c~i?3pu_NKxQ0GRbgD!4Lb zNspqDCU0e0;H85_>brTr1jauyDX+-C&JkE6&MT6OXDSd|8@zeNy5O|gfk$4}9r6}4 zP;=w9dHI2=)H!Ydut+aJ6)m=QS4PA|ZX$uPNp^iNl5zJ=h$$mK($27Y>r)@2)Xn8N zkJSnUOJ@*;l(5V-CgLVm}YpH2Dl6@aF3t){B)dFb|`}UXmlbH zz;|b#_b|3xpj@IPVTo_@b!NQ$8V#wz<9WL>9&#vYX%Qd$Z(DB*mw9tJrkggU?oOFC z)yG*w4Z_h4*{|HRhp}+$E|m!e5R4YT)F#y;#oI1ve&+5gGELJ$#(w;=&@3U7uG^}~cMT-hf5+;N4P z(SwK2Zm6Wp&E_Z$0uzTG z*P{(Vz_tV^g$pN*_8;%{XlgE{8U8lfmza#WNVX$(+Yq zik#4pF^}E5ggW2+BK49fHw2r2z^;xJT=b|Q3vweniSwnW0Lyp<0hT4d*$!#!$G1A# z#(%uB6Mf+d!44l!4>Y#OrA?c~{QN#c(Pn9Ba+M=0Qy6WPEu=nQxMG`n^_Vkxx9Nsp z90>HQgFdi{cY=_(qoW8`6F`UV8!g6?3oA(mSK&<^^#!}@tYO$^g}H;*YoGP=*jK5O 
z7ibL*HE%_u5d6f|BOspvSg@_>Wgn5(f=Jr$;1*tq3!z7y&>#q+$!+nX*!~mNOuoGU=aFjZd0h0rDwoOKaXPnVjrbs=&waxWO6hqg z$bP&JEEx!!DGE-iO1HO_7pPzdjA?9jiZVv`-7&!Et}g}HZVPV1^31Pl>9vQ+caxb^ z^)|^K=!8<>2R~}-^!<+P4AnAK=5rugN9szd`>2MS92F9|qb+l*VGJ(b$7L$AMh0CQ+z=MOuHHP^DjZXIgUSfFX- z+S>-+xs|)+8@GISn3Kt$(NG+{W`~lk7G;=eR79}CK1&T(+&FMk7@2jAe`m98d_79M z=Wg^!z|O+;b;F8O+>4R6w1UxI9%PkQd%L#M*Zi_Y@cU(v&w@Ug2O7x+${RDst7>Ee zal{fDnU_oR?~hR`#BA5eCZN8+HcwQ?EHd_tK?VKagZeE(afVw;>e-e}GD zxrk-axhQ7AC-A~4Mq$@=MKKDP(wzSVM(wQ)uDac{pxcw@k_rfCq0!VF!4pIMEHh08 zK1DZ|o}{#SqoTjphuLi@7(fXgQv(Y+ORXgUXFXJ#F_#j}b1(4F3;?k#Vg8AbbzLGv z@KAbEVyQ;bgdbo8&B9*#fL3^+7}+$KCau8&kGMjF*6r{P1xw4$m-Ce!KovUNw-0#) z&T9u)FdrwTQd;04*7>jebb-1bx93I=Mfp7cQ6Yh)#x=CbQW06|9+++9a2G!3C!Fh< ziv<|ETn$yjGzjWO(4W)~$ccB8f7|aEd7sq1th;4#J-8oI1s1TtE&f!tet*bgceJD> zD^ExhO*?2t=@@Y%zZi=qMW`E$j{ZrL!$$;oyntI6E+bdjTW`=i{t0)NR{c(P9t4p-K?-=)mvCV+%W)#F`DscdNree!aqKd_oJ;WH_SmSXoka#!0KvoczFbQGYYi zco>S(+^YaO^-~W#R+W4WW%GM!t;lAA*>M`eDt6gQqgeuGs3Z0C-2Az^g;V5G+^T$e{?I=fI!}Sr;X!$ay`@ zlus|>-vy_7J|J?L+Da~{N{1{36^O2hU$Z*Pj*$6gB3LH#+&r_gc+Y&*LUN56;{@2n z`|X>>PJjKCr7h5B{a4qdMr02|J>>g4hYS#SvAL~!K5v`BY`-ibEdI5BoGnp(ztF<3 zVI0LQ3%7Ph>;Mz^p_X1;y2nI8TZIN0?<5p9E*Z(A{IB#bRB?fjDzb`;`Qhrt z8Kdyl)6N-+(MSJ=ma~C8?#VV!BG7QFC`EmLKeCe=OmQK>X3!bC-u ze*5~#ND*JuZsmWuxQ8emH8=GmG}+akWxm26UY4f5 zH|jibJEvs#H56L7ROX08&mJT-wUaNvo5hiJVdoOXrCv$c!+kcwEV>EyW@gAe%=qis zM`C>ZO5&ic-OX&24^S4^8ReP~h>)XP*ifZ`{C?{CepvhqiKc)I*}KOail7Z^ysJ4c zErU)GExV6&YH|vH)-UpTh(yq}As~Hg`i9AuzT}coQ*&LnJEEqFOQ_WQQ_8pd1dvd=*!&QKDp5J=#zzBItqGbs4>mxcLgPY8K zg>MQTbW9bDEsvrdlwC7syzaw2Ga4NOX)bjupnslhV`I1lK9A&eS;X_aU`_d0tpkFI zhxBVa@1hA*l4XU(LW<6dA@vk)@N~rpqmR2k!xm1sjC30>Si?mvXNNd-x~Bk#lLz2I zWMn}d19~86)h4CvO~GvGb{(8i4zVrRRk}oO`PH$X_-tcgiKM{l^2e&MCu;>D^s5&T zk|N^t4qm>X>t(Ir6U3ty*^d3b;S0llb;et@urKT4dpOII!p< z|NAHJJ3`#2GCRaiK%Tfs|K|u*Lxgm}9LPZ&jYnm(z$XU*$~8M4iTdTSZ$jRQgE>S? 
zGI$J0RAG%do?r-IKUz*}CcIGhik0el<=@(=$V!G{~KGz=6me zA;Z8J-`=|j*jyenVi;v_7T*%)0R-Ugvc<)AyUYQK*PLqes&Pb_c z1z5QKYPUWy4b{Y%SQ{i(rWywr<%Lngo^TT2xjQd|Y2GmUz5>6}+r$e&c~cNNi_bka z?na9kI1uRCpk)kvc!;s8K`T|##|=8ca*&cr&InMeT;sjNLlx!c*0|-P0-edh#usq^$Y5zdKe;LE|DSDUIYixmVz%Wctp)U(at%6 z7}>(4T&`7b?1}_WV!XZN6UuyA!yzp1Dm-b6|Ja$Z?%D6fs5Bjq(I3Z+m(H2D8Jq2? zuO&V_ky^8(m7T+jy{NR(irHI^Pit@Jef3W5zkK_;j&>A@pn$OA{*ao#z zS(LI3wFKTyRP9XQakABOyAmgj?*C*(5AD{!CAn?^QHfjoWi=eBn&&sWw*6X zH{)XV1%wBhhAiWs)Do~ljbm_cF(nD$+^Fo1g4>jq_M&5+8&C7f%0@@sZ_Y+btc)sw z?w08fW;Uu4qlmgAH3QB zc|)1~6rGfI56+Hc7tR_9w$->3$J^?`XYxh1O04_A8)t_3^2G*O{>96S$F;7!Isyaz z_%|%xM;yp9h~G+FY>U-51B$No+p8w|EGGMx#9Hjt7i__s?$+o1FFMrfEu9c;j zUNb$wgc8>npO6Pv>!1xn;XkTAT|oI@1<<(+zNZ!otl8?q^$#(tb%QSG8iYKI4|OvR zSuW(MJGWSyfIads?^G-(ubrb`ihIz=wlWC3;bTqn#Cdf6TrS;YHvj^XCzM z$;>!<0e__C?8;r`t1p*2JV&$kkF0jbd)5!L#p z15qiI(S2J99UMUp1SC@5?=vR&QBXosvrC@yyDWfRW46dkV2yBQEGxAMIw0Ib4!qZr zsXbkUjqb6^inh417i|F{%XZN(x&U^3{H?WX1lxxSA-a=givrT~i1Xf=xvkogk~M-H zgN%ygHyhUrgNkRv-=?xk9Y_}7cZPG7Y%PCbi=7l}>UTBT%5vOdf3AlC3+=5MB%1m3 zq?Zs9RM*>VPYJvnf1|q&am67J1wU$6ntt>y@AE2-)BfQ_OcZ@tEd2KfjBGSU`%t*? 
zuaR;6u+t9`MRpv-Dji;1s_hPt`eJf0Fz&dLYZqL_mXpgC2rg|~3<{WKwotmYd{~h{ zo(~YT&AG_O!1;97Sb7Kx_$&;I;9M&(m-MLb;9C(4S3PG>g(%&mW99_G5wXQU7N-K( zuXD~#FI2Arfb{ui>P^!pBk__c4Kr6#<7j%xrI?G^iUFhefkfQBXz4dc5sxRSBWWP> z)!)B-7edaHIj|5`_iXBp7zwf@CYt(}3Qkm~3yv?et8OkisBCX6`sI@B$(AS?K6Nm( ze1BCrw6zc4e~LmEyNy{C-vK%XeRBgxY}%%een%wV^l|&>S6m0gH8+&h{Yj{SHLQ6$ z-hpXx;BB331XI@P?9$HEWC|q=_%#tL`WV^__fivK>GM4QR# z7Zl?DUyp7eqQ_PeTN)h#bQa6S5?f>Q>3ct*S5CYGdn*Tt7 zPhtel?r6dB@7@5Xw2HBw{H?RVbjpl9Fi6E?e?Q~Z^Wrx%sly7T4L}qvY?6FC&%Y=t zU|yiqRlQ}zQn}xb$1XaR)c%r?No@UH<1MDVsMT0O9kTQr`U!rV1q4RU_1o6QB74q9 zpp!_@fQY67h+!k?@uTe0O8=C=Zx|@Ucl9@ds$p8J-QFv!D)T@&OY9~JAsw+uz2>sN zNG^E)Tqlp^4>e|Zfy-C$smzFtE2S~zGxUlDK>de#fiBoG{$oF2tNDXBwD)$CVGpof;rrb0ZA2rid(>6S@r_tI+}n=7C-(*q*F((In8X*CfA&@?rY)J8dSgL4W;J ziJYT7Lqp0X{zf{OeP|!CL};=>K*8dfiyM<8iTqbDkp*A-Y2D3XV;PFP--l}5ke~eN zqRjT#ZSE8(>FO&c-bMb=&4T%nK@5_v6yOs0xzxW>Qbc`x*l`l6pfb)$u!7vp8z(&Tq}&mi~SON49aj_P6<6 zL}mygmjmtk9OUU=3jeUed+3Hg^@lJy029-&ONXyLdAI@uk}%o5M(^UumDu_>$jZo03QQ#j%2RvK!J%7#)PS<4LU6ZGggMD%Pt|){OCy^6#%Nu96o!)lkr-bG^%%5Q%CCU> z_m-Bheqo??dHshPi;E9;Pww*F2}&Mlmx-B6K(Pbxknq3uk*oQHhR^4V1m@5eb{-LV%$zPFNyYb= zP4^$(l9LA$fLS72HQe@k=m z(uE`B_l=2Z*48AQ8AEzn=p~jq0xQ=v1AQEarQVXYf8Nu-$MD?Vj>SER3xH%Nakw~f zI5Z>jl7e+(LtRis^{Gs)Ur#b5)((W_M#~|3hIeaNxkI%6x!r1Jd>hkb(f|@DV3rYA z$&;ZiyUawo(IxV|%!N$Ro}Mx*8y|K3>5V}VlMwPOf|!iVE`I6{!${E~G= zUHy%L=a^HblFx``N(3}mV$)8AJ=UYpSIod|q>T@PLXE0_VVpebWgD9UnJ`7c64k>V zw$p7z-$(7&GwUtDqb8v9D{%i$c*pb`GC;}en}z~^QLtb1JUKsoOxP>2abQknj+ zamyjrI>P7>!USFKb0nVfEbpm5{ZQBaoYnO}*b@fyWO=qSU+qjh>R-hX4~uzNoiQRy zy`lHA@^wrVXZhb zG6ObLpOs-hJfEVJw(HHc*ZK$KdxUurzQE3l1*KaR^453|Kzg^bgJprgaBKr2U4XF} zP;8O^K|ox~uuC0vm}R`xUiDKKW7wCqT32kxitRMOD-A}Q{e2^rt1=Q0h}U1ibQ9jX z1Fa9Dgh@flKKaYeXg*GCju@p0A${Gd$x^S8p%31C*P zBMe~LkY%bPaAO9HL9OWhl5%noj(4v(WJCH520Od#-?7*T4~_^Icre*&JmD$}xx0iT z8kyJzOU4>5XiuMa z4oNEf{qQ6I$M;H3vM1X3|Mk5xtqRI-Sp5;f;Q;lA<6kQDXI#0*F|@Gh6S3BOlW%oj z++X}Xh&^VDxx&H)76Qee;iUy6joa(QOr~X;km^GZ89suMxa_wN z!`fG1`>?w%_>LR4CvA|!WvYsYY^4$wg`RS26rB 
z#NV3XaIx8fa0LJ-C*oN=6tx&`sou8NXe4Lbf21^y7D1L->bi2~0wF|gZB#P-KhC~_ zAa|xDKR;28L4%rboBEvS(O`)i-7w=|eB6-#SeXI5EIcQ~DE)hYSG(cM5M`t1o-ZVr8bl0_;s-ctGMiOs7C$|>7-nGw zSGamU9iC5*jL3}Kj6kRf;SJ#NIPiu59=P`PH&tPif(NwVIWS{cXg)PQAvw5O^e=u> zHCI=6u1=q%U-3fFi30*smfN3J&{&^BAEHvD;2Ob1GnxMiGq=xTX()?MLL?m5{XiBg ziKIhs}zrfr6)h24(Qrs%5B*2BuqyH(|)6HD+ zbYgH2!P@~51QI^wjyA)G|9-TN#cDO1oTb6tdeaVYW=r*Y^iUR3$RKwPz0IqGwNvaN zDPu}2dn`sx6~8!qKt;jbw*p+NHH_CW5#3=Dg9x!zYF9VWJBL?MP~k6lSZ3Cgcqj0b zMyT_ynweVXxIMCYjp?cT*4u_eym@|58C~p}xvkc$kLwD^LOP%+J&09osV8gbFF8xw zUnF#U&e%+T-ghcj+wOL52zlcbq&RGF^%^AmQdajSGqF373F6s;CUf8TYdIr(X>%I? zvCV6ltafF}0bUMr4PWE!V9!Z(o!9BW7~l`vz!2i8`M;x_Equ-Ysv0;C$3i^y`~1j+ zh#q+R*?HGFiHBqfu2`fetgWed7vvboM({7-46jFhiS-tJXlyf{Z!PNzgI57R+sSYe z1i*0ehM2?FGvIv_IF!Zwq7RWuDL+>_e8OKgKI~-@T%6_{=F(&s!97HjxySYI_?`)G zR-Z&d5cCgs0yuR(N<}YSBdEoW8ubfD^ToOBA6|?|o87NPmRd`=en*yIb?NZaML9SF zFYwGADsb8jj%OWuXg~91nSc0FdHD3E#afRb=Ig(d%fIeLcy_DkCigmTt-39+1mi<_ zHLlBI-|Q?;?$Hdvd#K$vL5{^cJI&hGBc6haAGK_Qp;4{8-=~f`Iej|pyRHz z*;I`Y&SRpVmFcrNwUvW~A(ziQ&-gPM5wpdB8tGsB)7%t>m1Lg4n$}WsHmvgL`Fp7D z5dnB@@ESU*Cff?fjG44XX7zGwjiWlHqRDO9O3+6Cuo8%ejwF(xnAPDg)tbv5aV+EN z>o|TOVPssL#GCt#J&HR8o<3*_58VJ`WuT)OPHf-FH+ahG2%a1vg@@tR{mvV(x*?xm z`szS$0vU#C6n=<-1TM@`k$bb|5gv7EbAJurri>ph_=l zhU&hFFZOpy#70YBZh8a)0y;8uCS(O~`mbaTgO_W5{3gE2JPJ;UZLf8mYAZc0y|QoO z+mckLvfQV#-YNxl`aiz&YZ84}Sz8frssq+pTjg*wgIn07FyFUfF*w-&hIK{VFIx@q zcZm-TGA0H^1VTugPq(fx1y_IThZ0bqx8V+BN>tmVo-_$>Ig=#?AAxqKk=ld&cs7UST;`T#l z`hk}E>8Q26iAD#pYgu{R?W+7w`Rteb``55!#`K5nfefQ(Z)xy|uY`OD84uGa?8)9b z{>QsR@vlss;uC0HWIxjmKX_!VcJ^4^x7KdN;BHdn0g_??80t|?k~)H@AR8*66aIr|_sAFamH zc67GxDjWYuVJ=3H zQ#j|^x(kk?lrYl18Ju!_ewNI&J#Uy=I-q_NnrB?hN_vmQ@Z*NOZC~j}|9KFJ1 zuZ}K0>KHw`8G1RNOgH5E;cyXW2UQ{eVOI5Y`^W?K)Hh);-MarBS>-hwPN{nDnRHr7 zojoVmcoe~xVkaaZx>G8OL+kFkl@IN3dZ|53{suh^YWvl5NLfm1oG3l3I;@)Qv6m2< z6WuTZ&ej{9#TL*TJ3vSF^`a4edq9dyBk^|kyq6j_b7;9NcI{0!@kr7c%|xiBYCIp6 zBv)u85%6($IqOZ3v+FaIGElE`6u4Gw+SsrLPq;vE$0>#Vz4~7hD2?4-yS#urZ6UUD#KCqlDtrc>HyO(t|QgfwXf8=fs=f#-R$> 
zV%8yD4Hz80JQ4utE-g7}cO|d8q1^Cgoh_x18ZD24$15P=P-rpr!x6}0o+CX4E7EfResJ|kn3E%3BGA=Jw{h5=SsS{E*Scrcr-a+N$B zF=Ir+I4YU0VXBjUcMuY~62IbdwkE!Dy{jf@_z1`N&Q8PJj7X4!$`F>xhuWH}0^_(7 zT4*3=K-XjP_}u(> z>V{m_`xH|X3f|rY<#{EwED?i~!b!|ONn_HF5Xg`2VQ%WNC;kVdCrTCid5aKU57H@a>t&|`bDrmEWU@%tuk{#w{j zwTIr3pR<`Q6#!sj;!Qiik~eS)`ut&Yc`t@a*fRHlxal3b#6ul_K|AEVam}dLq<*=5 z_S23%wCnElM!))KGh&DqI&=ZgP|i(~mjyu5bq%cR`YG*+Pq5e{Xte=$?Xn)KMM zZwXr*l9C z#y2U3pyN2LxYF)B-IK!rG^SPGrx4v6FTObORS#Ni(2J!kn0z*S&g*@=43Jx>|1#@m zl8D>&^Ha{@IiTN(F|ry>bmv;jHBBFz-4w7GkXha7HWmRikKJ6bp7Yn%bv`Dw z?cxl*R0k(Iz3kOU@Ac`eoswuhUT4jE+#qys5NQNm4y()QLvx+&6v; zbMIL=84pyclgFXlQ1Sn5s)5uNx88@Iqop|C7}n-NK7XLJ_S#fNp~>1bxI0f{gn0P` zn>E1!2{yciKl@u4@DEM4OJ@HD!cl|v9K0KX6 z%PEfF4b2rfpJw&xJ}9fC3jk{ql`V?X#dmlZFW@D0f&xD*fXN!5dkb6ixw??-gaDxdiI7S+WpBOZ<= zXck`9`hKik(XcPNo`gf~$MxI5O43)%vKrs{{Khj_~KGg)Ux z?}Lo@m8pnPFodjJ~JZ96}7AdZf72)hVy6MowwHR zEC-kqAq>PRcOHkBhpiDn8%etipJfIG#;Y6s98%TU;;g#q==$@71{HTglSZ~+x+iMR z!tUMcW~<{T=he=Qk7b2Rem#eFFE;Uxdo#BSG5ni6h#CVbBkoRvr+b}$8yQhMmUa-L z|M4cN@b)i{*X-p1goHmIr6K^b4%iBwe99xA0|F+x|Lz6-Oo6$W-4Jx$%pHpy4WHj@ zXgORMHu|@5%~mfirm0H4>h62HcW$jl%R_&;fKu(4souV-VG&MD)=E5jT4eR~`5r_* z4*BTWlR?S#Vj8^%slS*G{wIp$#HhZ1y+qQW6ieZD)eA=n??)N*dzS{V%fR>#S()V7V(;6Z6 zsS8`2hQo5qfxmqe7tisPTOwU@T}((h9vdTdzbHu^JAEf1OA^QDharK~Pn^i~AwVX; zh)ep{cZ1Af#lnXgDp!Xa?gf`&NN5oMUGj3_=(0GVib!kOT-OUpe86RxVc>v3JZkrc z+|==1urT)2lK~{mmQkbX5pF!My>}lm8)u5FP7yiiZ!p%J(+T%>jI%GRQ;;(D9g&Pm z;%4fXNi6MDbt|*%yL8kltGTLTL={h3=s*;N!^ZQD(;gikr>fxygQhF{<0otL6g6oe zICm`I&KC6})hqDF(HCk#-W@eX_U%8#T3qW}H3Q#HzaS|8`Me6-I#}dc4?oTw z(5|e!g5q<~;>KX}B}mX6G(W_X6HVhK!7j9UB~!m2lm*>Y95u7FX2T$m<+=I$vGs## z;j2KoimZ3;XY%+6E)=hxl8EnHm06{}8DD%Ze%H1V^)@B+?KB;(q^KAFf_?4>Ya}{4 z;ZaSn^0&7c@O-kv+(W~3vCnTszEVi_YFj&nXorAQR*NRpsaI_%TxDtPSP9WQo??KX zXJH>@)aG{m-ShW+ANCk=JGMH880SyUISm@XYRvtC0p;RfyFF;}HfqSqJ#P^<=rlHS zEi^_I)?|gw4`yF9WtOL@G@+gpl=rEcDMxhdgQhx8()>b1pE6|h7xJ2VOCd2joOY;?7BJGQb>}T!X3oDxbm@WdY%o2kGKslpr0Se?*1~*!SCiO zZx&(IQwn}Y+~7*-Y;(Pz^9|TOH0(5pHVn-i^G1Anj86}@R2&b 
z^)0wE*_`D%{xWmG3`@~f4dqNv`5Q{Sedw=wcc!pwG5_A*@(5!0(!))D z7l)5jZiJufZ&-fJ0J=#%Sc8@=-G5kh+v1%ASE1Lm`_H3SXji9a9v>##6tc)IRc3s-$w3xtLt(1fq?@HW#TpwQfy4t7# zG94}8)f#6|sYUr-Gir1ae_hSPdPf`&k!`WmQ?v5Cnp(EVDj#yH*vdg)`@>`&6q3F*hMjm&$O-1Q()L?drw-Sz5o-a9e z{vz|%&d(~6c-3bdM$ue+>+UJRLxZRMlo5g(;*S|`bxDcucCtHW#$saJn zM*I96mtNcSOI?;x8?05ynZPE|0!u-FM`-p@Xn5y#ysH>Hu(Pg?2l!hZc!-~TiymCr zGP?LtPsBlINnT#8W2vcO&9hONC_Tizc=CkqGT8uCmwL6R?%C~VVjiI#2SD1TALyT@ zX8dux4tM2XmJzNE7pPJR^z)zuc{t1>++bV0+C-H?(Sre^rn@6T7TmTv`)f8|OgQms z_Eg6~Q~;}vI@s*b<$z-6swR z1Mv%gKx0`MQ0N0C$5<1FWUQK(O{XdYBEEdvtZ_=ZSMS6PLOZQq=hcNuibF-i`07Vz zYMoEc#iwTrl}<5~c2*HPxee_Q%4TsqZlOgPt3-8SI>i7uW?^R>5|p=r^}Jtg6bW*= z9|^;=5CHQ*nnq_}t@YKP9$F#@a_u+I%c~K9P?OmG?S;=1xqnlf#N4|-MYhkY8ih?; z;~kvYCt-DeC<7}Nk2!ZKb**_E<*M~0y;_Cc&!3s752AW=o8Tnv z6o;!u5nU#LQHJaKR;9L4U659rn|YXTx36QH^gOv&%A&@7SD2@73COtBeCb**h$Nww zH&gD)hY{|^}$n_xaT2GwbK#L)Eu@L;cM-zm+CJuYL311(x=mGP0jmw zbPdnmsh9@QO8T1Qen1)JQArc!(sWiv2F%E7fbN*wTspv1rX&`5jdgVbRgiLUSeMd}W3qKnLD~{IMnRJswlgLxNGR4hNgb7b}M!=WA9s2AjsMO9R%O@ zIN+J;d4X120#gsH56Wy*?EQH>u-HsZlY$??l0WSMr^2)R#wn>^jf zg(5amZO~Wr0EI0=jL-Axq17lZj{49?Xpnwf+0}9>4tRq|kbfjQ^J>!R6<0|5dQBGk ze$LjyY+9l+k};js7E-X@Fb#SQAK~Vrn^;yCyA32vMo7_uQ`2e zi05?%onu7&Wz{W>-sQ!g&`4#!4(W*&8Kobnexo*ut;BBhPyQB>U>8J8j|>oNYrnVI zEgDxgDZ=aJ*QgVLhxe?J0(J%TgODSG;m$TA6Y8(jiII{U`?t5Y1NCEl^nGh`1UYy- zdbNY6*Y>OK%n-@+@(b`D4aoH@<0&yFBFW{!j7sL9$tR8B%eYlV#kJ~*JGa!4EE5a^ zzaZ9Qt<%IVvq~9>?1w`izxgYmv+d#z3pX}}%YgQ$fu^Po+leMg6NadCpe9_LY!-FoB=E%>C-;%Z?Wg|Mi zF@fmK>RZNCo8ZrWaY$>#Ucr7W4p<&V%*+g86*f z!>@%wPBg$bm&>7yIOW}f`anEHyQqUn>V~ z9a;Y<@q5)Adu?+zcF)QwiJNY)Mr`df+}+t|Q=H=v_*GxfUhemtE0cs?X$@^X2auC9tq7vKVZi^aLNY<%z zGkmXxUIxnhzgmF&A=*@jK=6p?@Bnc_T9#Fx|F!4DM&YYUa+-{q(jF(TY&H68t^aIg z*B5$wmi%W>_e79D_t_MjXvbi6DT~gfeywmo3fE{hmFh&$RMbYJQU+%06kZ084sm&Fv16gBinqbqeR@3>W{i zKR1`Y5eF$*WJ+C<{QEc9D?@C$C**<$)m*c+ppgA~nrpBmnoqZ3z|Lnne}4tX_cX%v z4J{9%qVJC9Ixz;8dW4ZQ{P2#fYlm41MCp8brZTM;Jyi?i8=Bzdb4Vaox4FlWXAmJT z^?2-i+)SpbWq2bzG-&x10bw3qM>w3l@|zC}&e@*?ije?hWOD;GSB+p)&I-IfHrtGn!m 
z4P5%$EcIj~0|2{#rF|P+!uyu>Q>cI>^4s;3(ZIhChXn*g|M%^}<}8{{PttEDjZbPL zpT(CyJ-qjvyy4*5#SJFZ^X|0bdI5lBtBN!YDI7gG44}!*`nb=>uo9WYsA5c= zao|IayL0VPGa3^m^3o^4Ro2d0MfYw0^*?m&hI)v`y%g1&*5;v8!~{HuDp{!>J=~rw zc%^GJ*d1GJ#bN)&oaS2eKdH#97es(3b8CFtUQS`MtTLx`a4EzcPddMHgkiJ{-4o!k zl^nWivtKV&ojoMMPR2GCv!vnWvwZ&h?8F6lyLv+HKR&O|8DA_$?~4TIb7b)vhvoX5 zMqk_y$gGHvOAmh$zxP&BP6CwCMBxZHY5~aHHGi$Nw+7m7t+)R3Zwpw97v zs`S1h|5oT|&AI|F@s#Q$PStwWBB1G&y&T$qgDq92TxotQ&%g_$Y3sQ@m?W~fAmg?P@ySBwBfFDFG zhQ9wL#U)RF%v+|L!@Bp>0Jp%B-^i%J1u!~bxcsTB;(93!@ZF-URYUG-@wn;IcCj=m z;=7e+r<+pqy6T{>t#@l(6vB-6Rr8l7fwjzt&)@bV;dDpHt(^->_=RA#ab`MbZ1_Fp zdF&)EBtmR78R6136+_XVGs!43E#*0Xm~Q5tf5@JL9R5h9UgB+^(dqfkkVHvEogqa` zW@j8+l+yScc=h#c+@Dz@em`;u2wfTz0SZ2q*)o1qdP-OSZTVOfr{@8k}K{8k`I?J?3j2*ASwD>1~UG93-rpJa5s}d_yd6 zF?7=RpQr@Iu3!PRD?eqvzRnO&a(eN1p88*Mv7}#iE0?Yd)Uynw2)4+B6c#3R22jK6PHJbC`d4ZARs3-Iq z7C7FnMS||@&;JaG1k@M<6RBR2s*W`OYBNU!ByHe$Y+3+tLXQ&;PN)qVy4I<`U)`*9 zs^-kkm6Y8WKZeJ%$Dfo1VAQa{4gYHq;)Xf!PD5boDpX%ms z;o(H(^JZn=&hl$PxmM=U8q}|d8yexM2b%r=F2W}0UwhDbS|(^hVKY+7VO13-S?>4n*X-_B(C(e z=+S+)DIT+B3!#X22S-=>!p$yY-86|Xk*P)&za3l!T0Mx%V58bfhpGV>!+>w9L*C~ z2^_e+rRbkO|Ik_X95SRgob}UU;|m7)4Grk+&gMznn?w<%P`qYxM~SA8mA5xe$Byr1 zoJX^#P8$cGR$I%)K9e|Ky_YAQWQQN8W9ieWc6(dDwS8VmFkjlcu|^82UwzF<2W|Dl@W)&HIWH4X}|g~x+^NN z_(3oha3nEd=(~&`8o0X3Bdf991hjjkkO^7FZ~1Xw(Pq{{SAEJc5UUUXb|mO5b+}#3 z9526)aE+5~IFFsaJPu8R!6!gB#whK^n1nuo2^#?Cm(CSzxj1%+KuwjUd7^O4K+N${5<>LmE8LMDA)Ds zrUiTGHXbp^#|=bE*J%P8(E!7Z{Fg|8(e4R8G}K>dtLe7@lk?m{A8qABuf5-VXmcl* z*2cHHVQO&P(i6a$Z~xP-f6KwRv6i=LEx(JF)6y2)yUki(<>@y&R(<)}y*f`GUF>f7b?)Aa(BADB!>Hy5-;g-lI6| z^stzV1m|QOej6HGR3iMTRXbR!vV4<=lkLGsO~9)$^5wk%#4}6TCt9Zmv*BN>)Jb^o zls$`YY?Ib;YFk4Oq*-~db-3vAN!n{*8lTe6UzOp@FdaOULl8KB<-O{17Hm6U!4jkn zo!4p3yO!_@sJmMDx1BNGHcys!C! 
z`Yz#P@E5CJ-KZgW&E;*VMDId~gS%N#kY1S(`zw=AsD2FmD0Z}0U7^N+crQd8Rw#5& zBaFk$v%0??NYA!!J^<&6UlaEQQ^uee{GHi1x-3K7&F`U66 zlUdS+gvVe6Tw6NZ++rK&G1V}HFxtHd;qmMlA)#_6y_W!J<#oMj#sSFYIqH@}_9Bp= z2&f_ap^X9yILPyAuB;gVyY@F6Na^HQ!SDJ;CVB>hFJCq;5_;qIOk|`1=l5-$f!Fm% zp>6s8aC~LLB59nZV}H(#JN_V0=f!tpJ;1{+mrOJTD3|e&A4m$&VgI%YQ$fy%5aKo^ zLu^v~P)OV@L-)n(10ByB8@v!Ah49*_cTs7n(u%R<6)}>aSAE?h!8pI@Q4k6Fcor!f zcC31>xY<=u2_t%<$dyArMC7IBMWzwqN07Zo0w5`e0BcPSHK@dp^JGd8B!TWGsdTo% zQW6Aw5%6w7X5_JxFBm@}5WdV+ybD83CF?^ri_M~FFP*Dy@*1RM`~RLL7CBcApQWT- zZ-oa=QF4eut&b0xEh?RR^j06+J&996r|-+o8(`^l%P}|CbzJ;~HG;rRH)J;qXdOOY zhs`vl9G}M5OBtCi9s`W=z?=bq_wVYdmI7-n=9*vE^rdO*+_MEgPSi4cY}KZjzc*hL zfSi{Dn$Z_N7<_M2%O4_^I!a?EscMgr^g>DAFG~E98RS)z+1@+#sYfDO>DA|Pg%93- ze6y|vah91qzcFLb_BNWmBYt}~%&ZDrp=cbz`|2s9)zYH$|}po(;HMR(_0y5@#o zq>qIZA{4>bY0I2>IA0#&YtHJA5SS1`5YnwFAP2B8TaPWbh-#D}(!?MfCcc3G++YD{ zX8Tqty3D4K#3SAdj#TxBk90GeFhA`E)s$R*#G|G(un4ZoP8HeB+Z!*i!eDzzm_=Hg zx#;nKf0p;u9@n}IFd%Jk*|mOe6)WDg0~S{MajkgHpoXL&*^CrotPeO_j{L@+W2nb{ z4NYFoe4wIKvLEJ*`kgZAgE@r5o|bUogTH3vbR7%2z^fSQwXV=y@TdT-~>Y}I+qyu8-bHEzys zwl-S68mfGhM5CW*2K^C`<`eM|615dA_k_&g6g0fi2=b!A8bT;nHZ!tKZM7J@blu5{ ze4Y4+^2^*|v_Sb+nManF?4a|xov5Ciwb3^HxT69hV1A1CWxy@25wKArj5#_A4m&Qu z`WtHdNv{n0z%@>}hNpz3vB&A@JyIVDIF|nk0%+CQKI(j)Ouee<+bplDmCC9)C;bsu zlXlIMs*yIkB=U=-I!kznuyeIj)}GgY34|P_p)zdrxcz#2zK?(f&p0bWcC9^8SztsA zg`a>z6}!v?5@uNk`O#OK<9%qzaOfa*W$V=$FPWsW? 
z!CR2E%szKdSVd;a@{-!v(=SeIidFe;aub@BORP$iOFttP!_#w9QN(#F_V=$ohs&{9 zOQ|39M8S-atD?h<*DFzfDZwhax{->ukAbv#r$^D<3vEN@*mf}k(|Q}!` zFW7y{*JOd)g|vYP&^1+?1=<+-mO;Y|8?v1d3PaQlc-e0XWjZ14D3;;xF026n2%{7G zPCBamssZAd+(rec5ApW|v`@H)BzSK3_s}qB8%B2yLNh}JjDuY69qe*aqLCbJGQ*-& z*J0rTkhx?w`wEYfD=qEezVL2%z?L`L%tV6kMcEm3@-i_^_*5o_5H=gBbEMJyxt`er zm^v>1*IXU7SNb*Z^E2@QCSSI1N`y$p_xl;yQ*$HrgVUa7!QZ$mk^-c4`BL>K`}2~0 zL2TIxenExinc6xO-Z6IriMr-2EayUe?6jAOj<@hJ8r%56W3NCRg@trp>k7*rzi5_Q zz6Iy>COc8T?AM|bw+$|(6^Oy>U~t*q{1MMBq`L_|DG?cP2kZ8G+p$F zg1reIw0Y{hL2+p{dzuA(`z0^O)(idP9h`O|-N*G^@@@K=EnG7Ha_q=fU>4UBFj2bx zjSF~2N@o!VC@`PRw%Lh0BcuXM$j~bZ{W7Z)7M=8Rz8`zcaO6v(6(&>XDFX)D{Jlj_ zdlF}ocaF4mmL#0L5Bva5vVXaJ0jWcc9YS3;JI>27p+2mX*8?_BmKXr(Zf_Ci=OJj9{J*T@f9>Bc_8EqEEx$BVUv}Nrre_t1vQAq=~h`? zWQCgH9Z28=NxLS09v<&Qen#FX*eF1&C$=}*EW;%C7yM%V2heF*1dVXPN59E?V{2`< z@sv#Lz4cg|MCPx%|LY5P4tdvHdOWfXYB24W@if4T@eDnqx{bs*c2F!JIe6bG`1w_@ zV9!9@;n4g-FGdd+9o{xEb89-fAmXinLzZ;H-}K4P(zIui-o@;QqbRqCRwCrxj(|#Z z(4I+=-+7c)=_rH-Z@Q#F3;=Z0zG3IyR||fiF4140uj}Gq&IlZVm*%6DM7l zFY($k4okSGftd(^!w45fd^L?2X^dt(^lW?tz_wt|xMl%d7PWf_^b8aauMKd6@oquu zs@?0UF#BL<)ZWgNRJAvy939QEyn2fJuIT=pMW`un5u-3}s~BNS@8dfl^2x5El-H2} z2r}*AfJ`02$hFX+zt1=JE`)tI?#|yT(2JdE76+xiz>D3V76iiMQ!yC`Ujtze4IkCQ zt5aV73}IkV5gAzU&L-vU5p4>ip2%&aWF`G^r3rp%T+xOza~_tHauwam2QlDH)$dQw zAlJaf1{r^~dhg<~N`>nWhSM&Xkf3|#h1=y1`l7)D7qq%hUIItB2!9b=Ym3kjW6pzET&o)X;~BxFB7g!7dylwM1W+-K$!W(u z&X)bNjjm#(X<*C{77We<$!;9YB5#|HY=$m3uNlY+&>H#=E?CAsq#3u8eC_&HsUaN( zt2-;2hFdj+fUoJ0>-8oZ6$ma+{&>H4dzNC`IyQ)1B(K>Acb6+Zz3wcHV-XCJK6A_G zr^YhzvH1qSIk9EWNtoj816j}f(y4^~q9q}_G;T~r*W(M-kasA#JOB`~tL*DANGM^% z*HRU$Gx==*q3=;rPj8CWyhCe^*!PIs81}lLoXDFZ{pA&|E8-;(5QMz>2`nJEM_gs_ zLtDr5K;4a`G3O|9Y>@qJWz1PM>QGB8#swh82L2La(A^~uJ2=`zdK6v%QK(&7+V$NS zDtD)iK?C0$1)LQNqFEei#tqrs%hA#JSnsXJ5G!mN&Bdn#f1WDb zU9o2&qj&64Pw|hv13Bp}*QO#aptHk4L7Y&0NfQ~{whsA7M7RTMt3{HaML6NS)FJ~aXQD^Ve=X}7alK7TyPye%a0ya&zRT-s+wz9b zYW;H<0zgG=G9I3fJARoKQBZt2$dkcWg-lCtkuJtmTqgg_HLGKPy&x*$b z;;^{|wLYM(I`_)^Mt;}3@Kbq9987f)-O$IFB8Mq5&&RlMOP?_f<_qTt3{FxG; 
z(Ir;7cwP57=k1$6S(+cei@r&XG4I_dRl&Tko}6QJDKDhel6$PKX`xWPY7#+012lTv zrq~Sox_{XAEo5}U1xOh2!e8R0$hhOXY-TNAa$1i=;hUwgMDD&>f_sEG?dV}MZ9G|xX8XWZbJSLTwNNzX(hqc#Kt1CDz&YGMLJ^XO9y2RS z5psPchxJ!l*f>?h3x?t74r>57#GYr8ekBGZ6P($(tdDM&>IZyz1&!#H89sld#L}o! zsr!01#+Y$}9ue@{Y%j`zOs>Rmbx3K{kg7%t>FnW{mO8Ga%M>C2$(woS^X->oRxCda zu9clgLBpM;mS%AGqB1X`xJ7TXV0DQDz3mf5=z6rr*p$nISW8v)_5D36sTbVXdg58A zZX~9JsJ&8`&*^)j$S~EEqck&PbHIwuhg|@lCoP{$7#W-Qbw%7(@&(L-t?om*Lv~qY zCkc#Y|B9sl#TCVntj%f{*_!KHSy(A%Z3oMk#cLDg)y2bIE@@J@*x3lsy-;9?LFP{E zNkWjxvSWucq;;0cCG4Q=hR1ta1%?;VsLA_LlUpNs3z*T5aP{$K(o=+9#hih_QTTan?iWV= zGYqghY<}jqKYuo5CNQrI4MQB^euN_cuhh&+@Ho-Bp3v9ZJJ!0XpEN!I*!BBT5NjpA zNF)hsS0i}fIiQF#RE!e<7c z;0b9&o;Ogdi(PS!iDc|=Pk%Z!XPjEuO*Ja|Euu(9BP}MGA0IlW&j4D(_O;NkUTY^t zQ)SijIRBdeiDgf`k*%%LLwkh6SRHPSo`yNnV$oHEVm2)j)?U>7wD@cybr?64_Ca%L zfk{RtAR{}tD2+z(cKSaJUK>wpz9!qguQC8`;VL zNV#sD70dcQPX<|@tWAwMNZmtQGT8^WgBBW>p&IhGTa7H+8&+UNGwPw)ovXsO%RfNS$3v0+djo(;#xv$d z)OF(>ED>gZzMBHV?EMMo9x(*kBxY_Rd3XU>?X07t@u;i|5j9c+h;Q{PHz9F>D^|r{ zOo00JRuR6)1NjN2%5i))B3eDTjus1oI^Ua^90R8ek;^bER_bXqGm9yGz)1A4N z*H3Fd9J=yVC8rBkzhx1+?H~?!+--unRTH5HBIsUw1Jv1_p9{)a-svMDbngH=@2+ck zMBPrfJX)p^=kx1T^#A#;;sE4~ux!9t-%Lw1wGr@1j(LUP?O}I>s7>Db{rQ!CZMZJ_ z47j4P)sAxHp#cWut(qrOsyHrXbn|$nPZ6T^i@q~5b*et?x8le zW*umo;p{u@Ni~P8sNOc;p;VpovSNz9@>3c4jpXKH>$($7!m}p}3`234;E! z2>>Z+djI4`M(n`@-nKn&^=T7#OPNew$HSF=9KM|9+<&-S6KP|aDULJih51|zeP$3( zfnN}sOkSO?qjUhVMw! 
zN0X=Bxb(US_te82n^uxKzL^Sqsvxpf*43^;t51DZLxfR7*GL;!oSqivM-W$6I)kS1 zFSVtcYT57zOQ54THl3gz0<04vL!ax4#XxAKmZ{xpJnR9HXsw`)07KZ$znFwqtXc_D zS3~LyyLzQ^&HHz>!nWgciGMi$|5D7{Zwv!K>96G@AB@Gd*$3?IHJI)pG6Jwpw zwPtkTR|F(+7p8oP$(I3+Ga5Qgy70+(3KHG_7wF)y5y0zMtrG`yQ%&&&?O^ukS_2@Z zdCXlRmTM`!bHwPERqsQ0Z!R#0PXMsJik~Q3gk+B_^)RF&i@DVCe=>xILi$|j&R}f5BG&L2%#N*%8f0Rb z*jo(Oxk099q%voTMWqhgY{*uQ2!Jk?>-8npTBW0+?JMlyOv$@u7P$5$;xWQ-x7h-3 zf_-dY=3x=IIYQN!t4w~@uD>6G#ICBQmrveYl@c<~&-iwx33XlaBL)q;$FAYXjw`b5 z)ghOo+z!cr_6+11iu1w{Bw-^pr+cJ7*ZN}g@x7`0z5)?42bDvVyL+;`H_1-vM*RZy z!AK(J_SX}H`01sH!OX<^TS(nuz9i|^XrzXLjpImXXaBryI z$qVu-0lJAi<|*U`I!A-?unt^Aa|&WI!Qk64^dt)&9w&V*?%m2WG-`|I#ql{^bgV5e z;H}eC^YRX6-{jWPG8^VKc1`DX15n=Q9CMh{;>-XC?$+URF>DobD~rs0s)BN+hKK?+tDA*JsQ+#^>0Oc;z>ls18HG8nTTnu5 z(K9Dshu-<3eld=UMKePqJYmEcSp(-LDQ;TePT?S)sr(nTIC|J7d`NdPf6&l5M6~q) zRr~vkI=EsuJO>FXS@dw@?35{k1;OSfP!-u9rRYH6A`9Qvg_nn4wI3{_HN3YBE)1k; zy!DWG+WE*EFhYt>`5-%C{q@;M#C#;YF%Jh>Pwfl!0uM;g;XwhAw_B@KLu$<1`l_@u zl*}JD9~a%%_>D-ex7JB!vf3lJ+YAm02utQTYf#*BLC6knb{~80Ig=Q}rGZn|5Yo1U z4G6U=1hK;)D85J)9Z;%8I<_4U8>Y26z;XCctx_<0_1SGDdHz0x^%m*F>) zX0hPKc`H7Zce8SaJCHrBUwSJ1#N#l#@%WS&V{oC`yWCJ2r9eQAYdBkoI=1@`XE(VoNIu)`D3VM0ZrF8w9BoVu#6=GdZmRd-0 z3*fZQspfZ=Z1B6s7{Z%pA zp8Yvs|DV93zWkT(16U~x{s-s3{0Tui5C0^JBRHHow@2EfM8)Ye&RK8Fo#21wzqfn) zxp#tJHQgr5#~NPAU%A-fgjztW((l*v(|TejcKo_FxzD+}%J-45?d2msQ`G|t)Hn}s zyVqZ~{Uva8W_|jidIkoG|2i|LR=>^T3Ff%i{pCh>&7$&vu6og3W$D^qc^blkkNRpv zO+CG4efFKU54ZAo0y!?82Cm1s`!-hLMrP}aJAV#sO1gHh-AnJ>?RnpCtjdY_<>2t2 z*;lr_?gy}9m@VwUt>_ZHWMy#4>WQw|cUEq=$9q@9xJNw0Y`?;RKZ|p39anj^?2uE( z-r)XsS2e5lfijv6-yF7Q=@<5-M^{`*X4>~Dx@L`vt)k1GCq4de3_r<5Pdz)Wb`n!B zg8@g2QN#cALm&5N6jyL5l0Dx7oFAaO-Z#1bzopr0KUAw A-~a#s literal 0 HcmV?d00001 diff --git a/yass/third_party/mimalloc/doc/unreal-logo.svg b/yass/third_party/mimalloc/doc/unreal-logo.svg new file mode 100644 index 0000000000..5d5192a25e --- /dev/null +++ b/yass/third_party/mimalloc/doc/unreal-logo.svg @@ -0,0 +1,43 @@ + + + + + + + + + + + + + + diff --git 
a/yass/third_party/mimalloc/docker/alpine-arm32v7/Dockerfile b/yass/third_party/mimalloc/docker/alpine-arm32v7/Dockerfile new file mode 100644 index 0000000000..56f071db3c --- /dev/null +++ b/yass/third_party/mimalloc/docker/alpine-arm32v7/Dockerfile @@ -0,0 +1,28 @@ +# install from an image +# download first an appropiate tar.gz image into the current directory +# from: +FROM scratch + +# Substitute the image name that was downloaded +ADD alpine-minirootfs-20240329-armv7.tar.gz / + +# Install tools +RUN apk add build-base make cmake +RUN apk add git +RUN apk add vim + +RUN mkdir -p /home/dev +WORKDIR /home/dev + +# Get mimalloc +RUN git clone https://github.com/microsoft/mimalloc -b dev-slice +RUN mkdir -p mimalloc/out/release +RUN mkdir -p mimalloc/out/debug + +# Build mimalloc debug +WORKDIR /home/dev/mimalloc/out/debug +RUN cmake ../.. -DMI_DEBUG_FULL=ON +RUN make -j +RUN make test + +CMD ["/bin/sh"] diff --git a/yass/third_party/mimalloc/docker/alpine/Dockerfile b/yass/third_party/mimalloc/docker/alpine/Dockerfile new file mode 100644 index 0000000000..b222b79194 --- /dev/null +++ b/yass/third_party/mimalloc/docker/alpine/Dockerfile @@ -0,0 +1,23 @@ +# alpine image +FROM alpine + +# Install tools +RUN apk add build-base make cmake +RUN apk add git +RUN apk add vim + +RUN mkdir -p /home/dev +WORKDIR /home/dev + +# Get mimalloc +RUN git clone https://github.com/microsoft/mimalloc -b dev-slice +RUN mkdir -p mimalloc/out/release +RUN mkdir -p mimalloc/out/debug + +# Build mimalloc debug +WORKDIR /home/dev/mimalloc/out/debug +RUN cmake ../.. 
-DMI_DEBUG_FULL=ON +RUN make -j +RUN make test + +CMD ["/bin/sh"] \ No newline at end of file diff --git a/yass/third_party/mimalloc/docker/manylinux-x64/Dockerfile b/yass/third_party/mimalloc/docker/manylinux-x64/Dockerfile new file mode 100644 index 0000000000..22d37e5a72 --- /dev/null +++ b/yass/third_party/mimalloc/docker/manylinux-x64/Dockerfile @@ -0,0 +1,23 @@ +FROM quay.io/pypa/manylinux2014_x86_64 + +# Install tools +RUN yum install -y openssl-devel +RUN yum install -y gcc gcc-c++ kernel-devel make +RUN yum install -y git cmake +RUN yum install -y vim + +RUN mkdir -p /home/dev +WORKDIR /home/dev + +# Get mimalloc +RUN git clone https://github.com/microsoft/mimalloc -b dev-slice +RUN mkdir -p mimalloc/out/release +RUN mkdir -p mimalloc/out/debug + +# Build mimalloc debug +WORKDIR /home/dev/mimalloc/out/debug +RUN cmake ../.. -DMI_DEBUG_FULL=ON +RUN make -j +RUN make test + +CMD ["/bin/sh"] \ No newline at end of file diff --git a/yass/third_party/mimalloc/docker/readme.md b/yass/third_party/mimalloc/docker/readme.md new file mode 100644 index 0000000000..b3d9009407 --- /dev/null +++ b/yass/third_party/mimalloc/docker/readme.md @@ -0,0 +1,10 @@ +Various example docker files used for testing. + +Usage: + +``` +> cd +> docker build -t -mimalloc . 
+> docker run -it -mimalloc +>> make test +``` diff --git a/yass/third_party/mimalloc/ide/vs2017/mimalloc-override-test.vcxproj b/yass/third_party/mimalloc/ide/vs2017/mimalloc-override-test.vcxproj new file mode 100644 index 0000000000..04c16a9faf --- /dev/null +++ b/yass/third_party/mimalloc/ide/vs2017/mimalloc-override-test.vcxproj @@ -0,0 +1,190 @@ + + + + + Debug + Win32 + + + Release + Win32 + + + Debug + x64 + + + Release + x64 + + + + 15.0 + {FEF7868F-750E-4C21-A04D-22707CC66879} + mimalloc-override-test + mimalloc-override-test + 10.0.19041.0 + + + + Application + true + v141 + + + Application + false + v141 + true + + + Application + true + v141 + + + Application + false + v141 + true + + + + + + + + + + + + + + + + + + + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + + Level3 + Disabled + true + true + ..\..\include + MultiThreadedDebugDLL + false + Default + false + + + Console + kernel32.lib;%(AdditionalDependencies) + + + + + + + + + + Level3 + Disabled + true + true + ..\..\include + MultiThreadedDebugDLL + Sync + Default + false + + + Console + + + kernel32.lib;%(AdditionalDependencies) + + + + + + + + + + Level3 + MaxSpeed + true + true + true + true + ..\..\include + _MBCS;%(PreprocessorDefinitions);NDEBUG + MultiThreadedDLL + + + true + true + Console + kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) + + + + + + + + + Level3 + 
MaxSpeed + true + true + true + true + ..\..\include + _MBCS;%(PreprocessorDefinitions);NDEBUG + MultiThreadedDLL + + + true + true + Console + + + kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) + + + + + + + + + {abb5eae7-b3e6-432e-b636-333449892ea7} + + + + + + + + + \ No newline at end of file diff --git a/yass/third_party/mimalloc/ide/vs2017/mimalloc-override.vcxproj b/yass/third_party/mimalloc/ide/vs2017/mimalloc-override.vcxproj new file mode 100644 index 0000000000..6d20eb578f --- /dev/null +++ b/yass/third_party/mimalloc/ide/vs2017/mimalloc-override.vcxproj @@ -0,0 +1,260 @@ + + + + + Debug + Win32 + + + Release + Win32 + + + Debug + x64 + + + Release + x64 + + + + 15.0 + {ABB5EAE7-B3E6-432E-B636-333449892EA7} + mimalloc-override + mimalloc-override + 10.0.19041.0 + + + + DynamicLibrary + true + v141 + + + DynamicLibrary + false + v141 + + + DynamicLibrary + true + v141 + + + DynamicLibrary + false + v141 + + + + + + + + + + + + + + + + + + + + + $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + .dll + mimalloc-override + + + $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + .dll + mimalloc-override + + + $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + .dll + mimalloc-override + + + $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + .dll + mimalloc-override + + + + Level3 + Disabled + true + true + ../../include + _CRT_SECURE_NO_WARNINGS;MI_SHARED_LIB;MI_SHARED_LIB_EXPORT;MI_MALLOC_OVERRIDE;%(PreprocessorDefinitions); + MultiThreadedDebugDLL + false + Default + + + 
$(ProjectDir)\..\..\bin\mimalloc-redirect32.lib;%(AdditionalDependencies) + + + + + Default + false + + + COPY /Y "$(ProjectDir)..\..\bin\mimalloc-redirect32.dll" "$(OutputPath)" + + + Copy mimalloc-redirect32.dll to the output directory + + + + + Level3 + Disabled + true + true + ../../include + _CRT_SECURE_NO_WARNINGS;MI_SHARED_LIB;MI_SHARED_LIB_EXPORT;MI_MALLOC_OVERRIDE;%(PreprocessorDefinitions); + MultiThreadedDebugDLL + false + Default + + + $(ProjectDir)\..\..\bin\mimalloc-redirect.lib;bcrypt.lib;%(AdditionalDependencies) + + + + + Default + false + + + COPY /Y "$(ProjectDir)..\..\bin\mimalloc-redirect.dll" "$(OutputPath)" + + + copy mimalloc-redirect.dll to the output directory + + + + + Level3 + MaxSpeed + true + true + true + ../../include + _CRT_SECURE_NO_WARNINGS;MI_SHARED_LIB;MI_SHARED_LIB_EXPORT;MI_MALLOC_OVERRIDE;%(PreprocessorDefinitions);NDEBUG + AssemblyAndSourceCode + $(IntDir) + false + MultiThreadedDLL + Default + false + + + true + true + $(ProjectDir)\..\..\bin\mimalloc-redirect32.lib;%(AdditionalDependencies) + + + Default + false + + + COPY /Y "$(ProjectDir)..\..\bin\mimalloc-redirect32.dll" "$(OutputPath)" + + + Copy mimalloc-redirect32.dll to the output directory + + + + + Level3 + MaxSpeed + true + true + true + ../../include + _CRT_SECURE_NO_WARNINGS;MI_SHARED_LIB;MI_SHARED_LIB_EXPORT;MI_MALLOC_OVERRIDE;%(PreprocessorDefinitions);NDEBUG + AssemblyAndSourceCode + $(IntDir) + false + MultiThreadedDLL + Default + false + + + true + true + $(ProjectDir)\..\..\bin\mimalloc-redirect.lib;bcrypt.lib;%(AdditionalDependencies) + + + Default + false + + + COPY /Y "$(ProjectDir)..\..\bin\mimalloc-redirect.dll" "$(OutputPath)" + + + copy mimalloc-redirect.dll to the output directory + + + + + + + + + + + + + + + + false + false + false + false + + + true + true + true + true + + + + + + + + + + + + + true + true + true + true + + + + + + + + + + + \ No newline at end of file diff --git 
a/yass/third_party/mimalloc/ide/vs2017/mimalloc-test-stress.vcxproj b/yass/third_party/mimalloc/ide/vs2017/mimalloc-test-stress.vcxproj new file mode 100644 index 0000000000..061b8605c8 --- /dev/null +++ b/yass/third_party/mimalloc/ide/vs2017/mimalloc-test-stress.vcxproj @@ -0,0 +1,159 @@ + + + + + Debug + Win32 + + + Release + Win32 + + + Debug + x64 + + + Release + x64 + + + + 15.0 + {FEF7958F-750E-4C21-A04D-22707CC66878} + mimalloc-test-stress + mimalloc-test-stress + 10.0.19041.0 + + + + Application + true + v141 + + + Application + false + v141 + true + + + Application + true + v141 + + + Application + false + v141 + true + + + + + + + + + + + + + + + + + + + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + + Level3 + Disabled + true + true + ..\..\include + + + Console + + + + + Level3 + Disabled + true + true + ..\..\include + + + Console + + + + + Level3 + MaxSpeed + true + true + true + true + ..\..\include + %(PreprocessorDefinitions);NDEBUG + + + true + true + Console + + + + + Level3 + MaxSpeed + true + true + true + true + ..\..\include + %(PreprocessorDefinitions);NDEBUG + + + true + true + Console + + + + + false + false + false + false + + + + + {abb5eae7-b3e6-432e-b636-333449892ea6} + + + + + + \ No newline at end of file diff --git a/yass/third_party/mimalloc/ide/vs2017/mimalloc-test.vcxproj b/yass/third_party/mimalloc/ide/vs2017/mimalloc-test.vcxproj new file mode 100644 index 0000000000..04bd6537b4 --- /dev/null +++ 
b/yass/third_party/mimalloc/ide/vs2017/mimalloc-test.vcxproj @@ -0,0 +1,158 @@ + + + + + Debug + Win32 + + + Release + Win32 + + + Debug + x64 + + + Release + x64 + + + + 15.0 + {FEF7858F-750E-4C21-A04D-22707CC66878} + mimalloctest + mimalloc-test + 10.0.19041.0 + + + + Application + true + v141 + + + Application + false + v141 + true + + + Application + true + v141 + + + Application + false + v141 + true + + + + + + + + + + + + + + + + + + + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + + Level3 + Disabled + true + true + ..\..\include + stdcpp17 + + + Console + + + + + Level3 + Disabled + true + true + ..\..\include + stdcpp14 + + + Console + + + + + Level3 + MaxSpeed + true + true + true + true + ..\..\include + _MBCS;%(PreprocessorDefinitions);NDEBUG + stdcpp17 + + + true + true + Console + + + + + Level3 + MaxSpeed + true + true + true + true + ..\..\include + _MBCS;%(PreprocessorDefinitions);NDEBUG + stdcpp17 + + + true + true + Console + + + + + {abb5eae7-b3e6-432e-b636-333449892ea6} + + + + + + + + + \ No newline at end of file diff --git a/yass/third_party/mimalloc/ide/vs2017/mimalloc.sln b/yass/third_party/mimalloc/ide/vs2017/mimalloc.sln new file mode 100644 index 0000000000..515c03f2e7 --- /dev/null +++ b/yass/third_party/mimalloc/ide/vs2017/mimalloc.sln @@ -0,0 +1,71 @@ + +Microsoft Visual Studio Solution File, Format Version 12.00 +# Visual Studio 15 +VisualStudioVersion = 15.0.26228.102 +MinimumVisualStudioVersion = 10.0.40219.1 
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc", "mimalloc.vcxproj", "{ABB5EAE7-B3E6-432E-B636-333449892EA6}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-test", "mimalloc-test.vcxproj", "{FEF7858F-750E-4C21-A04D-22707CC66878}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-override", "mimalloc-override.vcxproj", "{ABB5EAE7-B3E6-432E-B636-333449892EA7}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-override-test", "mimalloc-override-test.vcxproj", "{FEF7868F-750E-4C21-A04D-22707CC66879}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-test-stress", "mimalloc-test-stress.vcxproj", "{FEF7958F-750E-4C21-A04D-22707CC66878}" +EndProject +Global + GlobalSection(SolutionConfigurationPlatforms) = preSolution + Debug|x64 = Debug|x64 + Debug|x86 = Debug|x86 + Release|x64 = Release|x64 + Release|x86 = Release|x86 + EndGlobalSection + GlobalSection(ProjectConfigurationPlatforms) = postSolution + {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Debug|x64.ActiveCfg = Debug|x64 + {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Debug|x64.Build.0 = Debug|x64 + {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Debug|x86.ActiveCfg = Debug|Win32 + {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Debug|x86.Build.0 = Debug|Win32 + {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Release|x64.ActiveCfg = Release|x64 + {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Release|x64.Build.0 = Release|x64 + {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Release|x86.ActiveCfg = Release|Win32 + {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Release|x86.Build.0 = Release|Win32 + {FEF7858F-750E-4C21-A04D-22707CC66878}.Debug|x64.ActiveCfg = Debug|x64 + {FEF7858F-750E-4C21-A04D-22707CC66878}.Debug|x64.Build.0 = Debug|x64 + {FEF7858F-750E-4C21-A04D-22707CC66878}.Debug|x86.ActiveCfg = Debug|Win32 + {FEF7858F-750E-4C21-A04D-22707CC66878}.Debug|x86.Build.0 = Debug|Win32 + {FEF7858F-750E-4C21-A04D-22707CC66878}.Release|x64.ActiveCfg = Release|x64 + 
{FEF7858F-750E-4C21-A04D-22707CC66878}.Release|x64.Build.0 = Release|x64 + {FEF7858F-750E-4C21-A04D-22707CC66878}.Release|x86.ActiveCfg = Release|Win32 + {FEF7858F-750E-4C21-A04D-22707CC66878}.Release|x86.Build.0 = Release|Win32 + {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Debug|x64.ActiveCfg = Debug|x64 + {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Debug|x64.Build.0 = Debug|x64 + {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Debug|x86.ActiveCfg = Debug|Win32 + {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Debug|x86.Build.0 = Debug|Win32 + {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Release|x64.ActiveCfg = Release|x64 + {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Release|x64.Build.0 = Release|x64 + {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Release|x86.ActiveCfg = Release|Win32 + {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Release|x86.Build.0 = Release|Win32 + {FEF7868F-750E-4C21-A04D-22707CC66879}.Debug|x64.ActiveCfg = Debug|x64 + {FEF7868F-750E-4C21-A04D-22707CC66879}.Debug|x64.Build.0 = Debug|x64 + {FEF7868F-750E-4C21-A04D-22707CC66879}.Debug|x86.ActiveCfg = Debug|Win32 + {FEF7868F-750E-4C21-A04D-22707CC66879}.Debug|x86.Build.0 = Debug|Win32 + {FEF7868F-750E-4C21-A04D-22707CC66879}.Release|x64.ActiveCfg = Release|x64 + {FEF7868F-750E-4C21-A04D-22707CC66879}.Release|x64.Build.0 = Release|x64 + {FEF7868F-750E-4C21-A04D-22707CC66879}.Release|x86.ActiveCfg = Release|Win32 + {FEF7868F-750E-4C21-A04D-22707CC66879}.Release|x86.Build.0 = Release|Win32 + {FEF7958F-750E-4C21-A04D-22707CC66878}.Debug|x64.ActiveCfg = Debug|x64 + {FEF7958F-750E-4C21-A04D-22707CC66878}.Debug|x64.Build.0 = Debug|x64 + {FEF7958F-750E-4C21-A04D-22707CC66878}.Debug|x86.ActiveCfg = Debug|Win32 + {FEF7958F-750E-4C21-A04D-22707CC66878}.Debug|x86.Build.0 = Debug|Win32 + {FEF7958F-750E-4C21-A04D-22707CC66878}.Release|x64.ActiveCfg = Release|x64 + {FEF7958F-750E-4C21-A04D-22707CC66878}.Release|x64.Build.0 = Release|x64 + {FEF7958F-750E-4C21-A04D-22707CC66878}.Release|x86.ActiveCfg = Release|Win32 + 
{FEF7958F-750E-4C21-A04D-22707CC66878}.Release|x86.Build.0 = Release|Win32 + EndGlobalSection + GlobalSection(SolutionProperties) = preSolution + HideSolutionNode = FALSE + EndGlobalSection + GlobalSection(ExtensibilityGlobals) = postSolution + SolutionGuid = {4297F93D-486A-4243-995F-7D32F59AE82A} + EndGlobalSection +EndGlobal diff --git a/yass/third_party/mimalloc/ide/vs2017/mimalloc.vcxproj b/yass/third_party/mimalloc/ide/vs2017/mimalloc.vcxproj new file mode 100644 index 0000000000..ece9a14d75 --- /dev/null +++ b/yass/third_party/mimalloc/ide/vs2017/mimalloc.vcxproj @@ -0,0 +1,260 @@ + + + + + Debug + Win32 + + + Release + Win32 + + + Debug + x64 + + + Release + x64 + + + + 15.0 + {ABB5EAE7-B3E6-432E-B636-333449892EA6} + mimalloc + 10.0.19041.0 + mimalloc + + + + StaticLibrary + true + v141 + + + StaticLibrary + false + v141 + true + + + StaticLibrary + true + v141 + + + StaticLibrary + false + v141 + true + + + + + + + + + + + + + + + + + + + + + $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + .lib + mimalloc-static + + + $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + .lib + mimalloc-static + + + $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + .lib + mimalloc-static + + + $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + .lib + mimalloc-static + + + false + + + false + + + false + + + false + + + + Level3 + Disabled + true + true + ../../include + _CRT_SECURE_NO_WARNINGS;MI_DEBUG=3;%(PreprocessorDefinitions); + CompileAsC + false + stdcpp17 + + + + + + + + + + + Level4 + Disabled + true + true + ../../include + _CRT_SECURE_NO_WARNINGS;MI_DEBUG=3;%(PreprocessorDefinitions); + CompileAsCpp + false + stdcpp14 + + + 
+ + + + + + + + + + + + + + + + Level3 + MaxSpeed + true + true + ../../include + _CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions);NDEBUG + AssemblyAndSourceCode + $(IntDir) + false + false + Default + CompileAsC + true + + + true + true + + + + + + + + + + + Level4 + MaxSpeed + true + true + ../../include + _CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions);NDEBUG + AssemblyAndSourceCode + $(IntDir) + false + false + Default + CompileAsC + true + + + true + true + + + + + + + + + + + + + + + + + false + false + false + false + + + true + true + true + true + + + + + + + + + + + + true + true + true + true + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/yass/third_party/mimalloc/ide/vs2019/mimalloc-override-test.vcxproj b/yass/third_party/mimalloc/ide/vs2019/mimalloc-override-test.vcxproj new file mode 100644 index 0000000000..7a9202f1b1 --- /dev/null +++ b/yass/third_party/mimalloc/ide/vs2019/mimalloc-override-test.vcxproj @@ -0,0 +1,190 @@ + + + + + Debug + Win32 + + + Release + Win32 + + + Debug + x64 + + + Release + x64 + + + + 15.0 + {FEF7868F-750E-4C21-A04D-22707CC66879} + mimalloc-override-test + 10.0 + mimalloc-override-test + + + + Application + true + v142 + + + Application + false + v142 + true + + + Application + true + v142 + + + Application + false + v142 + true + + + + + + + + + + + + + + + + + + + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + + Level3 + Disabled + true + true + ..\..\include + 
MultiThreadedDebugDLL + Sync + Default + false + + + Console + kernel32.lib;%(AdditionalDependencies) + + + + + + + + + + Level3 + Disabled + true + true + ..\..\include + MultiThreadedDebugDLL + Sync + Default + false + + + Console + + + kernel32.lib;%(AdditionalDependencies) + + + + + + + + + + Level3 + MaxSpeed + true + true + true + true + ..\..\include + _MBCS;%(PreprocessorDefinitions);NDEBUG + MultiThreadedDLL + + + true + true + Console + kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) + + + + + + + + + Level3 + MaxSpeed + true + true + true + true + ..\..\include + _MBCS;%(PreprocessorDefinitions);NDEBUG + MultiThreadedDLL + + + true + true + Console + + + kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) + + + + + + + + + + + + {abb5eae7-b3e6-432e-b636-333449892ea7} + + + + + + \ No newline at end of file diff --git a/yass/third_party/mimalloc/ide/vs2019/mimalloc-override.vcxproj b/yass/third_party/mimalloc/ide/vs2019/mimalloc-override.vcxproj new file mode 100644 index 0000000000..a84a517858 --- /dev/null +++ b/yass/third_party/mimalloc/ide/vs2019/mimalloc-override.vcxproj @@ -0,0 +1,260 @@ + + + + + Debug + Win32 + + + Release + Win32 + + + Debug + x64 + + + Release + x64 + + + + 15.0 + {ABB5EAE7-B3E6-432E-B636-333449892EA7} + mimalloc-override + 10.0 + mimalloc-override + + + + DynamicLibrary + true + v142 + + + DynamicLibrary + false + v142 + + + DynamicLibrary + true + v142 + + + DynamicLibrary + false + v142 + + + + + + + + + + + + + + + + + + + + + $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + .dll + mimalloc-override + + + $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + 
$(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + .dll + mimalloc-override + + + $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + .dll + mimalloc-override + + + $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + .dll + mimalloc-override + + + + Level3 + Disabled + true + true + ../../include + MI_SHARED_LIB;MI_SHARED_LIB_EXPORT;MI_MALLOC_OVERRIDE;%(PreprocessorDefinitions); + MultiThreadedDebugDLL + false + Default + + + $(ProjectDir)\..\..\bin\mimalloc-redirect32.lib;%(AdditionalDependencies) + + + + + Default + false + + + COPY /Y "$(ProjectDir)..\..\bin\mimalloc-redirect32.dll" "$(OutputPath)" + + + Copy mimalloc-redirect32.dll to the output directory + + + + + Level3 + Disabled + true + true + ../../include + MI_DEBUG=3;MI_SHARED_LIB;MI_SHARED_LIB_EXPORT;MI_MALLOC_OVERRIDE;%(PreprocessorDefinitions); + MultiThreadedDebugDLL + false + Default + + + $(ProjectDir)\..\..\bin\mimalloc-redirect.lib;%(AdditionalDependencies) + + + + + Default + false + + + COPY /Y "$(ProjectDir)..\..\bin\mimalloc-redirect.dll" "$(OutputPath)" + + + copy mimalloc-redirect.dll to the output directory + + + + + Level3 + MaxSpeed + true + true + true + ../../include + MI_SHARED_LIB;MI_SHARED_LIB_EXPORT;MI_MALLOC_OVERRIDE;%(PreprocessorDefinitions);NDEBUG + AssemblyAndSourceCode + $(IntDir) + false + MultiThreadedDLL + Default + false + + + true + true + $(ProjectDir)\..\..\bin\mimalloc-redirect32.lib;%(AdditionalDependencies) + + + Default + false + + + COPY /Y "$(ProjectDir)..\..\bin\mimalloc-redirect32.dll" "$(OutputPath)" + + + Copy mimalloc-redirect32.dll to the output directory + + + + + Level3 + MaxSpeed + true + true + true + ../../include + MI_SHARED_LIB;MI_SHARED_LIB_EXPORT;MI_MALLOC_OVERRIDE;%(PreprocessorDefinitions);NDEBUG + AssemblyAndSourceCode + $(IntDir) + false + 
MultiThreadedDLL + Default + false + + + true + true + $(ProjectDir)\..\..\bin\mimalloc-redirect.lib;%(AdditionalDependencies) + + + Default + false + + + COPY /Y "$(ProjectDir)..\..\bin\mimalloc-redirect.dll" "$(OutputPath)" + + + copy mimalloc-redirect.dll to the output directory + + + + + + + + + + + + + + + + false + false + false + false + + + true + true + true + true + + + + + + + + + + + + + true + true + true + true + + + + + + + + + + + \ No newline at end of file diff --git a/yass/third_party/mimalloc/ide/vs2019/mimalloc-test-api.vcxproj b/yass/third_party/mimalloc/ide/vs2019/mimalloc-test-api.vcxproj new file mode 100644 index 0000000000..812a9cb116 --- /dev/null +++ b/yass/third_party/mimalloc/ide/vs2019/mimalloc-test-api.vcxproj @@ -0,0 +1,155 @@ + + + + + Debug + Win32 + + + Release + Win32 + + + Debug + x64 + + + Release + x64 + + + + 15.0 + {FFF7958F-750E-4C21-A04D-22707CC66878} + mimalloc-test-api + 10.0 + mimalloc-test-api + + + + Application + true + v142 + + + Application + false + v142 + true + + + Application + true + v142 + + + Application + false + v142 + true + + + + + + + + + + + + + + + + + + + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + + Level3 + Disabled + true + true + ..\..\include + + + Console + + + + + Level3 + Disabled + true + true + ..\..\include + + + Console + + + + + Level3 + MaxSpeed + true + true + true + true + ..\..\include + %(PreprocessorDefinitions);NDEBUG + + + true + true + Console + + + + + Level3 + MaxSpeed 
+ true + true + true + true + ..\..\include + %(PreprocessorDefinitions);NDEBUG + + + true + true + Console + + + + + + + + + {abb5eae7-b3e6-432e-b636-333449892ea6} + + + + + + diff --git a/yass/third_party/mimalloc/ide/vs2019/mimalloc-test-stress.vcxproj b/yass/third_party/mimalloc/ide/vs2019/mimalloc-test-stress.vcxproj new file mode 100644 index 0000000000..ef7ab3575a --- /dev/null +++ b/yass/third_party/mimalloc/ide/vs2019/mimalloc-test-stress.vcxproj @@ -0,0 +1,159 @@ + + + + + Debug + Win32 + + + Release + Win32 + + + Debug + x64 + + + Release + x64 + + + + 15.0 + {FEF7958F-750E-4C21-A04D-22707CC66878} + mimalloc-test-stress + 10.0 + mimalloc-test-stress + + + + Application + true + v142 + + + Application + false + v142 + true + + + Application + true + v142 + + + Application + false + v142 + true + + + + + + + + + + + + + + + + + + + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + + Level3 + Disabled + true + true + ..\..\include + + + Console + + + + + Level3 + Disabled + true + true + ..\..\include + + + Console + + + + + Level3 + MaxSpeed + true + true + true + true + ..\..\include + %(PreprocessorDefinitions);NDEBUG + + + true + true + Console + + + + + Level3 + MaxSpeed + true + true + true + true + ..\..\include + %(PreprocessorDefinitions);NDEBUG + + + true + true + Console + + + + + false + false + false + false + + + + + {abb5eae7-b3e6-432e-b636-333449892ea6} + + + + + + \ No newline at end of file diff --git 
a/yass/third_party/mimalloc/ide/vs2019/mimalloc-test.vcxproj b/yass/third_party/mimalloc/ide/vs2019/mimalloc-test.vcxproj new file mode 100644 index 0000000000..13af6ab495 --- /dev/null +++ b/yass/third_party/mimalloc/ide/vs2019/mimalloc-test.vcxproj @@ -0,0 +1,158 @@ + + + + + Debug + Win32 + + + Release + Win32 + + + Debug + x64 + + + Release + x64 + + + + 15.0 + {FEF7858F-750E-4C21-A04D-22707CC66878} + mimalloctest + 10.0 + mimalloc-test + + + + Application + true + v142 + + + Application + false + v142 + true + + + Application + true + v142 + + + Application + false + v142 + true + + + + + + + + + + + + + + + + + + + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + + Level3 + Disabled + true + true + ..\..\include + stdcpp17 + + + Console + + + + + Level3 + Disabled + true + true + ..\..\include + stdcpp17 + + + Console + + + + + Level3 + MaxSpeed + true + true + true + true + ..\..\include + _MBCS;%(PreprocessorDefinitions);NDEBUG + stdcpp17 + + + true + true + Console + + + + + Level3 + MaxSpeed + true + true + true + true + ..\..\include + _MBCS;%(PreprocessorDefinitions);NDEBUG + stdcpp17 + + + true + true + Console + + + + + {abb5eae7-b3e6-432e-b636-333449892ea6} + + + + + + + + + \ No newline at end of file diff --git a/yass/third_party/mimalloc/ide/vs2019/mimalloc.sln b/yass/third_party/mimalloc/ide/vs2019/mimalloc.sln new file mode 100644 index 0000000000..6ff01d3b44 --- /dev/null +++ b/yass/third_party/mimalloc/ide/vs2019/mimalloc.sln @@ -0,0 +1,81 @@ 
+ +Microsoft Visual Studio Solution File, Format Version 12.00 +# Visual Studio Version 16 +VisualStudioVersion = 16.0.29709.97 +MinimumVisualStudioVersion = 10.0.40219.1 +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc", "mimalloc.vcxproj", "{ABB5EAE7-B3E6-432E-B636-333449892EA6}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-test", "mimalloc-test.vcxproj", "{FEF7858F-750E-4C21-A04D-22707CC66878}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-override", "mimalloc-override.vcxproj", "{ABB5EAE7-B3E6-432E-B636-333449892EA7}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-override-test", "mimalloc-override-test.vcxproj", "{FEF7868F-750E-4C21-A04D-22707CC66879}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-test-stress", "mimalloc-test-stress.vcxproj", "{FEF7958F-750E-4C21-A04D-22707CC66878}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-test-api", "mimalloc-test-api.vcxproj", "{FFF7958F-750E-4C21-A04D-22707CC66878}" +EndProject +Global + GlobalSection(SolutionConfigurationPlatforms) = preSolution + Debug|x64 = Debug|x64 + Debug|x86 = Debug|x86 + Release|x64 = Release|x64 + Release|x86 = Release|x86 + EndGlobalSection + GlobalSection(ProjectConfigurationPlatforms) = postSolution + {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Debug|x64.ActiveCfg = Debug|x64 + {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Debug|x64.Build.0 = Debug|x64 + {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Debug|x86.ActiveCfg = Debug|Win32 + {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Debug|x86.Build.0 = Debug|Win32 + {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Release|x64.ActiveCfg = Release|x64 + {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Release|x64.Build.0 = Release|x64 + {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Release|x86.ActiveCfg = Release|Win32 + {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Release|x86.Build.0 = Release|Win32 + 
{FEF7858F-750E-4C21-A04D-22707CC66878}.Debug|x64.ActiveCfg = Debug|x64 + {FEF7858F-750E-4C21-A04D-22707CC66878}.Debug|x64.Build.0 = Debug|x64 + {FEF7858F-750E-4C21-A04D-22707CC66878}.Debug|x86.ActiveCfg = Debug|Win32 + {FEF7858F-750E-4C21-A04D-22707CC66878}.Debug|x86.Build.0 = Debug|Win32 + {FEF7858F-750E-4C21-A04D-22707CC66878}.Release|x64.ActiveCfg = Release|x64 + {FEF7858F-750E-4C21-A04D-22707CC66878}.Release|x64.Build.0 = Release|x64 + {FEF7858F-750E-4C21-A04D-22707CC66878}.Release|x86.ActiveCfg = Release|Win32 + {FEF7858F-750E-4C21-A04D-22707CC66878}.Release|x86.Build.0 = Release|Win32 + {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Debug|x64.ActiveCfg = Debug|x64 + {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Debug|x64.Build.0 = Debug|x64 + {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Debug|x86.ActiveCfg = Debug|Win32 + {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Debug|x86.Build.0 = Debug|Win32 + {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Release|x64.ActiveCfg = Release|x64 + {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Release|x64.Build.0 = Release|x64 + {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Release|x86.ActiveCfg = Release|Win32 + {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Release|x86.Build.0 = Release|Win32 + {FEF7868F-750E-4C21-A04D-22707CC66879}.Debug|x64.ActiveCfg = Debug|x64 + {FEF7868F-750E-4C21-A04D-22707CC66879}.Debug|x64.Build.0 = Debug|x64 + {FEF7868F-750E-4C21-A04D-22707CC66879}.Debug|x86.ActiveCfg = Debug|Win32 + {FEF7868F-750E-4C21-A04D-22707CC66879}.Debug|x86.Build.0 = Debug|Win32 + {FEF7868F-750E-4C21-A04D-22707CC66879}.Release|x64.ActiveCfg = Release|x64 + {FEF7868F-750E-4C21-A04D-22707CC66879}.Release|x64.Build.0 = Release|x64 + {FEF7868F-750E-4C21-A04D-22707CC66879}.Release|x86.ActiveCfg = Release|Win32 + {FEF7868F-750E-4C21-A04D-22707CC66879}.Release|x86.Build.0 = Release|Win32 + {FEF7958F-750E-4C21-A04D-22707CC66878}.Debug|x64.ActiveCfg = Debug|x64 + {FEF7958F-750E-4C21-A04D-22707CC66878}.Debug|x64.Build.0 = Debug|x64 + 
{FEF7958F-750E-4C21-A04D-22707CC66878}.Debug|x86.ActiveCfg = Debug|Win32 + {FEF7958F-750E-4C21-A04D-22707CC66878}.Debug|x86.Build.0 = Debug|Win32 + {FEF7958F-750E-4C21-A04D-22707CC66878}.Release|x64.ActiveCfg = Release|x64 + {FEF7958F-750E-4C21-A04D-22707CC66878}.Release|x64.Build.0 = Release|x64 + {FEF7958F-750E-4C21-A04D-22707CC66878}.Release|x86.ActiveCfg = Release|Win32 + {FEF7958F-750E-4C21-A04D-22707CC66878}.Release|x86.Build.0 = Release|Win32 + {FFF7958F-750E-4C21-A04D-22707CC66878}.Debug|x64.ActiveCfg = Debug|x64 + {FFF7958F-750E-4C21-A04D-22707CC66878}.Debug|x64.Build.0 = Debug|x64 + {FFF7958F-750E-4C21-A04D-22707CC66878}.Debug|x86.ActiveCfg = Debug|Win32 + {FFF7958F-750E-4C21-A04D-22707CC66878}.Debug|x86.Build.0 = Debug|Win32 + {FFF7958F-750E-4C21-A04D-22707CC66878}.Release|x64.ActiveCfg = Release|x64 + {FFF7958F-750E-4C21-A04D-22707CC66878}.Release|x64.Build.0 = Release|x64 + {FFF7958F-750E-4C21-A04D-22707CC66878}.Release|x86.ActiveCfg = Release|Win32 + {FFF7958F-750E-4C21-A04D-22707CC66878}.Release|x86.Build.0 = Release|Win32 + EndGlobalSection + GlobalSection(SolutionProperties) = preSolution + HideSolutionNode = FALSE + EndGlobalSection + GlobalSection(ExtensibilityGlobals) = postSolution + SolutionGuid = {4297F93D-486A-4243-995F-7D32F59AE82A} + EndGlobalSection +EndGlobal diff --git a/yass/third_party/mimalloc/ide/vs2019/mimalloc.vcxproj b/yass/third_party/mimalloc/ide/vs2019/mimalloc.vcxproj new file mode 100644 index 0000000000..0076b1dbdd --- /dev/null +++ b/yass/third_party/mimalloc/ide/vs2019/mimalloc.vcxproj @@ -0,0 +1,258 @@ + + + + + Debug + Win32 + + + Release + Win32 + + + Debug + x64 + + + Release + x64 + + + + 15.0 + {ABB5EAE7-B3E6-432E-B636-333449892EA6} + mimalloc + 10.0 + mimalloc + + + + StaticLibrary + true + v142 + + + StaticLibrary + false + v142 + true + + + StaticLibrary + true + v142 + + + StaticLibrary + false + v142 + true + + + + + + + + + + + + + + + + + + + + + $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + 
$(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + .lib + mimalloc-static + + + $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + .lib + mimalloc-static + + + $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + .lib + mimalloc-static + + + $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + .lib + mimalloc-static + + + + Level4 + Disabled + true + true + ../../include + MI_DEBUG=3;%(PreprocessorDefinitions); + CompileAsCpp + false + Default + + + + + + + + + + + Level4 + Disabled + true + Default + ../../include + MI_DEBUG=3;%(PreprocessorDefinitions); + CompileAsCpp + false + Default + + + + + + + + + + + + + + + + + + + Level4 + MaxSpeed + true + true + ../../include + %(PreprocessorDefinitions);NDEBUG + AssemblyAndSourceCode + $(IntDir) + false + false + Default + CompileAsCpp + true + Default + + + true + true + + + + + + + + + + + Level4 + MaxSpeed + true + true + ../../include + %(PreprocessorDefinitions);NDEBUG + AssemblyAndSourceCode + $(IntDir) + false + false + Default + CompileAsCpp + true + Default + + + true + true + + + + + + + + + + + + + + + + + false + false + false + false + + + true + true + true + true + + + + + + false + + + + + + + true + true + true + true + + + + true + true + true + true + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/yass/third_party/mimalloc/ide/vs2022/mimalloc-override-test.vcxproj b/yass/third_party/mimalloc/ide/vs2022/mimalloc-override-test.vcxproj new file mode 100644 index 0000000000..a3c56f7bad --- /dev/null +++ b/yass/third_party/mimalloc/ide/vs2022/mimalloc-override-test.vcxproj @@ -0,0 +1,190 @@ + + + + + Debug + Win32 + + + Release + Win32 + + + Debug + x64 + + + Release + x64 + + + + 15.0 + 
{FEF7868F-750E-4C21-A04D-22707CC66879} + mimalloc-override-test + 10.0 + mimalloc-override-test + + + + Application + true + v143 + + + Application + false + v143 + true + + + Application + true + v143 + + + Application + false + v143 + true + + + + + + + + + + + + + + + + + + + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + + Level3 + Disabled + true + true + ..\..\include + MultiThreadedDebugDLL + Sync + Default + false + + + Console + kernel32.lib;%(AdditionalDependencies) + + + + + + + + + + Level3 + Disabled + true + true + ..\..\include + MultiThreadedDebugDLL + Sync + Default + false + + + Console + + + kernel32.lib;%(AdditionalDependencies) + + + + + + + + + + Level3 + MaxSpeed + true + true + true + true + ..\..\include + _MBCS;%(PreprocessorDefinitions);NDEBUG + MultiThreadedDLL + + + true + true + Console + kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) + + + + + + + + + Level3 + MaxSpeed + true + true + true + true + ..\..\include + _MBCS;%(PreprocessorDefinitions);NDEBUG + MultiThreadedDLL + + + true + true + Console + + + kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) + + + + + + + + + + + + {abb5eae7-b3e6-432e-b636-333449892ea7} + + + + + + \ No newline at end of file diff --git 
a/yass/third_party/mimalloc/ide/vs2022/mimalloc-override.vcxproj b/yass/third_party/mimalloc/ide/vs2022/mimalloc-override.vcxproj new file mode 100644 index 0000000000..df2a081690 --- /dev/null +++ b/yass/third_party/mimalloc/ide/vs2022/mimalloc-override.vcxproj @@ -0,0 +1,271 @@ + + + + + Debug + Win32 + + + Release + Win32 + + + Debug + x64 + + + Release + x64 + + + + 15.0 + {ABB5EAE7-B3E6-432E-B636-333449892EA7} + mimalloc-override + 10.0 + mimalloc-override + + + + DynamicLibrary + true + v143 + + + DynamicLibrary + false + v143 + + + DynamicLibrary + true + v143 + + + DynamicLibrary + false + v143 + + + + + + + + + + + + + + + + + + + + + $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + .dll + mimalloc-override + + + $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + .dll + mimalloc-override + + + $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + .dll + mimalloc-override + + + $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + .dll + mimalloc-override + + + + Level3 + Disabled + true + true + ../../include + MI_SHARED_LIB;MI_SHARED_LIB_EXPORT;MI_MALLOC_OVERRIDE;%(PreprocessorDefinitions); + MultiThreadedDebugDLL + false + Default + + + $(ProjectDir)\..\..\bin\mimalloc-redirect32.lib;%(AdditionalDependencies) + + + + + Default + false + + + COPY /Y "$(ProjectDir)..\..\bin\mimalloc-redirect32.dll" "$(OutputPath)" + + + Copy mimalloc-redirect32.dll to the output directory + + + + + Level3 + Disabled + true + true + ../../include + MI_DEBUG=4;MI_SHARED_LIB;MI_SHARED_LIB_EXPORT;MI_MALLOC_OVERRIDE;%(PreprocessorDefinitions); + MultiThreadedDebugDLL + false + Default + + + 
$(ProjectDir)\..\..\bin\mimalloc-redirect.lib;%(AdditionalDependencies) + + + + + Default + false + + + COPY /Y "$(ProjectDir)..\..\bin\mimalloc-redirect.dll" "$(OutputPath)" + + + copy mimalloc-redirect.dll to the output directory + + + + + Level3 + MaxSpeed + true + true + true + ../../include + MI_SHARED_LIB;MI_SHARED_LIB_EXPORT;MI_MALLOC_OVERRIDE;%(PreprocessorDefinitions);NDEBUG + AssemblyAndSourceCode + $(IntDir) + false + MultiThreadedDLL + Default + false + + + true + true + $(ProjectDir)\..\..\bin\mimalloc-redirect32.lib;%(AdditionalDependencies) + + + Default + false + + + COPY /Y "$(ProjectDir)..\..\bin\mimalloc-redirect32.dll" "$(OutputPath)" + + + Copy mimalloc-redirect32.dll to the output directory + + + + + Level3 + MaxSpeed + true + true + true + ../../include + MI_SHARED_LIB;MI_SHARED_LIB_EXPORT;MI_MALLOC_OVERRIDE;%(PreprocessorDefinitions);NDEBUG + AssemblyAndSourceCode + $(IntDir) + false + MultiThreadedDLL + Default + false + + + true + true + $(ProjectDir)\..\..\bin\mimalloc-redirect.lib;%(AdditionalDependencies) + + + Default + false + + + COPY /Y "$(ProjectDir)..\..\bin\mimalloc-redirect.dll" "$(OutputPath)" + + + copy mimalloc-redirect.dll to the output directory + + + + + + + + + + + + + + + + + + false + false + false + false + + + true + true + true + true + + + + + + + + + + + true + true + true + true + + + + + true + true + true + true + + + + + + + + + + + + + + \ No newline at end of file diff --git a/yass/third_party/mimalloc/ide/vs2022/mimalloc-test-api.vcxproj b/yass/third_party/mimalloc/ide/vs2022/mimalloc-test-api.vcxproj new file mode 100644 index 0000000000..d9b9cae4f1 --- /dev/null +++ b/yass/third_party/mimalloc/ide/vs2022/mimalloc-test-api.vcxproj @@ -0,0 +1,162 @@ + + + + + Debug + Win32 + + + Release + Win32 + + + Debug + x64 + + + Release + x64 + + + + 15.0 + {FFF7958F-750E-4C21-A04D-22707CC66878} + mimalloc-test-api + 10.0 + mimalloc-test-api + + + + Application + true + v143 + + + Application + false + v143 + true + + 
+ Application + true + v143 + + + Application + false + v143 + true + + + + + + + + + + + + + + + + + + + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + + Level3 + Disabled + true + true + ..\..\include + + + Console + + + + + Level3 + Disabled + true + true + ..\..\include + + + Console + + + + + Level3 + MaxSpeed + true + true + true + true + ..\..\include + %(PreprocessorDefinitions);NDEBUG + + + true + true + Console + + + + + Level3 + MaxSpeed + true + true + true + true + ..\..\include + %(PreprocessorDefinitions);NDEBUG + + + true + true + Console + + + + + true + true + true + true + + + false + + + + + {abb5eae7-b3e6-432e-b636-333449892ea6} + + + + + + \ No newline at end of file diff --git a/yass/third_party/mimalloc/ide/vs2022/mimalloc-test-stress.vcxproj b/yass/third_party/mimalloc/ide/vs2022/mimalloc-test-stress.vcxproj new file mode 100644 index 0000000000..14bd3e6927 --- /dev/null +++ b/yass/third_party/mimalloc/ide/vs2022/mimalloc-test-stress.vcxproj @@ -0,0 +1,159 @@ + + + + + Debug + Win32 + + + Release + Win32 + + + Debug + x64 + + + Release + x64 + + + + 15.0 + {FEF7958F-750E-4C21-A04D-22707CC66878} + mimalloc-test-stress + 10.0 + mimalloc-test-stress + + + + Application + true + v143 + + + Application + false + v143 + true + + + Application + true + v143 + + + Application + false + v143 + true + + + + + + + + + + + + + + + + + + + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + 
$(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + + Level3 + Disabled + true + true + ..\..\include + + + Console + + + + + Level3 + Disabled + true + true + ..\..\include + + + Console + + + + + Level3 + MaxSpeed + true + true + true + true + ..\..\include + %(PreprocessorDefinitions);NDEBUG + + + true + true + Console + + + + + Level3 + MaxSpeed + true + true + true + true + ..\..\include + %(PreprocessorDefinitions);NDEBUG + + + true + true + Console + + + + + false + false + false + false + + + + + {abb5eae7-b3e6-432e-b636-333449892ea7} + + + + + + \ No newline at end of file diff --git a/yass/third_party/mimalloc/ide/vs2022/mimalloc-test.vcxproj b/yass/third_party/mimalloc/ide/vs2022/mimalloc-test.vcxproj new file mode 100644 index 0000000000..506dd7d457 --- /dev/null +++ b/yass/third_party/mimalloc/ide/vs2022/mimalloc-test.vcxproj @@ -0,0 +1,158 @@ + + + + + Debug + Win32 + + + Release + Win32 + + + Debug + x64 + + + Release + x64 + + + + 15.0 + {FEF7858F-750E-4C21-A04D-22707CC66878} + mimalloctest + 10.0 + mimalloc-test + + + + Application + true + v143 + + + Application + false + v143 + true + + + Application + true + v143 + + + Application + false + v143 + true + + + + + + + + + + + + + + + + + + + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + 
$(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + + Level3 + Disabled + true + true + ..\..\include + stdcpp17 + + + Console + + + + + Level3 + Disabled + true + true + ..\..\include + stdcpp17 + + + Console + + + + + Level3 + MaxSpeed + true + true + true + true + ..\..\include + _MBCS;%(PreprocessorDefinitions);NDEBUG + stdcpp17 + + + true + true + Console + + + + + Level3 + MaxSpeed + true + true + true + true + ..\..\include + _MBCS;%(PreprocessorDefinitions);NDEBUG + stdcpp17 + + + true + true + Console + + + + + {abb5eae7-b3e6-432e-b636-333449892ea6} + + + + + + + + + \ No newline at end of file diff --git a/yass/third_party/mimalloc/ide/vs2022/mimalloc.sln b/yass/third_party/mimalloc/ide/vs2022/mimalloc.sln new file mode 100644 index 0000000000..6ff01d3b44 --- /dev/null +++ b/yass/third_party/mimalloc/ide/vs2022/mimalloc.sln @@ -0,0 +1,81 @@ + +Microsoft Visual Studio Solution File, Format Version 12.00 +# Visual Studio Version 16 +VisualStudioVersion = 16.0.29709.97 +MinimumVisualStudioVersion = 10.0.40219.1 +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc", "mimalloc.vcxproj", "{ABB5EAE7-B3E6-432E-B636-333449892EA6}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-test", "mimalloc-test.vcxproj", "{FEF7858F-750E-4C21-A04D-22707CC66878}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-override", "mimalloc-override.vcxproj", "{ABB5EAE7-B3E6-432E-B636-333449892EA7}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-override-test", "mimalloc-override-test.vcxproj", "{FEF7868F-750E-4C21-A04D-22707CC66879}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-test-stress", "mimalloc-test-stress.vcxproj", 
"{FEF7958F-750E-4C21-A04D-22707CC66878}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-test-api", "mimalloc-test-api.vcxproj", "{FFF7958F-750E-4C21-A04D-22707CC66878}" +EndProject +Global + GlobalSection(SolutionConfigurationPlatforms) = preSolution + Debug|x64 = Debug|x64 + Debug|x86 = Debug|x86 + Release|x64 = Release|x64 + Release|x86 = Release|x86 + EndGlobalSection + GlobalSection(ProjectConfigurationPlatforms) = postSolution + {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Debug|x64.ActiveCfg = Debug|x64 + {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Debug|x64.Build.0 = Debug|x64 + {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Debug|x86.ActiveCfg = Debug|Win32 + {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Debug|x86.Build.0 = Debug|Win32 + {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Release|x64.ActiveCfg = Release|x64 + {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Release|x64.Build.0 = Release|x64 + {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Release|x86.ActiveCfg = Release|Win32 + {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Release|x86.Build.0 = Release|Win32 + {FEF7858F-750E-4C21-A04D-22707CC66878}.Debug|x64.ActiveCfg = Debug|x64 + {FEF7858F-750E-4C21-A04D-22707CC66878}.Debug|x64.Build.0 = Debug|x64 + {FEF7858F-750E-4C21-A04D-22707CC66878}.Debug|x86.ActiveCfg = Debug|Win32 + {FEF7858F-750E-4C21-A04D-22707CC66878}.Debug|x86.Build.0 = Debug|Win32 + {FEF7858F-750E-4C21-A04D-22707CC66878}.Release|x64.ActiveCfg = Release|x64 + {FEF7858F-750E-4C21-A04D-22707CC66878}.Release|x64.Build.0 = Release|x64 + {FEF7858F-750E-4C21-A04D-22707CC66878}.Release|x86.ActiveCfg = Release|Win32 + {FEF7858F-750E-4C21-A04D-22707CC66878}.Release|x86.Build.0 = Release|Win32 + {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Debug|x64.ActiveCfg = Debug|x64 + {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Debug|x64.Build.0 = Debug|x64 + {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Debug|x86.ActiveCfg = Debug|Win32 + {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Debug|x86.Build.0 = Debug|Win32 + 
{ABB5EAE7-B3E6-432E-B636-333449892EA7}.Release|x64.ActiveCfg = Release|x64 + {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Release|x64.Build.0 = Release|x64 + {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Release|x86.ActiveCfg = Release|Win32 + {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Release|x86.Build.0 = Release|Win32 + {FEF7868F-750E-4C21-A04D-22707CC66879}.Debug|x64.ActiveCfg = Debug|x64 + {FEF7868F-750E-4C21-A04D-22707CC66879}.Debug|x64.Build.0 = Debug|x64 + {FEF7868F-750E-4C21-A04D-22707CC66879}.Debug|x86.ActiveCfg = Debug|Win32 + {FEF7868F-750E-4C21-A04D-22707CC66879}.Debug|x86.Build.0 = Debug|Win32 + {FEF7868F-750E-4C21-A04D-22707CC66879}.Release|x64.ActiveCfg = Release|x64 + {FEF7868F-750E-4C21-A04D-22707CC66879}.Release|x64.Build.0 = Release|x64 + {FEF7868F-750E-4C21-A04D-22707CC66879}.Release|x86.ActiveCfg = Release|Win32 + {FEF7868F-750E-4C21-A04D-22707CC66879}.Release|x86.Build.0 = Release|Win32 + {FEF7958F-750E-4C21-A04D-22707CC66878}.Debug|x64.ActiveCfg = Debug|x64 + {FEF7958F-750E-4C21-A04D-22707CC66878}.Debug|x64.Build.0 = Debug|x64 + {FEF7958F-750E-4C21-A04D-22707CC66878}.Debug|x86.ActiveCfg = Debug|Win32 + {FEF7958F-750E-4C21-A04D-22707CC66878}.Debug|x86.Build.0 = Debug|Win32 + {FEF7958F-750E-4C21-A04D-22707CC66878}.Release|x64.ActiveCfg = Release|x64 + {FEF7958F-750E-4C21-A04D-22707CC66878}.Release|x64.Build.0 = Release|x64 + {FEF7958F-750E-4C21-A04D-22707CC66878}.Release|x86.ActiveCfg = Release|Win32 + {FEF7958F-750E-4C21-A04D-22707CC66878}.Release|x86.Build.0 = Release|Win32 + {FFF7958F-750E-4C21-A04D-22707CC66878}.Debug|x64.ActiveCfg = Debug|x64 + {FFF7958F-750E-4C21-A04D-22707CC66878}.Debug|x64.Build.0 = Debug|x64 + {FFF7958F-750E-4C21-A04D-22707CC66878}.Debug|x86.ActiveCfg = Debug|Win32 + {FFF7958F-750E-4C21-A04D-22707CC66878}.Debug|x86.Build.0 = Debug|Win32 + {FFF7958F-750E-4C21-A04D-22707CC66878}.Release|x64.ActiveCfg = Release|x64 + {FFF7958F-750E-4C21-A04D-22707CC66878}.Release|x64.Build.0 = Release|x64 + 
{FFF7958F-750E-4C21-A04D-22707CC66878}.Release|x86.ActiveCfg = Release|Win32 + {FFF7958F-750E-4C21-A04D-22707CC66878}.Release|x86.Build.0 = Release|Win32 + EndGlobalSection + GlobalSection(SolutionProperties) = preSolution + HideSolutionNode = FALSE + EndGlobalSection + GlobalSection(ExtensibilityGlobals) = postSolution + SolutionGuid = {4297F93D-486A-4243-995F-7D32F59AE82A} + EndGlobalSection +EndGlobal diff --git a/yass/third_party/mimalloc/ide/vs2022/mimalloc.vcxproj b/yass/third_party/mimalloc/ide/vs2022/mimalloc.vcxproj new file mode 100644 index 0000000000..33ad9cef13 --- /dev/null +++ b/yass/third_party/mimalloc/ide/vs2022/mimalloc.vcxproj @@ -0,0 +1,264 @@ + + + + + Debug + Win32 + + + Release + Win32 + + + Debug + x64 + + + Release + x64 + + + + 15.0 + {ABB5EAE7-B3E6-432E-B636-333449892EA6} + mimalloc + 10.0 + mimalloc + + + + StaticLibrary + true + v143 + + + StaticLibrary + false + v143 + true + + + StaticLibrary + true + v143 + + + StaticLibrary + false + v143 + true + + + + + + + + + + + + + + + + + + + + + $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + .lib + mimalloc-static + + + $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + .lib + mimalloc-static + + + $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + .lib + mimalloc-static + + + $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + .lib + mimalloc-static + + + + Level4 + Disabled + true + Default + ../../include + MI_DEBUG=3;%(PreprocessorDefinitions); + CompileAsCpp + false + stdcpp20 + + + + + + + + + + + Level4 + Disabled + true + Default + ../../include + MI_DEBUG=4;MI_SECURE=0;%(PreprocessorDefinitions); + CompileAsCpp + false + stdcpp20 + + + + + + 
+ + + + + + + + + + + + + Level4 + MaxSpeed + true + Default + ../../include + %(PreprocessorDefinitions);NDEBUG + AssemblyAndSourceCode + $(IntDir) + false + false + Default + CompileAsCpp + true + stdcpp20 + + + true + true + + + + + + + + + + + Level4 + MaxSpeed + true + Default + ../../include + %(PreprocessorDefinitions);NDEBUG + AssemblyAndSourceCode + $(IntDir) + false + false + Default + CompileAsCpp + true + stdcpp20 + + + true + true + + + + + + + + + + + + + + + + + false + false + false + false + + + true + true + true + true + + + + + + false + + + true + true + true + true + + + + + + + true + true + true + true + + + + true + true + true + true + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/yass/third_party/mimalloc/include/mimalloc-new-delete.h b/yass/third_party/mimalloc/include/mimalloc-new-delete.h new file mode 100644 index 0000000000..c16f4a6653 --- /dev/null +++ b/yass/third_party/mimalloc/include/mimalloc-new-delete.h @@ -0,0 +1,66 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2018-2020 Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. +-----------------------------------------------------------------------------*/ +#pragma once +#ifndef MIMALLOC_NEW_DELETE_H +#define MIMALLOC_NEW_DELETE_H + +// ---------------------------------------------------------------------------- +// This header provides convenient overrides for the new and +// delete operations in C++. +// +// This header should be included in only one source file! +// +// On Windows, or when linking dynamically with mimalloc, these +// can be more performant than the standard new-delete operations. 
+// See +// --------------------------------------------------------------------------- +#if defined(__cplusplus) + #include + #include + + #if defined(_MSC_VER) && defined(_Ret_notnull_) && defined(_Post_writable_byte_size_) + // stay consistent with VCRT definitions + #define mi_decl_new(n) mi_decl_nodiscard mi_decl_restrict _Ret_notnull_ _Post_writable_byte_size_(n) + #define mi_decl_new_nothrow(n) mi_decl_nodiscard mi_decl_restrict _Ret_maybenull_ _Success_(return != NULL) _Post_writable_byte_size_(n) + #else + #define mi_decl_new(n) mi_decl_nodiscard mi_decl_restrict + #define mi_decl_new_nothrow(n) mi_decl_nodiscard mi_decl_restrict + #endif + + void operator delete(void* p) noexcept { mi_free(p); }; + void operator delete[](void* p) noexcept { mi_free(p); }; + + void operator delete (void* p, const std::nothrow_t&) noexcept { mi_free(p); } + void operator delete[](void* p, const std::nothrow_t&) noexcept { mi_free(p); } + + mi_decl_new(n) void* operator new(std::size_t n) noexcept(false) { return mi_new(n); } + mi_decl_new(n) void* operator new[](std::size_t n) noexcept(false) { return mi_new(n); } + + mi_decl_new_nothrow(n) void* operator new (std::size_t n, const std::nothrow_t& tag) noexcept { (void)(tag); return mi_new_nothrow(n); } + mi_decl_new_nothrow(n) void* operator new[](std::size_t n, const std::nothrow_t& tag) noexcept { (void)(tag); return mi_new_nothrow(n); } + + #if (__cplusplus >= 201402L || _MSC_VER >= 1916) + void operator delete (void* p, std::size_t n) noexcept { mi_free_size(p,n); }; + void operator delete[](void* p, std::size_t n) noexcept { mi_free_size(p,n); }; + #endif + + #if (__cplusplus > 201402L || defined(__cpp_aligned_new)) + void operator delete (void* p, std::align_val_t al) noexcept { mi_free_aligned(p, static_cast(al)); } + void operator delete[](void* p, std::align_val_t al) noexcept { mi_free_aligned(p, static_cast(al)); } + void operator delete (void* p, std::size_t n, std::align_val_t al) noexcept { 
mi_free_size_aligned(p, n, static_cast(al)); }; + void operator delete[](void* p, std::size_t n, std::align_val_t al) noexcept { mi_free_size_aligned(p, n, static_cast(al)); }; + void operator delete (void* p, std::align_val_t al, const std::nothrow_t&) noexcept { mi_free_aligned(p, static_cast(al)); } + void operator delete[](void* p, std::align_val_t al, const std::nothrow_t&) noexcept { mi_free_aligned(p, static_cast(al)); } + + void* operator new (std::size_t n, std::align_val_t al) noexcept(false) { return mi_new_aligned(n, static_cast(al)); } + void* operator new[](std::size_t n, std::align_val_t al) noexcept(false) { return mi_new_aligned(n, static_cast(al)); } + void* operator new (std::size_t n, std::align_val_t al, const std::nothrow_t&) noexcept { return mi_new_aligned_nothrow(n, static_cast(al)); } + void* operator new[](std::size_t n, std::align_val_t al, const std::nothrow_t&) noexcept { return mi_new_aligned_nothrow(n, static_cast(al)); } + #endif +#endif + +#endif // MIMALLOC_NEW_DELETE_H diff --git a/yass/third_party/mimalloc/include/mimalloc-override.h b/yass/third_party/mimalloc/include/mimalloc-override.h new file mode 100644 index 0000000000..48a8a6226a --- /dev/null +++ b/yass/third_party/mimalloc/include/mimalloc-override.h @@ -0,0 +1,68 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2018-2020 Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. +-----------------------------------------------------------------------------*/ +#pragma once +#ifndef MIMALLOC_OVERRIDE_H +#define MIMALLOC_OVERRIDE_H + +/* ---------------------------------------------------------------------------- +This header can be used to statically redirect malloc/free and new/delete +to the mimalloc variants. 
This can be useful if one can include this file on +each source file in a project (but be careful when using external code to +not accidentally mix pointers from different allocators). +-----------------------------------------------------------------------------*/ + +#include + +// Standard C allocation +#define malloc(n) mi_malloc(n) +#define calloc(n,c) mi_calloc(n,c) +#define realloc(p,n) mi_realloc(p,n) +#define free(p) mi_free(p) + +#define strdup(s) mi_strdup(s) +#define strndup(s,n) mi_strndup(s,n) +#define realpath(f,n) mi_realpath(f,n) + +// Microsoft extensions +#define _expand(p,n) mi_expand(p,n) +#define _msize(p) mi_usable_size(p) +#define _recalloc(p,n,c) mi_recalloc(p,n,c) + +#define _strdup(s) mi_strdup(s) +#define _strndup(s,n) mi_strndup(s,n) +#define _wcsdup(s) (wchar_t*)mi_wcsdup((const unsigned short*)(s)) +#define _mbsdup(s) mi_mbsdup(s) +#define _dupenv_s(b,n,v) mi_dupenv_s(b,n,v) +#define _wdupenv_s(b,n,v) mi_wdupenv_s((unsigned short*)(b),n,(const unsigned short*)(v)) + +// Various Posix and Unix variants +#define reallocf(p,n) mi_reallocf(p,n) +#define malloc_size(p) mi_usable_size(p) +#define malloc_usable_size(p) mi_usable_size(p) +#define malloc_good_size(sz) mi_malloc_good_size(sz) +#define cfree(p) mi_free(p) + +#define valloc(n) mi_valloc(n) +#define pvalloc(n) mi_pvalloc(n) +#define reallocarray(p,s,n) mi_reallocarray(p,s,n) +#define reallocarr(p,s,n) mi_reallocarr(p,s,n) +#define memalign(a,n) mi_memalign(a,n) +#define aligned_alloc(a,n) mi_aligned_alloc(a,n) +#define posix_memalign(p,a,n) mi_posix_memalign(p,a,n) +#define _posix_memalign(p,a,n) mi_posix_memalign(p,a,n) + +// Microsoft aligned variants +#define _aligned_malloc(n,a) mi_malloc_aligned(n,a) +#define _aligned_realloc(p,n,a) mi_realloc_aligned(p,n,a) +#define _aligned_recalloc(p,s,n,a) mi_aligned_recalloc(p,s,n,a) +#define _aligned_msize(p,a,o) mi_usable_size(p) +#define _aligned_free(p) mi_free(p) +#define _aligned_offset_malloc(n,a,o) mi_malloc_aligned_at(n,a,o) 
+#define _aligned_offset_realloc(p,n,a,o) mi_realloc_aligned_at(p,n,a,o) +#define _aligned_offset_recalloc(p,s,n,a,o) mi_recalloc_aligned_at(p,s,n,a,o) + +#endif // MIMALLOC_OVERRIDE_H diff --git a/yass/third_party/mimalloc/include/mimalloc.h b/yass/third_party/mimalloc/include/mimalloc.h new file mode 100644 index 0000000000..c41bcc8039 --- /dev/null +++ b/yass/third_party/mimalloc/include/mimalloc.h @@ -0,0 +1,569 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2018-2023, Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. +-----------------------------------------------------------------------------*/ +#pragma once +#ifndef MIMALLOC_H +#define MIMALLOC_H + +#define MI_MALLOC_VERSION 217 // major + 2 digits minor + +// ------------------------------------------------------ +// Compiler specific attributes +// ------------------------------------------------------ + +#ifdef __cplusplus + #if (__cplusplus >= 201103L) || (_MSC_VER > 1900) // C++11 + #define mi_attr_noexcept noexcept + #else + #define mi_attr_noexcept throw() + #endif +#else + #define mi_attr_noexcept +#endif + +#if defined(__cplusplus) && (__cplusplus >= 201703) + #define mi_decl_nodiscard [[nodiscard]] +#elif (defined(__GNUC__) && (__GNUC__ >= 4)) || defined(__clang__) // includes clang, icc, and clang-cl + #define mi_decl_nodiscard __attribute__((warn_unused_result)) +#elif defined(_HAS_NODISCARD) + #define mi_decl_nodiscard _NODISCARD +#elif (_MSC_VER >= 1700) + #define mi_decl_nodiscard _Check_return_ +#else + #define mi_decl_nodiscard +#endif + +#if defined(_MSC_VER) || defined(__MINGW32__) + #if !defined(MI_SHARED_LIB) + #define mi_decl_export + #elif defined(MI_SHARED_LIB_EXPORT) + #define mi_decl_export __declspec(dllexport) + #else + #define mi_decl_export 
__declspec(dllimport) + #endif + #if defined(__MINGW32__) + #define mi_decl_restrict + #define mi_attr_malloc __attribute__((malloc)) + #else + #if (_MSC_VER >= 1900) && !defined(__EDG__) + #define mi_decl_restrict __declspec(allocator) __declspec(restrict) + #else + #define mi_decl_restrict __declspec(restrict) + #endif + #define mi_attr_malloc + #endif + #define mi_cdecl __cdecl + #define mi_attr_alloc_size(s) + #define mi_attr_alloc_size2(s1,s2) + #define mi_attr_alloc_align(p) +#elif defined(__GNUC__) // includes clang and icc + #if defined(MI_SHARED_LIB) && defined(MI_SHARED_LIB_EXPORT) + #define mi_decl_export __attribute__((visibility("default"))) + #else + #define mi_decl_export + #endif + #define mi_cdecl // leads to warnings... __attribute__((cdecl)) + #define mi_decl_restrict + #define mi_attr_malloc __attribute__((malloc)) + #if (defined(__clang_major__) && (__clang_major__ < 4)) || (__GNUC__ < 5) + #define mi_attr_alloc_size(s) + #define mi_attr_alloc_size2(s1,s2) + #define mi_attr_alloc_align(p) + #elif defined(__INTEL_COMPILER) + #define mi_attr_alloc_size(s) __attribute__((alloc_size(s))) + #define mi_attr_alloc_size2(s1,s2) __attribute__((alloc_size(s1,s2))) + #define mi_attr_alloc_align(p) + #else + #define mi_attr_alloc_size(s) __attribute__((alloc_size(s))) + #define mi_attr_alloc_size2(s1,s2) __attribute__((alloc_size(s1,s2))) + #define mi_attr_alloc_align(p) __attribute__((alloc_align(p))) + #endif +#else + #define mi_cdecl + #define mi_decl_export + #define mi_decl_restrict + #define mi_attr_malloc + #define mi_attr_alloc_size(s) + #define mi_attr_alloc_size2(s1,s2) + #define mi_attr_alloc_align(p) +#endif + +// ------------------------------------------------------ +// Includes +// ------------------------------------------------------ + +#include // size_t +#include // bool +#include // INTPTR_MAX + +#ifdef __cplusplus +extern "C" { +#endif + +// ------------------------------------------------------ +// Standard malloc interface +// 
------------------------------------------------------ + +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_malloc(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_calloc(size_t count, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(1,2); +mi_decl_nodiscard mi_decl_export void* mi_realloc(void* p, size_t newsize) mi_attr_noexcept mi_attr_alloc_size(2); +mi_decl_export void* mi_expand(void* p, size_t newsize) mi_attr_noexcept mi_attr_alloc_size(2); + +mi_decl_export void mi_free(void* p) mi_attr_noexcept; +mi_decl_nodiscard mi_decl_export mi_decl_restrict char* mi_strdup(const char* s) mi_attr_noexcept mi_attr_malloc; +mi_decl_nodiscard mi_decl_export mi_decl_restrict char* mi_strndup(const char* s, size_t n) mi_attr_noexcept mi_attr_malloc; +mi_decl_nodiscard mi_decl_export mi_decl_restrict char* mi_realpath(const char* fname, char* resolved_name) mi_attr_noexcept mi_attr_malloc; + +// ------------------------------------------------------ +// Extended functionality +// ------------------------------------------------------ +#define MI_SMALL_WSIZE_MAX (128) +#define MI_SMALL_SIZE_MAX (MI_SMALL_WSIZE_MAX*sizeof(void*)) + +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_malloc_small(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_zalloc_small(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_zalloc(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1); + +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_mallocn(size_t count, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(1,2); +mi_decl_nodiscard mi_decl_export void* mi_reallocn(void* p, size_t count, size_t size) mi_attr_noexcept mi_attr_alloc_size2(2,3); +mi_decl_nodiscard mi_decl_export void* 
mi_reallocf(void* p, size_t newsize) mi_attr_noexcept mi_attr_alloc_size(2); + +mi_decl_nodiscard mi_decl_export size_t mi_usable_size(const void* p) mi_attr_noexcept; +mi_decl_nodiscard mi_decl_export size_t mi_good_size(size_t size) mi_attr_noexcept; + + +// ------------------------------------------------------ +// Internals +// ------------------------------------------------------ + +typedef void (mi_cdecl mi_deferred_free_fun)(bool force, unsigned long long heartbeat, void* arg); +mi_decl_export void mi_register_deferred_free(mi_deferred_free_fun* deferred_free, void* arg) mi_attr_noexcept; + +typedef void (mi_cdecl mi_output_fun)(const char* msg, void* arg); +mi_decl_export void mi_register_output(mi_output_fun* out, void* arg) mi_attr_noexcept; + +typedef void (mi_cdecl mi_error_fun)(int err, void* arg); +mi_decl_export void mi_register_error(mi_error_fun* fun, void* arg); + +mi_decl_export void mi_collect(bool force) mi_attr_noexcept; +mi_decl_export int mi_version(void) mi_attr_noexcept; +mi_decl_export void mi_stats_reset(void) mi_attr_noexcept; +mi_decl_export void mi_stats_merge(void) mi_attr_noexcept; +mi_decl_export void mi_stats_print(void* out) mi_attr_noexcept; // backward compatibility: `out` is ignored and should be NULL +mi_decl_export void mi_stats_print_out(mi_output_fun* out, void* arg) mi_attr_noexcept; + +mi_decl_export void mi_process_init(void) mi_attr_noexcept; +mi_decl_export void mi_thread_init(void) mi_attr_noexcept; +mi_decl_export void mi_thread_done(void) mi_attr_noexcept; +mi_decl_export void mi_thread_stats_print_out(mi_output_fun* out, void* arg) mi_attr_noexcept; + +mi_decl_export void mi_process_info(size_t* elapsed_msecs, size_t* user_msecs, size_t* system_msecs, + size_t* current_rss, size_t* peak_rss, + size_t* current_commit, size_t* peak_commit, size_t* page_faults) mi_attr_noexcept; + +// ------------------------------------------------------------------------------------- +// Aligned allocation +// Note that 
`alignment` always follows `size` for consistency with unaligned +// allocation, but unfortunately this differs from `posix_memalign` and `aligned_alloc`. +// ------------------------------------------------------------------------------------- + +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_malloc_aligned(size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1) mi_attr_alloc_align(2); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_malloc_aligned_at(size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_zalloc_aligned(size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1) mi_attr_alloc_align(2); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_zalloc_aligned_at(size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_calloc_aligned(size_t count, size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(1,2) mi_attr_alloc_align(3); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_calloc_aligned_at(size_t count, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(1,2); +mi_decl_nodiscard mi_decl_export void* mi_realloc_aligned(void* p, size_t newsize, size_t alignment) mi_attr_noexcept mi_attr_alloc_size(2) mi_attr_alloc_align(3); +mi_decl_nodiscard mi_decl_export void* mi_realloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_alloc_size(2); + + +// ------------------------------------------------------------------------------------- +// Heaps: first-class, but can only allocate from the same thread that created it. 
+// ------------------------------------------------------------------------------------- + +struct mi_heap_s; +typedef struct mi_heap_s mi_heap_t; + +mi_decl_nodiscard mi_decl_export mi_heap_t* mi_heap_new(void); +mi_decl_export void mi_heap_delete(mi_heap_t* heap); +mi_decl_export void mi_heap_destroy(mi_heap_t* heap); +mi_decl_export mi_heap_t* mi_heap_set_default(mi_heap_t* heap); +mi_decl_export mi_heap_t* mi_heap_get_default(void); +mi_decl_export mi_heap_t* mi_heap_get_backing(void); +mi_decl_export void mi_heap_collect(mi_heap_t* heap, bool force) mi_attr_noexcept; + +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_malloc(mi_heap_t* heap, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_zalloc(mi_heap_t* heap, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_calloc(mi_heap_t* heap, size_t count, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(2, 3); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_mallocn(mi_heap_t* heap, size_t count, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(2, 3); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_malloc_small(mi_heap_t* heap, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2); + +mi_decl_nodiscard mi_decl_export void* mi_heap_realloc(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept mi_attr_alloc_size(3); +mi_decl_nodiscard mi_decl_export void* mi_heap_reallocn(mi_heap_t* heap, void* p, size_t count, size_t size) mi_attr_noexcept mi_attr_alloc_size2(3,4); +mi_decl_nodiscard mi_decl_export void* mi_heap_reallocf(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept mi_attr_alloc_size(3); + +mi_decl_nodiscard mi_decl_export mi_decl_restrict char* mi_heap_strdup(mi_heap_t* heap, const char* s) mi_attr_noexcept mi_attr_malloc; +mi_decl_nodiscard 
mi_decl_export mi_decl_restrict char* mi_heap_strndup(mi_heap_t* heap, const char* s, size_t n) mi_attr_noexcept mi_attr_malloc; +mi_decl_nodiscard mi_decl_export mi_decl_restrict char* mi_heap_realpath(mi_heap_t* heap, const char* fname, char* resolved_name) mi_attr_noexcept mi_attr_malloc; + +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_malloc_aligned(mi_heap_t* heap, size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2) mi_attr_alloc_align(3); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_malloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_zalloc_aligned(mi_heap_t* heap, size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2) mi_attr_alloc_align(3); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_zalloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_calloc_aligned(mi_heap_t* heap, size_t count, size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(2, 3) mi_attr_alloc_align(4); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_calloc_aligned_at(mi_heap_t* heap, size_t count, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(2, 3); +mi_decl_nodiscard mi_decl_export void* mi_heap_realloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment) mi_attr_noexcept mi_attr_alloc_size(3) mi_attr_alloc_align(4); +mi_decl_nodiscard mi_decl_export void* mi_heap_realloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_alloc_size(3); + + +// 
-------------------------------------------------------------------------------- +// Zero initialized re-allocation. +// Only valid on memory that was originally allocated with zero initialization too. +// e.g. `mi_calloc`, `mi_zalloc`, `mi_zalloc_aligned` etc. +// see +// -------------------------------------------------------------------------------- + +mi_decl_nodiscard mi_decl_export void* mi_rezalloc(void* p, size_t newsize) mi_attr_noexcept mi_attr_alloc_size(2); +mi_decl_nodiscard mi_decl_export void* mi_recalloc(void* p, size_t newcount, size_t size) mi_attr_noexcept mi_attr_alloc_size2(2,3); + +mi_decl_nodiscard mi_decl_export void* mi_rezalloc_aligned(void* p, size_t newsize, size_t alignment) mi_attr_noexcept mi_attr_alloc_size(2) mi_attr_alloc_align(3); +mi_decl_nodiscard mi_decl_export void* mi_rezalloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_alloc_size(2); +mi_decl_nodiscard mi_decl_export void* mi_recalloc_aligned(void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept mi_attr_alloc_size2(2,3) mi_attr_alloc_align(4); +mi_decl_nodiscard mi_decl_export void* mi_recalloc_aligned_at(void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_alloc_size2(2,3); + +mi_decl_nodiscard mi_decl_export void* mi_heap_rezalloc(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept mi_attr_alloc_size(3); +mi_decl_nodiscard mi_decl_export void* mi_heap_recalloc(mi_heap_t* heap, void* p, size_t newcount, size_t size) mi_attr_noexcept mi_attr_alloc_size2(3,4); + +mi_decl_nodiscard mi_decl_export void* mi_heap_rezalloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment) mi_attr_noexcept mi_attr_alloc_size(3) mi_attr_alloc_align(4); +mi_decl_nodiscard mi_decl_export void* mi_heap_rezalloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_alloc_size(3); +mi_decl_nodiscard 
mi_decl_export void* mi_heap_recalloc_aligned(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept mi_attr_alloc_size2(3,4) mi_attr_alloc_align(5); +mi_decl_nodiscard mi_decl_export void* mi_heap_recalloc_aligned_at(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_alloc_size2(3,4); + + +// ------------------------------------------------------ +// Analysis +// ------------------------------------------------------ + +mi_decl_export bool mi_heap_contains_block(mi_heap_t* heap, const void* p); +mi_decl_export bool mi_heap_check_owned(mi_heap_t* heap, const void* p); +mi_decl_export bool mi_check_owned(const void* p); + +// An area of heap space contains blocks of a single size. +typedef struct mi_heap_area_s { + void* blocks; // start of the area containing heap blocks + size_t reserved; // bytes reserved for this area (virtual) + size_t committed; // current available bytes for this area + size_t used; // number of allocated blocks + size_t block_size; // size in bytes of each block + size_t full_block_size; // size in bytes of a full block including padding and metadata. 
+} mi_heap_area_t; + +typedef bool (mi_cdecl mi_block_visit_fun)(const mi_heap_t* heap, const mi_heap_area_t* area, void* block, size_t block_size, void* arg); + +mi_decl_export bool mi_heap_visit_blocks(const mi_heap_t* heap, bool visit_all_blocks, mi_block_visit_fun* visitor, void* arg); + +// Experimental +mi_decl_nodiscard mi_decl_export bool mi_is_in_heap_region(const void* p) mi_attr_noexcept; +mi_decl_nodiscard mi_decl_export bool mi_is_redirected(void) mi_attr_noexcept; + +mi_decl_export int mi_reserve_huge_os_pages_interleave(size_t pages, size_t numa_nodes, size_t timeout_msecs) mi_attr_noexcept; +mi_decl_export int mi_reserve_huge_os_pages_at(size_t pages, int numa_node, size_t timeout_msecs) mi_attr_noexcept; + +mi_decl_export int mi_reserve_os_memory(size_t size, bool commit, bool allow_large) mi_attr_noexcept; +mi_decl_export bool mi_manage_os_memory(void* start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node) mi_attr_noexcept; + +mi_decl_export void mi_debug_show_arenas(bool show_inuse, bool show_abandoned, bool show_purge) mi_attr_noexcept; + +// Experimental: heaps associated with specific memory arena's +typedef int mi_arena_id_t; +mi_decl_export void* mi_arena_area(mi_arena_id_t arena_id, size_t* size); +mi_decl_export int mi_reserve_huge_os_pages_at_ex(size_t pages, int numa_node, size_t timeout_msecs, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept; +mi_decl_export int mi_reserve_os_memory_ex(size_t size, bool commit, bool allow_large, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept; +mi_decl_export bool mi_manage_os_memory_ex(void* start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept; + +#if MI_MALLOC_VERSION >= 182 +// Create a heap that only allocates in the specified arena +mi_decl_nodiscard mi_decl_export mi_heap_t* mi_heap_new_in_arena(mi_arena_id_t arena_id); +#endif + +// deprecated +mi_decl_export int 
mi_reserve_huge_os_pages(size_t pages, double max_secs, size_t* pages_reserved) mi_attr_noexcept; + + +// ------------------------------------------------------ +// Convenience +// ------------------------------------------------------ + +#define mi_malloc_tp(tp) ((tp*)mi_malloc(sizeof(tp))) +#define mi_zalloc_tp(tp) ((tp*)mi_zalloc(sizeof(tp))) +#define mi_calloc_tp(tp,n) ((tp*)mi_calloc(n,sizeof(tp))) +#define mi_mallocn_tp(tp,n) ((tp*)mi_mallocn(n,sizeof(tp))) +#define mi_reallocn_tp(p,tp,n) ((tp*)mi_reallocn(p,n,sizeof(tp))) +#define mi_recalloc_tp(p,tp,n) ((tp*)mi_recalloc(p,n,sizeof(tp))) + +#define mi_heap_malloc_tp(hp,tp) ((tp*)mi_heap_malloc(hp,sizeof(tp))) +#define mi_heap_zalloc_tp(hp,tp) ((tp*)mi_heap_zalloc(hp,sizeof(tp))) +#define mi_heap_calloc_tp(hp,tp,n) ((tp*)mi_heap_calloc(hp,n,sizeof(tp))) +#define mi_heap_mallocn_tp(hp,tp,n) ((tp*)mi_heap_mallocn(hp,n,sizeof(tp))) +#define mi_heap_reallocn_tp(hp,p,tp,n) ((tp*)mi_heap_reallocn(hp,p,n,sizeof(tp))) +#define mi_heap_recalloc_tp(hp,p,tp,n) ((tp*)mi_heap_recalloc(hp,p,n,sizeof(tp))) + + +// ------------------------------------------------------ +// Options +// ------------------------------------------------------ + +typedef enum mi_option_e { + // stable options + mi_option_show_errors, // print error messages + mi_option_show_stats, // print statistics on termination + mi_option_verbose, // print verbose messages + // advanced options + mi_option_eager_commit, // eager commit segments? (after `eager_commit_delay` segments) (=1) + mi_option_arena_eager_commit, // eager commit arenas? Use 2 to enable just on overcommit systems (=2) + mi_option_purge_decommits, // should a memory purge decommit? (=1). Set to 0 to use memory reset on a purge (instead of decommit) + mi_option_allow_large_os_pages, // allow large (2 or 4 MiB) OS pages, implies eager commit. If false, also disables THP for the process. 
+ mi_option_reserve_huge_os_pages, // reserve N huge OS pages (1GiB pages) at startup + mi_option_reserve_huge_os_pages_at, // reserve huge OS pages at a specific NUMA node + mi_option_reserve_os_memory, // reserve specified amount of OS memory in an arena at startup (internally, this value is in KiB; use `mi_option_get_size`) + mi_option_deprecated_segment_cache, + mi_option_deprecated_page_reset, + mi_option_abandoned_page_purge, // immediately purge delayed purges on thread termination + mi_option_deprecated_segment_reset, + mi_option_eager_commit_delay, // the first N segments per thread are not eagerly committed (but per page in the segment on demand) + mi_option_purge_delay, // memory purging is delayed by N milli seconds; use 0 for immediate purging or -1 for no purging at all. (=10) + mi_option_use_numa_nodes, // 0 = use all available numa nodes, otherwise use at most N nodes. + mi_option_disallow_os_alloc, // 1 = do not use OS memory for allocation (but only programmatically reserved arenas) + mi_option_os_tag, // tag used for OS logging (macOS only for now) (=100) + mi_option_max_errors, // issue at most N error messages + mi_option_max_warnings, // issue at most N warning messages + mi_option_max_segment_reclaim, // max. 
percentage of the abandoned segments can be reclaimed per try (=10%) + mi_option_destroy_on_exit, // if set, release all memory on exit; sometimes used for dynamic unloading but can be unsafe + mi_option_arena_reserve, // initial memory size for arena reservation (= 1 GiB on 64-bit) (internally, this value is in KiB; use `mi_option_get_size`) + mi_option_arena_purge_mult, // multiplier for `purge_delay` for the purging delay for arenas (=10) + mi_option_purge_extend_delay, + mi_option_abandoned_reclaim_on_free, // allow to reclaim an abandoned segment on a free (=1) + mi_option_disallow_arena_alloc, // 1 = do not use arena's for allocation (except if using specific arena id's) + mi_option_retry_on_oom, // retry on out-of-memory for N milli seconds (=400), set to 0 to disable retries. (only on windows) + _mi_option_last, + // legacy option names + mi_option_large_os_pages = mi_option_allow_large_os_pages, + mi_option_eager_region_commit = mi_option_arena_eager_commit, + mi_option_reset_decommits = mi_option_purge_decommits, + mi_option_reset_delay = mi_option_purge_delay, + mi_option_abandoned_page_reset = mi_option_abandoned_page_purge, + mi_option_limit_os_alloc = mi_option_disallow_os_alloc +} mi_option_t; + + +mi_decl_nodiscard mi_decl_export bool mi_option_is_enabled(mi_option_t option); +mi_decl_export void mi_option_enable(mi_option_t option); +mi_decl_export void mi_option_disable(mi_option_t option); +mi_decl_export void mi_option_set_enabled(mi_option_t option, bool enable); +mi_decl_export void mi_option_set_enabled_default(mi_option_t option, bool enable); + +mi_decl_nodiscard mi_decl_export long mi_option_get(mi_option_t option); +mi_decl_nodiscard mi_decl_export long mi_option_get_clamp(mi_option_t option, long min, long max); +mi_decl_nodiscard mi_decl_export size_t mi_option_get_size(mi_option_t option); +mi_decl_export void mi_option_set(mi_option_t option, long value); +mi_decl_export void mi_option_set_default(mi_option_t option, long value); + + 
+// ------------------------------------------------------------------------------------------------------- +// "mi" prefixed implementations of various posix, Unix, Windows, and C++ allocation functions. +// (This can be convenient when providing overrides of these functions as done in `mimalloc-override.h`.) +// note: we use `mi_cfree` as "checked free" and it checks if the pointer is in our heap before free-ing. +// ------------------------------------------------------------------------------------------------------- + +mi_decl_export void mi_cfree(void* p) mi_attr_noexcept; +mi_decl_export void* mi__expand(void* p, size_t newsize) mi_attr_noexcept; +mi_decl_nodiscard mi_decl_export size_t mi_malloc_size(const void* p) mi_attr_noexcept; +mi_decl_nodiscard mi_decl_export size_t mi_malloc_good_size(size_t size) mi_attr_noexcept; +mi_decl_nodiscard mi_decl_export size_t mi_malloc_usable_size(const void *p) mi_attr_noexcept; + +mi_decl_export int mi_posix_memalign(void** p, size_t alignment, size_t size) mi_attr_noexcept; +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_memalign(size_t alignment, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2) mi_attr_alloc_align(1); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_valloc(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_pvalloc(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_aligned_alloc(size_t alignment, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2) mi_attr_alloc_align(1); + +mi_decl_nodiscard mi_decl_export void* mi_reallocarray(void* p, size_t count, size_t size) mi_attr_noexcept mi_attr_alloc_size2(2,3); +mi_decl_nodiscard mi_decl_export int mi_reallocarr(void* p, size_t count, size_t size) mi_attr_noexcept; +mi_decl_nodiscard mi_decl_export void* mi_aligned_recalloc(void* p, size_t newcount, 
size_t size, size_t alignment) mi_attr_noexcept; +mi_decl_nodiscard mi_decl_export void* mi_aligned_offset_recalloc(void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept; + +mi_decl_nodiscard mi_decl_export mi_decl_restrict unsigned short* mi_wcsdup(const unsigned short* s) mi_attr_noexcept mi_attr_malloc; +mi_decl_nodiscard mi_decl_export mi_decl_restrict unsigned char* mi_mbsdup(const unsigned char* s) mi_attr_noexcept mi_attr_malloc; +mi_decl_export int mi_dupenv_s(char** buf, size_t* size, const char* name) mi_attr_noexcept; +mi_decl_export int mi_wdupenv_s(unsigned short** buf, size_t* size, const unsigned short* name) mi_attr_noexcept; + +mi_decl_export void mi_free_size(void* p, size_t size) mi_attr_noexcept; +mi_decl_export void mi_free_size_aligned(void* p, size_t size, size_t alignment) mi_attr_noexcept; +mi_decl_export void mi_free_aligned(void* p, size_t alignment) mi_attr_noexcept; + +// The `mi_new` wrappers implement C++ semantics on out-of-memory instead of directly returning `NULL`. +// (and call `std::get_new_handler` and potentially raise a `std::bad_alloc` exception). 
+mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_new(size_t size) mi_attr_malloc mi_attr_alloc_size(1); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_new_aligned(size_t size, size_t alignment) mi_attr_malloc mi_attr_alloc_size(1) mi_attr_alloc_align(2); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_new_nothrow(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_new_aligned_nothrow(size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1) mi_attr_alloc_align(2); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_new_n(size_t count, size_t size) mi_attr_malloc mi_attr_alloc_size2(1, 2); +mi_decl_nodiscard mi_decl_export void* mi_new_realloc(void* p, size_t newsize) mi_attr_alloc_size(2); +mi_decl_nodiscard mi_decl_export void* mi_new_reallocn(void* p, size_t newcount, size_t size) mi_attr_alloc_size2(2, 3); + +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_alloc_new(mi_heap_t* heap, size_t size) mi_attr_malloc mi_attr_alloc_size(2); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_alloc_new_n(mi_heap_t* heap, size_t count, size_t size) mi_attr_malloc mi_attr_alloc_size2(2, 3); + +#ifdef __cplusplus +} +#endif + +// --------------------------------------------------------------------------------------------- +// Implement the C++ std::allocator interface for use in STL containers. 
+// (note: see `mimalloc-new-delete.h` for overriding the new/delete operators globally) +// --------------------------------------------------------------------------------------------- +#ifdef __cplusplus + +#include // std::size_t +#include // PTRDIFF_MAX +#if (__cplusplus >= 201103L) || (_MSC_VER > 1900) // C++11 +#include // std::true_type +#include // std::forward +#endif + +template struct _mi_stl_allocator_common { + typedef T value_type; + typedef std::size_t size_type; + typedef std::ptrdiff_t difference_type; + typedef value_type& reference; + typedef value_type const& const_reference; + typedef value_type* pointer; + typedef value_type const* const_pointer; + + #if ((__cplusplus >= 201103L) || (_MSC_VER > 1900)) // C++11 + using propagate_on_container_copy_assignment = std::true_type; + using propagate_on_container_move_assignment = std::true_type; + using propagate_on_container_swap = std::true_type; + template void construct(U* p, Args&& ...args) { ::new(p) U(std::forward(args)...); } + template void destroy(U* p) mi_attr_noexcept { p->~U(); } + #else + void construct(pointer p, value_type const& val) { ::new(p) value_type(val); } + void destroy(pointer p) { p->~value_type(); } + #endif + + size_type max_size() const mi_attr_noexcept { return (PTRDIFF_MAX/sizeof(value_type)); } + pointer address(reference x) const { return &x; } + const_pointer address(const_reference x) const { return &x; } +}; + +template struct mi_stl_allocator : public _mi_stl_allocator_common { + using typename _mi_stl_allocator_common::size_type; + using typename _mi_stl_allocator_common::value_type; + using typename _mi_stl_allocator_common::pointer; + template struct rebind { typedef mi_stl_allocator other; }; + + mi_stl_allocator() mi_attr_noexcept = default; + mi_stl_allocator(const mi_stl_allocator&) mi_attr_noexcept = default; + template mi_stl_allocator(const mi_stl_allocator&) mi_attr_noexcept { } + mi_stl_allocator select_on_container_copy_construction() const { return 
*this; } + void deallocate(T* p, size_type) { mi_free(p); } + + #if (__cplusplus >= 201703L) // C++17 + mi_decl_nodiscard T* allocate(size_type count) { return static_cast(mi_new_n(count, sizeof(T))); } + mi_decl_nodiscard T* allocate(size_type count, const void*) { return allocate(count); } + #else + mi_decl_nodiscard pointer allocate(size_type count, const void* = 0) { return static_cast(mi_new_n(count, sizeof(value_type))); } + #endif + + #if ((__cplusplus >= 201103L) || (_MSC_VER > 1900)) // C++11 + using is_always_equal = std::true_type; + #endif +}; + +template bool operator==(const mi_stl_allocator& , const mi_stl_allocator& ) mi_attr_noexcept { return true; } +template bool operator!=(const mi_stl_allocator& , const mi_stl_allocator& ) mi_attr_noexcept { return false; } + + +#if (__cplusplus >= 201103L) || (_MSC_VER >= 1900) // C++11 +#define MI_HAS_HEAP_STL_ALLOCATOR 1 + +#include // std::shared_ptr + +// Common base class for STL allocators in a specific heap +template struct _mi_heap_stl_allocator_common : public _mi_stl_allocator_common { + using typename _mi_stl_allocator_common::size_type; + using typename _mi_stl_allocator_common::value_type; + using typename _mi_stl_allocator_common::pointer; + + _mi_heap_stl_allocator_common(mi_heap_t* hp) : heap(hp, [](mi_heap_t*) {}) {} /* will not delete nor destroy the passed in heap */ + + #if (__cplusplus >= 201703L) // C++17 + mi_decl_nodiscard T* allocate(size_type count) { return static_cast(mi_heap_alloc_new_n(this->heap.get(), count, sizeof(T))); } + mi_decl_nodiscard T* allocate(size_type count, const void*) { return allocate(count); } + #else + mi_decl_nodiscard pointer allocate(size_type count, const void* = 0) { return static_cast(mi_heap_alloc_new_n(this->heap.get(), count, sizeof(value_type))); } + #endif + + #if ((__cplusplus >= 201103L) || (_MSC_VER > 1900)) // C++11 + using is_always_equal = std::false_type; + #endif + + void collect(bool force) { mi_heap_collect(this->heap.get(), force); } + 
template bool is_equal(const _mi_heap_stl_allocator_common& x) const { return (this->heap == x.heap); } + +protected: + std::shared_ptr heap; + template friend struct _mi_heap_stl_allocator_common; + + _mi_heap_stl_allocator_common() { + mi_heap_t* hp = mi_heap_new(); + this->heap.reset(hp, (_mi_destroy ? &heap_destroy : &heap_delete)); /* calls heap_delete/destroy when the refcount drops to zero */ + } + _mi_heap_stl_allocator_common(const _mi_heap_stl_allocator_common& x) mi_attr_noexcept : heap(x.heap) { } + template _mi_heap_stl_allocator_common(const _mi_heap_stl_allocator_common& x) mi_attr_noexcept : heap(x.heap) { } + +private: + static void heap_delete(mi_heap_t* hp) { if (hp != NULL) { mi_heap_delete(hp); } } + static void heap_destroy(mi_heap_t* hp) { if (hp != NULL) { mi_heap_destroy(hp); } } +}; + +// STL allocator allocation in a specific heap +template struct mi_heap_stl_allocator : public _mi_heap_stl_allocator_common { + using typename _mi_heap_stl_allocator_common::size_type; + mi_heap_stl_allocator() : _mi_heap_stl_allocator_common() { } // creates fresh heap that is deleted when the destructor is called + mi_heap_stl_allocator(mi_heap_t* hp) : _mi_heap_stl_allocator_common(hp) { } // no delete nor destroy on the passed in heap + template mi_heap_stl_allocator(const mi_heap_stl_allocator& x) mi_attr_noexcept : _mi_heap_stl_allocator_common(x) { } + + mi_heap_stl_allocator select_on_container_copy_construction() const { return *this; } + void deallocate(T* p, size_type) { mi_free(p); } + template struct rebind { typedef mi_heap_stl_allocator other; }; +}; + +template bool operator==(const mi_heap_stl_allocator& x, const mi_heap_stl_allocator& y) mi_attr_noexcept { return (x.is_equal(y)); } +template bool operator!=(const mi_heap_stl_allocator& x, const mi_heap_stl_allocator& y) mi_attr_noexcept { return (!x.is_equal(y)); } + + +// STL allocator allocation in a specific heap, where `free` does nothing and +// the heap is destroyed in one go on 
destruction -- use with care! +template struct mi_heap_destroy_stl_allocator : public _mi_heap_stl_allocator_common { + using typename _mi_heap_stl_allocator_common::size_type; + mi_heap_destroy_stl_allocator() : _mi_heap_stl_allocator_common() { } // creates fresh heap that is destroyed when the destructor is called + mi_heap_destroy_stl_allocator(mi_heap_t* hp) : _mi_heap_stl_allocator_common(hp) { } // no delete nor destroy on the passed in heap + template mi_heap_destroy_stl_allocator(const mi_heap_destroy_stl_allocator& x) mi_attr_noexcept : _mi_heap_stl_allocator_common(x) { } + + mi_heap_destroy_stl_allocator select_on_container_copy_construction() const { return *this; } + void deallocate(T*, size_type) { /* do nothing as we destroy the heap on destruct. */ } + template struct rebind { typedef mi_heap_destroy_stl_allocator other; }; +}; + +template bool operator==(const mi_heap_destroy_stl_allocator& x, const mi_heap_destroy_stl_allocator& y) mi_attr_noexcept { return (x.is_equal(y)); } +template bool operator!=(const mi_heap_destroy_stl_allocator& x, const mi_heap_destroy_stl_allocator& y) mi_attr_noexcept { return (!x.is_equal(y)); } + +#endif // C++11 + +#endif // __cplusplus + +#endif diff --git a/yass/third_party/mimalloc/include/mimalloc/atomic.h b/yass/third_party/mimalloc/include/mimalloc/atomic.h new file mode 100644 index 0000000000..d5333dd90f --- /dev/null +++ b/yass/third_party/mimalloc/include/mimalloc/atomic.h @@ -0,0 +1,393 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2018-2023 Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. 
+-----------------------------------------------------------------------------*/ +#pragma once +#ifndef MIMALLOC_ATOMIC_H +#define MIMALLOC_ATOMIC_H + +// -------------------------------------------------------------------------------------------- +// Atomics +// We need to be portable between C, C++, and MSVC. +// We base the primitives on the C/C++ atomics and create a mimimal wrapper for MSVC in C compilation mode. +// This is why we try to use only `uintptr_t` and `*` as atomic types. +// To gain better insight in the range of used atomics, we use explicitly named memory order operations +// instead of passing the memory order as a parameter. +// ----------------------------------------------------------------------------------------------- + +#if defined(__cplusplus) +// Use C++ atomics +#include +#define _Atomic(tp) std::atomic +#define mi_atomic(name) std::atomic_##name +#define mi_memory_order(name) std::memory_order_##name +#if (__cplusplus >= 202002L) // c++20, see issue #571 +#define MI_ATOMIC_VAR_INIT(x) x +#elif !defined(ATOMIC_VAR_INIT) +#define MI_ATOMIC_VAR_INIT(x) x +#else + #define MI_ATOMIC_VAR_INIT(x) ATOMIC_VAR_INIT(x) +#endif +#elif defined(_MSC_VER) +// Use MSVC C wrapper for C11 atomics +#define _Atomic(tp) tp +#define MI_ATOMIC_VAR_INIT(x) x +#define mi_atomic(name) mi_atomic_##name +#define mi_memory_order(name) mi_memory_order_##name +#else +// Use C11 atomics +#include +#define mi_atomic(name) atomic_##name +#define mi_memory_order(name) memory_order_##name +#if (__STDC_VERSION__ >= 201710L) // c17, see issue #735 + #define MI_ATOMIC_VAR_INIT(x) x +#elif !defined(ATOMIC_VAR_INIT) + #define MI_ATOMIC_VAR_INIT(x) x +#else + #define MI_ATOMIC_VAR_INIT(x) ATOMIC_VAR_INIT(x) +#endif +#endif + +// Various defines for all used memory orders in mimalloc +#define mi_atomic_cas_weak(p,expected,desired,mem_success,mem_fail) \ + mi_atomic(compare_exchange_weak_explicit)(p,expected,desired,mem_success,mem_fail) + +#define 
mi_atomic_cas_strong(p,expected,desired,mem_success,mem_fail) \ + mi_atomic(compare_exchange_strong_explicit)(p,expected,desired,mem_success,mem_fail) + +#define mi_atomic_load_acquire(p) mi_atomic(load_explicit)(p,mi_memory_order(acquire)) +#define mi_atomic_load_relaxed(p) mi_atomic(load_explicit)(p,mi_memory_order(relaxed)) +#define mi_atomic_store_release(p,x) mi_atomic(store_explicit)(p,x,mi_memory_order(release)) +#define mi_atomic_store_relaxed(p,x) mi_atomic(store_explicit)(p,x,mi_memory_order(relaxed)) +#define mi_atomic_exchange_release(p,x) mi_atomic(exchange_explicit)(p,x,mi_memory_order(release)) +#define mi_atomic_exchange_acq_rel(p,x) mi_atomic(exchange_explicit)(p,x,mi_memory_order(acq_rel)) +#define mi_atomic_cas_weak_release(p,exp,des) mi_atomic_cas_weak(p,exp,des,mi_memory_order(release),mi_memory_order(relaxed)) +#define mi_atomic_cas_weak_acq_rel(p,exp,des) mi_atomic_cas_weak(p,exp,des,mi_memory_order(acq_rel),mi_memory_order(acquire)) +#define mi_atomic_cas_strong_release(p,exp,des) mi_atomic_cas_strong(p,exp,des,mi_memory_order(release),mi_memory_order(relaxed)) +#define mi_atomic_cas_strong_acq_rel(p,exp,des) mi_atomic_cas_strong(p,exp,des,mi_memory_order(acq_rel),mi_memory_order(acquire)) + +#define mi_atomic_add_relaxed(p,x) mi_atomic(fetch_add_explicit)(p,x,mi_memory_order(relaxed)) +#define mi_atomic_sub_relaxed(p,x) mi_atomic(fetch_sub_explicit)(p,x,mi_memory_order(relaxed)) +#define mi_atomic_add_acq_rel(p,x) mi_atomic(fetch_add_explicit)(p,x,mi_memory_order(acq_rel)) +#define mi_atomic_sub_acq_rel(p,x) mi_atomic(fetch_sub_explicit)(p,x,mi_memory_order(acq_rel)) +#define mi_atomic_and_acq_rel(p,x) mi_atomic(fetch_and_explicit)(p,x,mi_memory_order(acq_rel)) +#define mi_atomic_or_acq_rel(p,x) mi_atomic(fetch_or_explicit)(p,x,mi_memory_order(acq_rel)) + +#define mi_atomic_increment_relaxed(p) mi_atomic_add_relaxed(p,(uintptr_t)1) +#define mi_atomic_decrement_relaxed(p) mi_atomic_sub_relaxed(p,(uintptr_t)1) +#define 
mi_atomic_increment_acq_rel(p) mi_atomic_add_acq_rel(p,(uintptr_t)1) +#define mi_atomic_decrement_acq_rel(p) mi_atomic_sub_acq_rel(p,(uintptr_t)1) + +static inline void mi_atomic_yield(void); +static inline intptr_t mi_atomic_addi(_Atomic(intptr_t)*p, intptr_t add); +static inline intptr_t mi_atomic_subi(_Atomic(intptr_t)*p, intptr_t sub); + + +#if defined(__cplusplus) || !defined(_MSC_VER) + +// In C++/C11 atomics we have polymorphic atomics so can use the typed `ptr` variants (where `tp` is the type of atomic value) +// We use these macros so we can provide a typed wrapper in MSVC in C compilation mode as well +#define mi_atomic_load_ptr_acquire(tp,p) mi_atomic_load_acquire(p) +#define mi_atomic_load_ptr_relaxed(tp,p) mi_atomic_load_relaxed(p) + +// In C++ we need to add casts to help resolve templates if NULL is passed +#if defined(__cplusplus) +#define mi_atomic_store_ptr_release(tp,p,x) mi_atomic_store_release(p,(tp*)x) +#define mi_atomic_store_ptr_relaxed(tp,p,x) mi_atomic_store_relaxed(p,(tp*)x) +#define mi_atomic_cas_ptr_weak_release(tp,p,exp,des) mi_atomic_cas_weak_release(p,exp,(tp*)des) +#define mi_atomic_cas_ptr_weak_acq_rel(tp,p,exp,des) mi_atomic_cas_weak_acq_rel(p,exp,(tp*)des) +#define mi_atomic_cas_ptr_strong_release(tp,p,exp,des) mi_atomic_cas_strong_release(p,exp,(tp*)des) +#define mi_atomic_exchange_ptr_release(tp,p,x) mi_atomic_exchange_release(p,(tp*)x) +#define mi_atomic_exchange_ptr_acq_rel(tp,p,x) mi_atomic_exchange_acq_rel(p,(tp*)x) +#else +#define mi_atomic_store_ptr_release(tp,p,x) mi_atomic_store_release(p,x) +#define mi_atomic_store_ptr_relaxed(tp,p,x) mi_atomic_store_relaxed(p,x) +#define mi_atomic_cas_ptr_weak_release(tp,p,exp,des) mi_atomic_cas_weak_release(p,exp,des) +#define mi_atomic_cas_ptr_weak_acq_rel(tp,p,exp,des) mi_atomic_cas_weak_acq_rel(p,exp,des) +#define mi_atomic_cas_ptr_strong_release(tp,p,exp,des) mi_atomic_cas_strong_release(p,exp,des) +#define mi_atomic_exchange_ptr_release(tp,p,x) mi_atomic_exchange_release(p,x) 
+#define mi_atomic_exchange_ptr_acq_rel(tp,p,x) mi_atomic_exchange_acq_rel(p,x) +#endif + +// These are used by the statistics +static inline int64_t mi_atomic_addi64_relaxed(volatile int64_t* p, int64_t add) { + return mi_atomic(fetch_add_explicit)((_Atomic(int64_t)*)p, add, mi_memory_order(relaxed)); +} +static inline void mi_atomic_maxi64_relaxed(volatile int64_t* p, int64_t x) { + int64_t current = mi_atomic_load_relaxed((_Atomic(int64_t)*)p); + while (current < x && !mi_atomic_cas_weak_release((_Atomic(int64_t)*)p, ¤t, x)) { /* nothing */ }; +} + +// Used by timers +#define mi_atomic_loadi64_acquire(p) mi_atomic(load_explicit)(p,mi_memory_order(acquire)) +#define mi_atomic_loadi64_relaxed(p) mi_atomic(load_explicit)(p,mi_memory_order(relaxed)) +#define mi_atomic_storei64_release(p,x) mi_atomic(store_explicit)(p,x,mi_memory_order(release)) +#define mi_atomic_storei64_relaxed(p,x) mi_atomic(store_explicit)(p,x,mi_memory_order(relaxed)) + +#define mi_atomic_casi64_strong_acq_rel(p,e,d) mi_atomic_cas_strong_acq_rel(p,e,d) +#define mi_atomic_addi64_acq_rel(p,i) mi_atomic_add_acq_rel(p,i) + + +#elif defined(_MSC_VER) + +// Legacy MSVC plain C compilation wrapper that uses Interlocked operations to model C11 atomics. 
+#ifndef WIN32_LEAN_AND_MEAN +#define WIN32_LEAN_AND_MEAN +#endif +#include +#include +#ifdef _WIN64 +typedef LONG64 msc_intptr_t; +#define MI_64(f) f##64 +#else +typedef LONG msc_intptr_t; +#define MI_64(f) f +#endif + +typedef enum mi_memory_order_e { + mi_memory_order_relaxed, + mi_memory_order_consume, + mi_memory_order_acquire, + mi_memory_order_release, + mi_memory_order_acq_rel, + mi_memory_order_seq_cst +} mi_memory_order; + +static inline uintptr_t mi_atomic_fetch_add_explicit(_Atomic(uintptr_t)*p, uintptr_t add, mi_memory_order mo) { + (void)(mo); + return (uintptr_t)MI_64(_InterlockedExchangeAdd)((volatile msc_intptr_t*)p, (msc_intptr_t)add); +} +static inline uintptr_t mi_atomic_fetch_sub_explicit(_Atomic(uintptr_t)*p, uintptr_t sub, mi_memory_order mo) { + (void)(mo); + return (uintptr_t)MI_64(_InterlockedExchangeAdd)((volatile msc_intptr_t*)p, -((msc_intptr_t)sub)); +} +static inline uintptr_t mi_atomic_fetch_and_explicit(_Atomic(uintptr_t)*p, uintptr_t x, mi_memory_order mo) { + (void)(mo); + return (uintptr_t)MI_64(_InterlockedAnd)((volatile msc_intptr_t*)p, (msc_intptr_t)x); +} +static inline uintptr_t mi_atomic_fetch_or_explicit(_Atomic(uintptr_t)*p, uintptr_t x, mi_memory_order mo) { + (void)(mo); + return (uintptr_t)MI_64(_InterlockedOr)((volatile msc_intptr_t*)p, (msc_intptr_t)x); +} +static inline bool mi_atomic_compare_exchange_strong_explicit(_Atomic(uintptr_t)*p, uintptr_t* expected, uintptr_t desired, mi_memory_order mo1, mi_memory_order mo2) { + (void)(mo1); (void)(mo2); + uintptr_t read = (uintptr_t)MI_64(_InterlockedCompareExchange)((volatile msc_intptr_t*)p, (msc_intptr_t)desired, (msc_intptr_t)(*expected)); + if (read == *expected) { + return true; + } + else { + *expected = read; + return false; + } +} +static inline bool mi_atomic_compare_exchange_weak_explicit(_Atomic(uintptr_t)*p, uintptr_t* expected, uintptr_t desired, mi_memory_order mo1, mi_memory_order mo2) { + return mi_atomic_compare_exchange_strong_explicit(p, expected, 
desired, mo1, mo2); +} +static inline uintptr_t mi_atomic_exchange_explicit(_Atomic(uintptr_t)*p, uintptr_t exchange, mi_memory_order mo) { + (void)(mo); + return (uintptr_t)MI_64(_InterlockedExchange)((volatile msc_intptr_t*)p, (msc_intptr_t)exchange); +} +static inline void mi_atomic_thread_fence(mi_memory_order mo) { + (void)(mo); + _Atomic(uintptr_t) x = 0; + mi_atomic_exchange_explicit(&x, 1, mo); +} +static inline uintptr_t mi_atomic_load_explicit(_Atomic(uintptr_t) const* p, mi_memory_order mo) { + (void)(mo); +#if defined(_M_IX86) || defined(_M_X64) + return *p; +#else + uintptr_t x = *p; + if (mo > mi_memory_order_relaxed) { + while (!mi_atomic_compare_exchange_weak_explicit((_Atomic(uintptr_t)*)p, &x, x, mo, mi_memory_order_relaxed)) { /* nothing */ }; + } + return x; +#endif +} +static inline void mi_atomic_store_explicit(_Atomic(uintptr_t)*p, uintptr_t x, mi_memory_order mo) { + (void)(mo); +#if defined(_M_IX86) || defined(_M_X64) + *p = x; +#else + mi_atomic_exchange_explicit(p, x, mo); +#endif +} +static inline int64_t mi_atomic_loadi64_explicit(_Atomic(int64_t)*p, mi_memory_order mo) { + (void)(mo); +#if defined(_M_X64) + return *p; +#else + int64_t old = *p; + int64_t x = old; + while ((old = InterlockedCompareExchange64(p, x, old)) != x) { + x = old; + } + return x; +#endif +} +static inline void mi_atomic_storei64_explicit(_Atomic(int64_t)*p, int64_t x, mi_memory_order mo) { + (void)(mo); +#if defined(x_M_IX86) || defined(_M_X64) + *p = x; +#else + InterlockedExchange64(p, x); +#endif +} + +// These are used by the statistics +static inline int64_t mi_atomic_addi64_relaxed(volatile _Atomic(int64_t)*p, int64_t add) { +#ifdef _WIN64 + return (int64_t)mi_atomic_addi((int64_t*)p, add); +#else + int64_t current; + int64_t sum; + do { + current = *p; + sum = current + add; + } while (_InterlockedCompareExchange64(p, sum, current) != current); + return current; +#endif +} +static inline void mi_atomic_maxi64_relaxed(volatile _Atomic(int64_t)*p, int64_t 
x) { + int64_t current; + do { + current = *p; + } while (current < x && _InterlockedCompareExchange64(p, x, current) != current); +} + +static inline void mi_atomic_addi64_acq_rel(volatile _Atomic(int64_t*)p, int64_t i) { + mi_atomic_addi64_relaxed(p, i); +} + +static inline bool mi_atomic_casi64_strong_acq_rel(volatile _Atomic(int64_t*)p, int64_t* exp, int64_t des) { + int64_t read = _InterlockedCompareExchange64(p, des, *exp); + if (read == *exp) { + return true; + } + else { + *exp = read; + return false; + } +} + +// The pointer macros cast to `uintptr_t`. +#define mi_atomic_load_ptr_acquire(tp,p) (tp*)mi_atomic_load_acquire((_Atomic(uintptr_t)*)(p)) +#define mi_atomic_load_ptr_relaxed(tp,p) (tp*)mi_atomic_load_relaxed((_Atomic(uintptr_t)*)(p)) +#define mi_atomic_store_ptr_release(tp,p,x) mi_atomic_store_release((_Atomic(uintptr_t)*)(p),(uintptr_t)(x)) +#define mi_atomic_store_ptr_relaxed(tp,p,x) mi_atomic_store_relaxed((_Atomic(uintptr_t)*)(p),(uintptr_t)(x)) +#define mi_atomic_cas_ptr_weak_release(tp,p,exp,des) mi_atomic_cas_weak_release((_Atomic(uintptr_t)*)(p),(uintptr_t*)exp,(uintptr_t)des) +#define mi_atomic_cas_ptr_weak_acq_rel(tp,p,exp,des) mi_atomic_cas_weak_acq_rel((_Atomic(uintptr_t)*)(p),(uintptr_t*)exp,(uintptr_t)des) +#define mi_atomic_cas_ptr_strong_release(tp,p,exp,des) mi_atomic_cas_strong_release((_Atomic(uintptr_t)*)(p),(uintptr_t*)exp,(uintptr_t)des) +#define mi_atomic_exchange_ptr_release(tp,p,x) (tp*)mi_atomic_exchange_release((_Atomic(uintptr_t)*)(p),(uintptr_t)x) +#define mi_atomic_exchange_ptr_acq_rel(tp,p,x) (tp*)mi_atomic_exchange_acq_rel((_Atomic(uintptr_t)*)(p),(uintptr_t)x) + +#define mi_atomic_loadi64_acquire(p) mi_atomic(loadi64_explicit)(p,mi_memory_order(acquire)) +#define mi_atomic_loadi64_relaxed(p) mi_atomic(loadi64_explicit)(p,mi_memory_order(relaxed)) +#define mi_atomic_storei64_release(p,x) mi_atomic(storei64_explicit)(p,x,mi_memory_order(release)) +#define mi_atomic_storei64_relaxed(p,x) 
mi_atomic(storei64_explicit)(p,x,mi_memory_order(relaxed)) + + +#endif + + +// Atomically add a signed value; returns the previous value. +static inline intptr_t mi_atomic_addi(_Atomic(intptr_t)*p, intptr_t add) { + return (intptr_t)mi_atomic_add_acq_rel((_Atomic(uintptr_t)*)p, (uintptr_t)add); +} + +// Atomically subtract a signed value; returns the previous value. +static inline intptr_t mi_atomic_subi(_Atomic(intptr_t)*p, intptr_t sub) { + return (intptr_t)mi_atomic_addi(p, -sub); +} + +typedef _Atomic(uintptr_t) mi_atomic_once_t; + +// Returns true only on the first invocation +static inline bool mi_atomic_once( mi_atomic_once_t* once ) { + if (mi_atomic_load_relaxed(once) != 0) return false; // quick test + uintptr_t expected = 0; + return mi_atomic_cas_strong_acq_rel(once, &expected, (uintptr_t)1); // try to set to 1 +} + +typedef _Atomic(uintptr_t) mi_atomic_guard_t; + +// Allows only one thread to execute at a time +#define mi_atomic_guard(guard) \ + uintptr_t _mi_guard_expected = 0; \ + for(bool _mi_guard_once = true; \ + _mi_guard_once && mi_atomic_cas_strong_acq_rel(guard,&_mi_guard_expected,(uintptr_t)1); \ + (mi_atomic_store_release(guard,(uintptr_t)0), _mi_guard_once = false) ) + + + +// Yield +#if defined(__cplusplus) +#include +static inline void mi_atomic_yield(void) { + std::this_thread::yield(); +} +#elif defined(_WIN32) +#ifndef WIN32_LEAN_AND_MEAN +#define WIN32_LEAN_AND_MEAN +#endif +#include +static inline void mi_atomic_yield(void) { + YieldProcessor(); +} +#elif defined(__SSE2__) +#include +static inline void mi_atomic_yield(void) { + _mm_pause(); +} +#elif (defined(__GNUC__) || defined(__clang__)) && \ + (defined(__x86_64__) || defined(__i386__) || defined(__arm__) || defined(__armel__) || defined(__ARMEL__) || \ + defined(__aarch64__) || defined(__powerpc__) || defined(__ppc__) || defined(__PPC__)) || defined(__POWERPC__) +#if defined(__x86_64__) || defined(__i386__) +static inline void mi_atomic_yield(void) { + __asm__ volatile ("pause" 
::: "memory"); +} +#elif defined(__aarch64__) +static inline void mi_atomic_yield(void) { + __asm__ volatile("wfe"); +} +#elif (defined(__arm__) && __ARM_ARCH__ >= 7) +static inline void mi_atomic_yield(void) { + __asm__ volatile("yield" ::: "memory"); +} +#elif defined(__powerpc__) || defined(__ppc__) || defined(__PPC__) || defined(__POWERPC__) +#ifdef __APPLE__ +static inline void mi_atomic_yield(void) { + __asm__ volatile ("or r27,r27,r27" ::: "memory"); +} +#else +static inline void mi_atomic_yield(void) { + __asm__ __volatile__ ("or 27,27,27" ::: "memory"); +} +#endif +#elif defined(__armel__) || defined(__ARMEL__) +static inline void mi_atomic_yield(void) { + __asm__ volatile ("nop" ::: "memory"); +} +#endif +#elif defined(__sun) +// Fallback for other archs +#include +static inline void mi_atomic_yield(void) { + smt_pause(); +} +#elif defined(__wasi__) +#include +static inline void mi_atomic_yield(void) { + sched_yield(); +} +#else +#include +static inline void mi_atomic_yield(void) { + sleep(0); +} +#endif + + +#endif // __MIMALLOC_ATOMIC_H diff --git a/yass/third_party/mimalloc/include/mimalloc/internal.h b/yass/third_party/mimalloc/include/mimalloc/internal.h new file mode 100644 index 0000000000..6c6e5ed04f --- /dev/null +++ b/yass/third_party/mimalloc/include/mimalloc/internal.h @@ -0,0 +1,1018 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2018-2023, Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. +-----------------------------------------------------------------------------*/ +#pragma once +#ifndef MIMALLOC_INTERNAL_H +#define MIMALLOC_INTERNAL_H + + +// -------------------------------------------------------------------------- +// This file contains the interal API's of mimalloc and various utility +// functions and macros. 
+// -------------------------------------------------------------------------- + +#include "types.h" +#include "track.h" + +#if (MI_DEBUG>0) +#define mi_trace_message(...) _mi_trace_message(__VA_ARGS__) +#else +#define mi_trace_message(...) +#endif + +#define MI_CACHE_LINE 64 +#if defined(_MSC_VER) +#pragma warning(disable:4127) // suppress constant conditional warning (due to MI_SECURE paths) +#pragma warning(disable:26812) // unscoped enum warning +#define mi_decl_noinline __declspec(noinline) +#define mi_decl_thread __declspec(thread) +#define mi_decl_cache_align __declspec(align(MI_CACHE_LINE)) +#define mi_decl_weak +#elif (defined(__GNUC__) && (__GNUC__ >= 3)) || defined(__clang__) // includes clang and icc +#define mi_decl_noinline __attribute__((noinline)) +#define mi_decl_thread __thread +#define mi_decl_cache_align __attribute__((aligned(MI_CACHE_LINE))) +#define mi_decl_weak __attribute__((weak)) +#else +#define mi_decl_noinline +#define mi_decl_thread __thread // hope for the best :-) +#define mi_decl_cache_align +#define mi_decl_weak +#endif + +#if defined(__EMSCRIPTEN__) && !defined(__wasi__) +#define __wasi__ +#endif + +#if defined(__cplusplus) +#define mi_decl_externc extern "C" +#else +#define mi_decl_externc +#endif + +// pthreads +#if !defined(_WIN32) && !defined(__wasi__) +#define MI_USE_PTHREADS +#include +#endif + +// "options.c" +void _mi_fputs(mi_output_fun* out, void* arg, const char* prefix, const char* message); +void _mi_fprintf(mi_output_fun* out, void* arg, const char* fmt, ...); +void _mi_warning_message(const char* fmt, ...); +void _mi_verbose_message(const char* fmt, ...); +void _mi_trace_message(const char* fmt, ...); +void _mi_options_init(void); +void _mi_error_message(int err, const char* fmt, ...); + +// random.c +void _mi_random_init(mi_random_ctx_t* ctx); +void _mi_random_init_weak(mi_random_ctx_t* ctx); +void _mi_random_reinit_if_weak(mi_random_ctx_t * ctx); +void _mi_random_split(mi_random_ctx_t* ctx, mi_random_ctx_t* 
new_ctx); +uintptr_t _mi_random_next(mi_random_ctx_t* ctx); +uintptr_t _mi_heap_random_next(mi_heap_t* heap); +uintptr_t _mi_os_random_weak(uintptr_t extra_seed); +static inline uintptr_t _mi_random_shuffle(uintptr_t x); + +// init.c +extern mi_decl_cache_align mi_stats_t _mi_stats_main; +extern mi_decl_cache_align const mi_page_t _mi_page_empty; +bool _mi_is_main_thread(void); +size_t _mi_current_thread_count(void); +bool _mi_preloading(void); // true while the C runtime is not initialized yet +mi_threadid_t _mi_thread_id(void) mi_attr_noexcept; +mi_heap_t* _mi_heap_main_get(void); // statically allocated main backing heap +void _mi_thread_done(mi_heap_t* heap); +void _mi_thread_data_collect(void); +void _mi_tld_init(mi_tld_t* tld, mi_heap_t* bheap); + +// os.c +void _mi_os_init(void); // called from process init +void* _mi_os_alloc(size_t size, mi_memid_t* memid, mi_stats_t* stats); +void _mi_os_free(void* p, size_t size, mi_memid_t memid, mi_stats_t* stats); +void _mi_os_free_ex(void* p, size_t size, bool still_committed, mi_memid_t memid, mi_stats_t* stats); + +size_t _mi_os_page_size(void); +size_t _mi_os_good_alloc_size(size_t size); +bool _mi_os_has_overcommit(void); +bool _mi_os_has_virtual_reserve(void); + +bool _mi_os_purge(void* p, size_t size, mi_stats_t* stats); +bool _mi_os_reset(void* addr, size_t size, mi_stats_t* tld_stats); +bool _mi_os_commit(void* p, size_t size, bool* is_zero, mi_stats_t* stats); +bool _mi_os_decommit(void* addr, size_t size, mi_stats_t* stats); +bool _mi_os_protect(void* addr, size_t size); +bool _mi_os_unprotect(void* addr, size_t size); +bool _mi_os_purge(void* p, size_t size, mi_stats_t* stats); +bool _mi_os_purge_ex(void* p, size_t size, bool allow_reset, mi_stats_t* stats); + +void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool allow_large, mi_memid_t* memid, mi_stats_t* stats); +void* _mi_os_alloc_aligned_at_offset(size_t size, size_t alignment, size_t align_offset, bool commit, bool allow_large, 
mi_memid_t* memid, mi_stats_t* tld_stats); + +void* _mi_os_get_aligned_hint(size_t try_alignment, size_t size); +bool _mi_os_use_large_page(size_t size, size_t alignment); +size_t _mi_os_large_page_size(void); + +void* _mi_os_alloc_huge_os_pages(size_t pages, int numa_node, mi_msecs_t max_secs, size_t* pages_reserved, size_t* psize, mi_memid_t* memid); + +// arena.c +mi_arena_id_t _mi_arena_id_none(void); +void _mi_arena_free(void* p, size_t size, size_t still_committed_size, mi_memid_t memid, mi_stats_t* stats); +void* _mi_arena_alloc(size_t size, bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld); +void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset, bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld); +bool _mi_arena_memid_is_suitable(mi_memid_t memid, mi_arena_id_t request_arena_id); +bool _mi_arena_contains(const void* p); +void _mi_arenas_collect(bool force_purge, mi_stats_t* stats); +void _mi_arena_unsafe_destroy_all(mi_stats_t* stats); + +bool _mi_arena_segment_clear_abandoned(mi_segment_t* segment); +void _mi_arena_segment_mark_abandoned(mi_segment_t* segment); +size_t _mi_arena_segment_abandoned_count(void); + +typedef struct mi_arena_field_cursor_s { // abstract + mi_arena_id_t start; + int count; + size_t bitmap_idx; +} mi_arena_field_cursor_t; +void _mi_arena_field_cursor_init(mi_heap_t* heap, mi_arena_field_cursor_t* current); +mi_segment_t* _mi_arena_segment_clear_abandoned_next(mi_arena_field_cursor_t* previous); + +// "segment-map.c" +void _mi_segment_map_allocated_at(const mi_segment_t* segment); +void _mi_segment_map_freed_at(const mi_segment_t* segment); + +// "segment.c" +mi_page_t* _mi_segment_page_alloc(mi_heap_t* heap, size_t block_size, size_t page_alignment, mi_segments_tld_t* tld, mi_os_tld_t* os_tld); +void _mi_segment_page_free(mi_page_t* page, bool force, mi_segments_tld_t* tld); +void 
_mi_segment_page_abandon(mi_page_t* page, mi_segments_tld_t* tld); +bool _mi_segment_try_reclaim_abandoned( mi_heap_t* heap, bool try_all, mi_segments_tld_t* tld); +void _mi_segment_collect(mi_segment_t* segment, bool force, mi_segments_tld_t* tld); + +#if MI_HUGE_PAGE_ABANDON +void _mi_segment_huge_page_free(mi_segment_t* segment, mi_page_t* page, mi_block_t* block); +#else +void _mi_segment_huge_page_reset(mi_segment_t* segment, mi_page_t* page, mi_block_t* block); +#endif + +uint8_t* _mi_segment_page_start(const mi_segment_t* segment, const mi_page_t* page, size_t* page_size); // page start for any page +void _mi_abandoned_reclaim_all(mi_heap_t* heap, mi_segments_tld_t* tld); +void _mi_abandoned_await_readers(void); +void _mi_abandoned_collect(mi_heap_t* heap, bool force, mi_segments_tld_t* tld); +bool _mi_segment_attempt_reclaim(mi_heap_t* heap, mi_segment_t* segment); + +// "page.c" +void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment) mi_attr_noexcept mi_attr_malloc; + +void _mi_page_retire(mi_page_t* page) mi_attr_noexcept; // free the page if there are no other pages with many free blocks +void _mi_page_unfull(mi_page_t* page); +void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq, bool force); // free the page +void _mi_page_abandon(mi_page_t* page, mi_page_queue_t* pq); // abandon the page, to be picked up by another thread... 
+void _mi_heap_delayed_free_all(mi_heap_t* heap); +bool _mi_heap_delayed_free_partial(mi_heap_t* heap); +void _mi_heap_collect_retired(mi_heap_t* heap, bool force); + +void _mi_page_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool override_never); +bool _mi_page_try_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool override_never); +size_t _mi_page_queue_append(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_queue_t* append); +void _mi_deferred_free(mi_heap_t* heap, bool force); + +void _mi_page_free_collect(mi_page_t* page,bool force); +void _mi_page_reclaim(mi_heap_t* heap, mi_page_t* page); // callback from segments + +size_t _mi_bin_size(uint8_t bin); // for stats +uint8_t _mi_bin(size_t size); // for stats + +// "heap.c" +void _mi_heap_init(mi_heap_t* heap, mi_tld_t* tld, mi_arena_id_t arena_id, bool noreclaim, uint8_t tag); +void _mi_heap_destroy_pages(mi_heap_t* heap); +void _mi_heap_collect_abandon(mi_heap_t* heap); +void _mi_heap_set_default_direct(mi_heap_t* heap); +bool _mi_heap_memid_is_suitable(mi_heap_t* heap, mi_memid_t memid); +void _mi_heap_unsafe_destroy_all(void); +mi_heap_t* _mi_heap_by_tag(mi_heap_t* heap, uint8_t tag); + +// "stats.c" +void _mi_stats_done(mi_stats_t* stats); +mi_msecs_t _mi_clock_now(void); +mi_msecs_t _mi_clock_end(mi_msecs_t start); +mi_msecs_t _mi_clock_start(void); + +// "alloc.c" +void* _mi_page_malloc_zero(mi_heap_t* heap, mi_page_t* page, size_t size, bool zero) mi_attr_noexcept; // called from `_mi_malloc_generic` +void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size) mi_attr_noexcept; // called from `_mi_heap_malloc_aligned` +void* _mi_page_malloc_zeroed(mi_heap_t* heap, mi_page_t* page, size_t size) mi_attr_noexcept; // called from `_mi_heap_malloc_aligned` +void* _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept; +void* _mi_heap_malloc_zero_ex(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment) mi_attr_noexcept; // called from 
`_mi_heap_malloc_aligned` +void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool zero) mi_attr_noexcept; +mi_block_t* _mi_page_ptr_unalign(const mi_page_t* page, const void* p); +bool _mi_free_delayed_block(mi_block_t* block); +void _mi_free_generic(mi_segment_t* segment, mi_page_t* page, bool is_local, void* p) mi_attr_noexcept; // for runtime integration +void _mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, const size_t min_size); + +// "libc.c" +#include +void _mi_vsnprintf(char* buf, size_t bufsize, const char* fmt, va_list args); +void _mi_snprintf(char* buf, size_t buflen, const char* fmt, ...); +char _mi_toupper(char c); +int _mi_strnicmp(const char* s, const char* t, size_t n); +void _mi_strlcpy(char* dest, const char* src, size_t dest_size); +void _mi_strlcat(char* dest, const char* src, size_t dest_size); +size_t _mi_strlen(const char* s); +size_t _mi_strnlen(const char* s, size_t max_len); +bool _mi_getenv(const char* name, char* result, size_t result_size); + +#if MI_DEBUG>1 +bool _mi_page_is_valid(mi_page_t* page); +#endif + + +// ------------------------------------------------------ +// Branches +// ------------------------------------------------------ + +#if defined(__GNUC__) || defined(__clang__) +#define mi_unlikely(x) (__builtin_expect(!!(x),false)) +#define mi_likely(x) (__builtin_expect(!!(x),true)) +#elif (defined(__cplusplus) && (__cplusplus >= 202002L)) || (defined(_MSVC_LANG) && _MSVC_LANG >= 202002L) +#define mi_unlikely(x) (x) [[unlikely]] +#define mi_likely(x) (x) [[likely]] +#else +#define mi_unlikely(x) (x) +#define mi_likely(x) (x) +#endif + +#ifndef __has_builtin +#define __has_builtin(x) 0 +#endif + + +/* ----------------------------------------------------------- + Error codes passed to `_mi_fatal_error` + All are recoverable but EFAULT is a serious error and aborts by default in secure mode. 
+ For portability define undefined error codes using common Unix codes: + +----------------------------------------------------------- */ +#include +#ifndef EAGAIN // double free +#define EAGAIN (11) +#endif +#ifndef ENOMEM // out of memory +#define ENOMEM (12) +#endif +#ifndef EFAULT // corrupted free-list or meta-data +#define EFAULT (14) +#endif +#ifndef EINVAL // trying to free an invalid pointer +#define EINVAL (22) +#endif +#ifndef EOVERFLOW // count*size overflow +#define EOVERFLOW (75) +#endif + + +/* ----------------------------------------------------------- + Inlined definitions +----------------------------------------------------------- */ +#define MI_UNUSED(x) (void)(x) +#if (MI_DEBUG>0) +#define MI_UNUSED_RELEASE(x) +#else +#define MI_UNUSED_RELEASE(x) MI_UNUSED(x) +#endif + +#define MI_INIT4(x) x(),x(),x(),x() +#define MI_INIT8(x) MI_INIT4(x),MI_INIT4(x) +#define MI_INIT16(x) MI_INIT8(x),MI_INIT8(x) +#define MI_INIT32(x) MI_INIT16(x),MI_INIT16(x) +#define MI_INIT64(x) MI_INIT32(x),MI_INIT32(x) +#define MI_INIT128(x) MI_INIT64(x),MI_INIT64(x) +#define MI_INIT256(x) MI_INIT128(x),MI_INIT128(x) + + +#include +// initialize a local variable to zero; use memset as compilers optimize constant sized memset's +#define _mi_memzero_var(x) memset(&x,0,sizeof(x)) + +// Is `x` a power of two? (0 is considered a power of two) +static inline bool _mi_is_power_of_two(uintptr_t x) { + return ((x & (x - 1)) == 0); +} + +// Is a pointer aligned? +static inline bool _mi_is_aligned(void* p, size_t alignment) { + mi_assert_internal(alignment != 0); + return (((uintptr_t)p % alignment) == 0); +} + +// Align upwards +static inline uintptr_t _mi_align_up(uintptr_t sz, size_t alignment) { + mi_assert_internal(alignment != 0); + uintptr_t mask = alignment - 1; + if ((alignment & mask) == 0) { // power of two? 
+ return ((sz + mask) & ~mask); + } + else { + return (((sz + mask)/alignment)*alignment); + } +} + +// Align downwards +static inline uintptr_t _mi_align_down(uintptr_t sz, size_t alignment) { + mi_assert_internal(alignment != 0); + uintptr_t mask = alignment - 1; + if ((alignment & mask) == 0) { // power of two? + return (sz & ~mask); + } + else { + return ((sz / alignment) * alignment); + } +} + +// Align a pointer upwards +static inline void* mi_align_up_ptr(void* p, size_t alignment) { + return (void*)_mi_align_up((uintptr_t)p, alignment); +} + +// Align a pointer downwards +static inline void* mi_align_down_ptr(void* p, size_t alignment) { + return (void*)_mi_align_down((uintptr_t)p, alignment); +} + + +// Divide upwards: `s <= _mi_divide_up(s,d)*d < s+d`. +static inline uintptr_t _mi_divide_up(uintptr_t size, size_t divider) { + mi_assert_internal(divider != 0); + return (divider == 0 ? size : ((size + divider - 1) / divider)); +} + +// Is memory zero initialized? +static inline bool mi_mem_is_zero(const void* p, size_t size) { + for (size_t i = 0; i < size; i++) { + if (((uint8_t*)p)[i] != 0) return false; + } + return true; +} + + +// Align a byte size to a size in _machine words_, +// i.e. byte size == `wsize*sizeof(void*)`. 
+static inline size_t _mi_wsize_from_size(size_t size) { + mi_assert_internal(size <= SIZE_MAX - sizeof(uintptr_t)); + return (size + sizeof(uintptr_t) - 1) / sizeof(uintptr_t); +} + +// Overflow detecting multiply +#if __has_builtin(__builtin_umul_overflow) || (defined(__GNUC__) && (__GNUC__ >= 5)) +#include // UINT_MAX, ULONG_MAX +#if defined(_CLOCK_T) // for Illumos +#undef _CLOCK_T +#endif +static inline bool mi_mul_overflow(size_t count, size_t size, size_t* total) { + #if (SIZE_MAX == ULONG_MAX) + return __builtin_umull_overflow(count, size, (unsigned long *)total); + #elif (SIZE_MAX == UINT_MAX) + return __builtin_umul_overflow(count, size, (unsigned int *)total); + #else + return __builtin_umulll_overflow(count, size, (unsigned long long *)total); + #endif +} +#else /* __builtin_umul_overflow is unavailable */ +static inline bool mi_mul_overflow(size_t count, size_t size, size_t* total) { + #define MI_MUL_COULD_OVERFLOW ((size_t)1 << (4*sizeof(size_t))) // sqrt(SIZE_MAX) + *total = count * size; + // note: gcc/clang optimize this to directly check the overflow flag + return ((size >= MI_MUL_COULD_OVERFLOW || count >= MI_MUL_COULD_OVERFLOW) && size > 0 && (SIZE_MAX / size) < count); +} +#endif + +// Safe multiply `count*size` into `total`; return `true` on overflow. 
+static inline bool mi_count_size_overflow(size_t count, size_t size, size_t* total) { + if (count==1) { // quick check for the case where count is one (common for C++ allocators) + *total = size; + return false; + } + else if mi_unlikely(mi_mul_overflow(count, size, total)) { + #if MI_DEBUG > 0 + _mi_error_message(EOVERFLOW, "allocation request is too large (%zu * %zu bytes)\n", count, size); + #endif + *total = SIZE_MAX; + return true; + } + else return false; +} + + +/*---------------------------------------------------------------------------------------- + Heap functions +------------------------------------------------------------------------------------------- */ + +extern const mi_heap_t _mi_heap_empty; // read-only empty heap, initial value of the thread local default heap + +static inline bool mi_heap_is_backing(const mi_heap_t* heap) { + return (heap->tld->heap_backing == heap); +} + +static inline bool mi_heap_is_initialized(mi_heap_t* heap) { + mi_assert_internal(heap != NULL); + return (heap != &_mi_heap_empty); +} + +static inline uintptr_t _mi_ptr_cookie(const void* p) { + extern mi_heap_t _mi_heap_main; + mi_assert_internal(_mi_heap_main.cookie != 0); + return ((uintptr_t)p ^ _mi_heap_main.cookie); +} + +/* ----------------------------------------------------------- + Pages +----------------------------------------------------------- */ + +static inline mi_page_t* _mi_heap_get_free_small_page(mi_heap_t* heap, size_t size) { + mi_assert_internal(size <= (MI_SMALL_SIZE_MAX + MI_PADDING_SIZE)); + const size_t idx = _mi_wsize_from_size(size); + mi_assert_internal(idx < MI_PAGES_DIRECT); + return heap->pages_free_direct[idx]; +} + +// Segment that contains the pointer +// Large aligned blocks may be aligned at N*MI_SEGMENT_SIZE (inside a huge segment > MI_SEGMENT_SIZE), +// and we need align "down" to the segment info which is `MI_SEGMENT_SIZE` bytes before it; +// therefore we align one byte before `p`. 
+// We check for NULL afterwards on 64-bit systems to improve codegen for `mi_free`. +static inline mi_segment_t* _mi_ptr_segment(const void* p) { + mi_segment_t* const segment = (mi_segment_t*)(((uintptr_t)p - 1) & ~MI_SEGMENT_MASK); + #if MI_INTPTR_SIZE <= 4 + return (p==NULL ? NULL : segment); + #else + return ((intptr_t)segment <= 0 ? NULL : segment); + #endif +} + +static inline mi_page_t* mi_slice_to_page(mi_slice_t* s) { + mi_assert_internal(s->slice_offset== 0 && s->slice_count > 0); + return (mi_page_t*)(s); +} + +static inline mi_slice_t* mi_page_to_slice(mi_page_t* p) { + mi_assert_internal(p->slice_offset== 0 && p->slice_count > 0); + return (mi_slice_t*)(p); +} + +// Segment belonging to a page +static inline mi_segment_t* _mi_page_segment(const mi_page_t* page) { + mi_assert_internal(page!=NULL); + mi_segment_t* segment = _mi_ptr_segment(page); + mi_assert_internal(segment == NULL || ((mi_slice_t*)page >= segment->slices && (mi_slice_t*)page < segment->slices + segment->slice_entries)); + return segment; +} + +static inline mi_slice_t* mi_slice_first(const mi_slice_t* slice) { + mi_slice_t* start = (mi_slice_t*)((uint8_t*)slice - slice->slice_offset); + mi_assert_internal(start >= _mi_ptr_segment(slice)->slices); + mi_assert_internal(start->slice_offset == 0); + mi_assert_internal(start + start->slice_count > slice); + return start; +} + +// Get the page containing the pointer (performance critical as it is called in mi_free) +static inline mi_page_t* _mi_segment_page_of(const mi_segment_t* segment, const void* p) { + mi_assert_internal(p > (void*)segment); + ptrdiff_t diff = (uint8_t*)p - (uint8_t*)segment; + mi_assert_internal(diff > 0 && diff <= (ptrdiff_t)MI_SEGMENT_SIZE); + size_t idx = (size_t)diff >> MI_SEGMENT_SLICE_SHIFT; + mi_assert_internal(idx <= segment->slice_entries); + mi_slice_t* slice0 = (mi_slice_t*)&segment->slices[idx]; + mi_slice_t* slice = mi_slice_first(slice0); // adjust to the block that holds the page data + 
mi_assert_internal(slice->slice_offset == 0); + mi_assert_internal(slice >= segment->slices && slice < segment->slices + segment->slice_entries); + return mi_slice_to_page(slice); +} + +// Quick page start for initialized pages +static inline uint8_t* mi_page_start(const mi_page_t* page) { + mi_assert_internal(page->page_start != NULL); + mi_assert_expensive(_mi_segment_page_start(_mi_page_segment(page),page,NULL) == page->page_start); + return page->page_start; +} + +// Get the page containing the pointer +static inline mi_page_t* _mi_ptr_page(void* p) { + mi_assert_internal(p!=NULL); + return _mi_segment_page_of(_mi_ptr_segment(p), p); +} + +// Get the block size of a page (special case for huge objects) +static inline size_t mi_page_block_size(const mi_page_t* page) { + mi_assert_internal(page->block_size > 0); + return page->block_size; +} + +static inline bool mi_page_is_huge(const mi_page_t* page) { + mi_assert_internal((page->is_huge && _mi_page_segment(page)->kind == MI_SEGMENT_HUGE) || + (!page->is_huge && _mi_page_segment(page)->kind != MI_SEGMENT_HUGE)); + return page->is_huge; +} + +// Get the usable block size of a page without fixed padding. +// This may still include internal padding due to alignment and rounding up size classes. 
+static inline size_t mi_page_usable_block_size(const mi_page_t* page) { + return mi_page_block_size(page) - MI_PADDING_SIZE; +} + +// size of a segment +static inline size_t mi_segment_size(mi_segment_t* segment) { + return segment->segment_slices * MI_SEGMENT_SLICE_SIZE; +} + +static inline uint8_t* mi_segment_end(mi_segment_t* segment) { + return (uint8_t*)segment + mi_segment_size(segment); +} + +// Thread free access +static inline mi_block_t* mi_page_thread_free(const mi_page_t* page) { + return (mi_block_t*)(mi_atomic_load_relaxed(&((mi_page_t*)page)->xthread_free) & ~3); +} + +static inline mi_delayed_t mi_page_thread_free_flag(const mi_page_t* page) { + return (mi_delayed_t)(mi_atomic_load_relaxed(&((mi_page_t*)page)->xthread_free) & 3); +} + +// Heap access +static inline mi_heap_t* mi_page_heap(const mi_page_t* page) { + return (mi_heap_t*)(mi_atomic_load_relaxed(&((mi_page_t*)page)->xheap)); +} + +static inline void mi_page_set_heap(mi_page_t* page, mi_heap_t* heap) { + mi_assert_internal(mi_page_thread_free_flag(page) != MI_DELAYED_FREEING); + mi_atomic_store_release(&page->xheap,(uintptr_t)heap); + if (heap != NULL) { page->heap_tag = heap->tag; } +} + +// Thread free flag helpers +static inline mi_block_t* mi_tf_block(mi_thread_free_t tf) { + return (mi_block_t*)(tf & ~0x03); +} +static inline mi_delayed_t mi_tf_delayed(mi_thread_free_t tf) { + return (mi_delayed_t)(tf & 0x03); +} +static inline mi_thread_free_t mi_tf_make(mi_block_t* block, mi_delayed_t delayed) { + return (mi_thread_free_t)((uintptr_t)block | (uintptr_t)delayed); +} +static inline mi_thread_free_t mi_tf_set_delayed(mi_thread_free_t tf, mi_delayed_t delayed) { + return mi_tf_make(mi_tf_block(tf),delayed); +} +static inline mi_thread_free_t mi_tf_set_block(mi_thread_free_t tf, mi_block_t* block) { + return mi_tf_make(block, mi_tf_delayed(tf)); +} + +// are all blocks in a page freed? +// note: needs up-to-date used count, (as the `xthread_free` list may not be empty). 
see `_mi_page_collect_free`. +static inline bool mi_page_all_free(const mi_page_t* page) { + mi_assert_internal(page != NULL); + return (page->used == 0); +} + +// are there any available blocks? +static inline bool mi_page_has_any_available(const mi_page_t* page) { + mi_assert_internal(page != NULL && page->reserved > 0); + return (page->used < page->reserved || (mi_page_thread_free(page) != NULL)); +} + +// are there immediately available blocks, i.e. blocks available on the free list. +static inline bool mi_page_immediate_available(const mi_page_t* page) { + mi_assert_internal(page != NULL); + return (page->free != NULL); +} + +// is more than 7/8th of a page in use? +static inline bool mi_page_mostly_used(const mi_page_t* page) { + if (page==NULL) return true; + uint16_t frac = page->reserved / 8U; + return (page->reserved - page->used <= frac); +} + +static inline mi_page_queue_t* mi_page_queue(const mi_heap_t* heap, size_t size) { + return &((mi_heap_t*)heap)->pages[_mi_bin(size)]; +} + + + +//----------------------------------------------------------- +// Page flags +//----------------------------------------------------------- +static inline bool mi_page_is_in_full(const mi_page_t* page) { + return page->flags.x.in_full; +} + +static inline void mi_page_set_in_full(mi_page_t* page, bool in_full) { + page->flags.x.in_full = in_full; +} + +static inline bool mi_page_has_aligned(const mi_page_t* page) { + return page->flags.x.has_aligned; +} + +static inline void mi_page_set_has_aligned(mi_page_t* page, bool has_aligned) { + page->flags.x.has_aligned = has_aligned; +} + + +/* ------------------------------------------------------------------- +Encoding/Decoding the free list next pointers + +This is to protect against buffer overflow exploits where the +free list is mutated. Many hardened allocators xor the next pointer `p` +with a secret key `k1`, as `p^k1`. 
This prevents overwriting with known +values but might be still too weak: if the attacker can guess +the pointer `p` this can reveal `k1` (since `p^k1^p == k1`). +Moreover, if multiple blocks can be read as well, the attacker can +xor both as `(p1^k1) ^ (p2^k1) == p1^p2` which may reveal a lot +about the pointers (and subsequently `k1`). + +Instead mimalloc uses an extra key `k2` and encodes as `((p^k2)<<> (MI_INTPTR_BITS - shift)))); +} +static inline uintptr_t mi_rotr(uintptr_t x, uintptr_t shift) { + shift %= MI_INTPTR_BITS; + return (shift==0 ? x : ((x >> shift) | (x << (MI_INTPTR_BITS - shift)))); +} + +static inline void* mi_ptr_decode(const void* null, const mi_encoded_t x, const uintptr_t* keys) { + void* p = (void*)(mi_rotr(x - keys[0], keys[0]) ^ keys[1]); + return (p==null ? NULL : p); +} + +static inline mi_encoded_t mi_ptr_encode(const void* null, const void* p, const uintptr_t* keys) { + uintptr_t x = (uintptr_t)(p==NULL ? null : p); + return mi_rotl(x ^ keys[1], keys[0]) + keys[0]; +} + +static inline mi_block_t* mi_block_nextx( const void* null, const mi_block_t* block, const uintptr_t* keys ) { + mi_track_mem_defined(block,sizeof(mi_block_t)); + mi_block_t* next; + #ifdef MI_ENCODE_FREELIST + next = (mi_block_t*)mi_ptr_decode(null, block->next, keys); + #else + MI_UNUSED(keys); MI_UNUSED(null); + next = (mi_block_t*)block->next; + #endif + mi_track_mem_noaccess(block,sizeof(mi_block_t)); + return next; +} + +static inline void mi_block_set_nextx(const void* null, mi_block_t* block, const mi_block_t* next, const uintptr_t* keys) { + mi_track_mem_undefined(block,sizeof(mi_block_t)); + #ifdef MI_ENCODE_FREELIST + block->next = mi_ptr_encode(null, next, keys); + #else + MI_UNUSED(keys); MI_UNUSED(null); + block->next = (mi_encoded_t)next; + #endif + mi_track_mem_noaccess(block,sizeof(mi_block_t)); +} + +static inline mi_block_t* mi_block_next(const mi_page_t* page, const mi_block_t* block) { + #ifdef MI_ENCODE_FREELIST + mi_block_t* next = 
mi_block_nextx(page,block,page->keys); + // check for free list corruption: is `next` at least in the same page? + // TODO: check if `next` is `page->block_size` aligned? + if mi_unlikely(next!=NULL && !mi_is_in_same_page(block, next)) { + _mi_error_message(EFAULT, "corrupted free list entry of size %zub at %p: value 0x%zx\n", mi_page_block_size(page), block, (uintptr_t)next); + next = NULL; + } + return next; + #else + MI_UNUSED(page); + return mi_block_nextx(page,block,NULL); + #endif +} + +static inline void mi_block_set_next(const mi_page_t* page, mi_block_t* block, const mi_block_t* next) { + #ifdef MI_ENCODE_FREELIST + mi_block_set_nextx(page,block,next, page->keys); + #else + MI_UNUSED(page); + mi_block_set_nextx(page,block,next,NULL); + #endif +} + + +// ------------------------------------------------------------------- +// commit mask +// ------------------------------------------------------------------- + +static inline void mi_commit_mask_create_empty(mi_commit_mask_t* cm) { + for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) { + cm->mask[i] = 0; + } +} + +static inline void mi_commit_mask_create_full(mi_commit_mask_t* cm) { + for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) { + cm->mask[i] = ~((size_t)0); + } +} + +static inline bool mi_commit_mask_is_empty(const mi_commit_mask_t* cm) { + for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) { + if (cm->mask[i] != 0) return false; + } + return true; +} + +static inline bool mi_commit_mask_is_full(const mi_commit_mask_t* cm) { + for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) { + if (cm->mask[i] != ~((size_t)0)) return false; + } + return true; +} + +// defined in `segment.c`: +size_t _mi_commit_mask_committed_size(const mi_commit_mask_t* cm, size_t total); +size_t _mi_commit_mask_next_run(const mi_commit_mask_t* cm, size_t* idx); + +#define mi_commit_mask_foreach(cm,idx,count) \ + idx = 0; \ + while ((count = _mi_commit_mask_next_run(cm,&idx)) > 0) { + +#define 
mi_commit_mask_foreach_end() \ + idx += count; \ + } + + + +/* ----------------------------------------------------------- + memory id's +----------------------------------------------------------- */ + +static inline mi_memid_t _mi_memid_create(mi_memkind_t memkind) { + mi_memid_t memid; + _mi_memzero_var(memid); + memid.memkind = memkind; + return memid; +} + +static inline mi_memid_t _mi_memid_none(void) { + return _mi_memid_create(MI_MEM_NONE); +} + +static inline mi_memid_t _mi_memid_create_os(bool committed, bool is_zero, bool is_large) { + mi_memid_t memid = _mi_memid_create(MI_MEM_OS); + memid.initially_committed = committed; + memid.initially_zero = is_zero; + memid.is_pinned = is_large; + return memid; +} + + +// ------------------------------------------------------------------- +// Fast "random" shuffle +// ------------------------------------------------------------------- + +static inline uintptr_t _mi_random_shuffle(uintptr_t x) { + if (x==0) { x = 17; } // ensure we don't get stuck in generating zeros +#if (MI_INTPTR_SIZE==8) + // by Sebastiano Vigna, see: + x ^= x >> 30; + x *= 0xbf58476d1ce4e5b9UL; + x ^= x >> 27; + x *= 0x94d049bb133111ebUL; + x ^= x >> 31; +#elif (MI_INTPTR_SIZE==4) + // by Chris Wellons, see: + x ^= x >> 16; + x *= 0x7feb352dUL; + x ^= x >> 15; + x *= 0x846ca68bUL; + x ^= x >> 16; +#endif + return x; +} + +// ------------------------------------------------------------------- +// Optimize numa node access for the common case (= one node) +// ------------------------------------------------------------------- + +int _mi_os_numa_node_get(mi_os_tld_t* tld); +size_t _mi_os_numa_node_count_get(void); + +extern _Atomic(size_t) _mi_numa_node_count; +static inline int _mi_os_numa_node(mi_os_tld_t* tld) { + if mi_likely(mi_atomic_load_relaxed(&_mi_numa_node_count) == 1) { return 0; } + else return _mi_os_numa_node_get(tld); +} +static inline size_t _mi_os_numa_node_count(void) { + const size_t count = 
mi_atomic_load_relaxed(&_mi_numa_node_count); + if mi_likely(count > 0) { return count; } + else return _mi_os_numa_node_count_get(); +} + + + +// ----------------------------------------------------------------------- +// Count bits: trailing or leading zeros (with MI_INTPTR_BITS on all zero) +// ----------------------------------------------------------------------- + +#if defined(__GNUC__) + +#include // LONG_MAX +#define MI_HAVE_FAST_BITSCAN +static inline size_t mi_clz(uintptr_t x) { + if (x==0) return MI_INTPTR_BITS; +#if (INTPTR_MAX == LONG_MAX) + return __builtin_clzl(x); +#else + return __builtin_clzll(x); +#endif +} +static inline size_t mi_ctz(uintptr_t x) { + if (x==0) return MI_INTPTR_BITS; +#if (INTPTR_MAX == LONG_MAX) + return __builtin_ctzl(x); +#else + return __builtin_ctzll(x); +#endif +} + +#elif defined(_MSC_VER) + +#include // LONG_MAX +#include // BitScanReverse64 +#define MI_HAVE_FAST_BITSCAN +static inline size_t mi_clz(uintptr_t x) { + if (x==0) return MI_INTPTR_BITS; + unsigned long idx; +#if (INTPTR_MAX == LONG_MAX) + _BitScanReverse(&idx, x); +#else + _BitScanReverse64(&idx, x); +#endif + return ((MI_INTPTR_BITS - 1) - idx); +} +static inline size_t mi_ctz(uintptr_t x) { + if (x==0) return MI_INTPTR_BITS; + unsigned long idx; +#if (INTPTR_MAX == LONG_MAX) + _BitScanForward(&idx, x); +#else + _BitScanForward64(&idx, x); +#endif + return idx; +} + +#else +static inline size_t mi_ctz32(uint32_t x) { + // de Bruijn multiplication, see + static const unsigned char debruijn[32] = { + 0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8, + 31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9 + }; + if (x==0) return 32; + return debruijn[((x & -(int32_t)x) * 0x077CB531UL) >> 27]; +} +static inline size_t mi_clz32(uint32_t x) { + // de Bruijn multiplication, see + static const uint8_t debruijn[32] = { + 31, 22, 30, 21, 18, 10, 29, 2, 20, 17, 15, 13, 9, 6, 28, 1, + 23, 19, 11, 3, 16, 14, 7, 24, 12, 4, 8, 25, 5, 26, 27, 0 + }; + if (x==0) 
return 32; + x |= x >> 1; + x |= x >> 2; + x |= x >> 4; + x |= x >> 8; + x |= x >> 16; + return debruijn[(uint32_t)(x * 0x07C4ACDDUL) >> 27]; +} + +static inline size_t mi_clz(uintptr_t x) { + if (x==0) return MI_INTPTR_BITS; +#if (MI_INTPTR_BITS <= 32) + return mi_clz32((uint32_t)x); +#else + size_t count = mi_clz32((uint32_t)(x >> 32)); + if (count < 32) return count; + return (32 + mi_clz32((uint32_t)x)); +#endif +} +static inline size_t mi_ctz(uintptr_t x) { + if (x==0) return MI_INTPTR_BITS; +#if (MI_INTPTR_BITS <= 32) + return mi_ctz32((uint32_t)x); +#else + size_t count = mi_ctz32((uint32_t)x); + if (count < 32) return count; + return (32 + mi_ctz32((uint32_t)(x>>32))); +#endif +} + +#endif + +// "bit scan reverse": Return index of the highest bit (or MI_INTPTR_BITS if `x` is zero) +static inline size_t mi_bsr(uintptr_t x) { + return (x==0 ? MI_INTPTR_BITS : MI_INTPTR_BITS - 1 - mi_clz(x)); +} + + +// --------------------------------------------------------------------------------- +// Provide our own `_mi_memcpy` for potential performance optimizations. +// +// For now, only on Windows with msvc/clang-cl we optimize to `rep movsb` if +// we happen to run on x86/x64 cpu's that have "fast short rep movsb" (FSRM) support +// (AMD Zen3+ (~2020) or Intel Ice Lake+ (~2017). See also issue #201 and pr #253. 
+// --------------------------------------------------------------------------------- + +#if !MI_TRACK_ENABLED && defined(_WIN32) && (defined(_M_IX86) || defined(_M_X64)) +#include +extern bool _mi_cpu_has_fsrm; +static inline void _mi_memcpy(void* dst, const void* src, size_t n) { + if (_mi_cpu_has_fsrm) { + __movsb((unsigned char*)dst, (const unsigned char*)src, n); + } + else { + memcpy(dst, src, n); + } +} +static inline void _mi_memzero(void* dst, size_t n) { + if (_mi_cpu_has_fsrm) { + __stosb((unsigned char*)dst, 0, n); + } + else { + memset(dst, 0, n); + } +} +#else +static inline void _mi_memcpy(void* dst, const void* src, size_t n) { + memcpy(dst, src, n); +} +static inline void _mi_memzero(void* dst, size_t n) { + memset(dst, 0, n); +} +#endif + +// ------------------------------------------------------------------------------- +// The `_mi_memcpy_aligned` can be used if the pointers are machine-word aligned +// This is used for example in `mi_realloc`. +// ------------------------------------------------------------------------------- + +#if (defined(__GNUC__) && (__GNUC__ >= 4)) || defined(__clang__) +// On GCC/CLang we provide a hint that the pointers are word aligned. 
+static inline void _mi_memcpy_aligned(void* dst, const void* src, size_t n) { + mi_assert_internal(((uintptr_t)dst % MI_INTPTR_SIZE == 0) && ((uintptr_t)src % MI_INTPTR_SIZE == 0)); + void* adst = __builtin_assume_aligned(dst, MI_INTPTR_SIZE); + const void* asrc = __builtin_assume_aligned(src, MI_INTPTR_SIZE); + _mi_memcpy(adst, asrc, n); +} + +static inline void _mi_memzero_aligned(void* dst, size_t n) { + mi_assert_internal((uintptr_t)dst % MI_INTPTR_SIZE == 0); + void* adst = __builtin_assume_aligned(dst, MI_INTPTR_SIZE); + _mi_memzero(adst, n); +} +#else +// Default fallback on `_mi_memcpy` +static inline void _mi_memcpy_aligned(void* dst, const void* src, size_t n) { + mi_assert_internal(((uintptr_t)dst % MI_INTPTR_SIZE == 0) && ((uintptr_t)src % MI_INTPTR_SIZE == 0)); + _mi_memcpy(dst, src, n); +} + +static inline void _mi_memzero_aligned(void* dst, size_t n) { + mi_assert_internal((uintptr_t)dst % MI_INTPTR_SIZE == 0); + _mi_memzero(dst, n); +} +#endif + + +#endif diff --git a/yass/third_party/mimalloc/include/mimalloc/prim.h b/yass/third_party/mimalloc/include/mimalloc/prim.h new file mode 100644 index 0000000000..3f4574ddd9 --- /dev/null +++ b/yass/third_party/mimalloc/include/mimalloc/prim.h @@ -0,0 +1,373 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2018-2023, Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. +-----------------------------------------------------------------------------*/ +#pragma once +#ifndef MIMALLOC_PRIM_H +#define MIMALLOC_PRIM_H + + +// -------------------------------------------------------------------------- +// This file specifies the primitive portability API. +// Each OS/host needs to implement these primitives, see `src/prim` +// for implementations on Window, macOS, WASI, and Linux/Unix. 
+// +// note: on all primitive functions, we always have result parameters != NULL, and: +// addr != NULL and page aligned +// size > 0 and page aligned +// the return value is an error code as an `int` where 0 is success +// -------------------------------------------------------------------------- + +// OS memory configuration +typedef struct mi_os_mem_config_s { + size_t page_size; // default to 4KiB + size_t large_page_size; // 0 if not supported, usually 2MiB (4MiB on Windows) + size_t alloc_granularity; // smallest allocation size (usually 4KiB, on Windows 64KiB) + bool has_overcommit; // can we reserve more memory than can be actually committed? + bool has_partial_free; // can allocated blocks be freed partially? (true for mmap, false for VirtualAlloc) + bool has_virtual_reserve; // supports virtual address space reservation? (if true we can reserve virtual address space without using commit or physical memory) +} mi_os_mem_config_t; + +// Initialize +void _mi_prim_mem_init( mi_os_mem_config_t* config ); + +// Free OS memory +int _mi_prim_free(void* addr, size_t size ); + +// Allocate OS memory. Return NULL on error. +// The `try_alignment` is just a hint and the returned pointer does not have to be aligned. +// If `commit` is false, the virtual memory range only needs to be reserved (with no access) +// which will later be committed explicitly using `_mi_prim_commit`. +// `is_zero` is set to true if the memory was zero initialized (as on most OS's) +// pre: !commit => !allow_large +// try_alignment >= _mi_os_page_size() and a power of 2 +int _mi_prim_alloc(size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero, void** addr); + +// Commit memory. Returns error code or 0 on success. +// For example, on Linux this would make the memory PROT_READ|PROT_WRITE. +// `is_zero` is set to true if the memory was zero initialized (e.g. 
on Windows) +int _mi_prim_commit(void* addr, size_t size, bool* is_zero); + +// Decommit memory. Returns error code or 0 on success. The `needs_recommit` result is true +// if the memory would need to be re-committed. For example, on Windows this is always true, +// but on Linux we could use MADV_DONTNEED to decommit which does not need a recommit. +// pre: needs_recommit != NULL +int _mi_prim_decommit(void* addr, size_t size, bool* needs_recommit); + +// Reset memory. The range keeps being accessible but the content might be reset. +// Returns error code or 0 on success. +int _mi_prim_reset(void* addr, size_t size); + +// Protect memory. Returns error code or 0 on success. +int _mi_prim_protect(void* addr, size_t size, bool protect); + +// Allocate huge (1GiB) pages possibly associated with a NUMA node. +// `is_zero` is set to true if the memory was zero initialized (as on most OS's) +// pre: size > 0 and a multiple of 1GiB. +// numa_node is either negative (don't care), or a numa node number. +int _mi_prim_alloc_huge_os_pages(void* hint_addr, size_t size, int numa_node, bool* is_zero, void** addr); + +// Return the current NUMA node +size_t _mi_prim_numa_node(void); + +// Return the number of logical NUMA nodes +size_t _mi_prim_numa_node_count(void); + +// Clock ticks +mi_msecs_t _mi_prim_clock_now(void); + +// Return process information (only for statistics) +typedef struct mi_process_info_s { + mi_msecs_t elapsed; + mi_msecs_t utime; + mi_msecs_t stime; + size_t current_rss; + size_t peak_rss; + size_t current_commit; + size_t peak_commit; + size_t page_faults; +} mi_process_info_t; + +void _mi_prim_process_info(mi_process_info_t* pinfo); + +// Default stderr output. (only for warnings etc. with verbose enabled) +// msg != NULL && _mi_strlen(msg) > 0 +void _mi_prim_out_stderr( const char* msg ); + +// Get an environment variable. 
(only for options) +// name != NULL, result != NULL, result_size >= 64 +bool _mi_prim_getenv(const char* name, char* result, size_t result_size); + + +// Fill a buffer with strong randomness; return `false` on error or if +// there is no strong randomization available. +bool _mi_prim_random_buf(void* buf, size_t buf_len); + +// Called on the first thread start, and should ensure `_mi_thread_done` is called on thread termination. +void _mi_prim_thread_init_auto_done(void); + +// Called on process exit and may take action to clean up resources associated with the thread auto done. +void _mi_prim_thread_done_auto_done(void); + +// Called when the default heap for a thread changes +void _mi_prim_thread_associate_default_heap(mi_heap_t* heap); + + +//------------------------------------------------------------------- +// Thread id: `_mi_prim_thread_id()` +// +// Getting the thread id should be performant as it is called in the +// fast path of `_mi_free` and we specialize for various platforms as +// inlined definitions. Regular code should call `init.c:_mi_thread_id()`. +// We only require _mi_prim_thread_id() to return a unique id +// for each thread (unequal to zero). +//------------------------------------------------------------------- + +// On some libc + platform combinations we can directly access a thread-local storage (TLS) slot. +// The TLS layout depends on both the OS and libc implementation so we use specific tests for each main platform. +// If you test on another platform and it works please send a PR :-) +// see also https://akkadia.org/drepper/tls.pdf for more info on the TLS register. +// +// Note: we would like to prefer `__builtin_thread_pointer()` nowadays instead of using assembly, +// but unfortunately we can not detect support reliably (see issue #883) +// We also use it on Apple OS as we use a TLS slot for the default heap there. 
+#if defined(__GNUC__) && ( \ + (defined(__GLIBC__) && (defined(__x86_64__) || defined(__i386__) || defined(__arm__) || defined(__aarch64__))) \ + || (defined(__APPLE__) && (defined(__x86_64__) || defined(__aarch64__) || defined(__POWERPC__))) \ + || (defined(__BIONIC__) && (defined(__x86_64__) || defined(__i386__) || defined(__arm__) || defined(__aarch64__))) \ + || (defined(__FreeBSD__) && (defined(__x86_64__) || defined(__i386__) || defined(__aarch64__))) \ + || (defined(__OpenBSD__) && (defined(__x86_64__) || defined(__i386__) || defined(__aarch64__))) \ + ) + +#define MI_HAS_TLS_SLOT + +static inline void* mi_prim_tls_slot(size_t slot) mi_attr_noexcept { + void* res; + const size_t ofs = (slot*sizeof(void*)); + #if defined(__i386__) + __asm__("movl %%gs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : ); // x86 32-bit always uses GS + #elif defined(__APPLE__) && defined(__x86_64__) + __asm__("movq %%gs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : ); // x86_64 macOSX uses GS + #elif defined(__x86_64__) && (MI_INTPTR_SIZE==4) + __asm__("movl %%fs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : ); // x32 ABI + #elif defined(__x86_64__) + __asm__("movq %%fs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : ); // x86_64 Linux, BSD uses FS + #elif defined(__arm__) + void** tcb; MI_UNUSED(ofs); + __asm__ volatile ("mrc p15, 0, %0, c13, c0, 3\nbic %0, %0, #3" : "=r" (tcb)); + res = tcb[slot]; + #elif defined(__aarch64__) + void** tcb; MI_UNUSED(ofs); + #if defined(__APPLE__) // M1, issue #343 + __asm__ volatile ("mrs %0, tpidrro_el0\nbic %0, %0, #7" : "=r" (tcb)); + #else + __asm__ volatile ("mrs %0, tpidr_el0" : "=r" (tcb)); + #endif + res = tcb[slot]; + #elif defined(__APPLE__) && defined(__POWERPC__) // ppc, issue #781 + MI_UNUSED(ofs); + res = pthread_getspecific(slot); + #endif + return res; +} + +// setting a tls slot is only used on macOS for now +static inline void mi_prim_tls_slot_set(size_t slot, void* value) mi_attr_noexcept { + const size_t ofs = 
(slot*sizeof(void*)); + #if defined(__i386__) + __asm__("movl %1,%%gs:%0" : "=m" (*((void**)ofs)) : "rn" (value) : ); // 32-bit always uses GS + #elif defined(__APPLE__) && defined(__x86_64__) + __asm__("movq %1,%%gs:%0" : "=m" (*((void**)ofs)) : "rn" (value) : ); // x86_64 macOS uses GS + #elif defined(__x86_64__) && (MI_INTPTR_SIZE==4) + __asm__("movl %1,%%fs:%0" : "=m" (*((void**)ofs)) : "rn" (value) : ); // x32 ABI + #elif defined(__x86_64__) + __asm__("movq %1,%%fs:%0" : "=m" (*((void**)ofs)) : "rn" (value) : ); // x86_64 Linux, BSD uses FS + #elif defined(__arm__) + void** tcb; MI_UNUSED(ofs); + __asm__ volatile ("mrc p15, 0, %0, c13, c0, 3\nbic %0, %0, #3" : "=r" (tcb)); + tcb[slot] = value; + #elif defined(__aarch64__) + void** tcb; MI_UNUSED(ofs); + #if defined(__APPLE__) // M1, issue #343 + __asm__ volatile ("mrs %0, tpidrro_el0\nbic %0, %0, #7" : "=r" (tcb)); + #else + __asm__ volatile ("mrs %0, tpidr_el0" : "=r" (tcb)); + #endif + tcb[slot] = value; + #elif defined(__APPLE__) && defined(__POWERPC__) // ppc, issue #781 + MI_UNUSED(ofs); + pthread_setspecific(slot, value); + #endif +} + +#endif + +// Do we have __builtin_thread_pointer? This would be the preferred way to get a unique thread id +// but unfortunately, it seems we cannot test for this reliably at this time (see issue #883) +// Nevertheless, it seems needed on older graviton platforms (see issue #851). +// For now, we only enable this for specific platforms. 
+#if !defined(__APPLE__) /* on apple (M1) the wrong register is read (tpidr_el0 instead of tpidrro_el0) so fall back to TLS slot assembly ()*/ \ + && !defined(MI_LIBC_MUSL) \ + && (!defined(__clang_major__) || __clang_major__ >= 14) /* older clang versions emit bad code; fall back to using the TLS slot () */ + #if (defined(__GNUC__) && (__GNUC__ >= 7) && defined(__aarch64__)) /* aarch64 for older gcc versions (issue #851) */ \ + || (defined(__GNUC__) && (__GNUC__ >= 11) && defined(__x86_64__)) \ + || (defined(__clang_major__) && (__clang_major__ >= 14) && (defined(__aarch64__) || defined(__x86_64__))) + #define MI_USE_BUILTIN_THREAD_POINTER 1 + #endif +#endif + + + +// defined in `init.c`; do not use these directly +extern mi_decl_thread mi_heap_t* _mi_heap_default; // default heap to allocate from +extern bool _mi_process_is_initialized; // has mi_process_init been called? + +static inline mi_threadid_t _mi_prim_thread_id(void) mi_attr_noexcept; + +// Get a unique id for the current thread. 
+#if defined(MI_PRIM_THREAD_ID) + +static inline mi_threadid_t _mi_prim_thread_id(void) mi_attr_noexcept { + return MI_PRIM_THREAD_ID(); // used for example by CPython for a free threaded build (see python/cpython#115488) +} + +#elif defined(_WIN32) + +#ifndef WIN32_LEAN_AND_MEAN +#define WIN32_LEAN_AND_MEAN +#endif +#include +static inline mi_threadid_t _mi_prim_thread_id(void) mi_attr_noexcept { + // Windows: works on Intel and ARM in both 32- and 64-bit + return (uintptr_t)NtCurrentTeb(); +} + +#elif MI_USE_BUILTIN_THREAD_POINTER + +static inline mi_threadid_t _mi_prim_thread_id(void) mi_attr_noexcept { + // Works on most Unix based platforms with recent compilers + return (uintptr_t)__builtin_thread_pointer(); +} + +#elif defined(MI_HAS_TLS_SLOT) + +static inline mi_threadid_t _mi_prim_thread_id(void) mi_attr_noexcept { + #if defined(__BIONIC__) + // issue #384, #495: on the Bionic libc (Android), slot 1 is the thread id + // see: https://github.com/aosp-mirror/platform_bionic/blob/c44b1d0676ded732df4b3b21c5f798eacae93228/libc/platform/bionic/tls_defines.h#L86 + return (uintptr_t)mi_prim_tls_slot(1); + #else + // in all our other targets, slot 0 is the thread id + // glibc: https://sourceware.org/git/?p=glibc.git;a=blob_plain;f=sysdeps/x86_64/nptl/tls.h + // apple: https://github.com/apple/darwin-xnu/blob/main/libsyscall/os/tsd.h#L36 + return (uintptr_t)mi_prim_tls_slot(0); + #endif +} + +#else + +// otherwise use portable C, taking the address of a thread local variable (this is still very fast on most platforms). +static inline mi_threadid_t _mi_prim_thread_id(void) mi_attr_noexcept { + return (uintptr_t)&_mi_heap_default; +} + +#endif + + + +/* ---------------------------------------------------------------------------------------- +The thread local default heap: `_mi_prim_get_default_heap()` +This is inlined here as it is on the fast path for allocation functions. 
+ +On most platforms (Windows, Linux, FreeBSD, NetBSD, etc), this just returns a +__thread local variable (`_mi_heap_default`). With the initial-exec TLS model this ensures +that the storage will always be available (allocated on the thread stacks). + +On some platforms though we cannot use that when overriding `malloc` since the underlying +TLS implementation (or the loader) will call itself `malloc` on a first access and recurse. +We try to circumvent this in an efficient way: +- macOSX : we use an unused TLS slot from the OS allocated slots (MI_TLS_SLOT). On OSX, the + loader itself calls `malloc` even before the modules are initialized. +- OpenBSD: we use an unused slot from the pthread block (MI_TLS_PTHREAD_SLOT_OFS). +- DragonFly: defaults are working but seem slow compared to freeBSD (see PR #323) +------------------------------------------------------------------------------------------- */ + +static inline mi_heap_t* mi_prim_get_default_heap(void); + +#if defined(MI_MALLOC_OVERRIDE) +#if defined(__APPLE__) // macOS + #define MI_TLS_SLOT 89 // seems unused? + // other possible unused ones are 9, 29, __PTK_FRAMEWORK_JAVASCRIPTCORE_KEY4 (94), __PTK_FRAMEWORK_GC_KEY9 (112) and __PTK_FRAMEWORK_OLDGC_KEY9 (89) + // see +#elif defined(__OpenBSD__) + // use end bytes of a name; goes wrong if anyone uses names > 23 characters (ptrhread specifies 16) + // see + #define MI_TLS_PTHREAD_SLOT_OFS (6*sizeof(int) + 4*sizeof(void*) + 24) + // #elif defined(__DragonFly__) + // #warning "mimalloc is not working correctly on DragonFly yet." + // #define MI_TLS_PTHREAD_SLOT_OFS (4 + 1*sizeof(void*)) // offset `uniqueid` (also used by gdb?) 
+#elif defined(__ANDROID__) + // See issue #381 + #define MI_TLS_PTHREAD +#endif +#endif + + +#if defined(MI_TLS_SLOT) +# if !defined(MI_HAS_TLS_SLOT) +# error "trying to use a TLS slot for the default heap, but the mi_prim_tls_slot primitives are not defined" +# endif + +static inline mi_heap_t* mi_prim_get_default_heap(void) { + mi_heap_t* heap = (mi_heap_t*)mi_prim_tls_slot(MI_TLS_SLOT); + if mi_unlikely(heap == NULL) { + #ifdef __GNUC__ + __asm(""); // prevent conditional load of the address of _mi_heap_empty + #endif + heap = (mi_heap_t*)&_mi_heap_empty; + } + return heap; +} + +#elif defined(MI_TLS_PTHREAD_SLOT_OFS) + +static inline mi_heap_t** mi_prim_tls_pthread_heap_slot(void) { + pthread_t self = pthread_self(); + #if defined(__DragonFly__) + if (self==NULL) return NULL; + #endif + return (mi_heap_t**)((uint8_t*)self + MI_TLS_PTHREAD_SLOT_OFS); +} + +static inline mi_heap_t* mi_prim_get_default_heap(void) { + mi_heap_t** pheap = mi_prim_tls_pthread_heap_slot(); + if mi_unlikely(pheap == NULL) return _mi_heap_main_get(); + mi_heap_t* heap = *pheap; + if mi_unlikely(heap == NULL) return (mi_heap_t*)&_mi_heap_empty; + return heap; +} + +#elif defined(MI_TLS_PTHREAD) + +extern pthread_key_t _mi_heap_default_key; +static inline mi_heap_t* mi_prim_get_default_heap(void) { + mi_heap_t* heap = (mi_unlikely(_mi_heap_default_key == (pthread_key_t)(-1)) ? _mi_heap_main_get() : (mi_heap_t*)pthread_getspecific(_mi_heap_default_key)); + return (mi_unlikely(heap == NULL) ? (mi_heap_t*)&_mi_heap_empty : heap); +} + +#else // default using a thread local variable; used on most platforms. 
+ +static inline mi_heap_t* mi_prim_get_default_heap(void) { + #if defined(MI_TLS_RECURSE_GUARD) + if (mi_unlikely(!_mi_process_is_initialized)) return _mi_heap_main_get(); + #endif + return _mi_heap_default; +} + +#endif // mi_prim_get_default_heap() + + + +#endif // MIMALLOC_PRIM_H diff --git a/yass/third_party/mimalloc/include/mimalloc/track.h b/yass/third_party/mimalloc/include/mimalloc/track.h new file mode 100644 index 0000000000..a659d94044 --- /dev/null +++ b/yass/third_party/mimalloc/include/mimalloc/track.h @@ -0,0 +1,149 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2018-2023, Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. +-----------------------------------------------------------------------------*/ +#pragma once +#ifndef MIMALLOC_TRACK_H +#define MIMALLOC_TRACK_H + +/* ------------------------------------------------------------------------------------------------------ +Track memory ranges with macros for tools like Valgrind address sanitizer, or other memory checkers. +These can be defined for tracking allocation: + + #define mi_track_malloc_size(p,reqsize,size,zero) + #define mi_track_free_size(p,_size) + +The macros are set up such that the size passed to `mi_track_free_size` +always matches the size of `mi_track_malloc_size`. (currently, `size == mi_usable_size(p)`). +The `reqsize` is what the user requested, and `size >= reqsize`. +The `size` is either byte precise (and `size==reqsize`) if `MI_PADDING` is enabled, +or otherwise it is the usable block size which may be larger than the original request. +Use `_mi_block_size_of(void* p)` to get the full block size that was allocated (including padding etc). +The `zero` parameter is `true` if the allocated block is zero initialized. 
+ +Optional: + + #define mi_track_align(p,alignedp,offset,size) + #define mi_track_resize(p,oldsize,newsize) + #define mi_track_init() + +The `mi_track_align` is called right after a `mi_track_malloc` for aligned pointers in a block. +The corresponding `mi_track_free` still uses the block start pointer and original size (corresponding to the `mi_track_malloc`). +The `mi_track_resize` is currently unused but could be called on reallocations within a block. +`mi_track_init` is called at program start. + +The following macros are for tools like asan and valgrind to track whether memory is +defined, undefined, or not accessible at all: + + #define mi_track_mem_defined(p,size) + #define mi_track_mem_undefined(p,size) + #define mi_track_mem_noaccess(p,size) + +-------------------------------------------------------------------------------------------------------*/ + +#if MI_TRACK_VALGRIND +// valgrind tool + +#define MI_TRACK_ENABLED 1 +#define MI_TRACK_HEAP_DESTROY 1 // track free of individual blocks on heap_destroy +#define MI_TRACK_TOOL "valgrind" + +#include +#include + +#define mi_track_malloc_size(p,reqsize,size,zero) VALGRIND_MALLOCLIKE_BLOCK(p,size,MI_PADDING_SIZE /*red zone*/,zero) +#define mi_track_free_size(p,_size) VALGRIND_FREELIKE_BLOCK(p,MI_PADDING_SIZE /*red zone*/) +#define mi_track_resize(p,oldsize,newsize) VALGRIND_RESIZEINPLACE_BLOCK(p,oldsize,newsize,MI_PADDING_SIZE /*red zone*/) +#define mi_track_mem_defined(p,size) VALGRIND_MAKE_MEM_DEFINED(p,size) +#define mi_track_mem_undefined(p,size) VALGRIND_MAKE_MEM_UNDEFINED(p,size) +#define mi_track_mem_noaccess(p,size) VALGRIND_MAKE_MEM_NOACCESS(p,size) + +#elif MI_TRACK_ASAN +// address sanitizer + +#define MI_TRACK_ENABLED 1 +#define MI_TRACK_HEAP_DESTROY 0 +#define MI_TRACK_TOOL "asan" + +#include + +#define mi_track_malloc_size(p,reqsize,size,zero) ASAN_UNPOISON_MEMORY_REGION(p,size) +#define mi_track_free_size(p,size) ASAN_POISON_MEMORY_REGION(p,size) +#define mi_track_mem_defined(p,size) 
ASAN_UNPOISON_MEMORY_REGION(p,size) +#define mi_track_mem_undefined(p,size) ASAN_UNPOISON_MEMORY_REGION(p,size) +#define mi_track_mem_noaccess(p,size) ASAN_POISON_MEMORY_REGION(p,size) + +#elif MI_TRACK_ETW +// windows event tracing + +#define MI_TRACK_ENABLED 1 +#define MI_TRACK_HEAP_DESTROY 1 +#define MI_TRACK_TOOL "ETW" + +#ifndef WIN32_LEAN_AND_MEAN +#define WIN32_LEAN_AND_MEAN +#endif +#include +#include "../src/prim/windows/etw.h" + +#define mi_track_init() EventRegistermicrosoft_windows_mimalloc(); +#define mi_track_malloc_size(p,reqsize,size,zero) EventWriteETW_MI_ALLOC((UINT64)(p), size) +#define mi_track_free_size(p,size) EventWriteETW_MI_FREE((UINT64)(p), size) + +#else +// no tracking + +#define MI_TRACK_ENABLED 0 +#define MI_TRACK_HEAP_DESTROY 0 +#define MI_TRACK_TOOL "none" + +#define mi_track_malloc_size(p,reqsize,size,zero) +#define mi_track_free_size(p,_size) + +#endif + +// ------------------- +// Utility definitions + +#ifndef mi_track_resize +#define mi_track_resize(p,oldsize,newsize) mi_track_free_size(p,oldsize); mi_track_malloc(p,newsize,false) +#endif + +#ifndef mi_track_align +#define mi_track_align(p,alignedp,offset,size) mi_track_mem_noaccess(p,offset) +#endif + +#ifndef mi_track_init +#define mi_track_init() +#endif + +#ifndef mi_track_mem_defined +#define mi_track_mem_defined(p,size) +#endif + +#ifndef mi_track_mem_undefined +#define mi_track_mem_undefined(p,size) +#endif + +#ifndef mi_track_mem_noaccess +#define mi_track_mem_noaccess(p,size) +#endif + + +#if MI_PADDING +#define mi_track_malloc(p,reqsize,zero) \ + if ((p)!=NULL) { \ + mi_assert_internal(mi_usable_size(p)==(reqsize)); \ + mi_track_malloc_size(p,reqsize,reqsize,zero); \ + } +#else +#define mi_track_malloc(p,reqsize,zero) \ + if ((p)!=NULL) { \ + mi_assert_internal(mi_usable_size(p)>=(reqsize)); \ + mi_track_malloc_size(p,reqsize,mi_usable_size(p),zero); \ + } +#endif + +#endif diff --git a/yass/third_party/mimalloc/include/mimalloc/types.h 
b/yass/third_party/mimalloc/include/mimalloc/types.h new file mode 100644 index 0000000000..2fdde904bb --- /dev/null +++ b/yass/third_party/mimalloc/include/mimalloc/types.h @@ -0,0 +1,705 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2018-2024, Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. +-----------------------------------------------------------------------------*/ +#pragma once +#ifndef MIMALLOC_TYPES_H +#define MIMALLOC_TYPES_H + +// -------------------------------------------------------------------------- +// This file contains the main type definitions for mimalloc: +// mi_heap_t : all data for a thread-local heap, contains +// lists of all managed heap pages. +// mi_segment_t : a larger chunk of memory (32GiB) from where pages +// are allocated. A segment is divided in slices (64KiB) from +// which pages are allocated. +// mi_page_t : a "mimalloc" page (usually 64KiB or 512KiB) from +// where objects are allocated. +// Note: we write "OS page" for OS memory pages while +// using plain "page" for mimalloc pages (`mi_page_t`). +// -------------------------------------------------------------------------- + + +#include // ptrdiff_t +#include // uintptr_t, uint16_t, etc +#include "atomic.h" // _Atomic + +#ifdef _MSC_VER +#pragma warning(disable:4214) // bitfield is not int +#endif + +// Minimal alignment necessary. On most platforms 16 bytes are needed +// due to SSE registers for example. This must be at least `sizeof(void*)` +#ifndef MI_MAX_ALIGN_SIZE +#define MI_MAX_ALIGN_SIZE 16 // sizeof(max_align_t) +#endif + +// ------------------------------------------------------ +// Variants +// ------------------------------------------------------ + +// Define NDEBUG in the release version to disable assertions. 
+// #define NDEBUG + +// Define MI_TRACK_ to enable tracking support +// #define MI_TRACK_VALGRIND 1 +// #define MI_TRACK_ASAN 1 +// #define MI_TRACK_ETW 1 + +// Define MI_STAT as 1 to maintain statistics; set it to 2 to have detailed statistics (but costs some performance). +// #define MI_STAT 1 + +// Define MI_SECURE to enable security mitigations +// #define MI_SECURE 1 // guard page around metadata +// #define MI_SECURE 2 // guard page around each mimalloc page +// #define MI_SECURE 3 // encode free lists (detect corrupted free list (buffer overflow), and invalid pointer free) +// #define MI_SECURE 4 // checks for double free. (may be more expensive) + +#if !defined(MI_SECURE) +#define MI_SECURE 0 +#endif + +// Define MI_DEBUG for debug mode +// #define MI_DEBUG 1 // basic assertion checks and statistics, check double free, corrupted free list, and invalid pointer free. +// #define MI_DEBUG 2 // + internal assertion checks +// #define MI_DEBUG 3 // + extensive internal invariant checking (cmake -DMI_DEBUG_FULL=ON) +#if !defined(MI_DEBUG) +#if !defined(NDEBUG) || defined(_DEBUG) +#define MI_DEBUG 2 +#else +#define MI_DEBUG 0 +#endif +#endif + +// Reserve extra padding at the end of each block to be more resilient against heap block overflows. +// The padding can detect buffer overflow on free. +#if !defined(MI_PADDING) && (MI_SECURE>=3 || MI_DEBUG>=1 || (MI_TRACK_VALGRIND || MI_TRACK_ASAN || MI_TRACK_ETW)) +#define MI_PADDING 1 +#endif + +// Check padding bytes; allows byte-precise buffer overflow detection +#if !defined(MI_PADDING_CHECK) && MI_PADDING && (MI_SECURE>=3 || MI_DEBUG>=1) +#define MI_PADDING_CHECK 1 +#endif + + +// Encoded free lists allow detection of corrupted free lists +// and can detect buffer overflows, modify after free, and double `free`s. +#if (MI_SECURE>=3 || MI_DEBUG>=1) +#define MI_ENCODE_FREELIST 1 +#endif + + +// We used to abandon huge pages in order to eagerly deallocate it if freed from another thread. 
+// Unfortunately, that makes it not possible to visit them during a heap walk or include them in a +// `mi_heap_destroy`. We therefore instead reset/decommit the huge blocks nowadays if freed from +// another thread so the memory becomes "virtually" available (and eventually gets properly freed by +// the owning thread). +// #define MI_HUGE_PAGE_ABANDON 1 + + +// ------------------------------------------------------ +// Platform specific values +// ------------------------------------------------------ + +// ------------------------------------------------------ +// Size of a pointer. +// We assume that `sizeof(void*)==sizeof(intptr_t)` +// and it holds for all platforms we know of. +// +// However, the C standard only requires that: +// p == (void*)((intptr_t)p)) +// but we also need: +// i == (intptr_t)((void*)i) +// or otherwise one might define an intptr_t type that is larger than a pointer... +// ------------------------------------------------------ + +#if INTPTR_MAX > INT64_MAX +# define MI_INTPTR_SHIFT (4) // assume 128-bit (as on arm CHERI for example) +#elif INTPTR_MAX == INT64_MAX +# define MI_INTPTR_SHIFT (3) +#elif INTPTR_MAX == INT32_MAX +# define MI_INTPTR_SHIFT (2) +#else +#error platform pointers must be 32, 64, or 128 bits +#endif + +#if SIZE_MAX == UINT64_MAX +# define MI_SIZE_SHIFT (3) +typedef int64_t mi_ssize_t; +#elif SIZE_MAX == UINT32_MAX +# define MI_SIZE_SHIFT (2) +typedef int32_t mi_ssize_t; +#else +#error platform objects must be 32 or 64 bits +#endif + +#if (SIZE_MAX/2) > LONG_MAX +# define MI_ZU(x) x##ULL +# define MI_ZI(x) x##LL +#else +# define MI_ZU(x) x##UL +# define MI_ZI(x) x##L +#endif + +#define MI_INTPTR_SIZE (1< 4 +#define MI_SEGMENT_SHIFT ( 9 + MI_SEGMENT_SLICE_SHIFT) // 32MiB +#else +#define MI_SEGMENT_SHIFT ( 7 + MI_SEGMENT_SLICE_SHIFT) // 4MiB on 32-bit +#endif +#endif + +#ifndef MI_SMALL_PAGE_SHIFT +#define MI_SMALL_PAGE_SHIFT (MI_SEGMENT_SLICE_SHIFT) // 64KiB +#endif +#ifndef MI_MEDIUM_PAGE_SHIFT +#define 
MI_MEDIUM_PAGE_SHIFT ( 3 + MI_SMALL_PAGE_SHIFT) // 512KiB +#endif + +// Derived constants +#define MI_SEGMENT_SIZE (MI_ZU(1)<= 655360) +#error "mimalloc internal: define more bins" +#endif + +// Maximum block size for which blocks are guaranteed to be block size aligned. (see `segment.c:_mi_segment_page_start`) +#define MI_MAX_ALIGN_GUARANTEE (MI_MEDIUM_OBJ_SIZE_MAX) + +// Alignments over MI_BLOCK_ALIGNMENT_MAX are allocated in dedicated huge page segments +#define MI_BLOCK_ALIGNMENT_MAX (MI_SEGMENT_SIZE >> 1) + +// Maximum slice count (255) for which we can find the page for interior pointers +#define MI_MAX_SLICE_OFFSET_COUNT ((MI_BLOCK_ALIGNMENT_MAX / MI_SEGMENT_SLICE_SIZE) - 1) + +// we never allocate more than PTRDIFF_MAX (see also ) +// on 64-bit+ systems we also limit the maximum allocation size such that the slice count fits in 32-bits. (issue #877) +#if (PTRDIFF_MAX > INT32_MAX) && (PTRDIFF_MAX >= (MI_SEGMENT_SLIZE_SIZE * UINT32_MAX)) +#define MI_MAX_ALLOC_SIZE (MI_SEGMENT_SLICE_SIZE * (UINT32_MAX-1)) +#else +#define MI_MAX_ALLOC_SIZE PTRDIFF_MAX +#endif + + +// ------------------------------------------------------ +// Mimalloc pages contain allocated blocks +// ------------------------------------------------------ + +// The free lists use encoded next fields +// (Only actually encodes when MI_ENCODED_FREELIST is defined.) 
+typedef uintptr_t mi_encoded_t; + +// thread id's +typedef size_t mi_threadid_t; + +// free lists contain blocks +typedef struct mi_block_s { + mi_encoded_t next; +} mi_block_t; + + +// The delayed flags are used for efficient multi-threaded free-ing +typedef enum mi_delayed_e { + MI_USE_DELAYED_FREE = 0, // push on the owning heap thread delayed list + MI_DELAYED_FREEING = 1, // temporary: another thread is accessing the owning heap + MI_NO_DELAYED_FREE = 2, // optimize: push on page local thread free queue if another block is already in the heap thread delayed free list + MI_NEVER_DELAYED_FREE = 3 // sticky: used for abondoned pages without a owning heap; this only resets on page reclaim +} mi_delayed_t; + + +// The `in_full` and `has_aligned` page flags are put in a union to efficiently +// test if both are false (`full_aligned == 0`) in the `mi_free` routine. +#if !MI_TSAN +typedef union mi_page_flags_s { + uint8_t full_aligned; + struct { + uint8_t in_full : 1; + uint8_t has_aligned : 1; + } x; +} mi_page_flags_t; +#else +// under thread sanitizer, use a byte for each flag to suppress warning, issue #130 +typedef union mi_page_flags_s { + uint16_t full_aligned; + struct { + uint8_t in_full; + uint8_t has_aligned; + } x; +} mi_page_flags_t; +#endif + +// Thread free list. +// We use the bottom 2 bits of the pointer for mi_delayed_t flags +typedef uintptr_t mi_thread_free_t; + +// A page contains blocks of one specific size (`block_size`). +// Each page has three list of free blocks: +// `free` for blocks that can be allocated, +// `local_free` for freed blocks that are not yet available to `mi_malloc` +// `thread_free` for freed blocks by other threads +// The `local_free` and `thread_free` lists are migrated to the `free` list +// when it is exhausted. The separate `local_free` list is necessary to +// implement a monotonic heartbeat. The `thread_free` list is needed for +// avoiding atomic operations in the common case. 
+// +// `used - |thread_free|` == actual blocks that are in use (alive) +// `used - |thread_free| + |free| + |local_free| == capacity` +// +// We don't count `freed` (as |free|) but use `used` to reduce +// the number of memory accesses in the `mi_page_all_free` function(s). +// +// Notes: +// - Access is optimized for `free.c:mi_free` and `alloc.c:mi_page_alloc` +// - Using `uint16_t` does not seem to slow things down +// - The size is 12 words on 64-bit which helps the page index calculations +// (and 14 words on 32-bit, and encoded free lists add 2 words) +// - `xthread_free` uses the bottom bits as a delayed-free flags to optimize +// concurrent frees where only the first concurrent free adds to the owning +// heap `thread_delayed_free` list (see `free.c:mi_free_block_mt`). +// The invariant is that no-delayed-free is only set if there is +// at least one block that will be added, or as already been added, to +// the owning heap `thread_delayed_free` list. This guarantees that pages +// will be freed correctly even if only other threads free blocks. 
+typedef struct mi_page_s { + // "owned" by the segment + uint32_t slice_count; // slices in this page (0 if not a page) + uint32_t slice_offset; // distance from the actual page data slice (0 if a page) + uint8_t is_committed:1; // `true` if the page virtual memory is committed + uint8_t is_zero_init:1; // `true` if the page was initially zero initialized + uint8_t is_huge:1; // `true` if the page is in a huge segment (`segment->kind == MI_SEGMENT_HUGE`) + // padding + // layout like this to optimize access in `mi_malloc` and `mi_free` + uint16_t capacity; // number of blocks committed, must be the first field, see `segment.c:page_clear` + uint16_t reserved; // number of blocks reserved in memory + mi_page_flags_t flags; // `in_full` and `has_aligned` flags (8 bits) + uint8_t free_is_zero:1; // `true` if the blocks in the free list are zero initialized + uint8_t retire_expire:7; // expiration count for retired blocks + + mi_block_t* free; // list of available free blocks (`malloc` allocates from this list) + mi_block_t* local_free; // list of deferred free blocks by this thread (migrates to `free`) + uint16_t used; // number of blocks in use (including blocks in `thread_free`) + uint8_t block_size_shift; // if not zero, then `(1 << block_size_shift) == block_size` (only used for fast path in `free.c:_mi_page_ptr_unalign`) + uint8_t heap_tag; // tag of the owning heap, used for separated heaps by object type + // padding + size_t block_size; // size available in each block (always `>0`) + uint8_t* page_start; // start of the page area containing the blocks + + #if (MI_ENCODE_FREELIST || MI_PADDING) + uintptr_t keys[2]; // two random keys to encode the free lists (see `_mi_block_next`) or padding canary + #endif + + _Atomic(mi_thread_free_t) xthread_free; // list of deferred free blocks freed by other threads + _Atomic(uintptr_t) xheap; + + struct mi_page_s* next; // next page owned by this thread with the same `block_size` + struct mi_page_s* prev; // previous page 
owned by this thread with the same `block_size` + + // 64-bit 11 words, 32-bit 13 words, (+2 for secure) + void* padding[1]; +} mi_page_t; + + + +// ------------------------------------------------------ +// Mimalloc segments contain mimalloc pages +// ------------------------------------------------------ + +typedef enum mi_page_kind_e { + MI_PAGE_SMALL, // small blocks go into 64KiB pages inside a segment + MI_PAGE_MEDIUM, // medium blocks go into 512KiB pages inside a segment + MI_PAGE_LARGE, // larger blocks go into a single page spanning a whole segment + MI_PAGE_HUGE // a huge page is a single page in a segment of variable size + // used for blocks `> MI_LARGE_OBJ_SIZE_MAX` or an aligment `> MI_BLOCK_ALIGNMENT_MAX`. +} mi_page_kind_t; + +typedef enum mi_segment_kind_e { + MI_SEGMENT_NORMAL, // MI_SEGMENT_SIZE size with pages inside. + MI_SEGMENT_HUGE, // segment with just one huge page inside. +} mi_segment_kind_t; + +// ------------------------------------------------------ +// A segment holds a commit mask where a bit is set if +// the corresponding MI_COMMIT_SIZE area is committed. +// The MI_COMMIT_SIZE must be a multiple of the slice +// size. If it is equal we have the most fine grained +// decommit (but setting it higher can be more efficient). 
+// The MI_MINIMAL_COMMIT_SIZE is the minimal amount that will +// be committed in one go which can be set higher than +// MI_COMMIT_SIZE for efficiency (while the decommit mask +// is still tracked in fine-grained MI_COMMIT_SIZE chunks) +// ------------------------------------------------------ + +#define MI_MINIMAL_COMMIT_SIZE (1*MI_SEGMENT_SLICE_SIZE) +#define MI_COMMIT_SIZE (MI_SEGMENT_SLICE_SIZE) // 64KiB +#define MI_COMMIT_MASK_BITS (MI_SEGMENT_SIZE / MI_COMMIT_SIZE) +#define MI_COMMIT_MASK_FIELD_BITS MI_SIZE_BITS +#define MI_COMMIT_MASK_FIELD_COUNT (MI_COMMIT_MASK_BITS / MI_COMMIT_MASK_FIELD_BITS) + +#if (MI_COMMIT_MASK_BITS != (MI_COMMIT_MASK_FIELD_COUNT * MI_COMMIT_MASK_FIELD_BITS)) +#error "the segment size must be exactly divisible by the (commit size * size_t bits)" +#endif + +typedef struct mi_commit_mask_s { + size_t mask[MI_COMMIT_MASK_FIELD_COUNT]; +} mi_commit_mask_t; + +typedef mi_page_t mi_slice_t; +typedef int64_t mi_msecs_t; + + +// --------------------------------------------------------------- +// a memory id tracks the provenance of arena/OS allocated memory +// --------------------------------------------------------------- + +// Memory can reside in arena's, direct OS allocated, or statically allocated. The memid keeps track of this. +typedef enum mi_memkind_e { + MI_MEM_NONE, // not allocated + MI_MEM_EXTERNAL, // not owned by mimalloc but provided externally (via `mi_manage_os_memory` for example) + MI_MEM_STATIC, // allocated in a static area and should not be freed (for arena meta data for example) + MI_MEM_OS, // allocated from the OS + MI_MEM_OS_HUGE, // allocated as huge OS pages (usually 1GiB, pinned to physical memory) + MI_MEM_OS_REMAP, // allocated in a remapable area (i.e. 
using `mremap`) + MI_MEM_ARENA // allocated from an arena (the usual case) +} mi_memkind_t; + +static inline bool mi_memkind_is_os(mi_memkind_t memkind) { + return (memkind >= MI_MEM_OS && memkind <= MI_MEM_OS_REMAP); +} + +typedef struct mi_memid_os_info { + void* base; // actual base address of the block (used for offset aligned allocations) + size_t alignment; // alignment at allocation +} mi_memid_os_info_t; + +typedef struct mi_memid_arena_info { + size_t block_index; // index in the arena + mi_arena_id_t id; // arena id (>= 1) + bool is_exclusive; // this arena can only be used for specific arena allocations +} mi_memid_arena_info_t; + +typedef struct mi_memid_s { + union { + mi_memid_os_info_t os; // only used for MI_MEM_OS + mi_memid_arena_info_t arena; // only used for MI_MEM_ARENA + } mem; + bool is_pinned; // `true` if we cannot decommit/reset/protect in this memory (e.g. when allocated using large (2Mib) or huge (1GiB) OS pages) + bool initially_committed;// `true` if the memory was originally allocated as committed + bool initially_zero; // `true` if the memory was originally zero initialized + mi_memkind_t memkind; +} mi_memid_t; + + +// ----------------------------------------------------------------------------------------- +// Segments are large allocated memory blocks (8mb on 64 bit) from arenas or the OS. +// +// Inside segments we allocated fixed size mimalloc pages (`mi_page_t`) that contain blocks. +// The start of a segment is this structure with a fixed number of slice entries (`slices`) +// usually followed by a guard OS page and the actual allocation area with pages. +// While a page is not allocated, we view it's data as a `mi_slice_t` (instead of a `mi_page_t`). +// Of any free area, the first slice has the info and `slice_offset == 0`; for any subsequent +// slices part of the area, the `slice_offset` is the byte offset back to the first slice +// (so we can quickly find the page info on a free, `internal.h:_mi_segment_page_of`). 
+// For slices, the `block_size` field is repurposed to signify if a slice is used (`1`) or not (`0`). +// Small and medium pages use a fixed amount of slices to reduce slice fragmentation, while +// large and huge pages span a variable amount of slices. +typedef struct mi_segment_s { + // constant fields + mi_memid_t memid; // memory id for arena/OS allocation + bool allow_decommit; // can we decommit the memory + bool allow_purge; // can we purge the memory (reset or decommit) + size_t segment_size; + + // segment fields + mi_msecs_t purge_expire; // purge slices in the `purge_mask` after this time + mi_commit_mask_t purge_mask; // slices that can be purged + mi_commit_mask_t commit_mask; // slices that are currently committed + + // from here is zero initialized + struct mi_segment_s* next; // the list of freed segments in the cache (must be first field, see `segment.c:mi_segment_init`) + bool was_reclaimed; // true if it was reclaimed (used to limit on-free reclamation) + + size_t abandoned; // abandoned pages (i.e. the original owning thread stopped) (`abandoned <= used`) + size_t abandoned_visits; // count how often this segment is visited during abandoned reclamation (to force reclaim if it takes too long) + size_t used; // count of pages in use + uintptr_t cookie; // verify addresses in debug mode: `mi_ptr_cookie(segment) == segment->cookie` + + size_t segment_slices; // for huge segments this may be different from `MI_SLICES_PER_SEGMENT` + size_t segment_info_slices; // initial count of slices that we are using for segment info and possible guard pages. 
+ + // layout like this to optimize access in `mi_free` + mi_segment_kind_t kind; + size_t slice_entries; // entries in the `slices` array, at most `MI_SLICES_PER_SEGMENT` + _Atomic(mi_threadid_t) thread_id; // unique id of the thread owning this segment + + mi_slice_t slices[MI_SLICES_PER_SEGMENT+1]; // one extra final entry for huge blocks with large alignment +} mi_segment_t; + + +// ------------------------------------------------------ +// Heaps +// Provide first-class heaps to allocate from. +// A heap just owns a set of pages for allocation and +// can only be allocate/reallocate from the thread that created it. +// Freeing blocks can be done from any thread though. +// Per thread, the segments are shared among its heaps. +// Per thread, there is always a default heap that is +// used for allocation; it is initialized to statically +// point to an empty heap to avoid initialization checks +// in the fast path. +// ------------------------------------------------------ + +// Thread local data +typedef struct mi_tld_s mi_tld_t; + +// Pages of a certain block size are held in a queue. +typedef struct mi_page_queue_s { + mi_page_t* first; + mi_page_t* last; + size_t block_size; +} mi_page_queue_t; + +#define MI_BIN_FULL (MI_BIN_HUGE+1) + +// Random context +typedef struct mi_random_cxt_s { + uint32_t input[16]; + uint32_t output[16]; + int output_available; + bool weak; +} mi_random_ctx_t; + + +// In debug mode there is a padding structure at the end of the blocks to check for buffer overflows +#if (MI_PADDING) +typedef struct mi_padding_s { + uint32_t canary; // encoded block value to check validity of the padding (in case of overflow) + uint32_t delta; // padding bytes before the block. 
(mi_usable_size(p) - delta == exact allocated bytes) +} mi_padding_t; +#define MI_PADDING_SIZE (sizeof(mi_padding_t)) +#define MI_PADDING_WSIZE ((MI_PADDING_SIZE + MI_INTPTR_SIZE - 1) / MI_INTPTR_SIZE) +#else +#define MI_PADDING_SIZE 0 +#define MI_PADDING_WSIZE 0 +#endif + +#define MI_PAGES_DIRECT (MI_SMALL_WSIZE_MAX + MI_PADDING_WSIZE + 1) + + +// A heap owns a set of pages. +struct mi_heap_s { + mi_tld_t* tld; + _Atomic(mi_block_t*) thread_delayed_free; + mi_threadid_t thread_id; // thread this heap belongs too + mi_arena_id_t arena_id; // arena id if the heap belongs to a specific arena (or 0) + uintptr_t cookie; // random cookie to verify pointers (see `_mi_ptr_cookie`) + uintptr_t keys[2]; // two random keys used to encode the `thread_delayed_free` list + mi_random_ctx_t random; // random number context used for secure allocation + size_t page_count; // total number of pages in the `pages` queues. + size_t page_retired_min; // smallest retired index (retired pages are fully free, but still in the page queues) + size_t page_retired_max; // largest retired index into the `pages` array. + mi_heap_t* next; // list of heaps per thread + bool no_reclaim; // `true` if this heap should not reclaim abandoned pages + uint8_t tag; // custom tag, can be used for separating heaps based on the object types + mi_page_t* pages_free_direct[MI_PAGES_DIRECT]; // optimize: array where every entry points a page with possibly free blocks in the corresponding queue for that size. 
+ mi_page_queue_t pages[MI_BIN_FULL + 1]; // queue of pages for each size class (or "bin") +}; + + + +// ------------------------------------------------------ +// Debug +// ------------------------------------------------------ + +#if !defined(MI_DEBUG_UNINIT) +#define MI_DEBUG_UNINIT (0xD0) +#endif +#if !defined(MI_DEBUG_FREED) +#define MI_DEBUG_FREED (0xDF) +#endif +#if !defined(MI_DEBUG_PADDING) +#define MI_DEBUG_PADDING (0xDE) +#endif + +#if (MI_DEBUG) +// use our own assertion to print without memory allocation +void _mi_assert_fail(const char* assertion, const char* fname, unsigned int line, const char* func ); +#define mi_assert(expr) ((expr) ? (void)0 : _mi_assert_fail(#expr,__FILE__,__LINE__,__func__)) +#else +#define mi_assert(x) +#endif + +#if (MI_DEBUG>1) +#define mi_assert_internal mi_assert +#else +#define mi_assert_internal(x) +#endif + +#if (MI_DEBUG>2) +#define mi_assert_expensive mi_assert +#else +#define mi_assert_expensive(x) +#endif + +// ------------------------------------------------------ +// Statistics +// ------------------------------------------------------ + +#ifndef MI_STAT +#if (MI_DEBUG>0) +#define MI_STAT 2 +#else +#define MI_STAT 0 +#endif +#endif + +typedef struct mi_stat_count_s { + int64_t allocated; + int64_t freed; + int64_t peak; + int64_t current; +} mi_stat_count_t; + +typedef struct mi_stat_counter_s { + int64_t total; + int64_t count; +} mi_stat_counter_t; + +typedef struct mi_stats_s { + mi_stat_count_t segments; + mi_stat_count_t pages; + mi_stat_count_t reserved; + mi_stat_count_t committed; + mi_stat_count_t reset; + mi_stat_count_t purged; + mi_stat_count_t page_committed; + mi_stat_count_t segments_abandoned; + mi_stat_count_t pages_abandoned; + mi_stat_count_t threads; + mi_stat_count_t normal; + mi_stat_count_t huge; + mi_stat_count_t large; + mi_stat_count_t malloc; + mi_stat_count_t segments_cache; + mi_stat_counter_t pages_extended; + mi_stat_counter_t mmap_calls; + mi_stat_counter_t commit_calls; + 
mi_stat_counter_t reset_calls; + mi_stat_counter_t purge_calls; + mi_stat_counter_t page_no_retire; + mi_stat_counter_t searches; + mi_stat_counter_t normal_count; + mi_stat_counter_t huge_count; + mi_stat_counter_t large_count; + mi_stat_counter_t arena_count; + mi_stat_counter_t arena_crossover_count; + mi_stat_counter_t arena_rollback_count; +#if MI_STAT>1 + mi_stat_count_t normal_bins[MI_BIN_HUGE+1]; +#endif +} mi_stats_t; + + +void _mi_stat_increase(mi_stat_count_t* stat, size_t amount); +void _mi_stat_decrease(mi_stat_count_t* stat, size_t amount); +void _mi_stat_counter_increase(mi_stat_counter_t* stat, size_t amount); + +#if (MI_STAT) +#define mi_stat_increase(stat,amount) _mi_stat_increase( &(stat), amount) +#define mi_stat_decrease(stat,amount) _mi_stat_decrease( &(stat), amount) +#define mi_stat_counter_increase(stat,amount) _mi_stat_counter_increase( &(stat), amount) +#else +#define mi_stat_increase(stat,amount) (void)0 +#define mi_stat_decrease(stat,amount) (void)0 +#define mi_stat_counter_increase(stat,amount) (void)0 +#endif + +#define mi_heap_stat_counter_increase(heap,stat,amount) mi_stat_counter_increase( (heap)->tld->stats.stat, amount) +#define mi_heap_stat_increase(heap,stat,amount) mi_stat_increase( (heap)->tld->stats.stat, amount) +#define mi_heap_stat_decrease(heap,stat,amount) mi_stat_decrease( (heap)->tld->stats.stat, amount) + + +// ------------------------------------------------------ +// Thread Local data +// ------------------------------------------------------ + +// A "span" is an available range of slices. The span queues keep +// track of slice spans of at most the given `slice_count` (but more than the previous size class). 
+typedef struct mi_span_queue_s { + mi_slice_t* first; + mi_slice_t* last; + size_t slice_count; +} mi_span_queue_t; + +#define MI_SEGMENT_BIN_MAX (35) // 35 == mi_segment_bin(MI_SLICES_PER_SEGMENT) + +// OS thread local data +typedef struct mi_os_tld_s { + size_t region_idx; // start point for next allocation + mi_stats_t* stats; // points to tld stats +} mi_os_tld_t; + + +// Segments thread local data +typedef struct mi_segments_tld_s { + mi_span_queue_t spans[MI_SEGMENT_BIN_MAX+1]; // free slice spans inside segments + size_t count; // current number of segments; + size_t peak_count; // peak number of segments + size_t current_size; // current size of all segments + size_t peak_size; // peak size of all segments + size_t reclaim_count;// number of reclaimed (abandoned) segments + mi_stats_t* stats; // points to tld stats + mi_os_tld_t* os; // points to os stats +} mi_segments_tld_t; + +// Thread local data +struct mi_tld_s { + unsigned long long heartbeat; // monotonic heartbeat count + bool recurse; // true if deferred was called; used to prevent infinite recursion. 
+ mi_heap_t* heap_backing; // backing heap of this thread (cannot be deleted) + mi_heap_t* heaps; // list of heaps in this thread (so we can abandon all when the thread terminates) + mi_segments_tld_t segments; // segment tld + mi_os_tld_t os; // os tld + mi_stats_t stats; // statistics +}; + +#endif diff --git a/yass/third_party/mimalloc/mimalloc.pc.in b/yass/third_party/mimalloc/mimalloc.pc.in new file mode 100644 index 0000000000..36da203884 --- /dev/null +++ b/yass/third_party/mimalloc/mimalloc.pc.in @@ -0,0 +1,11 @@ +prefix=@CMAKE_INSTALL_PREFIX@ +libdir=@libdir_for_pc_file@ +includedir=@includedir_for_pc_file@ + +Name: @PROJECT_NAME@ +Description: A compact general purpose allocator with excellent performance +Version: @PACKAGE_VERSION@ +URL: https://github.com/microsoft/mimalloc/ +Libs: -L${libdir} -lmimalloc +Libs.private: @pc_libraries@ +Cflags: -I${includedir} diff --git a/yass/third_party/mimalloc/readme.md b/yass/third_party/mimalloc/readme.md new file mode 100644 index 0000000000..58f6d5ccb2 --- /dev/null +++ b/yass/third_party/mimalloc/readme.md @@ -0,0 +1,861 @@ + + + +[](https://dev.azure.com/Daan0324/mimalloc/_build?definitionId=1&_a=summary) + +# mimalloc + +  + +mimalloc (pronounced "me-malloc") +is a general purpose allocator with excellent [performance](#performance) characteristics. +Initially developed by Daan Leijen for the runtime systems of the +[Koka](https://koka-lang.github.io) and [Lean](https://github.com/leanprover/lean) languages. + +Latest release tag: `v2.1.7` (2024-05-21). +Latest v1 tag: `v1.8.7` (2024-05-21). + +mimalloc is a drop-in replacement for `malloc` and can be used in other programs +without code changes, for example, on dynamically linked ELF-based systems (Linux, BSD, etc.) you can use it as: +``` +> LD_PRELOAD=/usr/lib/libmimalloc.so myprogram +``` +It also includes a robust way to override the default allocator in [Windows](#override_on_windows). 
Notable aspects of the design include: + +- __small and consistent__: the library is about 8k LOC using simple and + consistent data structures. This makes it very suitable + to integrate and adapt in other projects. For runtime systems it + provides hooks for a monotonic _heartbeat_ and deferred freeing (for + bounded worst-case times with reference counting). + Partly due to its simplicity, mimalloc has been ported to many systems (Windows, macOS, + Linux, WASM, various BSD's, Haiku, MUSL, etc) and has excellent support for dynamic overriding. + At the same time, it is an industrial strength allocator that runs (very) large scale + distributed services on thousands of machines with excellent worst case latencies. +- __free list sharding__: instead of one big free list (per size class) we have + many smaller lists per "mimalloc page" which reduces fragmentation and + increases locality -- + things that are allocated close in time get allocated close in memory. + (A mimalloc page contains blocks of one size class and is usually 64KiB on a 64-bit system). +- __free list multi-sharding__: the big idea! Not only do we shard the free list + per mimalloc page, but for each page we have multiple free lists. In particular, there + is one list for thread-local `free` operations, and another one for concurrent `free` + operations. Free-ing from another thread can now be a single CAS without needing + sophisticated coordination between threads. Since there will be + thousands of separate free lists, contention is naturally distributed over the heap, + and the chance of contending on a single location will be low -- this is quite + similar to randomized algorithms like skip lists where adding + a random oracle removes the need for a more complex algorithm. 
+- __eager page purging__: when a "page" becomes empty (with increased chance + due to free list sharding) the memory is marked to the OS as unused (reset or decommitted) + reducing (real) memory pressure and fragmentation, especially in long running + programs. +- __secure__: _mimalloc_ can be built in secure mode, adding guard pages, + randomized allocation, encrypted free lists, etc. to protect against various + heap vulnerabilities. The performance penalty is usually around 10% on average + over our benchmarks. +- __first-class heaps__: efficiently create and use multiple heaps to allocate across different regions. + A heap can be destroyed at once instead of deallocating each object separately. +- __bounded__: it does not suffer from _blowup_ \[1\], has bounded worst-case allocation + times (_wcat_) (upto OS primitives), bounded space overhead (~0.2% meta-data, with low + internal fragmentation), and has no internal points of contention using only atomic operations. +- __fast__: In our benchmarks (see [below](#performance)), + _mimalloc_ outperforms other leading allocators (_jemalloc_, _tcmalloc_, _Hoard_, etc), + and often uses less memory. A nice property is that it does consistently well over a wide range + of benchmarks. There is also good huge OS page support for larger server programs. + +The [documentation](https://microsoft.github.io/mimalloc) gives a full overview of the API. +You can read more on the design of _mimalloc_ in the [technical report](https://www.microsoft.com/en-us/research/publication/mimalloc-free-list-sharding-in-action) which also has detailed benchmark results. + +Enjoy! + +### Branches + +* `master`: latest stable release (based on `dev-slice`). +* `dev`: development branch for mimalloc v1. Use this branch for submitting PR's. +* `dev-slice`: development branch for mimalloc v2. 
This branch is downstream of `dev` (and is essentially equal to `dev` except for +`src/segment.c`) + +### Releases + +Note: the `v2.x` version has a different algorithm for managing internal mimalloc pages (as slices) that tends to use reduce +memory usage + and fragmentation compared to mimalloc `v1.x` (especially for large workloads). Should otherwise have similar performance + (see [below](#performance)); please report if you observe any significant performance regression. + +* 2024-05-21, `v1.8.7`, `v2.1.7`: Fix build issues on less common platforms. Started upstreaming patches + from the CPython [integration](https://github.com/python/cpython/issues/113141#issuecomment-2119255217). Upstream `vcpkg` patches. +* 2024-05-13, `v1.8.6`, `v2.1.6`: Fix build errors on various (older) platforms. Refactored aligned allocation. +* 2024-04-22, `v1.8.4`, `v2.1.4`: Fixes various bugs and build issues. Add `MI_LIBC_MUSL` cmake flag for musl builds. + Free-ing code is refactored into a separate module (`free.c`). Mimalloc page info is simplified with the block size + directly available (and new `block_size_shift` to improve aligned block free-ing). + New approach to collection of abandoned segments: When + a thread terminates the segments it owns are abandoned (containing still live objects) and these can be + reclaimed by other threads. We no longer use a list of abandoned segments but this is now done using bitmaps in arena's + which is more concurrent (and more aggressive). Abandoned memory can now also be reclaimed if a thread frees an object in + an abandoned page (which can be disabled using `mi_option_abandoned_reclaim_on_free`). The option `mi_option_max_segment_reclaim` + gives a maximum percentage of abandoned segments that can be reclaimed per try (=10%). + +* 2023-04-24, `v1.8.2`, `v2.1.2`: Fixes build issues on freeBSD, musl, and C17 (UE 5.1.1). 
Reduce code size/complexity + by removing regions and segment-cache's and only use arenas with improved memory purging -- this may improve memory + usage as well for larger services. Renamed options for consistency. Improved Valgrind and ASAN checking. + +* 2023-04-03, `v1.8.1`, `v2.1.1`: Fixes build issues on some platforms. + +* 2023-03-29, `v1.8.0`, `v2.1.0`: Improved support dynamic overriding on Windows 11. Improved tracing precision + with [asan](#asan) and [Valgrind](#valgrind), and added Windows event tracing [ETW](#ETW) (contributed by Xinglong He). Created an OS + abstraction layer to make it easier to port and separate platform dependent code (in `src/prim`). Fixed C++ STL compilation on older Microsoft C++ compilers, and various small bug fixes. + +* 2022-12-23, `v1.7.9`, `v2.0.9`: Supports building with [asan](#asan) and improved [Valgrind](#valgrind) support. + Support arbitrary large alignments (in particular for `std::pmr` pools). + Added C++ STL allocators attached to a specific heap (thanks @vmarkovtsev). + Heap walks now visit all object (including huge objects). Support Windows nano server containers (by Johannes Schindelin,@dscho). Various small bug fixes. + +* 2022-11-03, `v1.7.7`, `v2.0.7`: Initial support for [Valgrind](#valgrind) for leak testing and heap block overflow + detection. Initial + support for attaching heaps to a speficic memory area (only in v2). Fix `realloc` behavior for zero size blocks, remove restriction to integral multiple of the alignment in `alloc_align`, improved aligned allocation performance, reduced contention with many threads on few processors (thank you @dposluns!), vs2022 support, support `pkg-config`, . 
+ +* 2022-04-14, `v1.7.6`, `v2.0.6`: fix fallback path for aligned OS allocation on Windows, improve Windows aligned allocation + even when compiling with older SDK's, fix dynamic overriding on macOS Monterey, fix MSVC C++ dynamic overriding, fix + warnings under Clang 14, improve performance if many OS threads are created and destroyed, fix statistics for large object + allocations, using MIMALLOC_VERBOSE=1 has no maximum on the number of error messages, various small fixes. + +* 2022-02-14, `v1.7.5`, `v2.0.5` (alpha): fix malloc override on + Windows 11, fix compilation with musl, potentially reduced + committed memory, add `bin/minject` for Windows, + improved wasm support, faster aligned allocation, + various small fixes. + +* [Older release notes](#older-release-notes) + +Special thanks to: + +* [David Carlier](https://devnexen.blogspot.com/) (@devnexen) for his many contributions, and making + mimalloc work better on many less common operating systems, like Haiku, Dragonfly, etc. +* Mary Feofanova (@mary3000), Evgeniy Moiseenko, and Manuel Pöter (@mpoeter) for making mimalloc TSAN checkable, and finding + memory model bugs using the [genMC] model checker. +* Weipeng Liu (@pongba), Zhuowei Li, Junhua Wang, and Jakub Szymanski, for their early support of mimalloc and deployment + at large scale services, leading to many improvements in the mimalloc algorithms for large workloads. +* Jason Gibson (@jasongibson) for exhaustive testing on large scale workloads and server environments, and finding complex bugs + in (early versions of) `mimalloc`. +* Manuel Pöter (@mpoeter) and Sam Gross(@colesbury) for finding an ABA concurrency issue in abandoned segment reclamation. Sam also created the [no GIL](https://github.com/colesbury/nogil) Python fork which + uses mimalloc internally. 
+ + +[genMC]: https://plv.mpi-sws.org/genmc/ + +### Usage + +mimalloc is used in various large scale low-latency services and programs, for example: + + + + + + + + +# Building + +## Windows + +Open `ide/vs2022/mimalloc.sln` in Visual Studio 2022 and build. +The `mimalloc` project builds a static library (in `out/msvc-x64`), while the +`mimalloc-override` project builds a DLL for overriding malloc +in the entire program. + +## macOS, Linux, BSD, etc. + +We use [`cmake`](https://cmake.org)1 as the build system: + +``` +> mkdir -p out/release +> cd out/release +> cmake ../.. +> make +``` +This builds the library as a shared (dynamic) +library (`.so` or `.dylib`), a static library (`.a`), and +as a single object file (`.o`). + +`> sudo make install` (install the library and header files in `/usr/local/lib` and `/usr/local/include`) + +You can build the debug version which does many internal checks and +maintains detailed statistics as: + +``` +> mkdir -p out/debug +> cd out/debug +> cmake -DCMAKE_BUILD_TYPE=Debug ../.. +> make +``` +This will name the shared library as `libmimalloc-debug.so`. + +Finally, you can build a _secure_ version that uses guard pages, encrypted +free lists, etc., as: +``` +> mkdir -p out/secure +> cd out/secure +> cmake -DMI_SECURE=ON ../.. +> make +``` +This will name the shared library as `libmimalloc-secure.so`. +Use `ccmake`2 instead of `cmake` +to see and customize all the available build options. + +Notes: +1. Install CMake: `sudo apt-get install cmake` +2. Install CCMake: `sudo apt-get install cmake-curses-gui` + + +## Single source + +You can also directly build the single `src/static.c` file as part of your project without +needing `cmake` at all. Make sure to also add the mimalloc `include` directory to the include path. + + +# Using the library + +The preferred usage is including ``, linking with +the shared- or static library, and using the `mi_malloc` API exclusively for allocation. 
For example, +``` +> gcc -o myprogram -lmimalloc myfile.c +``` + +mimalloc uses only safe OS calls (`mmap` and `VirtualAlloc`) and can co-exist +with other allocators linked to the same program. +If you use `cmake`, you can simply use: +``` +find_package(mimalloc 1.4 REQUIRED) +``` +in your `CMakeLists.txt` to find a locally installed mimalloc. Then use either: +``` +target_link_libraries(myapp PUBLIC mimalloc) +``` +to link with the shared (dynamic) library, or: +``` +target_link_libraries(myapp PUBLIC mimalloc-static) +``` +to link with the static library. See `test\CMakeLists.txt` for an example. + +For best performance in C++ programs, it is also recommended to override the +global `new` and `delete` operators. For convenience, mimalloc provides +[`mimalloc-new-delete.h`](https://github.com/microsoft/mimalloc/blob/master/include/mimalloc-new-delete.h) which does this for you -- just include it in a single(!) source file in your project. +In C++, mimalloc also provides the `mi_stl_allocator` struct which implements the `std::allocator` +interface. 
+ +You can pass environment variables to print verbose messages (`MIMALLOC_VERBOSE=1`) +and statistics (`MIMALLOC_SHOW_STATS=1`) (in the debug version): +``` +> env MIMALLOC_SHOW_STATS=1 ./cfrac 175451865205073170563711388363 + +175451865205073170563711388363 = 374456281610909315237213 * 468551 + +heap stats: peak total freed unit +normal 2: 16.4 kb 17.5 mb 17.5 mb 16 b ok +normal 3: 16.3 kb 15.2 mb 15.2 mb 24 b ok +normal 4: 64 b 4.6 kb 4.6 kb 32 b ok +normal 5: 80 b 118.4 kb 118.4 kb 40 b ok +normal 6: 48 b 48 b 48 b 48 b ok +normal 17: 960 b 960 b 960 b 320 b ok + +heap stats: peak total freed unit + normal: 33.9 kb 32.8 mb 32.8 mb 1 b ok + huge: 0 b 0 b 0 b 1 b ok + total: 33.9 kb 32.8 mb 32.8 mb 1 b ok +malloc requested: 32.8 mb + + committed: 58.2 kb 58.2 kb 58.2 kb 1 b ok + reserved: 2.0 mb 2.0 mb 2.0 mb 1 b ok + reset: 0 b 0 b 0 b 1 b ok + segments: 1 1 1 +-abandoned: 0 + pages: 6 6 6 +-abandoned: 0 + mmaps: 3 + mmap fast: 0 + mmap slow: 1 + threads: 0 + elapsed: 2.022s + process: user: 1.781s, system: 0.016s, faults: 756, reclaims: 0, rss: 2.7 mb +``` + +The above model of using the `mi_` prefixed API is not always possible +though in existing programs that already use the standard malloc interface, +and another option is to override the standard malloc interface +completely and redirect all calls to the _mimalloc_ library instead . + +## Environment Options + +You can set further options either programmatically (using [`mi_option_set`](https://microsoft.github.io/mimalloc/group__options.html)), or via environment variables: + +- `MIMALLOC_SHOW_STATS=1`: show statistics when the program terminates. +- `MIMALLOC_VERBOSE=1`: show verbose messages. +- `MIMALLOC_SHOW_ERRORS=1`: show error and warning messages. + +Advanced options: + +- `MIMALLOC_ARENA_EAGER_COMMIT=2`: turns on eager commit for the large arenas (usually 1GiB) from which mimalloc + allocates segments and pages. Set this to 2 (default) to + only enable this on overcommit systems (e.g. Linux). 
Set this to 1 to enable explicitly on other systems + as well (like Windows or macOS) which may improve performance (as the whole arena is committed at once). + Note that eager commit only increases the commit but not the actual the peak resident set + (rss) so it is generally ok to enable this. +- `MIMALLOC_PURGE_DELAY=N`: the delay in `N` milli-seconds (by default `10`) after which mimalloc will purge + OS pages that are not in use. This signals to the OS that the underlying physical memory can be reused which + can reduce memory fragmentation especially in long running (server) programs. Setting `N` to `0` purges immediately when + a page becomes unused which can improve memory usage but also decreases performance. Setting `N` to a higher + value like `100` can improve performance (sometimes by a lot) at the cost of potentially using more memory at times. + Setting it to `-1` disables purging completely. +- `MIMALLOC_PURGE_DECOMMITS=1`: By default "purging" memory means unused memory is decommitted (`MEM_DECOMMIT` on Windows, + `MADV_DONTNEED` (which decresease rss immediately) on `mmap` systems). Set this to 0 to instead "reset" unused + memory on a purge (`MEM_RESET` on Windows, generally `MADV_FREE` (which does not decrease rss immediately) on `mmap` systems). + Mimalloc generally does not "free" OS memory but only "purges" OS memory, in other words, it tries to keep virtual + address ranges and decommits within those ranges (to make the underlying physical memory available to other processes). + +Further options for large workloads and services: + +- `MIMALLOC_USE_NUMA_NODES=N`: pretend there are at most `N` NUMA nodes. If not set, the actual NUMA nodes are detected + at runtime. Setting `N` to 1 may avoid problems in some virtual environments. 
Also, setting it to a lower number than + the actual NUMA nodes is fine and will only cause threads to potentially allocate more memory across actual NUMA + nodes (but this can happen in any case as NUMA local allocation is always a best effort but not guaranteed). +- `MIMALLOC_ALLOW_LARGE_OS_PAGES=1`: use large OS pages (2 or 4MiB) when available; for some workloads this can significantly + improve performance. When this option is disabled, it also disables transparent huge pages (THP) for the process + (on Linux and Android). Use `MIMALLOC_VERBOSE` to check if the large OS pages are enabled -- usually one needs + to explicitly give permissions for large OS pages (as on [Windows][windows-huge] and [Linux][linux-huge]). However, sometimes + the OS is very slow to reserve contiguous physical memory for large OS pages so use with care on systems that + can have fragmented memory (for that reason, we generally recommend to use `MIMALLOC_RESERVE_HUGE_OS_PAGES` instead whenever possible). +- `MIMALLOC_RESERVE_HUGE_OS_PAGES=N`: where `N` is the number of 1GiB _huge_ OS pages. This reserves the huge pages at + startup and sometimes this can give a large (latency) performance improvement on big workloads. + Usually it is better to not use `MIMALLOC_ALLOW_LARGE_OS_PAGES=1` in combination with this setting. Just like large + OS pages, use with care as reserving + contiguous physical memory can take a long time when memory is fragmented (but reserving the huge pages is done at + startup only once). + Note that we usually need to explicitly give permission for huge OS pages (as on [Windows][windows-huge] and [Linux][linux-huge])). 
+ With huge OS pages, it may be beneficial to set the setting + `MIMALLOC_EAGER_COMMIT_DELAY=N` (`N` is 1 by default) to delay the initial `N` segments (of 4MiB) + of a thread to not allocate in the huge OS pages; this prevents threads that are short lived + and allocate just a little to take up space in the huge OS page area (which cannot be purged as huge OS pages are pinned + to physical memory). + The huge pages are usually allocated evenly among NUMA nodes. + We can use `MIMALLOC_RESERVE_HUGE_OS_PAGES_AT=N` where `N` is the numa node (starting at 0) to allocate all + the huge pages at a specific numa node instead. + +Use caution when using `fork` in combination with either large or huge OS pages: on a fork, the OS uses copy-on-write +for all pages in the original process including the huge OS pages. When any memory is now written in that area, the +OS will copy the entire 1GiB huge page (or 2MiB large page) which can cause the memory usage to grow in large increments. + +[linux-huge]: https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/5/html/tuning_and_optimizing_red_hat_enterprise_linux_for_oracle_9i_and_10g_databases/sect-oracle_9i_and_10g_tuning_guide-large_memory_optimization_big_pages_and_huge_pages-configuring_huge_pages_in_red_hat_enterprise_linux_4_or_5 +[windows-huge]: https://docs.microsoft.com/en-us/sql/database-engine/configure-windows/enable-the-lock-pages-in-memory-option-windows?view=sql-server-2017 + +## Secure Mode + +_mimalloc_ can be build in secure mode by using the `-DMI_SECURE=ON` flags in `cmake`. This build enables various mitigations +to make mimalloc more robust against exploits. In particular: + +- All internal mimalloc pages are surrounded by guard pages and the heap metadata is behind a guard page as well (so a buffer overflow + exploit cannot reach into the metadata). 
+- All free list pointers are + [encoded](https://github.com/microsoft/mimalloc/blob/783e3377f79ee82af43a0793910a9f2d01ac7863/include/mimalloc-internal.h#L396) + with per-page keys which is used both to prevent overwrites with a known pointer, as well as to detect heap corruption. +- Double free's are detected (and ignored). +- The free lists are initialized in a random order and allocation randomly chooses between extension and reuse within a page to + mitigate against attacks that rely on a predicable allocation order. Similarly, the larger heap blocks allocated by mimalloc + from the OS are also address randomized. + +As always, evaluate with care as part of an overall security strategy as all of the above are mitigations but not guarantees. + +## Debug Mode + +When _mimalloc_ is built using debug mode, various checks are done at runtime to catch development errors. + +- Statistics are maintained in detail for each object size. They can be shown using `MIMALLOC_SHOW_STATS=1` at runtime. +- All objects have padding at the end to detect (byte precise) heap block overflows. +- Double free's, and freeing invalid heap pointers are detected. +- Corrupted free-lists and some forms of use-after-free are detected. + + +# Overriding Standard Malloc + +Overriding the standard `malloc` (and `new`) can be done either _dynamically_ or _statically_. + +## Dynamic override + +This is the recommended way to override the standard malloc interface. + +### Dynamic Override on Linux, BSD + +On these ELF-based systems we preload the mimalloc shared +library so all calls to the standard `malloc` interface are +resolved to the _mimalloc_ library. 
+``` +> env LD_PRELOAD=/usr/lib/libmimalloc.so myprogram +``` + +You can set extra environment variables to check that mimalloc is running, +like: +``` +> env MIMALLOC_VERBOSE=1 LD_PRELOAD=/usr/lib/libmimalloc.so myprogram +``` +or run with the debug version to get detailed statistics: +``` +> env MIMALLOC_SHOW_STATS=1 LD_PRELOAD=/usr/lib/libmimalloc-debug.so myprogram +``` + +### Dynamic Override on MacOS + +On macOS we can also preload the mimalloc shared +library so all calls to the standard `malloc` interface are +resolved to the _mimalloc_ library. +``` +> env DYLD_INSERT_LIBRARIES=/usr/lib/libmimalloc.dylib myprogram +``` + +Note that certain security restrictions may apply when doing this from +the [shell](https://stackoverflow.com/questions/43941322/dyld-insert-libraries-ignored-when-calling-application-through-bash). + + +### Dynamic Override on Windows + +Dynamically overriding on mimalloc on Windows +is robust and has the particular advantage to be able to redirect all malloc/free calls that go through +the (dynamic) C runtime allocator, including those from other DLL's or libraries. +As it intercepts all allocation calls on a low level, it can be used reliably +on large programs that include other 3rd party components. +There are four requirements to make the overriding work robustly: + +1. Use the C-runtime library as a DLL (using the `/MD` or `/MDd` switch). +2. Link your program explicitly with `mimalloc-override.dll` library. + To ensure the `mimalloc-override.dll` is loaded at run-time it is easiest to insert some + call to the mimalloc API in the `main` function, like `mi_version()` + (or use the `/INCLUDE:mi_version` switch on the linker). See the `mimalloc-override-test` project + for an example on how to use this. +3. The [`mimalloc-redirect.dll`](bin) (or `mimalloc-redirect32.dll`) must be put + in the same folder as the main `mimalloc-override.dll` at runtime (as it is a dependency of that DLL). 
+ The redirection DLL ensures that all calls to the C runtime malloc API get redirected to + mimalloc functions (which reside in `mimalloc-override.dll`). +4. Ensure the `mimalloc-override.dll` comes as early as possible in the import + list of the final executable (so it can intercept all potential allocations). + +For best performance on Windows with C++, it +is also recommended to override the `new`/`delete` operations (by including +[`mimalloc-new-delete.h`](include/mimalloc-new-delete.h) +in a single(!) source file in your project). + +The environment variable `MIMALLOC_DISABLE_REDIRECT=1` can be used to disable dynamic +overriding at run-time. Use `MIMALLOC_VERBOSE=1` to check if mimalloc was successfully redirected. + +We cannot always re-link an executable with `mimalloc-override.dll`, and similarly, we cannot always +ensure the DLL comes first in the import table of the final executable. +In many cases though we can patch existing executables without any recompilation +if they are linked with the dynamic C runtime (`ucrtbase.dll`) -- just put the `mimalloc-override.dll` +into the import table (and put `mimalloc-redirect.dll` in the same folder). +Such patching can be done for example with [CFF Explorer](https://ntcore.com/?page_id=388) or +the [`minject`](bin) program.
+``` + +Another way to override statically that works on all platforms, is to +link statically to mimalloc (as shown in the introduction) and include a +header file in each source file that re-defines `malloc` etc. to `mi_malloc`. +This is provided by [`mimalloc-override.h`](https://github.com/microsoft/mimalloc/blob/master/include/mimalloc-override.h). This only works reliably though if all sources are +under your control or otherwise mixing of pointers from different heaps may occur! + + +# Tools + +Generally, we recommend using the standard allocator with memory tracking tools, but mimalloc +can also be built to support the [address sanitizer][asan] or the excellent [Valgrind] tool. +Moreover, it can be built to support Windows event tracing ([ETW]). +This has a small performance overhead but does allow detecting memory leaks and byte-precise +buffer overflows directly on final executables. See also the `test/test-wrong.c` file to test with various tools. + +## Valgrind + +To build with [valgrind] support, use the `MI_TRACK_VALGRIND=ON` cmake option: + +``` +> cmake ../.. -DMI_TRACK_VALGRIND=ON +``` + +This can also be combined with secure mode or debug mode. +You can then run your programs directly under valgrind: + +``` +> valgrind +``` + +If you rely on overriding `malloc`/`free` by mimalloc (instead of using the `mi_malloc`/`mi_free` API directly), +you also need to tell `valgrind` to not intercept those calls itself, and use: + +``` +> MIMALLOC_SHOW_STATS=1 valgrind --soname-synonyms=somalloc=*mimalloc* -- +``` + +By setting the `MIMALLOC_SHOW_STATS` environment variable you can check that mimalloc is indeed +used and not the standard allocator. Even though the [Valgrind option][valgrind-soname] +is called `--soname-synonyms`, this also +works when overriding with a static library or object file. Unfortunately, it is not possible to +dynamically override mimalloc using `LD_PRELOAD` together with `valgrind`.
+See also the `test/test-wrong.c` file to test with `valgrind`. + +Valgrind support is in its initial development -- please report any issues. + +[Valgrind]: https://valgrind.org/ +[valgrind-soname]: https://valgrind.org/docs/manual/manual-core.html#opt.soname-synonyms + +## ASAN + +To build with the address sanitizer, use the `-DMI_TRACK_ASAN=ON` cmake option: + +``` +> cmake ../.. -DMI_TRACK_ASAN=ON +``` + +This can also be combined with secure mode or debug mode. +You can then run your programs as: + +``` +> ASAN_OPTIONS=verbosity=1 +``` + +When you link a program with an address sanitizer build of mimalloc, you should +generally compile that program too with the address sanitizer enabled. +For example, assuming you build mimalloc in `out/debug`: + +``` +clang -g -o test-wrong -Iinclude test/test-wrong.c out/debug/libmimalloc-asan-debug.a -lpthread -fsanitize=address -fsanitize-recover=address +``` + +Since the address sanitizer redirects the standard allocation functions, on some platforms (macOSX for example) +it is required to compile mimalloc with `-DMI_OVERRIDE=OFF`. +Address sanitizer support is in its initial development -- please report any issues. + +[asan]: https://github.com/google/sanitizers/wiki/AddressSanitizer + +## ETW + +Event tracing for Windows ([ETW]) provides a high performance way to capture all allocations through +mimalloc and analyze them later. To build with ETW support, use the `-DMI_TRACK_ETW=ON` cmake option. + +You can then capture an allocation trace using the Windows performance recorder (WPR), using the +`src/prim/windows/etw-mimalloc.wprp` profile. In an admin prompt, you can use: +``` +> wpr -start src\prim\windows\etw-mimalloc.wprp -filemode +> +> wpr -stop .etl +``` +and then open `.etl` in the Windows Performance Analyzer (WPA), or +use a tool like [TraceControl] that is specialized for analyzing mimalloc traces.
+ +[ETW]: https://learn.microsoft.com/en-us/windows-hardware/test/wpt/event-tracing-for-windows +[TraceControl]: https://github.com/xinglonghe/TraceControl + + +# Performance + +Last update: 2021-01-30 + +We tested _mimalloc_ against many other top allocators over a wide +range of benchmarks, ranging from various real world programs to +synthetic benchmarks that see how the allocator behaves under more +extreme circumstances. In our benchmark suite, _mimalloc_ outperforms other leading +allocators (_jemalloc_, _tcmalloc_, _Hoard_, etc), and has a similar memory footprint. A nice property is that it +does consistently well over the wide range of benchmarks. + +General memory allocators are interesting as there exists no algorithm that is +optimal -- for a given allocator one can usually construct a workload +where it does not do so well. The goal is thus to find an allocation +strategy that performs well over a wide range of benchmarks without +suffering from (too much) underperformance in less common situations. + +As always, interpret these results with care since some benchmarks test synthetic +or uncommon situations that may never apply to your workloads. For example, most +allocators do not do well on `xmalloc-testN` but that includes even the best +industrial allocators like _jemalloc_ and _tcmalloc_ that are used in some of +the world's largest systems (like Chrome or FreeBSD). + +Also, the benchmarks here do not measure the behaviour on very large and long-running server workloads, +or worst-case latencies of allocation. Much work has gone into `mimalloc` to work well on such +workloads (for example, to reduce virtual memory fragmentation on long-running services) +but such optimizations are not always reflected in the current benchmark suite. + +We show here only an overview -- for +more specific details and further benchmarks we refer to the +[technical report](https://www.microsoft.com/en-us/research/publication/mimalloc-free-list-sharding-in-action). 
+The benchmark suite is automated and available separately +as [mimalloc-bench](https://github.com/daanx/mimalloc-bench). + + +## Benchmark Results on a 16-core AMD 5950x (Zen3) + +Testing on the 16-core AMD 5950x processor at 3.4Ghz (4.9Ghz boost), +with 32GiB memory at 3600Mhz, running Ubuntu 20.04 with glibc 2.31 and GCC 9.3.0. + +We measure three versions of _mimalloc_: the main version `mi` (tag:v1.7.0), +the new v2.0 beta version as `xmi` (tag:v2.0.0), and the main version in secure mode as `smi` (tag:v1.7.0). + +The other allocators are +Google's [_tcmalloc_](https://github.com/gperftools/gperftools) (`tc`, tag:gperftools-2.8.1) used in Chrome, +Facebook's [_jemalloc_](https://github.com/jemalloc/jemalloc) (`je`, tag:5.2.1) by Jason Evans used in Firefox and FreeBSD, +the Intel thread building blocks [allocator](https://github.com/intel/tbb) (`tbb`, tag:v2020.3), +[rpmalloc](https://github.com/mjansson/rpmalloc) (`rp`,tag:1.4.1) by Mattias Jansson, +the original scalable [_Hoard_](https://github.com/emeryberger/Hoard) (git:d880f72) allocator by Emery Berger \[1], +the memory compacting [_Mesh_](https://github.com/plasma-umass/Mesh) (git:67ff31a) allocator by +Bobby Powers _et al_ \[8], +and finally the default system allocator (`glibc`, 2.31) (based on _PtMalloc2_). + + + + +Any benchmarks ending in `N` run on all 32 logical cores in parallel. +Results are averaged over 10 runs and reported relative +to mimalloc (where 1.2 means it took 1.2× longer to run). +The legend also contains the _overall relative score_ between the +allocators where 100 points is the maximum if an allocator is fastest on +all benchmarks. + +The single threaded _cfrac_ benchmark by Dave Barrett is an implementation of +continued fraction factorization which uses many small short-lived allocations. +All allocators do well on such common usage, where _mimalloc_ is just a tad +faster than _tcmalloc_ and +_jemalloc_.
+ +The _leanN_ program is interesting as a large realistic and +concurrent workload of the [Lean](https://github.com/leanprover/lean) +theorem prover compiling its own standard library, and there is a 13% +speedup over _tcmalloc_. This is +quite significant: if Lean spends 20% of its time in the +allocator that means that _mimalloc_ is 1.6× faster than _tcmalloc_ +here. (This is surprising as that is not measured in a pure +allocation benchmark like _alloc-test_. We conjecture that we see this +outsized improvement here because _mimalloc_ has better locality in +the allocation which improves performance for the *other* computations +in a program as well). + +The single threaded _redis_ benchmark again shows that most allocators do well on such workloads. + +The _larsonN_ server benchmark by Larson and Krishnan \[2] allocates and frees between threads. They observed this +behavior (which they call _bleeding_) in actual server applications, and the benchmark simulates this. +Here, _mimalloc_ is quite a bit faster than _tcmalloc_ and _jemalloc_ probably due to the object migration between different threads. + +The _mstressN_ workload performs many allocations and re-allocations, +and migrates objects between threads (as in _larsonN_). However, it also +creates and destroys the _N_ worker threads a few times keeping some objects +alive beyond the life time of the allocating thread. We observed this +behavior in many larger server applications. + +The [_rptestN_](https://github.com/mjansson/rpmalloc-benchmark) benchmark +by Mattias Jansson is an allocator test originally designed +for _rpmalloc_, and tries to simulate realistic allocation patterns over +multiple threads. Here the differences between allocators become more apparent. + +The second benchmark set tests specific aspects of the allocators and +shows even more extreme differences between them.
+ +The _alloc-test_, by +[OLogN Technologies AG](http://ithare.com/testing-memory-allocators-ptmalloc2-tcmalloc-hoard-jemalloc-while-trying-to-simulate-real-world-loads/), is a very allocation intensive benchmark doing millions of +allocations in various size classes. The test is scaled such that when an +allocator performs almost identically on _alloc-test1_ as _alloc-testN_ it +means that it scales linearly. + +The _sh6bench_ and _sh8bench_ benchmarks are +developed by [MicroQuill](http://www.microquill.com/) as part of SmartHeap. +In _sh6bench_ _mimalloc_ does much +better than the others (more than 2.5× faster than _jemalloc_). +We cannot explain this well but believe it is +caused in part by the "reverse" free-ing pattern in _sh6bench_. +The _sh8bench_ is a variation with object migration +between threads; whereas _tcmalloc_ did well on _sh6bench_, the addition of object migration causes it to be 10× slower than before. + +The _xmalloc-testN_ benchmark by Lever and Boreham \[5] and Christian Eder, simulates an asymmetric workload where +some threads only allocate, and others only free -- they observed this pattern in +larger server applications. Here we see that +the _mimalloc_ technique of having non-contended sharded thread free +lists pays off as it outperforms others by a very large margin. Only _rpmalloc_, _tbb_, and _glibc_ also scale well on this benchmark. + +The _cache-scratch_ benchmark by Emery Berger \[1], and introduced with +the Hoard allocator to test for _passive-false_ sharing of cache lines. +With a single thread they all +perform the same, but when running with multiple threads the potential allocator +induced false sharing of the cache lines can cause large run-time differences. +Crundal \[6] describes in detail why the false cache line sharing occurs in the _tcmalloc_ design, and also discusses how this +can be avoided with some small implementation changes. 
+Only the _tbb_, _rpmalloc_ and _mesh_ allocators also avoid the +cache line sharing completely, while _Hoard_ and _glibc_ seem to mitigate +the effects. Kukanov and Voss \[7] describe in detail +how the design of _tbb_ avoids the false cache line sharing. + + +## On a 36-core Intel Xeon + +For completeness, here are the results on a big Amazon +[c5.18xlarge](https://aws.amazon.com/ec2/instance-types/#Compute_Optimized) instance +consisting of a 2×18-core Intel Xeon (Cascade Lake) at 3.4GHz (boost 3.5GHz) +with 144GiB ECC memory, running Ubuntu 20.04 with glibc 2.31, GCC 9.3.0, and +Clang 10.0.0. This time, the mimalloc allocators (mi, xmi, and smi) were +compiled with the Clang compiler instead of GCC. +The results are similar to the AMD results but it is interesting to +see the differences in the _larsonN_, _mstressN_, and _xmalloc-testN_ benchmarks. + + + + + +## Peak Working Set + +The following figure shows the peak working set (rss) of the allocators +on the benchmarks (on the c5.18xlarge instance). + + + + +Note that the _xmalloc-testN_ memory usage should be disregarded as it +allocates more the faster the program runs. Similarly, memory usage of +_larsonN_, _mstressN_, _rptestN_ and _sh8bench_ can vary depending on scheduling and +speed. Nevertheless, we hope to improve the memory usage on _mstressN_ +and _rptestN_ (just as _cfrac_, _larsonN_ and _sh8bench_ have a small working set which skews the results). + + + + +# References + +- \[1] Emery D. Berger, Kathryn S. McKinley, Robert D. Blumofe, and Paul R. Wilson. + _Hoard: A Scalable Memory Allocator for Multithreaded Applications_ + the Ninth International Conference on Architectural Support for Programming Languages and Operating Systems (ASPLOS-IX). Cambridge, MA, November 2000. + [pdf](http://www.cs.utexas.edu/users/mckinley/papers/asplos-2000.pdf) + +- \[2] P. Larson and M. Krishnan. _Memory allocation for long-running server applications_. + In ISMM, Vancouver, B.C., Canada, 1998. 
[pdf](http://citeseer.ist.psu.edu/viewdoc/download?doi=10.1.1.45.1947&rep=rep1&type=pdf) + +- \[3] D. Grunwald, B. Zorn, and R. Henderson. + _Improving the cache locality of memory allocation_. In R. Cartwright, editor, + Proceedings of the Conference on Programming Language Design and Implementation, pages 177–186, New York, NY, USA, June 1993. [pdf](http://citeseer.ist.psu.edu/viewdoc/download?doi=10.1.1.43.6621&rep=rep1&type=pdf) + +- \[4] J. Barnes and P. Hut. _A hierarchical O(n*log(n)) force-calculation algorithm_. Nature, 324:446-449, 1986. + +- \[5] C. Lever, and D. Boreham. _Malloc() Performance in a Multithreaded Linux Environment._ + In USENIX Annual Technical Conference, Freenix Session. San Diego, CA. Jun. 2000. + Available at + +- \[6] Timothy Crundal. _Reducing Active-False Sharing in TCMalloc_. 2016. CS16S1 project at the Australian National University. [pdf](http://courses.cecs.anu.edu.au/courses/CSPROJECTS/16S1/Reports/Timothy_Crundal_Report.pdf) + +- \[7] Alexey Kukanov, and Michael J Voss. + _The Foundations for Scalable Multi-Core Software in Intel Threading Building Blocks._ + Intel Technology Journal 11 (4). 2007 + +- \[8] Bobby Powers, David Tench, Emery D. Berger, and Andrew McGregor. + _Mesh: Compacting Memory Management for C/C++_ + In Proceedings of the 40th ACM SIGPLAN Conference on Programming Language Design and Implementation (PLDI'19), June 2019, pages 333-–346. + + + +# Contributing + +This project welcomes contributions and suggestions. Most contributions require you to agree to a +Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us +the rights to use your contribution. For details, visit https://cla.microsoft.com. + +When you submit a pull request, a CLA-bot will automatically determine whether you need to provide +a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the instructions +provided by the bot. 
You will only need to do this once across all repos using our CLA. + + +# Older Release Notes + +* 2021-11-14, `v1.7.3`, `v2.0.3` (beta): improved WASM support, improved macOS support and performance (including + M1), improved performance for v2 for large objects, Python integration improvements, more standard + installation directories, various small fixes. +* 2021-06-17, `v1.7.2`, `v2.0.2` (beta): support M1, better installation layout on Linux, fix + thread_id on Android, prefer 2-6TiB area for aligned allocation to work better on pre-windows 8, various small fixes. +* 2021-04-06, `v1.7.1`, `v2.0.1` (beta): fix bug in arena allocation for huge pages, improved aslr on large allocations, initial M1 support (still experimental). +* 2021-01-31, `v2.0.0`: beta release 2.0: new slice algorithm for managing internal mimalloc pages. +* 2021-01-31, `v1.7.0`: stable release 1.7: support explicit user provided memory regions, more precise statistics, + improve macOS overriding, initial support for Apple M1, improved DragonFly support, faster memcpy on Windows, various small fixes. + +* 2020-09-24, `v1.6.7`: stable release 1.6: using standard C atomics, passing tsan testing, improved + handling of failing to commit on Windows, add [`mi_process_info`](https://github.com/microsoft/mimalloc/blob/master/include/mimalloc.h#L156) api call. +* 2020-08-06, `v1.6.4`: stable release 1.6: improved error recovery in low-memory situations, + support for IllumOS and Haiku, NUMA support for Vista/XP, improved NUMA detection for AMD Ryzen, ubsan support. +* 2020-05-05, `v1.6.3`: stable release 1.6: improved behavior in out-of-memory situations, improved malloc zones on macOS, + build PIC static libraries by default, add option to abort on out-of-memory, line buffered statistics. 
+* 2020-04-20, `v1.6.2`: stable release 1.6: fix compilation on Android, MingW, Raspberry, and Conda, + stability fix for Windows 7, fix multiple mimalloc instances in one executable, fix `strnlen` overload, + fix aligned debug padding. +* 2020-02-17, `v1.6.1`: stable release 1.6: minor updates (build with clang-cl, fix alignment issue for small objects). +* 2020-02-09, `v1.6.0`: stable release 1.6: fixed potential memory leak, improved overriding + and thread local support on FreeBSD, NetBSD, DragonFly, and macOSX. New byte-precise + heap block overflow detection in debug mode (besides the double-free detection and free-list + corruption detection). Add `nodiscard` attribute to most allocation functions. + Enable `MIMALLOC_PAGE_RESET` by default. New reclamation strategy for abandoned heap pages + for better memory footprint. +* 2020-02-09, `v1.5.0`: stable release 1.5: improved free performance, small bug fixes. +* 2020-01-22, `v1.4.0`: stable release 1.4: improved performance for delayed OS page reset, +more eager concurrent free, addition of STL allocator, fixed potential memory leak. +* 2020-01-15, `v1.3.0`: stable release 1.3: bug fixes, improved randomness and [stronger +free list encoding](https://github.com/microsoft/mimalloc/blob/783e3377f79ee82af43a0793910a9f2d01ac7863/include/mimalloc-internal.h#L396) in secure mode. + +* 2019-12-22, `v1.2.2`: stable release 1.2: minor updates. +* 2019-11-22, `v1.2.0`: stable release 1.2: bug fixes, improved secure mode (free list corruption checks, double free mitigation). Improved dynamic overriding on Windows. +* 2019-10-07, `v1.1.0`: stable release 1.1. +* 2019-09-01, `v1.0.8`: pre-release 8: more robust windows dynamic overriding, initial huge page support. +* 2019-08-10, `v1.0.6`: pre-release 6: various performance improvements. 
diff --git a/yass/third_party/mimalloc/src/alloc-aligned.c b/yass/third_party/mimalloc/src/alloc-aligned.c new file mode 100644 index 0000000000..ba629ef30a --- /dev/null +++ b/yass/third_party/mimalloc/src/alloc-aligned.c @@ -0,0 +1,312 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2018-2021, Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. +-----------------------------------------------------------------------------*/ + +#include "mimalloc.h" +#include "mimalloc/internal.h" +#include "mimalloc/prim.h" // mi_prim_get_default_heap + +#include // memset + +// ------------------------------------------------------ +// Aligned Allocation +// ------------------------------------------------------ + +static bool mi_malloc_is_naturally_aligned( size_t size, size_t alignment ) { + // objects up to `MI_MAX_ALIGN_GUARANTEE` are allocated aligned to their size (see `segment.c:_mi_segment_page_start`). 
+ mi_assert_internal(_mi_is_power_of_two(alignment) && (alignment > 0)); + if (alignment > size) return false; + if (alignment <= MI_MAX_ALIGN_SIZE) return true; + const size_t bsize = mi_good_size(size); + return (bsize <= MI_MAX_ALIGN_GUARANTEE && (bsize & (alignment-1)) == 0); +} + +// Fallback aligned allocation that over-allocates -- split out for better codegen +static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_overalloc(mi_heap_t* const heap, const size_t size, const size_t alignment, const size_t offset, const bool zero) mi_attr_noexcept +{ + mi_assert_internal(size <= (MI_MAX_ALLOC_SIZE - MI_PADDING_SIZE)); + mi_assert_internal(alignment != 0 && _mi_is_power_of_two(alignment)); + + void* p; + size_t oversize; + if mi_unlikely(alignment > MI_BLOCK_ALIGNMENT_MAX) { + // use OS allocation for very large alignment and allocate inside a huge page (dedicated segment with 1 page) + // This can support alignments >= MI_SEGMENT_SIZE by ensuring the object can be aligned at a point in the + // first (and single) page such that the segment info is `MI_SEGMENT_SIZE` bytes before it (so it can be found by aligning the pointer down) + if mi_unlikely(offset != 0) { + // todo: cannot support offset alignment for very large alignments yet + #if MI_DEBUG > 0 + _mi_error_message(EOVERFLOW, "aligned allocation with a very large alignment cannot be used with an alignment offset (size %zu, alignment %zu, offset %zu)\n", size, alignment, offset); + #endif + return NULL; + } + oversize = (size <= MI_SMALL_SIZE_MAX ? MI_SMALL_SIZE_MAX + 1 /* ensure we use generic malloc path */ : size); + p = _mi_heap_malloc_zero_ex(heap, oversize, false, alignment); // the page block size should be large enough to align in the single huge page block + // zero afterwards as only the area from the aligned_p may be committed! 
+ if (p == NULL) return NULL; + } + else { + // otherwise over-allocate + oversize = size + alignment - 1; + p = _mi_heap_malloc_zero(heap, oversize, zero); + if (p == NULL) return NULL; + } + + // .. and align within the allocation + const uintptr_t align_mask = alignment - 1; // for any x, `(x & align_mask) == (x % alignment)` + const uintptr_t poffset = ((uintptr_t)p + offset) & align_mask; + const uintptr_t adjust = (poffset == 0 ? 0 : alignment - poffset); + mi_assert_internal(adjust < alignment); + void* aligned_p = (void*)((uintptr_t)p + adjust); + if (aligned_p != p) { + mi_page_t* page = _mi_ptr_page(p); + mi_page_set_has_aligned(page, true); + _mi_padding_shrink(page, (mi_block_t*)p, adjust + size); + } + // todo: expand padding if overallocated ? + + mi_assert_internal(mi_page_usable_block_size(_mi_ptr_page(p)) >= adjust + size); + mi_assert_internal(p == _mi_page_ptr_unalign(_mi_ptr_page(aligned_p), aligned_p)); + mi_assert_internal(((uintptr_t)aligned_p + offset) % alignment == 0); + mi_assert_internal(mi_usable_size(aligned_p)>=size); + mi_assert_internal(mi_usable_size(p) == mi_usable_size(aligned_p)+adjust); + + // now zero the block if needed + if (alignment > MI_BLOCK_ALIGNMENT_MAX) { + // for the tracker, on huge aligned allocations only the memory from the start of the large block is defined + mi_track_mem_undefined(aligned_p, size); + if (zero) { + _mi_memzero_aligned(aligned_p, mi_usable_size(aligned_p)); + } + } + + if (p != aligned_p) { + mi_track_align(p,aligned_p,adjust,mi_usable_size(aligned_p)); + } + return aligned_p; +} + +// Generic primitive aligned allocation -- split out for better codegen +static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_generic(mi_heap_t* const heap, const size_t size, const size_t alignment, const size_t offset, const bool zero) mi_attr_noexcept +{ + mi_assert_internal(alignment != 0 && _mi_is_power_of_two(alignment)); + // we don't allocate more than MI_MAX_ALLOC_SIZE (see ) + if mi_unlikely(size > 
(MI_MAX_ALLOC_SIZE - MI_PADDING_SIZE)) { + #if MI_DEBUG > 0 + _mi_error_message(EOVERFLOW, "aligned allocation request is too large (size %zu, alignment %zu)\n", size, alignment); + #endif + return NULL; + } + + // use regular allocation if it is guaranteed to fit the alignment constraints. + // this is important to try as the fast path in `mi_heap_malloc_zero_aligned` only works when there exist + // a page with the right block size, and if we always use the over-alloc fallback that would never happen. + if (offset == 0 && mi_malloc_is_naturally_aligned(size,alignment)) { + void* p = _mi_heap_malloc_zero(heap, size, zero); + mi_assert_internal(p == NULL || ((uintptr_t)p % alignment) == 0); + const bool is_aligned_or_null = (((uintptr_t)p) & (alignment-1))==0; + if mi_likely(is_aligned_or_null) { + return p; + } + else { + // this should never happen if the `mi_malloc_is_naturally_aligned` check is correct.. + mi_assert(false); + mi_free(p); + } + } + + // fall back to over-allocation + return mi_heap_malloc_zero_aligned_at_overalloc(heap,size,alignment,offset,zero); +} + +// Primitive aligned allocation +static void* mi_heap_malloc_zero_aligned_at(mi_heap_t* const heap, const size_t size, const size_t alignment, const size_t offset, const bool zero) mi_attr_noexcept +{ + // note: we don't require `size > offset`, we just guarantee that the address at offset is aligned regardless of the allocated size. 
+ if mi_unlikely(alignment == 0 || !_mi_is_power_of_two(alignment)) { // require power-of-two (see ) + #if MI_DEBUG > 0 + _mi_error_message(EOVERFLOW, "aligned allocation requires the alignment to be a power-of-two (size %zu, alignment %zu)\n", size, alignment); + #endif + return NULL; + } + + // try first if there happens to be a small block available with just the right alignment + if mi_likely(size <= MI_SMALL_SIZE_MAX && alignment <= size) { + const uintptr_t align_mask = alignment-1; // for any x, `(x & align_mask) == (x % alignment)` + const size_t padsize = size + MI_PADDING_SIZE; + mi_page_t* page = _mi_heap_get_free_small_page(heap, padsize); + if mi_likely(page->free != NULL) { + const bool is_aligned = (((uintptr_t)page->free + offset) & align_mask)==0; + if mi_likely(is_aligned) + { + #if MI_STAT>1 + mi_heap_stat_increase(heap, malloc, size); + #endif + void* p = (zero ? _mi_page_malloc_zeroed(heap,page,padsize) : _mi_page_malloc(heap,page,padsize)); // call specific page malloc for better codegen + mi_assert_internal(p != NULL); + mi_assert_internal(((uintptr_t)p + offset) % alignment == 0); + mi_track_malloc(p,size,zero); + return p; + } + } + } + + // fallback to generic aligned allocation + return mi_heap_malloc_zero_aligned_at_generic(heap, size, alignment, offset, zero); +} + + +// ------------------------------------------------------ +// Optimized mi_heap_malloc_aligned / mi_malloc_aligned +// ------------------------------------------------------ + +mi_decl_nodiscard mi_decl_restrict void* mi_heap_malloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset) mi_attr_noexcept { + return mi_heap_malloc_zero_aligned_at(heap, size, alignment, offset, false); +} + +mi_decl_nodiscard mi_decl_restrict void* mi_heap_malloc_aligned(mi_heap_t* heap, size_t size, size_t alignment) mi_attr_noexcept { + return mi_heap_malloc_aligned_at(heap, size, alignment, 0); +} + +// ensure a definition is emitted +#if defined(__cplusplus) +void* 
_mi_extern_heap_malloc_aligned = (void*)&mi_heap_malloc_aligned; +#endif + +// ------------------------------------------------------ +// Aligned Allocation +// ------------------------------------------------------ + +mi_decl_nodiscard mi_decl_restrict void* mi_heap_zalloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset) mi_attr_noexcept { + return mi_heap_malloc_zero_aligned_at(heap, size, alignment, offset, true); +} + +mi_decl_nodiscard mi_decl_restrict void* mi_heap_zalloc_aligned(mi_heap_t* heap, size_t size, size_t alignment) mi_attr_noexcept { + return mi_heap_zalloc_aligned_at(heap, size, alignment, 0); +} + +mi_decl_nodiscard mi_decl_restrict void* mi_heap_calloc_aligned_at(mi_heap_t* heap, size_t count, size_t size, size_t alignment, size_t offset) mi_attr_noexcept { + size_t total; + if (mi_count_size_overflow(count, size, &total)) return NULL; + return mi_heap_zalloc_aligned_at(heap, total, alignment, offset); +} + +mi_decl_nodiscard mi_decl_restrict void* mi_heap_calloc_aligned(mi_heap_t* heap, size_t count, size_t size, size_t alignment) mi_attr_noexcept { + return mi_heap_calloc_aligned_at(heap,count,size,alignment,0); +} + +mi_decl_nodiscard mi_decl_restrict void* mi_malloc_aligned_at(size_t size, size_t alignment, size_t offset) mi_attr_noexcept { + return mi_heap_malloc_aligned_at(mi_prim_get_default_heap(), size, alignment, offset); +} + +mi_decl_nodiscard mi_decl_restrict void* mi_malloc_aligned(size_t size, size_t alignment) mi_attr_noexcept { + return mi_heap_malloc_aligned(mi_prim_get_default_heap(), size, alignment); +} + +mi_decl_nodiscard mi_decl_restrict void* mi_zalloc_aligned_at(size_t size, size_t alignment, size_t offset) mi_attr_noexcept { + return mi_heap_zalloc_aligned_at(mi_prim_get_default_heap(), size, alignment, offset); +} + +mi_decl_nodiscard mi_decl_restrict void* mi_zalloc_aligned(size_t size, size_t alignment) mi_attr_noexcept { + return mi_heap_zalloc_aligned(mi_prim_get_default_heap(), size, 
alignment); +} + +mi_decl_nodiscard mi_decl_restrict void* mi_calloc_aligned_at(size_t count, size_t size, size_t alignment, size_t offset) mi_attr_noexcept { + return mi_heap_calloc_aligned_at(mi_prim_get_default_heap(), count, size, alignment, offset); +} + +mi_decl_nodiscard mi_decl_restrict void* mi_calloc_aligned(size_t count, size_t size, size_t alignment) mi_attr_noexcept { + return mi_heap_calloc_aligned(mi_prim_get_default_heap(), count, size, alignment); +} + + +// ------------------------------------------------------ +// Aligned re-allocation +// ------------------------------------------------------ + +static void* mi_heap_realloc_zero_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset, bool zero) mi_attr_noexcept { + mi_assert(alignment > 0); + if (alignment <= sizeof(uintptr_t)) return _mi_heap_realloc_zero(heap,p,newsize,zero); + if (p == NULL) return mi_heap_malloc_zero_aligned_at(heap,newsize,alignment,offset,zero); + size_t size = mi_usable_size(p); + if (newsize <= size && newsize >= (size - (size / 2)) + && (((uintptr_t)p + offset) % alignment) == 0) { + return p; // reallocation still fits, is aligned and not more than 50% waste + } + else { + // note: we don't zero allocate upfront so we only zero initialize the expanded part + void* newp = mi_heap_malloc_aligned_at(heap,newsize,alignment,offset); + if (newp != NULL) { + if (zero && newsize > size) { + // also set last word in the previous allocation to zero to ensure any padding is zero-initialized + size_t start = (size >= sizeof(intptr_t) ? size - sizeof(intptr_t) : 0); + _mi_memzero((uint8_t*)newp + start, newsize - start); + } + _mi_memcpy_aligned(newp, p, (newsize > size ? 
size : newsize)); + mi_free(p); // only free if successful + } + return newp; + } +} + +static void* mi_heap_realloc_zero_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, bool zero) mi_attr_noexcept { + mi_assert(alignment > 0); + if (alignment <= sizeof(uintptr_t)) return _mi_heap_realloc_zero(heap,p,newsize,zero); + size_t offset = ((uintptr_t)p % alignment); // use offset of previous allocation (p can be NULL) + return mi_heap_realloc_zero_aligned_at(heap,p,newsize,alignment,offset,zero); +} + +mi_decl_nodiscard void* mi_heap_realloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept { + return mi_heap_realloc_zero_aligned_at(heap,p,newsize,alignment,offset,false); +} + +mi_decl_nodiscard void* mi_heap_realloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment) mi_attr_noexcept { + return mi_heap_realloc_zero_aligned(heap,p,newsize,alignment,false); +} + +mi_decl_nodiscard void* mi_heap_rezalloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept { + return mi_heap_realloc_zero_aligned_at(heap, p, newsize, alignment, offset, true); +} + +mi_decl_nodiscard void* mi_heap_rezalloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment) mi_attr_noexcept { + return mi_heap_realloc_zero_aligned(heap, p, newsize, alignment, true); +} + +mi_decl_nodiscard void* mi_heap_recalloc_aligned_at(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept { + size_t total; + if (mi_count_size_overflow(newcount, size, &total)) return NULL; + return mi_heap_rezalloc_aligned_at(heap, p, total, alignment, offset); +} + +mi_decl_nodiscard void* mi_heap_recalloc_aligned(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept { + size_t total; + if (mi_count_size_overflow(newcount, size, &total)) return NULL; + return mi_heap_rezalloc_aligned(heap, p, total, 
alignment); +} + +mi_decl_nodiscard void* mi_realloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept { + return mi_heap_realloc_aligned_at(mi_prim_get_default_heap(), p, newsize, alignment, offset); +} + +mi_decl_nodiscard void* mi_realloc_aligned(void* p, size_t newsize, size_t alignment) mi_attr_noexcept { + return mi_heap_realloc_aligned(mi_prim_get_default_heap(), p, newsize, alignment); +} + +mi_decl_nodiscard void* mi_rezalloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept { + return mi_heap_rezalloc_aligned_at(mi_prim_get_default_heap(), p, newsize, alignment, offset); +} + +mi_decl_nodiscard void* mi_rezalloc_aligned(void* p, size_t newsize, size_t alignment) mi_attr_noexcept { + return mi_heap_rezalloc_aligned(mi_prim_get_default_heap(), p, newsize, alignment); +} + +mi_decl_nodiscard void* mi_recalloc_aligned_at(void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept { + return mi_heap_recalloc_aligned_at(mi_prim_get_default_heap(), p, newcount, size, alignment, offset); +} + +mi_decl_nodiscard void* mi_recalloc_aligned(void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept { + return mi_heap_recalloc_aligned(mi_prim_get_default_heap(), p, newcount, size, alignment); +} diff --git a/yass/third_party/mimalloc/src/alloc-override.c b/yass/third_party/mimalloc/src/alloc-override.c new file mode 100644 index 0000000000..12837cdd94 --- /dev/null +++ b/yass/third_party/mimalloc/src/alloc-override.c @@ -0,0 +1,314 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2018-2021, Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. 
+-----------------------------------------------------------------------------*/ + +#if !defined(MI_IN_ALLOC_C) +#error "this file should be included from 'alloc.c' (so aliases can work)" +#endif + +#if defined(MI_MALLOC_OVERRIDE) && defined(_WIN32) && !(defined(MI_SHARED_LIB) && defined(_DLL)) +#error "It is only possible to override "malloc" on Windows when building as a DLL (and linking the C runtime as a DLL)" +#endif + +#if defined(MI_MALLOC_OVERRIDE) && !(defined(_WIN32)) + +#if defined(__APPLE__) +#include +mi_decl_externc void vfree(void* p); +mi_decl_externc size_t malloc_size(const void* p); +mi_decl_externc size_t malloc_good_size(size_t size); +#endif + +// helper definition for C override of C++ new +typedef void* mi_nothrow_t; + +// ------------------------------------------------------ +// Override system malloc +// ------------------------------------------------------ + +#if (defined(__GNUC__) || defined(__clang__)) && !defined(__APPLE__) && !MI_TRACK_ENABLED + // gcc, clang: use aliasing to alias the exported function to one of our `mi_` functions + #if (defined(__GNUC__) && __GNUC__ >= 9) + #pragma GCC diagnostic ignored "-Wattributes" // or we get warnings that nodiscard is ignored on a forward + #define MI_FORWARD(fun) __attribute__((alias(#fun), used, visibility("default"), copy(fun))); + #else + #define MI_FORWARD(fun) __attribute__((alias(#fun), used, visibility("default"))); + #endif + #define MI_FORWARD1(fun,x) MI_FORWARD(fun) + #define MI_FORWARD2(fun,x,y) MI_FORWARD(fun) + #define MI_FORWARD3(fun,x,y,z) MI_FORWARD(fun) + #define MI_FORWARD0(fun,x) MI_FORWARD(fun) + #define MI_FORWARD02(fun,x,y) MI_FORWARD(fun) +#else + // otherwise use forwarding by calling our `mi_` function + #define MI_FORWARD1(fun,x) { return fun(x); } + #define MI_FORWARD2(fun,x,y) { return fun(x,y); } + #define MI_FORWARD3(fun,x,y,z) { return fun(x,y,z); } + #define MI_FORWARD0(fun,x) { fun(x); } + #define MI_FORWARD02(fun,x,y) { fun(x,y); } +#endif + + +#if 
defined(__APPLE__) && defined(MI_SHARED_LIB_EXPORT) && defined(MI_OSX_INTERPOSE) + // define MI_OSX_IS_INTERPOSED as we should not provide forwarding definitions for + // functions that are interposed (or the interposing does not work) + #define MI_OSX_IS_INTERPOSED + + mi_decl_externc size_t mi_malloc_size_checked(void *p) { + if (!mi_is_in_heap_region(p)) return 0; + return mi_usable_size(p); + } + + // use interposing so `DYLD_INSERT_LIBRARIES` works without `DYLD_FORCE_FLAT_NAMESPACE=1` + // See: + struct mi_interpose_s { + const void* replacement; + const void* target; + }; + #define MI_INTERPOSE_FUN(oldfun,newfun) { (const void*)&newfun, (const void*)&oldfun } + #define MI_INTERPOSE_MI(fun) MI_INTERPOSE_FUN(fun,mi_##fun) + + __attribute__((used)) static struct mi_interpose_s _mi_interposes[] __attribute__((section("__DATA, __interpose"))) = + { + MI_INTERPOSE_MI(malloc), + MI_INTERPOSE_MI(calloc), + MI_INTERPOSE_MI(realloc), + MI_INTERPOSE_MI(strdup), + #if defined(MAC_OS_X_VERSION_10_7) && MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_7 + MI_INTERPOSE_MI(strndup), + #endif + MI_INTERPOSE_MI(realpath), + MI_INTERPOSE_MI(posix_memalign), + MI_INTERPOSE_MI(reallocf), + MI_INTERPOSE_MI(valloc), + MI_INTERPOSE_FUN(malloc_size,mi_malloc_size_checked), + MI_INTERPOSE_MI(malloc_good_size), + #if defined(MAC_OS_X_VERSION_10_15) && MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_15 + MI_INTERPOSE_MI(aligned_alloc), + #endif + #ifdef MI_OSX_ZONE + // we interpose malloc_default_zone in alloc-override-osx.c so we can use mi_free safely + MI_INTERPOSE_MI(free), + MI_INTERPOSE_FUN(vfree,mi_free), + #else + // sometimes code allocates from default zone but deallocates using plain free :-( (like NxHashResizeToCapacity ) + MI_INTERPOSE_FUN(free,mi_cfree), // use safe free that checks if pointers are from us + MI_INTERPOSE_FUN(vfree,mi_cfree), + #endif + }; + + #ifdef __cplusplus + extern "C" { + #endif + void _ZdlPv(void* p); // delete + void _ZdaPv(void* p); // 
delete[] + void _ZdlPvm(void* p, size_t n); // delete + void _ZdaPvm(void* p, size_t n); // delete[] + void* _Znwm(size_t n); // new + void* _Znam(size_t n); // new[] + void* _ZnwmRKSt9nothrow_t(size_t n, mi_nothrow_t tag); // new nothrow + void* _ZnamRKSt9nothrow_t(size_t n, mi_nothrow_t tag); // new[] nothrow + #ifdef __cplusplus + } + #endif + __attribute__((used)) static struct mi_interpose_s _mi_cxx_interposes[] __attribute__((section("__DATA, __interpose"))) = + { + MI_INTERPOSE_FUN(_ZdlPv,mi_free), + MI_INTERPOSE_FUN(_ZdaPv,mi_free), + MI_INTERPOSE_FUN(_ZdlPvm,mi_free_size), + MI_INTERPOSE_FUN(_ZdaPvm,mi_free_size), + MI_INTERPOSE_FUN(_Znwm,mi_new), + MI_INTERPOSE_FUN(_Znam,mi_new), + MI_INTERPOSE_FUN(_ZnwmRKSt9nothrow_t,mi_new_nothrow), + MI_INTERPOSE_FUN(_ZnamRKSt9nothrow_t,mi_new_nothrow), + }; + +#elif defined(_MSC_VER) + // cannot override malloc unless using a dll. + // we just override new/delete which does work in a static library. +#else + // On all other systems forward allocation primitives to our API + mi_decl_export void* malloc(size_t size) MI_FORWARD1(mi_malloc, size) + mi_decl_export void* calloc(size_t size, size_t n) MI_FORWARD2(mi_calloc, size, n) + mi_decl_export void* realloc(void* p, size_t newsize) MI_FORWARD2(mi_realloc, p, newsize) + mi_decl_export void free(void* p) MI_FORWARD0(mi_free, p) + // In principle we do not need to forward `strdup`/`strndup` but on some systems these do not use `malloc` internally (but a more primitive call) + // We only override if `strdup` is not a macro (as on some older libc's, see issue #885) + #if !defined(strdup) + mi_decl_export char* strdup(const char* str) MI_FORWARD1(mi_strdup, str) + #endif + #if !defined(strndup) && (!defined(__APPLE__) || (defined(MAC_OS_X_VERSION_10_7) && MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_7)) + mi_decl_export char* strndup(const char* str, size_t n) MI_FORWARD2(mi_strndup, str, n) + #endif +#endif + +#if (defined(__GNUC__) || defined(__clang__)) && 
!defined(__APPLE__) +#pragma GCC visibility push(default) +#endif + +// ------------------------------------------------------ +// Override new/delete +// This is not really necessary as they usually call +// malloc/free anyway, but it improves performance. +// ------------------------------------------------------ +#ifdef __cplusplus + // ------------------------------------------------------ + // With a C++ compiler we override the new/delete operators. + // see + // ------------------------------------------------------ + #include + + #ifndef MI_OSX_IS_INTERPOSED + void operator delete(void* p) noexcept MI_FORWARD0(mi_free,p) + void operator delete[](void* p) noexcept MI_FORWARD0(mi_free,p) + + void* operator new(std::size_t n) noexcept(false) MI_FORWARD1(mi_new,n) + void* operator new[](std::size_t n) noexcept(false) MI_FORWARD1(mi_new,n) + + void* operator new (std::size_t n, const std::nothrow_t& tag) noexcept { MI_UNUSED(tag); return mi_new_nothrow(n); } + void* operator new[](std::size_t n, const std::nothrow_t& tag) noexcept { MI_UNUSED(tag); return mi_new_nothrow(n); } + + #if (__cplusplus >= 201402L || _MSC_VER >= 1916) + void operator delete (void* p, std::size_t n) noexcept MI_FORWARD02(mi_free_size,p,n) + void operator delete[](void* p, std::size_t n) noexcept MI_FORWARD02(mi_free_size,p,n) + #endif + #endif + + #if (__cplusplus > 201402L && defined(__cpp_aligned_new)) && (!defined(__GNUC__) || (__GNUC__ > 5)) + void operator delete (void* p, std::align_val_t al) noexcept { mi_free_aligned(p, static_cast(al)); } + void operator delete[](void* p, std::align_val_t al) noexcept { mi_free_aligned(p, static_cast(al)); } + void operator delete (void* p, std::size_t n, std::align_val_t al) noexcept { mi_free_size_aligned(p, n, static_cast(al)); }; + void operator delete[](void* p, std::size_t n, std::align_val_t al) noexcept { mi_free_size_aligned(p, n, static_cast(al)); }; + void operator delete (void* p, std::align_val_t al, const std::nothrow_t&) noexcept 
{ mi_free_aligned(p, static_cast(al)); } + void operator delete[](void* p, std::align_val_t al, const std::nothrow_t&) noexcept { mi_free_aligned(p, static_cast(al)); } + + void* operator new( std::size_t n, std::align_val_t al) noexcept(false) { return mi_new_aligned(n, static_cast(al)); } + void* operator new[]( std::size_t n, std::align_val_t al) noexcept(false) { return mi_new_aligned(n, static_cast(al)); } + void* operator new (std::size_t n, std::align_val_t al, const std::nothrow_t&) noexcept { return mi_new_aligned_nothrow(n, static_cast(al)); } + void* operator new[](std::size_t n, std::align_val_t al, const std::nothrow_t&) noexcept { return mi_new_aligned_nothrow(n, static_cast(al)); } + #endif + +#elif (defined(__GNUC__) || defined(__clang__)) + // ------------------------------------------------------ + // Override by defining the mangled C++ names of the operators (as + // used by GCC and CLang). + // See + // ------------------------------------------------------ + + void _ZdlPv(void* p) MI_FORWARD0(mi_free,p) // delete + void _ZdaPv(void* p) MI_FORWARD0(mi_free,p) // delete[] + void _ZdlPvm(void* p, size_t n) MI_FORWARD02(mi_free_size,p,n) + void _ZdaPvm(void* p, size_t n) MI_FORWARD02(mi_free_size,p,n) + + void _ZdlPvSt11align_val_t(void* p, size_t al) { mi_free_aligned(p,al); } + void _ZdaPvSt11align_val_t(void* p, size_t al) { mi_free_aligned(p,al); } + void _ZdlPvmSt11align_val_t(void* p, size_t n, size_t al) { mi_free_size_aligned(p,n,al); } + void _ZdaPvmSt11align_val_t(void* p, size_t n, size_t al) { mi_free_size_aligned(p,n,al); } + + void _ZdlPvRKSt9nothrow_t(void* p, mi_nothrow_t tag) { MI_UNUSED(tag); mi_free(p); } // operator delete(void*, std::nothrow_t const&) + void _ZdaPvRKSt9nothrow_t(void* p, mi_nothrow_t tag) { MI_UNUSED(tag); mi_free(p); } // operator delete[](void*, std::nothrow_t const&) + void _ZdlPvSt11align_val_tRKSt9nothrow_t(void* p, size_t al, mi_nothrow_t tag) { MI_UNUSED(tag); mi_free_aligned(p,al); } // operator 
delete(void*, std::align_val_t, std::nothrow_t const&) + void _ZdaPvSt11align_val_tRKSt9nothrow_t(void* p, size_t al, mi_nothrow_t tag) { MI_UNUSED(tag); mi_free_aligned(p,al); } // operator delete[](void*, std::align_val_t, std::nothrow_t const&) + + #if (MI_INTPTR_SIZE==8) + void* _Znwm(size_t n) MI_FORWARD1(mi_new,n) // new 64-bit + void* _Znam(size_t n) MI_FORWARD1(mi_new,n) // new[] 64-bit + void* _ZnwmRKSt9nothrow_t(size_t n, mi_nothrow_t tag) { MI_UNUSED(tag); return mi_new_nothrow(n); } + void* _ZnamRKSt9nothrow_t(size_t n, mi_nothrow_t tag) { MI_UNUSED(tag); return mi_new_nothrow(n); } + void* _ZnwmSt11align_val_t(size_t n, size_t al) MI_FORWARD2(mi_new_aligned, n, al) + void* _ZnamSt11align_val_t(size_t n, size_t al) MI_FORWARD2(mi_new_aligned, n, al) + void* _ZnwmSt11align_val_tRKSt9nothrow_t(size_t n, size_t al, mi_nothrow_t tag) { MI_UNUSED(tag); return mi_new_aligned_nothrow(n,al); } + void* _ZnamSt11align_val_tRKSt9nothrow_t(size_t n, size_t al, mi_nothrow_t tag) { MI_UNUSED(tag); return mi_new_aligned_nothrow(n,al); } + #elif (MI_INTPTR_SIZE==4) + void* _Znwj(size_t n) MI_FORWARD1(mi_new,n) // new 64-bit + void* _Znaj(size_t n) MI_FORWARD1(mi_new,n) // new[] 64-bit + void* _ZnwjRKSt9nothrow_t(size_t n, mi_nothrow_t tag) { MI_UNUSED(tag); return mi_new_nothrow(n); } + void* _ZnajRKSt9nothrow_t(size_t n, mi_nothrow_t tag) { MI_UNUSED(tag); return mi_new_nothrow(n); } + void* _ZnwjSt11align_val_t(size_t n, size_t al) MI_FORWARD2(mi_new_aligned, n, al) + void* _ZnajSt11align_val_t(size_t n, size_t al) MI_FORWARD2(mi_new_aligned, n, al) + void* _ZnwjSt11align_val_tRKSt9nothrow_t(size_t n, size_t al, mi_nothrow_t tag) { MI_UNUSED(tag); return mi_new_aligned_nothrow(n,al); } + void* _ZnajSt11align_val_tRKSt9nothrow_t(size_t n, size_t al, mi_nothrow_t tag) { MI_UNUSED(tag); return mi_new_aligned_nothrow(n,al); } + #else + #error "define overloads for new/delete for this platform (just for performance, can be skipped)" + #endif +#endif // __cplusplus + +// 
------------------------------------------------------ +// Further Posix & Unix functions definitions +// ------------------------------------------------------ + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef MI_OSX_IS_INTERPOSED + // Forward Posix/Unix calls as well + void* reallocf(void* p, size_t newsize) MI_FORWARD2(mi_reallocf,p,newsize) + size_t malloc_size(const void* p) MI_FORWARD1(mi_usable_size,p) + #if !defined(__ANDROID__) && !defined(__FreeBSD__) + size_t malloc_usable_size(void *p) MI_FORWARD1(mi_usable_size,p) + #else + size_t malloc_usable_size(const void *p) MI_FORWARD1(mi_usable_size,p) + #endif + + // No forwarding here due to aliasing/name mangling issues + void* valloc(size_t size) { return mi_valloc(size); } + void vfree(void* p) { mi_free(p); } + size_t malloc_good_size(size_t size) { return mi_malloc_good_size(size); } + int posix_memalign(void** p, size_t alignment, size_t size) { return mi_posix_memalign(p, alignment, size); } + + // `aligned_alloc` is only available when __USE_ISOC11 is defined. + // Note: it seems __USE_ISOC11 is not defined in musl (and perhaps other libc's) so we only check + // for it if using glibc. + // Note: Conda has a custom glibc where `aligned_alloc` is declared `static inline` and we cannot + // override it, but both _ISOC11_SOURCE and __USE_ISOC11 are undefined in Conda GCC7 or GCC9. + // Fortunately, in the case where `aligned_alloc` is declared as `static inline` it + // uses internally `memalign`, `posix_memalign`, or `_aligned_malloc` so we can avoid overriding it ourselves. 
+ #if !defined(__GLIBC__) || __USE_ISOC11 + void* aligned_alloc(size_t alignment, size_t size) { return mi_aligned_alloc(alignment, size); } + #endif +#endif + +// no forwarding here due to aliasing/name mangling issues +void cfree(void* p) { mi_free(p); } +void* pvalloc(size_t size) { return mi_pvalloc(size); } +void* memalign(size_t alignment, size_t size) { return mi_memalign(alignment, size); } +void* _aligned_malloc(size_t alignment, size_t size) { return mi_aligned_alloc(alignment, size); } +void* reallocarray(void* p, size_t count, size_t size) { return mi_reallocarray(p, count, size); } +// some systems define reallocarr so mark it as a weak symbol (#751) +mi_decl_weak int reallocarr(void* p, size_t count, size_t size) { return mi_reallocarr(p, count, size); } + +#if defined(__wasi__) + // forward __libc interface (see PR #667) + void* __libc_malloc(size_t size) MI_FORWARD1(mi_malloc, size) + void* __libc_calloc(size_t count, size_t size) MI_FORWARD2(mi_calloc, count, size) + void* __libc_realloc(void* p, size_t size) MI_FORWARD2(mi_realloc, p, size) + void __libc_free(void* p) MI_FORWARD0(mi_free, p) + void* __libc_memalign(size_t alignment, size_t size) { return mi_memalign(alignment, size); } + +#elif defined(__GLIBC__) && defined(__linux__) + // forward __libc interface (needed for glibc-based Linux distributions) + void* __libc_malloc(size_t size) MI_FORWARD1(mi_malloc,size) + void* __libc_calloc(size_t count, size_t size) MI_FORWARD2(mi_calloc,count,size) + void* __libc_realloc(void* p, size_t size) MI_FORWARD2(mi_realloc,p,size) + void __libc_free(void* p) MI_FORWARD0(mi_free,p) + void __libc_cfree(void* p) MI_FORWARD0(mi_free,p) + + void* __libc_valloc(size_t size) { return mi_valloc(size); } + void* __libc_pvalloc(size_t size) { return mi_pvalloc(size); } + void* __libc_memalign(size_t alignment, size_t size) { return mi_memalign(alignment,size); } + int __posix_memalign(void** p, size_t alignment, size_t size) { return 
mi_posix_memalign(p,alignment,size); } +#endif + +#ifdef __cplusplus +} +#endif + +#if (defined(__GNUC__) || defined(__clang__)) && !defined(__APPLE__) +#pragma GCC visibility pop +#endif + +#endif // MI_MALLOC_OVERRIDE && !_WIN32 diff --git a/yass/third_party/mimalloc/src/alloc-posix.c b/yass/third_party/mimalloc/src/alloc-posix.c new file mode 100644 index 0000000000..225752fd87 --- /dev/null +++ b/yass/third_party/mimalloc/src/alloc-posix.c @@ -0,0 +1,185 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2018-2021, Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. +-----------------------------------------------------------------------------*/ + +// ------------------------------------------------------------------------ +// mi prefixed publi definitions of various Posix, Unix, and C++ functions +// for convenience and used when overriding these functions. 
+// ------------------------------------------------------------------------ +#include "mimalloc.h" +#include "mimalloc/internal.h" + +// ------------------------------------------------------ +// Posix & Unix functions definitions +// ------------------------------------------------------ + +#include +#include // memset +#include // getenv + +#ifdef _MSC_VER +#pragma warning(disable:4996) // getenv _wgetenv +#endif + +#ifndef EINVAL +#define EINVAL 22 +#endif +#ifndef ENOMEM +#define ENOMEM 12 +#endif + + +mi_decl_nodiscard size_t mi_malloc_size(const void* p) mi_attr_noexcept { + // if (!mi_is_in_heap_region(p)) return 0; + return mi_usable_size(p); +} + +mi_decl_nodiscard size_t mi_malloc_usable_size(const void *p) mi_attr_noexcept { + // if (!mi_is_in_heap_region(p)) return 0; + return mi_usable_size(p); +} + +mi_decl_nodiscard size_t mi_malloc_good_size(size_t size) mi_attr_noexcept { + return mi_good_size(size); +} + +void mi_cfree(void* p) mi_attr_noexcept { + if (mi_is_in_heap_region(p)) { + mi_free(p); + } +} + +int mi_posix_memalign(void** p, size_t alignment, size_t size) mi_attr_noexcept { + // Note: The spec dictates we should not modify `*p` on an error. 
(issue#27) + // + if (p == NULL) return EINVAL; + if ((alignment % sizeof(void*)) != 0) return EINVAL; // natural alignment + // it is also required that alignment is a power of 2 and > 0; this is checked in `mi_malloc_aligned` + if (alignment==0 || !_mi_is_power_of_two(alignment)) return EINVAL; // not a power of 2 + void* q = mi_malloc_aligned(size, alignment); + if (q==NULL && size != 0) return ENOMEM; + mi_assert_internal(((uintptr_t)q % alignment) == 0); + *p = q; + return 0; +} + +mi_decl_nodiscard mi_decl_restrict void* mi_memalign(size_t alignment, size_t size) mi_attr_noexcept { + void* p = mi_malloc_aligned(size, alignment); + mi_assert_internal(((uintptr_t)p % alignment) == 0); + return p; +} + +mi_decl_nodiscard mi_decl_restrict void* mi_valloc(size_t size) mi_attr_noexcept { + return mi_memalign( _mi_os_page_size(), size ); +} + +mi_decl_nodiscard mi_decl_restrict void* mi_pvalloc(size_t size) mi_attr_noexcept { + size_t psize = _mi_os_page_size(); + if (size >= SIZE_MAX - psize) return NULL; // overflow + size_t asize = _mi_align_up(size, psize); + return mi_malloc_aligned(asize, psize); +} + +mi_decl_nodiscard mi_decl_restrict void* mi_aligned_alloc(size_t alignment, size_t size) mi_attr_noexcept { + // C11 requires the size to be an integral multiple of the alignment, see . + // unfortunately, it turns out quite some programs pass a size that is not an integral multiple so skip this check.. 
+ /* if mi_unlikely((size & (alignment - 1)) != 0) { // C11 requires alignment>0 && integral multiple, see + #if MI_DEBUG > 0 + _mi_error_message(EOVERFLOW, "(mi_)aligned_alloc requires the size to be an integral multiple of the alignment (size %zu, alignment %zu)\n", size, alignment); + #endif + return NULL; + } + */ + // C11 also requires alignment to be a power-of-two (and > 0) which is checked in mi_malloc_aligned + void* p = mi_malloc_aligned(size, alignment); + mi_assert_internal(((uintptr_t)p % alignment) == 0); + return p; +} + +mi_decl_nodiscard void* mi_reallocarray( void* p, size_t count, size_t size ) mi_attr_noexcept { // BSD + void* newp = mi_reallocn(p,count,size); + if (newp==NULL) { errno = ENOMEM; } + return newp; +} + +mi_decl_nodiscard int mi_reallocarr( void* p, size_t count, size_t size ) mi_attr_noexcept { // NetBSD + mi_assert(p != NULL); + if (p == NULL) { + errno = EINVAL; + return EINVAL; + } + void** op = (void**)p; + void* newp = mi_reallocarray(*op, count, size); + if mi_unlikely(newp == NULL) { return errno; } + *op = newp; + return 0; +} + +void* mi__expand(void* p, size_t newsize) mi_attr_noexcept { // Microsoft + void* res = mi_expand(p, newsize); + if (res == NULL) { errno = ENOMEM; } + return res; +} + +mi_decl_nodiscard mi_decl_restrict unsigned short* mi_wcsdup(const unsigned short* s) mi_attr_noexcept { + if (s==NULL) return NULL; + size_t len; + for(len = 0; s[len] != 0; len++) { } + size_t size = (len+1)*sizeof(unsigned short); + unsigned short* p = (unsigned short*)mi_malloc(size); + if (p != NULL) { + _mi_memcpy(p,s,size); + } + return p; +} + +mi_decl_nodiscard mi_decl_restrict unsigned char* mi_mbsdup(const unsigned char* s) mi_attr_noexcept { + return (unsigned char*)mi_strdup((const char*)s); +} + +int mi_dupenv_s(char** buf, size_t* size, const char* name) mi_attr_noexcept { + if (buf==NULL || name==NULL) return EINVAL; + if (size != NULL) *size = 0; + char* p = getenv(name); // mscver warning 4996 + if (p==NULL) { + 
*buf = NULL; + } + else { + *buf = mi_strdup(p); + if (*buf==NULL) return ENOMEM; + if (size != NULL) *size = _mi_strlen(p); + } + return 0; +} + +int mi_wdupenv_s(unsigned short** buf, size_t* size, const unsigned short* name) mi_attr_noexcept { + if (buf==NULL || name==NULL) return EINVAL; + if (size != NULL) *size = 0; +#if !defined(_WIN32) || (defined(WINAPI_FAMILY) && (WINAPI_FAMILY != WINAPI_FAMILY_DESKTOP_APP)) + // not supported + *buf = NULL; + return EINVAL; +#else + unsigned short* p = (unsigned short*)_wgetenv((const wchar_t*)name); // msvc warning 4996 + if (p==NULL) { + *buf = NULL; + } + else { + *buf = mi_wcsdup(p); + if (*buf==NULL) return ENOMEM; + if (size != NULL) *size = wcslen((const wchar_t*)p); + } + return 0; +#endif +} + +mi_decl_nodiscard void* mi_aligned_offset_recalloc(void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept { // Microsoft + return mi_recalloc_aligned_at(p, newcount, size, alignment, offset); +} + +mi_decl_nodiscard void* mi_aligned_recalloc(void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept { // Microsoft + return mi_recalloc_aligned(p, newcount, size, alignment); +} diff --git a/yass/third_party/mimalloc/src/alloc.c b/yass/third_party/mimalloc/src/alloc.c new file mode 100644 index 0000000000..86aaae757b --- /dev/null +++ b/yass/third_party/mimalloc/src/alloc.c @@ -0,0 +1,598 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2018-2024, Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. 
+-----------------------------------------------------------------------------*/ +#ifndef _DEFAULT_SOURCE +#define _DEFAULT_SOURCE // for realpath() on Linux +#endif + +#include "mimalloc.h" +#include "mimalloc/internal.h" +#include "mimalloc/atomic.h" +#include "mimalloc/prim.h" // _mi_prim_thread_id() + +#include // memset, strlen (for mi_strdup) +#include // malloc, abort + +#define MI_IN_ALLOC_C +#include "alloc-override.c" +#include "free.c" +#undef MI_IN_ALLOC_C + +// ------------------------------------------------------ +// Allocation +// ------------------------------------------------------ + +// Fast allocation in a page: just pop from the free list. +// Fall back to generic allocation only if the list is empty. +// Note: in release mode the (inlined) routine is about 7 instructions with a single test. +extern inline void* _mi_page_malloc_zero(mi_heap_t* heap, mi_page_t* page, size_t size, bool zero) mi_attr_noexcept +{ + mi_assert_internal(page->block_size == 0 /* empty heap */ || mi_page_block_size(page) >= size); + mi_block_t* const block = page->free; + if mi_unlikely(block == NULL) { + return _mi_malloc_generic(heap, size, zero, 0); + } + mi_assert_internal(block != NULL && _mi_ptr_page(block) == page); + // pop from the free list + page->free = mi_block_next(page, block); + page->used++; + mi_assert_internal(page->free == NULL || _mi_ptr_page(page->free) == page); + #if MI_DEBUG>3 + if (page->free_is_zero) { + mi_assert_expensive(mi_mem_is_zero(block+1,size - sizeof(*block))); + } + #endif + + // allow use of the block internally + // note: when tracking we need to avoid ever touching the MI_PADDING since + // that is tracked by valgrind etc. as non-accessible (through the red-zone, see `mimalloc/track.h`) + mi_track_mem_undefined(block, mi_page_usable_block_size(page)); + + // zero the block? 
note: we need to zero the full block size (issue #63) + if mi_unlikely(zero) { + mi_assert_internal(page->block_size != 0); // do not call with zero'ing for huge blocks (see _mi_malloc_generic) + mi_assert_internal(page->block_size >= MI_PADDING_SIZE); + if (page->free_is_zero) { + block->next = 0; + mi_track_mem_defined(block, page->block_size - MI_PADDING_SIZE); + } + else { + _mi_memzero_aligned(block, page->block_size - MI_PADDING_SIZE); + } + } + + #if (MI_DEBUG>0) && !MI_TRACK_ENABLED && !MI_TSAN + if (!zero && !mi_page_is_huge(page)) { + memset(block, MI_DEBUG_UNINIT, mi_page_usable_block_size(page)); + } + #elif (MI_SECURE!=0) + if (!zero) { block->next = 0; } // don't leak internal data + #endif + + #if (MI_STAT>0) + const size_t bsize = mi_page_usable_block_size(page); + if (bsize <= MI_MEDIUM_OBJ_SIZE_MAX) { + mi_heap_stat_increase(heap, normal, bsize); + mi_heap_stat_counter_increase(heap, normal_count, 1); + #if (MI_STAT>1) + const size_t bin = _mi_bin(bsize); + mi_heap_stat_increase(heap, normal_bins[bin], 1); + #endif + } + #endif + + #if MI_PADDING // && !MI_TRACK_ENABLED + mi_padding_t* const padding = (mi_padding_t*)((uint8_t*)block + mi_page_usable_block_size(page)); + ptrdiff_t delta = ((uint8_t*)padding - (uint8_t*)block - (size - MI_PADDING_SIZE)); + #if (MI_DEBUG>=2) + mi_assert_internal(delta >= 0 && mi_page_usable_block_size(page) >= (size - MI_PADDING_SIZE + delta)); + #endif + mi_track_mem_defined(padding,sizeof(mi_padding_t)); // note: re-enable since mi_page_usable_block_size may set noaccess + padding->canary = (uint32_t)(mi_ptr_encode(page,block,page->keys)); + padding->delta = (uint32_t)(delta); + #if MI_PADDING_CHECK + if (!mi_page_is_huge(page)) { + uint8_t* fill = (uint8_t*)padding - delta; + const size_t maxpad = (delta > MI_MAX_ALIGN_SIZE ? 
MI_MAX_ALIGN_SIZE : delta); // set at most N initial padding bytes + for (size_t i = 0; i < maxpad; i++) { fill[i] = MI_DEBUG_PADDING; } + } + #endif + #endif + + return block; +} + +// extra entries for improved efficiency in `alloc-aligned.c`. +extern void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size) mi_attr_noexcept { + return _mi_page_malloc_zero(heap,page,size,false); +} +extern void* _mi_page_malloc_zeroed(mi_heap_t* heap, mi_page_t* page, size_t size) mi_attr_noexcept { + return _mi_page_malloc_zero(heap,page,size,true); +} + +static inline mi_decl_restrict void* mi_heap_malloc_small_zero(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept { + mi_assert(heap != NULL); + #if MI_DEBUG + const uintptr_t tid = _mi_thread_id(); + mi_assert(heap->thread_id == 0 || heap->thread_id == tid); // heaps are thread local + #endif + mi_assert(size <= MI_SMALL_SIZE_MAX); + #if (MI_PADDING) + if (size == 0) { size = sizeof(void*); } + #endif + + mi_page_t* page = _mi_heap_get_free_small_page(heap, size + MI_PADDING_SIZE); + void* const p = _mi_page_malloc_zero(heap, page, size + MI_PADDING_SIZE, zero); + mi_track_malloc(p,size,zero); + + #if MI_STAT>1 + if (p != NULL) { + if (!mi_heap_is_initialized(heap)) { heap = mi_prim_get_default_heap(); } + mi_heap_stat_increase(heap, malloc, mi_usable_size(p)); + } + #endif + #if MI_DEBUG>3 + if (p != NULL && zero) { + mi_assert_expensive(mi_mem_is_zero(p, size)); + } + #endif + return p; +} + +// allocate a small block +mi_decl_nodiscard extern inline mi_decl_restrict void* mi_heap_malloc_small(mi_heap_t* heap, size_t size) mi_attr_noexcept { + return mi_heap_malloc_small_zero(heap, size, false); +} + +mi_decl_nodiscard extern inline mi_decl_restrict void* mi_malloc_small(size_t size) mi_attr_noexcept { + return mi_heap_malloc_small(mi_prim_get_default_heap(), size); +} + +// The main allocation function +extern inline void* _mi_heap_malloc_zero_ex(mi_heap_t* heap, size_t size, bool zero, size_t 
huge_alignment) mi_attr_noexcept { + if mi_likely(size <= MI_SMALL_SIZE_MAX) { + mi_assert_internal(huge_alignment == 0); + return mi_heap_malloc_small_zero(heap, size, zero); + } + else { + mi_assert(heap!=NULL); + mi_assert(heap->thread_id == 0 || heap->thread_id == _mi_thread_id()); // heaps are thread local + void* const p = _mi_malloc_generic(heap, size + MI_PADDING_SIZE, zero, huge_alignment); // note: size can overflow but it is detected in malloc_generic + mi_track_malloc(p,size,zero); + #if MI_STAT>1 + if (p != NULL) { + if (!mi_heap_is_initialized(heap)) { heap = mi_prim_get_default_heap(); } + mi_heap_stat_increase(heap, malloc, mi_usable_size(p)); + } + #endif + #if MI_DEBUG>3 + if (p != NULL && zero) { + mi_assert_expensive(mi_mem_is_zero(p, size)); + } + #endif + return p; + } +} + +extern inline void* _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept { + return _mi_heap_malloc_zero_ex(heap, size, zero, 0); +} + +mi_decl_nodiscard extern inline mi_decl_restrict void* mi_heap_malloc(mi_heap_t* heap, size_t size) mi_attr_noexcept { + return _mi_heap_malloc_zero(heap, size, false); +} + +mi_decl_nodiscard extern inline mi_decl_restrict void* mi_malloc(size_t size) mi_attr_noexcept { + return mi_heap_malloc(mi_prim_get_default_heap(), size); +} + +// zero initialized small block +mi_decl_nodiscard mi_decl_restrict void* mi_zalloc_small(size_t size) mi_attr_noexcept { + return mi_heap_malloc_small_zero(mi_prim_get_default_heap(), size, true); +} + +mi_decl_nodiscard extern inline mi_decl_restrict void* mi_heap_zalloc(mi_heap_t* heap, size_t size) mi_attr_noexcept { + return _mi_heap_malloc_zero(heap, size, true); +} + +mi_decl_nodiscard mi_decl_restrict void* mi_zalloc(size_t size) mi_attr_noexcept { + return mi_heap_zalloc(mi_prim_get_default_heap(),size); +} + + +mi_decl_nodiscard extern inline mi_decl_restrict void* mi_heap_calloc(mi_heap_t* heap, size_t count, size_t size) mi_attr_noexcept { + size_t total; + if 
(mi_count_size_overflow(count,size,&total)) return NULL; + return mi_heap_zalloc(heap,total); +} + +mi_decl_nodiscard mi_decl_restrict void* mi_calloc(size_t count, size_t size) mi_attr_noexcept { + return mi_heap_calloc(mi_prim_get_default_heap(),count,size); +} + +// Uninitialized `calloc` +mi_decl_nodiscard extern mi_decl_restrict void* mi_heap_mallocn(mi_heap_t* heap, size_t count, size_t size) mi_attr_noexcept { + size_t total; + if (mi_count_size_overflow(count, size, &total)) return NULL; + return mi_heap_malloc(heap, total); +} + +mi_decl_nodiscard mi_decl_restrict void* mi_mallocn(size_t count, size_t size) mi_attr_noexcept { + return mi_heap_mallocn(mi_prim_get_default_heap(),count,size); +} + +// Expand (or shrink) in place (or fail) +void* mi_expand(void* p, size_t newsize) mi_attr_noexcept { + #if MI_PADDING + // we do not shrink/expand with padding enabled + MI_UNUSED(p); MI_UNUSED(newsize); + return NULL; + #else + if (p == NULL) return NULL; + const size_t size = _mi_usable_size(p,"mi_expand"); + if (newsize > size) return NULL; + return p; // it fits + #endif +} + +void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool zero) mi_attr_noexcept { + // if p == NULL then behave as malloc. + // else if size == 0 then reallocate to a zero-sized block (and don't return NULL, just as mi_malloc(0)). + // (this means that returning NULL always indicates an error, and `p` will not have been freed in that case.) + const size_t size = _mi_usable_size(p,"mi_realloc"); // also works if p == NULL (with size 0) + if mi_unlikely(newsize <= size && newsize >= (size / 2) && newsize > 0) { // note: newsize must be > 0 or otherwise we return NULL for realloc(NULL,0) + mi_assert_internal(p!=NULL); + // todo: do not track as the usable size is still the same in the free; adjust potential padding? 
+ // mi_track_resize(p,size,newsize) + // if (newsize < size) { mi_track_mem_noaccess((uint8_t*)p + newsize, size - newsize); } + return p; // reallocation still fits and not more than 50% waste + } + void* newp = mi_heap_malloc(heap,newsize); + if mi_likely(newp != NULL) { + if (zero && newsize > size) { + // also set last word in the previous allocation to zero to ensure any padding is zero-initialized + const size_t start = (size >= sizeof(intptr_t) ? size - sizeof(intptr_t) : 0); + _mi_memzero((uint8_t*)newp + start, newsize - start); + } + else if (newsize == 0) { + ((uint8_t*)newp)[0] = 0; // work around for applications that expect zero-reallocation to be zero initialized (issue #725) + } + if mi_likely(p != NULL) { + const size_t copysize = (newsize > size ? size : newsize); + mi_track_mem_defined(p,copysize); // _mi_useable_size may be too large for byte precise memory tracking.. + _mi_memcpy(newp, p, copysize); + mi_free(p); // only free the original pointer if successful + } + } + return newp; +} + +mi_decl_nodiscard void* mi_heap_realloc(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept { + return _mi_heap_realloc_zero(heap, p, newsize, false); +} + +mi_decl_nodiscard void* mi_heap_reallocn(mi_heap_t* heap, void* p, size_t count, size_t size) mi_attr_noexcept { + size_t total; + if (mi_count_size_overflow(count, size, &total)) return NULL; + return mi_heap_realloc(heap, p, total); +} + + +// Reallocate but free `p` on errors +mi_decl_nodiscard void* mi_heap_reallocf(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept { + void* newp = mi_heap_realloc(heap, p, newsize); + if (newp==NULL && p!=NULL) mi_free(p); + return newp; +} + +mi_decl_nodiscard void* mi_heap_rezalloc(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept { + return _mi_heap_realloc_zero(heap, p, newsize, true); +} + +mi_decl_nodiscard void* mi_heap_recalloc(mi_heap_t* heap, void* p, size_t count, size_t size) mi_attr_noexcept { + size_t total; + if 
(mi_count_size_overflow(count, size, &total)) return NULL; + return mi_heap_rezalloc(heap, p, total); +} + + +mi_decl_nodiscard void* mi_realloc(void* p, size_t newsize) mi_attr_noexcept { + return mi_heap_realloc(mi_prim_get_default_heap(),p,newsize); +} + +mi_decl_nodiscard void* mi_reallocn(void* p, size_t count, size_t size) mi_attr_noexcept { + return mi_heap_reallocn(mi_prim_get_default_heap(),p,count,size); +} + +// Reallocate but free `p` on errors +mi_decl_nodiscard void* mi_reallocf(void* p, size_t newsize) mi_attr_noexcept { + return mi_heap_reallocf(mi_prim_get_default_heap(),p,newsize); +} + +mi_decl_nodiscard void* mi_rezalloc(void* p, size_t newsize) mi_attr_noexcept { + return mi_heap_rezalloc(mi_prim_get_default_heap(), p, newsize); +} + +mi_decl_nodiscard void* mi_recalloc(void* p, size_t count, size_t size) mi_attr_noexcept { + return mi_heap_recalloc(mi_prim_get_default_heap(), p, count, size); +} + + + +// ------------------------------------------------------ +// strdup, strndup, and realpath +// ------------------------------------------------------ + +// `strdup` using mi_malloc +mi_decl_nodiscard mi_decl_restrict char* mi_heap_strdup(mi_heap_t* heap, const char* s) mi_attr_noexcept { + if (s == NULL) return NULL; + size_t len = _mi_strlen(s); + char* t = (char*)mi_heap_malloc(heap,len+1); + if (t == NULL) return NULL; + _mi_memcpy(t, s, len); + t[len] = 0; + return t; +} + +mi_decl_nodiscard mi_decl_restrict char* mi_strdup(const char* s) mi_attr_noexcept { + return mi_heap_strdup(mi_prim_get_default_heap(), s); +} + +// `strndup` using mi_malloc +mi_decl_nodiscard mi_decl_restrict char* mi_heap_strndup(mi_heap_t* heap, const char* s, size_t n) mi_attr_noexcept { + if (s == NULL) return NULL; + const size_t len = _mi_strnlen(s,n); // len <= n + char* t = (char*)mi_heap_malloc(heap, len+1); + if (t == NULL) return NULL; + _mi_memcpy(t, s, len); + t[len] = 0; + return t; +} + +mi_decl_nodiscard mi_decl_restrict char* mi_strndup(const char* s, 
size_t n) mi_attr_noexcept { + return mi_heap_strndup(mi_prim_get_default_heap(),s,n); +} + +#ifndef __wasi__ +// `realpath` using mi_malloc +#ifdef _WIN32 +#ifndef PATH_MAX +#define PATH_MAX MAX_PATH +#endif +#include +mi_decl_nodiscard mi_decl_restrict char* mi_heap_realpath(mi_heap_t* heap, const char* fname, char* resolved_name) mi_attr_noexcept { + // todo: use GetFullPathNameW to allow longer file names + char buf[PATH_MAX]; + DWORD res = GetFullPathNameA(fname, PATH_MAX, (resolved_name == NULL ? buf : resolved_name), NULL); + if (res == 0) { + errno = GetLastError(); return NULL; + } + else if (res > PATH_MAX) { + errno = EINVAL; return NULL; + } + else if (resolved_name != NULL) { + return resolved_name; + } + else { + return mi_heap_strndup(heap, buf, PATH_MAX); + } +} +#else +/* +#include // pathconf +static size_t mi_path_max(void) { + static size_t path_max = 0; + if (path_max <= 0) { + long m = pathconf("/",_PC_PATH_MAX); + if (m <= 0) path_max = 4096; // guess + else if (m < 256) path_max = 256; // at least 256 + else path_max = m; + } + return path_max; +} +*/ +char* mi_heap_realpath(mi_heap_t* heap, const char* fname, char* resolved_name) mi_attr_noexcept { + if (resolved_name != NULL) { + return realpath(fname,resolved_name); + } + else { + char* rname = realpath(fname, NULL); + if (rname == NULL) return NULL; + char* result = mi_heap_strdup(heap, rname); + mi_cfree(rname); // use checked free (which may be redirected to our free but that's ok) + // note: with ASAN realpath is intercepted and mi_cfree may leak the returned pointer :-( + return result; + } + /* + const size_t n = mi_path_max(); + char* buf = (char*)mi_malloc(n+1); + if (buf == NULL) { + errno = ENOMEM; + return NULL; + } + char* rname = realpath(fname,buf); + char* result = mi_heap_strndup(heap,rname,n); // ok if `rname==NULL` + mi_free(buf); + return result; + } + */ +} +#endif + +mi_decl_nodiscard mi_decl_restrict char* mi_realpath(const char* fname, char* resolved_name) 
mi_attr_noexcept { + return mi_heap_realpath(mi_prim_get_default_heap(),fname,resolved_name); +} +#endif + +/*------------------------------------------------------- +C++ new and new_aligned +The standard requires calling into `get_new_handler` and +throwing the bad_alloc exception on failure. If we compile +with a C++ compiler we can implement this precisely. If we +use a C compiler we cannot throw a `bad_alloc` exception +but we call `exit` instead (i.e. not returning). +-------------------------------------------------------*/ + +#ifdef __cplusplus +#include +static bool mi_try_new_handler(bool nothrow) { + #if defined(_MSC_VER) || (__cplusplus >= 201103L) + std::new_handler h = std::get_new_handler(); + #else + std::new_handler h = std::set_new_handler(); + std::set_new_handler(h); + #endif + if (h==NULL) { + _mi_error_message(ENOMEM, "out of memory in 'new'"); + #if defined(_CPPUNWIND) || defined(__cpp_exceptions) // exceptions are not always enabled + if (!nothrow) { + throw std::bad_alloc(); + } + #else + MI_UNUSED(nothrow); + #endif + return false; + } + else { + h(); + return true; + } +} +#else +typedef void (*std_new_handler_t)(void); + +#if (defined(__GNUC__) || (defined(__clang__) && !defined(_MSC_VER))) // exclude clang-cl, see issue #631 +std_new_handler_t __attribute__((weak)) _ZSt15get_new_handlerv(void) { + return NULL; +} +static std_new_handler_t mi_get_new_handler(void) { + return _ZSt15get_new_handlerv(); +} +#else +// note: on windows we could dynamically link to `?get_new_handler@std@@YAP6AXXZXZ`. 
+static std_new_handler_t mi_get_new_handler() { + return NULL; +} +#endif + +static bool mi_try_new_handler(bool nothrow) { + std_new_handler_t h = mi_get_new_handler(); + if (h==NULL) { + _mi_error_message(ENOMEM, "out of memory in 'new'"); + if (!nothrow) { + abort(); // cannot throw in plain C, use abort + } + return false; + } + else { + h(); + return true; + } +} +#endif + +mi_decl_export mi_decl_noinline void* mi_heap_try_new(mi_heap_t* heap, size_t size, bool nothrow ) { + void* p = NULL; + while(p == NULL && mi_try_new_handler(nothrow)) { + p = mi_heap_malloc(heap,size); + } + return p; +} + +static mi_decl_noinline void* mi_try_new(size_t size, bool nothrow) { + return mi_heap_try_new(mi_prim_get_default_heap(), size, nothrow); +} + + +mi_decl_nodiscard mi_decl_restrict void* mi_heap_alloc_new(mi_heap_t* heap, size_t size) { + void* p = mi_heap_malloc(heap,size); + if mi_unlikely(p == NULL) return mi_heap_try_new(heap, size, false); + return p; +} + +mi_decl_nodiscard mi_decl_restrict void* mi_new(size_t size) { + return mi_heap_alloc_new(mi_prim_get_default_heap(), size); +} + + +mi_decl_nodiscard mi_decl_restrict void* mi_heap_alloc_new_n(mi_heap_t* heap, size_t count, size_t size) { + size_t total; + if mi_unlikely(mi_count_size_overflow(count, size, &total)) { + mi_try_new_handler(false); // on overflow we invoke the try_new_handler once to potentially throw std::bad_alloc + return NULL; + } + else { + return mi_heap_alloc_new(heap,total); + } +} + +mi_decl_nodiscard mi_decl_restrict void* mi_new_n(size_t count, size_t size) { + return mi_heap_alloc_new_n(mi_prim_get_default_heap(), size, count); +} + + +mi_decl_nodiscard mi_decl_restrict void* mi_new_nothrow(size_t size) mi_attr_noexcept { + void* p = mi_malloc(size); + if mi_unlikely(p == NULL) return mi_try_new(size, true); + return p; +} + +mi_decl_nodiscard mi_decl_restrict void* mi_new_aligned(size_t size, size_t alignment) { + void* p; + do { + p = mi_malloc_aligned(size, alignment); + } + 
while(p == NULL && mi_try_new_handler(false)); + return p; +} + +mi_decl_nodiscard mi_decl_restrict void* mi_new_aligned_nothrow(size_t size, size_t alignment) mi_attr_noexcept { + void* p; + do { + p = mi_malloc_aligned(size, alignment); + } + while(p == NULL && mi_try_new_handler(true)); + return p; +} + +mi_decl_nodiscard void* mi_new_realloc(void* p, size_t newsize) { + void* q; + do { + q = mi_realloc(p, newsize); + } while (q == NULL && mi_try_new_handler(false)); + return q; +} + +mi_decl_nodiscard void* mi_new_reallocn(void* p, size_t newcount, size_t size) { + size_t total; + if mi_unlikely(mi_count_size_overflow(newcount, size, &total)) { + mi_try_new_handler(false); // on overflow we invoke the try_new_handler once to potentially throw std::bad_alloc + return NULL; + } + else { + return mi_new_realloc(p, total); + } +} + +// ------------------------------------------------------ +// ensure explicit external inline definitions are emitted! +// ------------------------------------------------------ + +#ifdef __cplusplus +void* _mi_externs[] = { + (void*)&_mi_page_malloc, + (void*)&_mi_heap_malloc_zero, + (void*)&_mi_heap_malloc_zero_ex, + (void*)&mi_malloc, + (void*)&mi_malloc_small, + (void*)&mi_zalloc_small, + (void*)&mi_heap_malloc, + (void*)&mi_heap_zalloc, + (void*)&mi_heap_malloc_small, + // (void*)&mi_heap_alloc_new, + // (void*)&mi_heap_alloc_new_n +}; +#endif diff --git a/yass/third_party/mimalloc/src/arena.c b/yass/third_party/mimalloc/src/arena.c new file mode 100644 index 0000000000..648ee844fe --- /dev/null +++ b/yass/third_party/mimalloc/src/arena.c @@ -0,0 +1,1108 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2019-2023, Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. 
+-----------------------------------------------------------------------------*/ + +/* ---------------------------------------------------------------------------- +"Arenas" are fixed area's of OS memory from which we can allocate +large blocks (>= MI_ARENA_MIN_BLOCK_SIZE, 4MiB). +In contrast to the rest of mimalloc, the arenas are shared between +threads and need to be accessed using atomic operations. + +Arenas are used to for huge OS page (1GiB) reservations or for reserving +OS memory upfront which can be improve performance or is sometimes needed +on embedded devices. We can also employ this with WASI or `sbrk` systems +to reserve large arenas upfront and be able to reuse the memory more effectively. + +The arena allocation needs to be thread safe and we use an atomic bitmap to allocate. +-----------------------------------------------------------------------------*/ +#include "mimalloc.h" +#include "mimalloc/internal.h" +#include "mimalloc/atomic.h" + +#include // memset +#include // ENOMEM + +#include "bitmap.h" // atomic bitmap + +/* ----------------------------------------------------------- + Arena allocation +----------------------------------------------------------- */ + +// Block info: bit 0 contains the `in_use` bit, the upper bits the +// size in count of arena blocks. 
+typedef uintptr_t mi_block_info_t; +#define MI_ARENA_BLOCK_SIZE (MI_SEGMENT_SIZE) // 64MiB (must be at least MI_SEGMENT_ALIGN) +#define MI_ARENA_MIN_OBJ_SIZE (MI_ARENA_BLOCK_SIZE/2) // 32MiB +#define MI_MAX_ARENAS (112) // not more than 126 (since we use 7 bits in the memid and an arena index + 1) + +// A memory arena descriptor +typedef struct mi_arena_s { + mi_arena_id_t id; // arena id; 0 for non-specific + mi_memid_t memid; // memid of the memory area + _Atomic(uint8_t*) start; // the start of the memory area + size_t block_count; // size of the area in arena blocks (of `MI_ARENA_BLOCK_SIZE`) + size_t field_count; // number of bitmap fields (where `field_count * MI_BITMAP_FIELD_BITS >= block_count`) + size_t meta_size; // size of the arena structure itself (including its bitmaps) + mi_memid_t meta_memid; // memid of the arena structure itself (OS or static allocation) + int numa_node; // associated NUMA node + bool exclusive; // only allow allocations if specifically for this arena + bool is_large; // memory area consists of large- or huge OS pages (always committed) + _Atomic(size_t) search_idx; // optimization to start the search for free blocks + _Atomic(mi_msecs_t) purge_expire; // expiration time when blocks should be decommitted from `blocks_decommit`. + mi_bitmap_field_t* blocks_dirty; // are the blocks potentially non-zero? + mi_bitmap_field_t* blocks_committed; // are the blocks committed? (can be NULL for memory that cannot be decommitted) + mi_bitmap_field_t* blocks_purge; // blocks that can be (reset) decommitted. (can be NULL for memory that cannot be (reset) decommitted) + mi_bitmap_field_t* blocks_abandoned; // blocks that start with an abandoned segment. (This crosses API's but it is convenient to have here) + mi_bitmap_field_t blocks_inuse[1]; // in-place bitmap of in-use blocks (of size `field_count`) + // do not add further fields here as the dirty, committed, purged, and abandoned bitmaps follow the inuse bitmap fields. 
+} mi_arena_t; + + +// The available arenas +static mi_decl_cache_align _Atomic(mi_arena_t*) mi_arenas[MI_MAX_ARENAS]; +static mi_decl_cache_align _Atomic(size_t) mi_arena_count; // = 0 + + +//static bool mi_manage_os_memory_ex2(void* start, size_t size, bool is_large, int numa_node, bool exclusive, mi_memid_t memid, mi_arena_id_t* arena_id) mi_attr_noexcept; + +/* ----------------------------------------------------------- + Arena id's + id = arena_index + 1 +----------------------------------------------------------- */ + +static size_t mi_arena_id_index(mi_arena_id_t id) { + return (size_t)(id <= 0 ? MI_MAX_ARENAS : id - 1); +} + +static mi_arena_id_t mi_arena_id_create(size_t arena_index) { + mi_assert_internal(arena_index < MI_MAX_ARENAS); + return (int)arena_index + 1; +} + +mi_arena_id_t _mi_arena_id_none(void) { + return 0; +} + +static bool mi_arena_id_is_suitable(mi_arena_id_t arena_id, bool arena_is_exclusive, mi_arena_id_t req_arena_id) { + return ((!arena_is_exclusive && req_arena_id == _mi_arena_id_none()) || + (arena_id == req_arena_id)); +} + +bool _mi_arena_memid_is_suitable(mi_memid_t memid, mi_arena_id_t request_arena_id) { + if (memid.memkind == MI_MEM_ARENA) { + return mi_arena_id_is_suitable(memid.mem.arena.id, memid.mem.arena.is_exclusive, request_arena_id); + } + else { + return mi_arena_id_is_suitable(_mi_arena_id_none(), false, request_arena_id); + } +} + +bool _mi_arena_memid_is_os_allocated(mi_memid_t memid) { + return (memid.memkind == MI_MEM_OS); +} + +/* ----------------------------------------------------------- + Arena allocations get a (currently) 16-bit memory id where the + lower 8 bits are the arena id, and the upper bits the block index. 
+----------------------------------------------------------- */ + +static size_t mi_block_count_of_size(size_t size) { + return _mi_divide_up(size, MI_ARENA_BLOCK_SIZE); +} + +static size_t mi_arena_block_size(size_t bcount) { + return (bcount * MI_ARENA_BLOCK_SIZE); +} + +static size_t mi_arena_size(mi_arena_t* arena) { + return mi_arena_block_size(arena->block_count); +} + +static mi_memid_t mi_memid_create_arena(mi_arena_id_t id, bool is_exclusive, mi_bitmap_index_t bitmap_index) { + mi_memid_t memid = _mi_memid_create(MI_MEM_ARENA); + memid.mem.arena.id = id; + memid.mem.arena.block_index = bitmap_index; + memid.mem.arena.is_exclusive = is_exclusive; + return memid; +} + +static bool mi_arena_memid_indices(mi_memid_t memid, size_t* arena_index, mi_bitmap_index_t* bitmap_index) { + mi_assert_internal(memid.memkind == MI_MEM_ARENA); + *arena_index = mi_arena_id_index(memid.mem.arena.id); + *bitmap_index = memid.mem.arena.block_index; + return memid.mem.arena.is_exclusive; +} + + + +/* ----------------------------------------------------------- + Special static area for mimalloc internal structures + to avoid OS calls (for example, for the arena metadata) +----------------------------------------------------------- */ + +#define MI_ARENA_STATIC_MAX (MI_INTPTR_SIZE*MI_KiB) // 8 KiB on 64-bit + +static mi_decl_cache_align uint8_t mi_arena_static[MI_ARENA_STATIC_MAX]; // must be cache aligned, see issue #895 +static mi_decl_cache_align _Atomic(size_t) mi_arena_static_top; + +static void* mi_arena_static_zalloc(size_t size, size_t alignment, mi_memid_t* memid) { + *memid = _mi_memid_none(); + if (size == 0 || size > MI_ARENA_STATIC_MAX) return NULL; + const size_t toplow = mi_atomic_load_relaxed(&mi_arena_static_top); + if ((toplow + size) > MI_ARENA_STATIC_MAX) return NULL; + + // try to claim space + if (alignment < MI_MAX_ALIGN_SIZE) { alignment = MI_MAX_ALIGN_SIZE; } + const size_t oversize = size + alignment - 1; + if (toplow + oversize > MI_ARENA_STATIC_MAX) 
return NULL; + const size_t oldtop = mi_atomic_add_acq_rel(&mi_arena_static_top, oversize); + size_t top = oldtop + oversize; + if (top > MI_ARENA_STATIC_MAX) { + // try to roll back, ok if this fails + mi_atomic_cas_strong_acq_rel(&mi_arena_static_top, &top, oldtop); + return NULL; + } + + // success + *memid = _mi_memid_create(MI_MEM_STATIC); + memid->initially_zero = true; + const size_t start = _mi_align_up(oldtop, alignment); + uint8_t* const p = &mi_arena_static[start]; + _mi_memzero_aligned(p, size); + return p; +} + +static void* mi_arena_meta_zalloc(size_t size, mi_memid_t* memid, mi_stats_t* stats) { + *memid = _mi_memid_none(); + + // try static + void* p = mi_arena_static_zalloc(size, MI_MAX_ALIGN_SIZE, memid); + if (p != NULL) return p; + + // or fall back to the OS + p = _mi_os_alloc(size, memid, stats); + if (p == NULL) return NULL; + + // zero the OS memory if needed + if (!memid->initially_zero) { + _mi_memzero_aligned(p, size); + memid->initially_zero = true; + } + return p; +} + +static void mi_arena_meta_free(void* p, mi_memid_t memid, size_t size, mi_stats_t* stats) { + if (mi_memkind_is_os(memid.memkind)) { + _mi_os_free(p, size, memid, stats); + } + else { + mi_assert(memid.memkind == MI_MEM_STATIC); + } +} + +static void* mi_arena_block_start(mi_arena_t* arena, mi_bitmap_index_t bindex) { + return (arena->start + mi_arena_block_size(mi_bitmap_index_bit(bindex))); +} + + +/* ----------------------------------------------------------- + Thread safe allocation in an arena +----------------------------------------------------------- */ + +// claim the `blocks_inuse` bits +static bool mi_arena_try_claim(mi_arena_t* arena, size_t blocks, mi_bitmap_index_t* bitmap_idx, mi_stats_t* stats) +{ + size_t idx = 0; // mi_atomic_load_relaxed(&arena->search_idx); // start from last search; ok to be relaxed as the exact start does not matter + if (_mi_bitmap_try_find_from_claim_across(arena->blocks_inuse, arena->field_count, idx, blocks, bitmap_idx, stats)) 
{ + mi_atomic_store_relaxed(&arena->search_idx, mi_bitmap_index_field(*bitmap_idx)); // start search from found location next time around + return true; + }; + return false; +} + + +/* ----------------------------------------------------------- + Arena Allocation +----------------------------------------------------------- */ + +static mi_decl_noinline void* mi_arena_try_alloc_at(mi_arena_t* arena, size_t arena_index, size_t needed_bcount, + bool commit, mi_memid_t* memid, mi_os_tld_t* tld) +{ + MI_UNUSED(arena_index); + mi_assert_internal(mi_arena_id_index(arena->id) == arena_index); + + mi_bitmap_index_t bitmap_index; + if (!mi_arena_try_claim(arena, needed_bcount, &bitmap_index, tld->stats)) return NULL; + + // claimed it! + void* p = mi_arena_block_start(arena, bitmap_index); + *memid = mi_memid_create_arena(arena->id, arena->exclusive, bitmap_index); + memid->is_pinned = arena->memid.is_pinned; + + // none of the claimed blocks should be scheduled for a decommit + if (arena->blocks_purge != NULL) { + // this is thread safe as a potential purge only decommits parts that are not yet claimed as used (in `blocks_inuse`). + _mi_bitmap_unclaim_across(arena->blocks_purge, arena->field_count, needed_bcount, bitmap_index); + } + + // set the dirty bits (todo: no need for an atomic op here?) 
+ if (arena->memid.initially_zero && arena->blocks_dirty != NULL) { + memid->initially_zero = _mi_bitmap_claim_across(arena->blocks_dirty, arena->field_count, needed_bcount, bitmap_index, NULL); + } + + // set commit state + if (arena->blocks_committed == NULL) { + // always committed + memid->initially_committed = true; + } + else if (commit) { + // commit requested, but the range may not be committed as a whole: ensure it is committed now + memid->initially_committed = true; + bool any_uncommitted; + _mi_bitmap_claim_across(arena->blocks_committed, arena->field_count, needed_bcount, bitmap_index, &any_uncommitted); + if (any_uncommitted) { + bool commit_zero = false; + if (!_mi_os_commit(p, mi_arena_block_size(needed_bcount), &commit_zero, tld->stats)) { + memid->initially_committed = false; + } + else { + if (commit_zero) { memid->initially_zero = true; } + } + } + } + else { + // no need to commit, but check if already fully committed + memid->initially_committed = _mi_bitmap_is_claimed_across(arena->blocks_committed, arena->field_count, needed_bcount, bitmap_index); + } + + return p; +} + +// allocate in a speficic arena +static void* mi_arena_try_alloc_at_id(mi_arena_id_t arena_id, bool match_numa_node, int numa_node, size_t size, size_t alignment, + bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld ) +{ + MI_UNUSED_RELEASE(alignment); + mi_assert_internal(alignment <= MI_SEGMENT_ALIGN); + const size_t bcount = mi_block_count_of_size(size); + const size_t arena_index = mi_arena_id_index(arena_id); + mi_assert_internal(arena_index < mi_atomic_load_relaxed(&mi_arena_count)); + mi_assert_internal(size <= mi_arena_block_size(bcount)); + + // Check arena suitability + mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[arena_index]); + if (arena == NULL) return NULL; + if (!allow_large && arena->is_large) return NULL; + if (!mi_arena_id_is_suitable(arena->id, arena->exclusive, req_arena_id)) return 
NULL; + if (req_arena_id == _mi_arena_id_none()) { // in not specific, check numa affinity + const bool numa_suitable = (numa_node < 0 || arena->numa_node < 0 || arena->numa_node == numa_node); + if (match_numa_node) { if (!numa_suitable) return NULL; } + else { if (numa_suitable) return NULL; } + } + + // try to allocate + void* p = mi_arena_try_alloc_at(arena, arena_index, bcount, commit, memid, tld); + mi_assert_internal(p == NULL || _mi_is_aligned(p, alignment)); + return p; +} + + +// allocate from an arena with fallback to the OS +static mi_decl_noinline void* mi_arena_try_alloc(int numa_node, size_t size, size_t alignment, + bool commit, bool allow_large, + mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld ) +{ + MI_UNUSED(alignment); + mi_assert_internal(alignment <= MI_SEGMENT_ALIGN); + const size_t max_arena = mi_atomic_load_relaxed(&mi_arena_count); + if mi_likely(max_arena == 0) return NULL; + + if (req_arena_id != _mi_arena_id_none()) { + // try a specific arena if requested + if (mi_arena_id_index(req_arena_id) < max_arena) { + void* p = mi_arena_try_alloc_at_id(req_arena_id, true, numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld); + if (p != NULL) return p; + } + } + else { + // try numa affine allocation + for (size_t i = 0; i < max_arena; i++) { + void* p = mi_arena_try_alloc_at_id(mi_arena_id_create(i), true, numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld); + if (p != NULL) return p; + } + + // try from another numa node instead.. 
+ if (numa_node >= 0) { // if numa_node was < 0 (no specific affinity requested), all arena's have been tried already + for (size_t i = 0; i < max_arena; i++) { + void* p = mi_arena_try_alloc_at_id(mi_arena_id_create(i), false /* only proceed if not numa local */, numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld); + if (p != NULL) return p; + } + } + } + return NULL; +} + +// try to reserve a fresh arena space +static bool mi_arena_reserve(size_t req_size, bool allow_large, mi_arena_id_t req_arena_id, mi_arena_id_t *arena_id) +{ + if (_mi_preloading()) return false; // use OS only while pre loading + if (req_arena_id != _mi_arena_id_none()) return false; + + const size_t arena_count = mi_atomic_load_acquire(&mi_arena_count); + if (arena_count > (MI_MAX_ARENAS - 4)) return false; + + size_t arena_reserve = mi_option_get_size(mi_option_arena_reserve); + if (arena_reserve == 0) return false; + + if (!_mi_os_has_virtual_reserve()) { + arena_reserve = arena_reserve/4; // be conservative if virtual reserve is not supported (for WASM for example) + } + arena_reserve = _mi_align_up(arena_reserve, MI_ARENA_BLOCK_SIZE); + if (arena_count >= 8 && arena_count <= 128) { + arena_reserve = ((size_t)1<<(arena_count/8)) * arena_reserve; // scale up the arena sizes exponentially + } + if (arena_reserve < req_size) return false; // should be able to at least handle the current allocation size + + // commit eagerly? + bool arena_commit = false; + if (mi_option_get(mi_option_arena_eager_commit) == 2) { arena_commit = _mi_os_has_overcommit(); } + else if (mi_option_get(mi_option_arena_eager_commit) == 1) { arena_commit = true; } + + return (mi_reserve_os_memory_ex(arena_reserve, arena_commit, allow_large, false /* exclusive? 
*/, arena_id) == 0); +} + + +void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset, bool commit, bool allow_large, + mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld) +{ + mi_assert_internal(memid != NULL && tld != NULL); + mi_assert_internal(size > 0); + *memid = _mi_memid_none(); + + const int numa_node = _mi_os_numa_node(tld); // current numa node + + // try to allocate in an arena if the alignment is small enough and the object is not too small (as for heap meta data) + if (!mi_option_is_enabled(mi_option_disallow_arena_alloc) || req_arena_id != _mi_arena_id_none()) { // is arena allocation allowed? + if (size >= MI_ARENA_MIN_OBJ_SIZE && alignment <= MI_SEGMENT_ALIGN && align_offset == 0) { + void* p = mi_arena_try_alloc(numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld); + if (p != NULL) return p; + + // otherwise, try to first eagerly reserve a new arena + if (req_arena_id == _mi_arena_id_none()) { + mi_arena_id_t arena_id = 0; + if (mi_arena_reserve(size, allow_large, req_arena_id, &arena_id)) { + // and try allocate in there + mi_assert_internal(req_arena_id == _mi_arena_id_none()); + p = mi_arena_try_alloc_at_id(arena_id, true, numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld); + if (p != NULL) return p; + } + } + } + } + + // if we cannot use OS allocation, return NULL + if (mi_option_is_enabled(mi_option_disallow_os_alloc) || req_arena_id != _mi_arena_id_none()) { + errno = ENOMEM; + return NULL; + } + + // finally, fall back to the OS + if (align_offset > 0) { + return _mi_os_alloc_aligned_at_offset(size, alignment, align_offset, commit, allow_large, memid, tld->stats); + } + else { + return _mi_os_alloc_aligned(size, alignment, commit, allow_large, memid, tld->stats); + } +} + +void* _mi_arena_alloc(size_t size, bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld) +{ + return _mi_arena_alloc_aligned(size, 
MI_ARENA_BLOCK_SIZE, 0, commit, allow_large, req_arena_id, memid, tld); +} + + +void* mi_arena_area(mi_arena_id_t arena_id, size_t* size) { + if (size != NULL) *size = 0; + size_t arena_index = mi_arena_id_index(arena_id); + if (arena_index >= MI_MAX_ARENAS) return NULL; + mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[arena_index]); + if (arena == NULL) return NULL; + if (size != NULL) { *size = mi_arena_block_size(arena->block_count); } + return arena->start; +} + + +/* ----------------------------------------------------------- + Arena purge +----------------------------------------------------------- */ + +static long mi_arena_purge_delay(void) { + // <0 = no purging allowed, 0=immediate purging, >0=milli-second delay + return (mi_option_get(mi_option_purge_delay) * mi_option_get(mi_option_arena_purge_mult)); +} + +// reset or decommit in an arena and update the committed/decommit bitmaps +// assumes we own the area (i.e. blocks_in_use is claimed by us) +static void mi_arena_purge(mi_arena_t* arena, size_t bitmap_idx, size_t blocks, mi_stats_t* stats) { + mi_assert_internal(arena->blocks_committed != NULL); + mi_assert_internal(arena->blocks_purge != NULL); + mi_assert_internal(!arena->memid.is_pinned); + const size_t size = mi_arena_block_size(blocks); + void* const p = mi_arena_block_start(arena, bitmap_idx); + bool needs_recommit; + if (_mi_bitmap_is_claimed_across(arena->blocks_committed, arena->field_count, blocks, bitmap_idx)) { + // all blocks are committed, we can purge freely + needs_recommit = _mi_os_purge(p, size, stats); + } + else { + // some blocks are not committed -- this can happen when a partially committed block is freed + // in `_mi_arena_free` and it is conservatively marked as uncommitted but still scheduled for a purge + // we need to ensure we do not try to reset (as that may be invalid for uncommitted memory), + // and also undo the decommit stats (as it was already adjusted) + 
mi_assert_internal(mi_option_is_enabled(mi_option_purge_decommits)); + needs_recommit = _mi_os_purge_ex(p, size, false /* allow reset? */, stats); + if (needs_recommit) { _mi_stat_increase(&_mi_stats_main.committed, size); } + } + + // clear the purged blocks + _mi_bitmap_unclaim_across(arena->blocks_purge, arena->field_count, blocks, bitmap_idx); + // update committed bitmap + if (needs_recommit) { + _mi_bitmap_unclaim_across(arena->blocks_committed, arena->field_count, blocks, bitmap_idx); + } +} + +// Schedule a purge. This is usually delayed to avoid repeated decommit/commit calls. +// Note: assumes we (still) own the area as we may purge immediately +static void mi_arena_schedule_purge(mi_arena_t* arena, size_t bitmap_idx, size_t blocks, mi_stats_t* stats) { + mi_assert_internal(arena->blocks_purge != NULL); + const long delay = mi_arena_purge_delay(); + if (delay < 0) return; // is purging allowed at all? + + if (_mi_preloading() || delay == 0) { + // decommit directly + mi_arena_purge(arena, bitmap_idx, blocks, stats); + } + else { + // schedule decommit + mi_msecs_t expire = mi_atomic_loadi64_relaxed(&arena->purge_expire); + if (expire != 0) { + mi_atomic_addi64_acq_rel(&arena->purge_expire, (mi_msecs_t)(delay/10)); // add smallish extra delay + } + else { + mi_atomic_storei64_release(&arena->purge_expire, _mi_clock_now() + delay); + } + _mi_bitmap_claim_across(arena->blocks_purge, arena->field_count, blocks, bitmap_idx, NULL); + } +} + +// purge a range of blocks +// return true if the full range was purged. +// assumes we own the area (i.e. 
blocks_in_use is claimed by us) +static bool mi_arena_purge_range(mi_arena_t* arena, size_t idx, size_t startidx, size_t bitlen, size_t purge, mi_stats_t* stats) { + const size_t endidx = startidx + bitlen; + size_t bitidx = startidx; + bool all_purged = false; + while (bitidx < endidx) { + // count consequetive ones in the purge mask + size_t count = 0; + while (bitidx + count < endidx && (purge & ((size_t)1 << (bitidx + count))) != 0) { + count++; + } + if (count > 0) { + // found range to be purged + const mi_bitmap_index_t range_idx = mi_bitmap_index_create(idx, bitidx); + mi_arena_purge(arena, range_idx, count, stats); + if (count == bitlen) { + all_purged = true; + } + } + bitidx += (count+1); // +1 to skip the zero bit (or end) + } + return all_purged; +} + +// returns true if anything was purged +static bool mi_arena_try_purge(mi_arena_t* arena, mi_msecs_t now, bool force, mi_stats_t* stats) +{ + if (arena->memid.is_pinned || arena->blocks_purge == NULL) return false; + mi_msecs_t expire = mi_atomic_loadi64_relaxed(&arena->purge_expire); + if (expire == 0) return false; + if (!force && expire > now) return false; + + // reset expire (if not already set concurrently) + mi_atomic_casi64_strong_acq_rel(&arena->purge_expire, &expire, (mi_msecs_t)0); + + // potential purges scheduled, walk through the bitmap + bool any_purged = false; + bool full_purge = true; + for (size_t i = 0; i < arena->field_count; i++) { + size_t purge = mi_atomic_load_relaxed(&arena->blocks_purge[i]); + if (purge != 0) { + size_t bitidx = 0; + while (bitidx < MI_BITMAP_FIELD_BITS) { + // find consequetive range of ones in the purge mask + size_t bitlen = 0; + while (bitidx + bitlen < MI_BITMAP_FIELD_BITS && (purge & ((size_t)1 << (bitidx + bitlen))) != 0) { + bitlen++; + } + // try to claim the longest range of corresponding in_use bits + const mi_bitmap_index_t bitmap_index = mi_bitmap_index_create(i, bitidx); + while( bitlen > 0 ) { + if (_mi_bitmap_try_claim(arena->blocks_inuse, 
arena->field_count, bitlen, bitmap_index)) { + break; + } + bitlen--; + } + // actual claimed bits at `in_use` + if (bitlen > 0) { + // read purge again now that we have the in_use bits + purge = mi_atomic_load_acquire(&arena->blocks_purge[i]); + if (!mi_arena_purge_range(arena, i, bitidx, bitlen, purge, stats)) { + full_purge = false; + } + any_purged = true; + // release the claimed `in_use` bits again + _mi_bitmap_unclaim(arena->blocks_inuse, arena->field_count, bitlen, bitmap_index); + } + bitidx += (bitlen+1); // +1 to skip the zero (or end) + } // while bitidx + } // purge != 0 + } + // if not fully purged, make sure to purge again in the future + if (!full_purge) { + const long delay = mi_arena_purge_delay(); + mi_msecs_t expected = 0; + mi_atomic_casi64_strong_acq_rel(&arena->purge_expire,&expected,_mi_clock_now() + delay); + } + return any_purged; +} + +static void mi_arenas_try_purge( bool force, bool visit_all, mi_stats_t* stats ) { + if (_mi_preloading() || mi_arena_purge_delay() <= 0) return; // nothing will be scheduled + + const size_t max_arena = mi_atomic_load_acquire(&mi_arena_count); + if (max_arena == 0) return; + + // allow only one thread to purge at a time + static mi_atomic_guard_t purge_guard; + mi_atomic_guard(&purge_guard) + { + mi_msecs_t now = _mi_clock_now(); + size_t max_purge_count = (visit_all ? 
max_arena : 1); + for (size_t i = 0; i < max_arena; i++) { + mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[i]); + if (arena != NULL) { + if (mi_arena_try_purge(arena, now, force, stats)) { + if (max_purge_count <= 1) break; + max_purge_count--; + } + } + } + } +} + + +/* ----------------------------------------------------------- + Arena free +----------------------------------------------------------- */ + +void _mi_arena_free(void* p, size_t size, size_t committed_size, mi_memid_t memid, mi_stats_t* stats) { + mi_assert_internal(size > 0 && stats != NULL); + mi_assert_internal(committed_size <= size); + if (p==NULL) return; + if (size==0) return; + const bool all_committed = (committed_size == size); + + if (mi_memkind_is_os(memid.memkind)) { + // was a direct OS allocation, pass through + if (!all_committed && committed_size > 0) { + // if partially committed, adjust the committed stats (as `_mi_os_free` will increase decommit by the full size) + _mi_stat_decrease(&_mi_stats_main.committed, committed_size); + } + _mi_os_free(p, size, memid, stats); + } + else if (memid.memkind == MI_MEM_ARENA) { + // allocated in an arena + size_t arena_idx; + size_t bitmap_idx; + mi_arena_memid_indices(memid, &arena_idx, &bitmap_idx); + mi_assert_internal(arena_idx < MI_MAX_ARENAS); + mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t,&mi_arenas[arena_idx]); + mi_assert_internal(arena != NULL); + const size_t blocks = mi_block_count_of_size(size); + + // checks + if (arena == NULL) { + _mi_error_message(EINVAL, "trying to free from an invalid arena: %p, size %zu, memid: 0x%zx\n", p, size, memid); + return; + } + mi_assert_internal(arena->field_count > mi_bitmap_index_field(bitmap_idx)); + if (arena->field_count <= mi_bitmap_index_field(bitmap_idx)) { + _mi_error_message(EINVAL, "trying to free from an invalid arena block: %p, size %zu, memid: 0x%zx\n", p, size, memid); + return; + } + + // need to set all memory to undefined as some parts may 
still be marked as no_access (like padding etc.) + mi_track_mem_undefined(p,size); + + // potentially decommit + if (arena->memid.is_pinned || arena->blocks_committed == NULL) { + mi_assert_internal(all_committed); + } + else { + mi_assert_internal(arena->blocks_committed != NULL); + mi_assert_internal(arena->blocks_purge != NULL); + + if (!all_committed) { + // mark the entire range as no longer committed (so we recommit the full range when re-using) + _mi_bitmap_unclaim_across(arena->blocks_committed, arena->field_count, blocks, bitmap_idx); + mi_track_mem_noaccess(p,size); + if (committed_size > 0) { + // if partially committed, adjust the committed stats (is it will be recommitted when re-using) + // in the delayed purge, we now need to not count a decommit if the range is not marked as committed. + _mi_stat_decrease(&_mi_stats_main.committed, committed_size); + } + // note: if not all committed, it may be that the purge will reset/decommit the entire range + // that contains already decommitted parts. Since purge consistently uses reset or decommit that + // works (as we should never reset decommitted parts). + } + // (delay) purge the entire range + mi_arena_schedule_purge(arena, bitmap_idx, blocks, stats); + } + + // and make it available to others again + bool all_inuse = _mi_bitmap_unclaim_across(arena->blocks_inuse, arena->field_count, blocks, bitmap_idx); + if (!all_inuse) { + _mi_error_message(EAGAIN, "trying to free an already freed arena block: %p, size %zu\n", p, size); + return; + }; + } + else { + // arena was none, external, or static; nothing to do + mi_assert_internal(memid.memkind < MI_MEM_OS); + } + + // purge expired decommits + mi_arenas_try_purge(false, false, stats); +} + +// destroy owned arenas; this is unsafe and should only be done using `mi_option_destroy_on_exit` +// for dynamic libraries that are unloaded and need to release all their allocated memory. 
+static void mi_arenas_unsafe_destroy(void) { + const size_t max_arena = mi_atomic_load_relaxed(&mi_arena_count); + size_t new_max_arena = 0; + for (size_t i = 0; i < max_arena; i++) { + mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[i]); + if (arena != NULL) { + if (arena->start != NULL && mi_memkind_is_os(arena->memid.memkind)) { + mi_atomic_store_ptr_release(mi_arena_t, &mi_arenas[i], NULL); + _mi_os_free(arena->start, mi_arena_size(arena), arena->memid, &_mi_stats_main); + } + else { + new_max_arena = i; + } + mi_arena_meta_free(arena, arena->meta_memid, arena->meta_size, &_mi_stats_main); + } + } + + // try to lower the max arena. + size_t expected = max_arena; + mi_atomic_cas_strong_acq_rel(&mi_arena_count, &expected, new_max_arena); +} + +// Purge the arenas; if `force_purge` is true, amenable parts are purged even if not yet expired +void _mi_arenas_collect(bool force_purge, mi_stats_t* stats) { + mi_arenas_try_purge(force_purge, force_purge /* visit all? */, stats); +} + +// destroy owned arenas; this is unsafe and should only be done using `mi_option_destroy_on_exit` +// for dynamic libraries that are unloaded and need to release all their allocated memory. +void _mi_arena_unsafe_destroy_all(mi_stats_t* stats) { + mi_arenas_unsafe_destroy(); + _mi_arenas_collect(true /* force purge */, stats); // purge non-owned arenas +} + +// Is a pointer inside any of our arenas? +bool _mi_arena_contains(const void* p) { + const size_t max_arena = mi_atomic_load_relaxed(&mi_arena_count); + for (size_t i = 0; i < max_arena; i++) { + mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[i]); + if (arena != NULL && arena->start <= (const uint8_t*)p && arena->start + mi_arena_block_size(arena->block_count) > (const uint8_t*)p) { + return true; + } + } + return false; +} + +/* ----------------------------------------------------------- + Abandoned blocks/segments. 
+ This is used to atomically abandon/reclaim segments + (and crosses the arena API but it is convenient to have here). + Abandoned segments still have live blocks; they get reclaimed + when a thread frees a block in it, or when a thread needs a fresh + segment; these threads scan the abandoned segments through + the arena bitmaps. +----------------------------------------------------------- */ + +// Maintain a count of all abandoned segments +static mi_decl_cache_align _Atomic(size_t)abandoned_count; + +size_t _mi_arena_segment_abandoned_count(void) { + return mi_atomic_load_relaxed(&abandoned_count); +} + +// reclaim a specific abandoned segment; `true` on success. +// sets the thread_id. +bool _mi_arena_segment_clear_abandoned(mi_segment_t* segment ) +{ + if (segment->memid.memkind != MI_MEM_ARENA) { + // not in an arena, consider it un-abandoned now. + // but we need to still claim it atomically -- we use the thread_id for that. + size_t expected = 0; + if (mi_atomic_cas_strong_acq_rel(&segment->thread_id, &expected, _mi_thread_id())) { + mi_atomic_decrement_relaxed(&abandoned_count); + return true; + } + else { + return false; + } + } + // arena segment: use the blocks_abandoned bitmap. 
+ size_t arena_idx; + size_t bitmap_idx; + mi_arena_memid_indices(segment->memid, &arena_idx, &bitmap_idx); + mi_assert_internal(arena_idx < MI_MAX_ARENAS); + mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[arena_idx]); + mi_assert_internal(arena != NULL); + bool was_marked = _mi_bitmap_unclaim(arena->blocks_abandoned, arena->field_count, 1, bitmap_idx); + if (was_marked) { + mi_assert_internal(mi_atomic_load_relaxed(&segment->thread_id) == 0); + mi_atomic_decrement_relaxed(&abandoned_count); + mi_atomic_store_release(&segment->thread_id, _mi_thread_id()); + } + // mi_assert_internal(was_marked); + mi_assert_internal(!was_marked || _mi_bitmap_is_claimed(arena->blocks_inuse, arena->field_count, 1, bitmap_idx)); + //mi_assert_internal(arena->blocks_committed == NULL || _mi_bitmap_is_claimed(arena->blocks_committed, arena->field_count, 1, bitmap_idx)); + return was_marked; +} + +// mark a specific segment as abandoned +// clears the thread_id. +void _mi_arena_segment_mark_abandoned(mi_segment_t* segment) +{ + mi_atomic_store_release(&segment->thread_id, 0); + mi_assert_internal(segment->used == segment->abandoned); + if (segment->memid.memkind != MI_MEM_ARENA) { + // not in an arena; count it as abandoned and return + mi_atomic_increment_relaxed(&abandoned_count); + return; + } + size_t arena_idx; + size_t bitmap_idx; + mi_arena_memid_indices(segment->memid, &arena_idx, &bitmap_idx); + mi_assert_internal(arena_idx < MI_MAX_ARENAS); + mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[arena_idx]); + mi_assert_internal(arena != NULL); + const bool was_unmarked = _mi_bitmap_claim(arena->blocks_abandoned, arena->field_count, 1, bitmap_idx, NULL); + if (was_unmarked) { mi_atomic_increment_relaxed(&abandoned_count); } + mi_assert_internal(was_unmarked); + mi_assert_internal(_mi_bitmap_is_claimed(arena->blocks_inuse, arena->field_count, 1, bitmap_idx)); +} + +// start a cursor at a randomized arena +void 
_mi_arena_field_cursor_init(mi_heap_t* heap, mi_arena_field_cursor_t* current) { + const size_t max_arena = mi_atomic_load_relaxed(&mi_arena_count); + current->start = (max_arena == 0 ? 0 : (mi_arena_id_t)( _mi_heap_random_next(heap) % max_arena)); + current->count = 0; + current->bitmap_idx = 0; +} + +// reclaim abandoned segments +// this does not set the thread id (so it appears as still abandoned) +mi_segment_t* _mi_arena_segment_clear_abandoned_next(mi_arena_field_cursor_t* previous ) +{ + const int max_arena = (int)mi_atomic_load_relaxed(&mi_arena_count); + if (max_arena <= 0 || mi_atomic_load_relaxed(&abandoned_count) == 0) return NULL; + + int count = previous->count; + size_t field_idx = mi_bitmap_index_field(previous->bitmap_idx); + size_t bit_idx = mi_bitmap_index_bit_in_field(previous->bitmap_idx) + 1; + // visit arena's (from previous) + for (; count < max_arena; count++, field_idx = 0, bit_idx = 0) { + mi_arena_id_t arena_idx = previous->start + count; + if (arena_idx >= max_arena) { arena_idx = arena_idx % max_arena; } // wrap around + mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[arena_idx]); + if (arena != NULL) { + // visit the abandoned fields (starting at previous_idx) + for ( ; field_idx < arena->field_count; field_idx++, bit_idx = 0) { + size_t field = mi_atomic_load_relaxed(&arena->blocks_abandoned[field_idx]); + if mi_unlikely(field != 0) { // skip zero fields quickly + // visit each set bit in the field (todo: maybe use `ctz` here?) 
+ for ( ; bit_idx < MI_BITMAP_FIELD_BITS; bit_idx++) { + // pre-check if the bit is set + size_t mask = ((size_t)1 << bit_idx); + if mi_unlikely((field & mask) == mask) { + mi_bitmap_index_t bitmap_idx = mi_bitmap_index_create(field_idx, bit_idx); + // try to reclaim it atomically + if (_mi_bitmap_unclaim(arena->blocks_abandoned, arena->field_count, 1, bitmap_idx)) { + mi_atomic_decrement_relaxed(&abandoned_count); + previous->bitmap_idx = bitmap_idx; + previous->count = count; + mi_assert_internal(_mi_bitmap_is_claimed(arena->blocks_inuse, arena->field_count, 1, bitmap_idx)); + mi_segment_t* segment = (mi_segment_t*)mi_arena_block_start(arena, bitmap_idx); + mi_assert_internal(mi_atomic_load_relaxed(&segment->thread_id) == 0); + //mi_assert_internal(arena->blocks_committed == NULL || _mi_bitmap_is_claimed(arena->blocks_committed, arena->field_count, 1, bitmap_idx)); + return segment; + } + } + } + } + } + } + } + // no more found + previous->bitmap_idx = 0; + previous->count = 0; + return NULL; +} + + +/* ----------------------------------------------------------- + Add an arena. 
+----------------------------------------------------------- */ + +static bool mi_arena_add(mi_arena_t* arena, mi_arena_id_t* arena_id, mi_stats_t* stats) { + mi_assert_internal(arena != NULL); + mi_assert_internal((uintptr_t)mi_atomic_load_ptr_relaxed(uint8_t,&arena->start) % MI_SEGMENT_ALIGN == 0); + mi_assert_internal(arena->block_count > 0); + if (arena_id != NULL) { *arena_id = -1; } + + size_t i = mi_atomic_increment_acq_rel(&mi_arena_count); + if (i >= MI_MAX_ARENAS) { + mi_atomic_decrement_acq_rel(&mi_arena_count); + return false; + } + _mi_stat_counter_increase(&stats->arena_count,1); + arena->id = mi_arena_id_create(i); + mi_atomic_store_ptr_release(mi_arena_t,&mi_arenas[i], arena); + if (arena_id != NULL) { *arena_id = arena->id; } + return true; +} + +static bool mi_manage_os_memory_ex2(void* start, size_t size, bool is_large, int numa_node, bool exclusive, mi_memid_t memid, mi_arena_id_t* arena_id) mi_attr_noexcept +{ + if (arena_id != NULL) *arena_id = _mi_arena_id_none(); + if (size < MI_ARENA_BLOCK_SIZE) return false; + + if (is_large) { + mi_assert_internal(memid.initially_committed && memid.is_pinned); + } + + const size_t bcount = size / MI_ARENA_BLOCK_SIZE; + const size_t fields = _mi_divide_up(bcount, MI_BITMAP_FIELD_BITS); + const size_t bitmaps = (memid.is_pinned ? 3 : 5); + const size_t asize = sizeof(mi_arena_t) + (bitmaps*fields*sizeof(mi_bitmap_field_t)); + mi_memid_t meta_memid; + mi_arena_t* arena = (mi_arena_t*)mi_arena_meta_zalloc(asize, &meta_memid, &_mi_stats_main); // TODO: can we avoid allocating from the OS? + if (arena == NULL) return false; + + // already zero'd due to zalloc + // _mi_memzero(arena, asize); + arena->id = _mi_arena_id_none(); + arena->memid = memid; + arena->exclusive = exclusive; + arena->meta_size = asize; + arena->meta_memid = meta_memid; + arena->block_count = bcount; + arena->field_count = fields; + arena->start = (uint8_t*)start; + arena->numa_node = numa_node; // TODO: or get the current numa node if -1? 
(now it allows anyone to allocate on -1) + arena->is_large = is_large; + arena->purge_expire = 0; + arena->search_idx = 0; + // consequetive bitmaps + arena->blocks_dirty = &arena->blocks_inuse[fields]; // just after inuse bitmap + arena->blocks_abandoned = &arena->blocks_inuse[2 * fields]; // just after dirty bitmap + arena->blocks_committed = (arena->memid.is_pinned ? NULL : &arena->blocks_inuse[3*fields]); // just after abandoned bitmap + arena->blocks_purge = (arena->memid.is_pinned ? NULL : &arena->blocks_inuse[4*fields]); // just after committed bitmap + // initialize committed bitmap? + if (arena->blocks_committed != NULL && arena->memid.initially_committed) { + memset((void*)arena->blocks_committed, 0xFF, fields*sizeof(mi_bitmap_field_t)); // cast to void* to avoid atomic warning + } + + // and claim leftover blocks if needed (so we never allocate there) + ptrdiff_t post = (fields * MI_BITMAP_FIELD_BITS) - bcount; + mi_assert_internal(post >= 0); + if (post > 0) { + // don't use leftover bits at the end + mi_bitmap_index_t postidx = mi_bitmap_index_create(fields - 1, MI_BITMAP_FIELD_BITS - post); + _mi_bitmap_claim(arena->blocks_inuse, fields, post, postidx, NULL); + } + return mi_arena_add(arena, arena_id, &_mi_stats_main); + +} + +bool mi_manage_os_memory_ex(void* start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept { + mi_memid_t memid = _mi_memid_create(MI_MEM_EXTERNAL); + memid.initially_committed = is_committed; + memid.initially_zero = is_zero; + memid.is_pinned = is_large; + return mi_manage_os_memory_ex2(start,size,is_large,numa_node,exclusive,memid, arena_id); +} + +// Reserve a range of regular OS memory +int mi_reserve_os_memory_ex(size_t size, bool commit, bool allow_large, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept { + if (arena_id != NULL) *arena_id = _mi_arena_id_none(); + size = _mi_align_up(size, MI_ARENA_BLOCK_SIZE); // at least one 
block + mi_memid_t memid; + void* start = _mi_os_alloc_aligned(size, MI_SEGMENT_ALIGN, commit, allow_large, &memid, &_mi_stats_main); + if (start == NULL) return ENOMEM; + const bool is_large = memid.is_pinned; // todo: use separate is_large field? + if (!mi_manage_os_memory_ex2(start, size, is_large, -1 /* numa node */, exclusive, memid, arena_id)) { + _mi_os_free_ex(start, size, commit, memid, &_mi_stats_main); + _mi_verbose_message("failed to reserve %zu KiB memory\n", _mi_divide_up(size, 1024)); + return ENOMEM; + } + _mi_verbose_message("reserved %zu KiB memory%s\n", _mi_divide_up(size, 1024), is_large ? " (in large os pages)" : ""); + return 0; +} + + +// Manage a range of regular OS memory +bool mi_manage_os_memory(void* start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node) mi_attr_noexcept { + return mi_manage_os_memory_ex(start, size, is_committed, is_large, is_zero, numa_node, false /* exclusive? */, NULL); +} + +// Reserve a range of regular OS memory +int mi_reserve_os_memory(size_t size, bool commit, bool allow_large) mi_attr_noexcept { + return mi_reserve_os_memory_ex(size, commit, allow_large, false, NULL); +} + + +/* ----------------------------------------------------------- + Debugging +----------------------------------------------------------- */ + +static size_t mi_debug_show_bitmap(const char* prefix, const char* header, size_t block_count, mi_bitmap_field_t* fields, size_t field_count ) { + _mi_verbose_message("%s%s:\n", prefix, header); + size_t bcount = 0; + size_t inuse_count = 0; + for (size_t i = 0; i < field_count; i++) { + char buf[MI_BITMAP_FIELD_BITS + 1]; + uintptr_t field = mi_atomic_load_relaxed(&fields[i]); + for (size_t bit = 0; bit < MI_BITMAP_FIELD_BITS; bit++, bcount++) { + if (bcount < block_count) { + bool inuse = ((((uintptr_t)1 << bit) & field) != 0); + if (inuse) inuse_count++; + buf[bit] = (inuse ? 
'x' : '.'); + } + else { + buf[bit] = ' '; + } + } + buf[MI_BITMAP_FIELD_BITS] = 0; + _mi_verbose_message("%s %s\n", prefix, buf); + } + _mi_verbose_message("%s total ('x'): %zu\n", prefix, inuse_count); + return inuse_count; +} + +void mi_debug_show_arenas(bool show_inuse, bool show_abandoned, bool show_purge) mi_attr_noexcept { + size_t max_arenas = mi_atomic_load_relaxed(&mi_arena_count); + size_t inuse_total = 0; + size_t abandoned_total = 0; + size_t purge_total = 0; + for (size_t i = 0; i < max_arenas; i++) { + mi_arena_t* arena = mi_atomic_load_ptr_relaxed(mi_arena_t, &mi_arenas[i]); + if (arena == NULL) break; + _mi_verbose_message("arena %zu: %zu blocks of size %zuMiB (in %zu fields) %s\n", i, arena->block_count, MI_ARENA_BLOCK_SIZE / MI_MiB, arena->field_count, (arena->memid.is_pinned ? ", pinned" : "")); + if (show_inuse) { + inuse_total += mi_debug_show_bitmap(" ", "inuse blocks", arena->block_count, arena->blocks_inuse, arena->field_count); + } + if (arena->blocks_committed != NULL) { + mi_debug_show_bitmap(" ", "committed blocks", arena->block_count, arena->blocks_committed, arena->field_count); + } + if (show_abandoned) { + abandoned_total += mi_debug_show_bitmap(" ", "abandoned blocks", arena->block_count, arena->blocks_abandoned, arena->field_count); + } + if (show_purge && arena->blocks_purge != NULL) { + purge_total += mi_debug_show_bitmap(" ", "purgeable blocks", arena->block_count, arena->blocks_purge, arena->field_count); + } + } + if (show_inuse) _mi_verbose_message("total inuse blocks : %zu\n", inuse_total); + if (show_abandoned) _mi_verbose_message("total abandoned blocks: %zu\n", abandoned_total); + if (show_purge) _mi_verbose_message("total purgeable blocks: %zu\n", purge_total); +} + + +/* ----------------------------------------------------------- + Reserve a huge page arena. 
+----------------------------------------------------------- */ +// reserve at a specific numa node +int mi_reserve_huge_os_pages_at_ex(size_t pages, int numa_node, size_t timeout_msecs, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept { + if (arena_id != NULL) *arena_id = -1; + if (pages==0) return 0; + if (numa_node < -1) numa_node = -1; + if (numa_node >= 0) numa_node = numa_node % _mi_os_numa_node_count(); + size_t hsize = 0; + size_t pages_reserved = 0; + mi_memid_t memid; + void* p = _mi_os_alloc_huge_os_pages(pages, numa_node, timeout_msecs, &pages_reserved, &hsize, &memid); + if (p==NULL || pages_reserved==0) { + _mi_warning_message("failed to reserve %zu GiB huge pages\n", pages); + return ENOMEM; + } + _mi_verbose_message("numa node %i: reserved %zu GiB huge pages (of the %zu GiB requested)\n", numa_node, pages_reserved, pages); + + if (!mi_manage_os_memory_ex2(p, hsize, true, numa_node, exclusive, memid, arena_id)) { + _mi_os_free(p, hsize, memid, &_mi_stats_main); + return ENOMEM; + } + return 0; +} + +int mi_reserve_huge_os_pages_at(size_t pages, int numa_node, size_t timeout_msecs) mi_attr_noexcept { + return mi_reserve_huge_os_pages_at_ex(pages, numa_node, timeout_msecs, false, NULL); +} + +// reserve huge pages evenly among the given number of numa nodes (or use the available ones as detected) +int mi_reserve_huge_os_pages_interleave(size_t pages, size_t numa_nodes, size_t timeout_msecs) mi_attr_noexcept { + if (pages == 0) return 0; + + // pages per numa node + size_t numa_count = (numa_nodes > 0 ? numa_nodes : _mi_os_numa_node_count()); + if (numa_count <= 0) numa_count = 1; + const size_t pages_per = pages / numa_count; + const size_t pages_mod = pages % numa_count; + const size_t timeout_per = (timeout_msecs==0 ? 
0 : (timeout_msecs / numa_count) + 50); + + // reserve evenly among numa nodes + for (size_t numa_node = 0; numa_node < numa_count && pages > 0; numa_node++) { + size_t node_pages = pages_per; // can be 0 + if (numa_node < pages_mod) node_pages++; + int err = mi_reserve_huge_os_pages_at(node_pages, (int)numa_node, timeout_per); + if (err) return err; + if (pages < node_pages) { + pages = 0; + } + else { + pages -= node_pages; + } + } + + return 0; +} + +int mi_reserve_huge_os_pages(size_t pages, double max_secs, size_t* pages_reserved) mi_attr_noexcept { + MI_UNUSED(max_secs); + _mi_warning_message("mi_reserve_huge_os_pages is deprecated: use mi_reserve_huge_os_pages_interleave/at instead\n"); + if (pages_reserved != NULL) *pages_reserved = 0; + int err = mi_reserve_huge_os_pages_interleave(pages, 0, (size_t)(max_secs * 1000.0)); + if (err==0 && pages_reserved!=NULL) *pages_reserved = pages; + return err; +} + diff --git a/yass/third_party/mimalloc/src/bitmap.c b/yass/third_party/mimalloc/src/bitmap.c new file mode 100644 index 0000000000..4b6be66bcd --- /dev/null +++ b/yass/third_party/mimalloc/src/bitmap.c @@ -0,0 +1,436 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2019-2023 Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. +-----------------------------------------------------------------------------*/ + +/* ---------------------------------------------------------------------------- +Concurrent bitmap that can set/reset sequences of bits atomically, +represented as an array of fields where each field is a machine word (`size_t`) + +There are two api's; the standard one cannot have sequences that cross +between the bitmap fields (and a sequence must be <= MI_BITMAP_FIELD_BITS). 
+ +The `_across` postfixed functions do allow sequences that can cross over +between the fields. (This is used in arena allocation) +---------------------------------------------------------------------------- */ + +#include "mimalloc.h" +#include "mimalloc/internal.h" +#include "bitmap.h" + +/* ----------------------------------------------------------- + Bitmap definition +----------------------------------------------------------- */ + +// The bit mask for a given number of blocks at a specified bit index. +static inline size_t mi_bitmap_mask_(size_t count, size_t bitidx) { + mi_assert_internal(count + bitidx <= MI_BITMAP_FIELD_BITS); + mi_assert_internal(count > 0); + if (count >= MI_BITMAP_FIELD_BITS) return MI_BITMAP_FIELD_FULL; + if (count == 0) return 0; + return ((((size_t)1 << count) - 1) << bitidx); +} + + +/* ----------------------------------------------------------- + Claim a bit sequence atomically +----------------------------------------------------------- */ + +// Try to atomically claim a sequence of `count` bits in a single +// field at `idx` in `bitmap`. Returns `true` on success. 
+inline bool _mi_bitmap_try_find_claim_field(mi_bitmap_t bitmap, size_t idx, const size_t count, mi_bitmap_index_t* bitmap_idx) +{ + mi_assert_internal(bitmap_idx != NULL); + mi_assert_internal(count <= MI_BITMAP_FIELD_BITS); + mi_assert_internal(count > 0); + mi_bitmap_field_t* field = &bitmap[idx]; + size_t map = mi_atomic_load_relaxed(field); + if (map==MI_BITMAP_FIELD_FULL) return false; // short cut + + // search for 0-bit sequence of length count + const size_t mask = mi_bitmap_mask_(count, 0); + const size_t bitidx_max = MI_BITMAP_FIELD_BITS - count; + +#ifdef MI_HAVE_FAST_BITSCAN + size_t bitidx = mi_ctz(~map); // quickly find the first zero bit if possible +#else + size_t bitidx = 0; // otherwise start at 0 +#endif + size_t m = (mask << bitidx); // invariant: m == mask shifted by bitidx + + // scan linearly for a free range of zero bits + while (bitidx <= bitidx_max) { + const size_t mapm = (map & m); + if (mapm == 0) { // are the mask bits free at bitidx? + mi_assert_internal((m >> bitidx) == mask); // no overflow? + const size_t newmap = (map | m); + mi_assert_internal((newmap^map) >> bitidx == mask); + if (!mi_atomic_cas_strong_acq_rel(field, &map, newmap)) { // TODO: use weak cas here? + // no success, another thread claimed concurrently.. keep going (with updated `map`) + continue; + } + else { + // success, we claimed the bits! + *bitmap_idx = mi_bitmap_index_create(idx, bitidx); + return true; + } + } + else { + // on to the next bit range +#ifdef MI_HAVE_FAST_BITSCAN + mi_assert_internal(mapm != 0); + const size_t shift = (count == 1 ? 1 : (MI_INTPTR_BITS - mi_clz(mapm) - bitidx)); + mi_assert_internal(shift > 0 && shift <= count); +#else + const size_t shift = 1; +#endif + bitidx += shift; + m <<= shift; + } + } + // no bits found + return false; +} + +// Find `count` bits of 0 and set them to 1 atomically; returns `true` on success. +// Starts at idx, and wraps around to search in all `bitmap_fields` fields. 
+// `count` can be at most MI_BITMAP_FIELD_BITS and will never cross fields. +bool _mi_bitmap_try_find_from_claim(mi_bitmap_t bitmap, const size_t bitmap_fields, const size_t start_field_idx, const size_t count, mi_bitmap_index_t* bitmap_idx) { + size_t idx = start_field_idx; + for (size_t visited = 0; visited < bitmap_fields; visited++, idx++) { + if (idx >= bitmap_fields) { idx = 0; } // wrap + if (_mi_bitmap_try_find_claim_field(bitmap, idx, count, bitmap_idx)) { + return true; + } + } + return false; +} + +// Like _mi_bitmap_try_find_from_claim but with an extra predicate that must be fullfilled +bool _mi_bitmap_try_find_from_claim_pred(mi_bitmap_t bitmap, const size_t bitmap_fields, + const size_t start_field_idx, const size_t count, + mi_bitmap_pred_fun_t pred_fun, void* pred_arg, + mi_bitmap_index_t* bitmap_idx) { + size_t idx = start_field_idx; + for (size_t visited = 0; visited < bitmap_fields; visited++, idx++) { + if (idx >= bitmap_fields) idx = 0; // wrap + if (_mi_bitmap_try_find_claim_field(bitmap, idx, count, bitmap_idx)) { + if (pred_fun == NULL || pred_fun(*bitmap_idx, pred_arg)) { + return true; + } + // predicate returned false, unclaim and look further + _mi_bitmap_unclaim(bitmap, bitmap_fields, count, *bitmap_idx); + } + } + return false; +} + +// Set `count` bits at `bitmap_idx` to 0 atomically +// Returns `true` if all `count` bits were 1 previously. 
+bool _mi_bitmap_unclaim(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx) { + const size_t idx = mi_bitmap_index_field(bitmap_idx); + const size_t bitidx = mi_bitmap_index_bit_in_field(bitmap_idx); + const size_t mask = mi_bitmap_mask_(count, bitidx); + mi_assert_internal(bitmap_fields > idx); MI_UNUSED(bitmap_fields); + // mi_assert_internal((bitmap[idx] & mask) == mask); + const size_t prev = mi_atomic_and_acq_rel(&bitmap[idx], ~mask); + return ((prev & mask) == mask); +} + + +// Set `count` bits at `bitmap_idx` to 1 atomically +// Returns `true` if all `count` bits were 0 previously. `any_zero` is `true` if there was at least one zero bit. +bool _mi_bitmap_claim(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, bool* any_zero) { + const size_t idx = mi_bitmap_index_field(bitmap_idx); + const size_t bitidx = mi_bitmap_index_bit_in_field(bitmap_idx); + const size_t mask = mi_bitmap_mask_(count, bitidx); + mi_assert_internal(bitmap_fields > idx); MI_UNUSED(bitmap_fields); + //mi_assert_internal(any_zero != NULL || (bitmap[idx] & mask) == 0); + size_t prev = mi_atomic_or_acq_rel(&bitmap[idx], mask); + if (any_zero != NULL) { *any_zero = ((prev & mask) != mask); } + return ((prev & mask) == 0); +} + +// Returns `true` if all `count` bits were 1. `any_ones` is `true` if there was at least one bit set to one. 
+static bool mi_bitmap_is_claimedx(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, bool* any_ones) { + const size_t idx = mi_bitmap_index_field(bitmap_idx); + const size_t bitidx = mi_bitmap_index_bit_in_field(bitmap_idx); + const size_t mask = mi_bitmap_mask_(count, bitidx); + mi_assert_internal(bitmap_fields > idx); MI_UNUSED(bitmap_fields); + const size_t field = mi_atomic_load_relaxed(&bitmap[idx]); + if (any_ones != NULL) { *any_ones = ((field & mask) != 0); } + return ((field & mask) == mask); +} + +// Try to set `count` bits at `bitmap_idx` from 0 to 1 atomically. +// Returns `true` if successful when all previous `count` bits were 0. +bool _mi_bitmap_try_claim(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx) { + const size_t idx = mi_bitmap_index_field(bitmap_idx); + const size_t bitidx = mi_bitmap_index_bit_in_field(bitmap_idx); + const size_t mask = mi_bitmap_mask_(count, bitidx); + mi_assert_internal(bitmap_fields > idx); MI_UNUSED(bitmap_fields); + size_t expected = mi_atomic_load_relaxed(&bitmap[idx]); + do { + if ((expected & mask) != 0) return false; + } + while (!mi_atomic_cas_strong_acq_rel(&bitmap[idx], &expected, expected | mask)); + mi_assert_internal((expected & mask) == 0); + return true; +} + + +bool _mi_bitmap_is_claimed(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx) { + return mi_bitmap_is_claimedx(bitmap, bitmap_fields, count, bitmap_idx, NULL); +} + +bool _mi_bitmap_is_any_claimed(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx) { + bool any_ones; + mi_bitmap_is_claimedx(bitmap, bitmap_fields, count, bitmap_idx, &any_ones); + return any_ones; +} + + +//-------------------------------------------------------------------------- +// the `_across` functions work on bitmaps where sequences can cross over +// between the fields. 
This is used in arena allocation +//-------------------------------------------------------------------------- + +// Try to atomically claim a sequence of `count` bits starting from the field +// at `idx` in `bitmap` and crossing into subsequent fields. Returns `true` on success. +// Only needs to consider crossing into the next fields (see `mi_bitmap_try_find_from_claim_across`) +static bool mi_bitmap_try_find_claim_field_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t idx, const size_t count, const size_t retries, mi_bitmap_index_t* bitmap_idx, mi_stats_t* stats) +{ + mi_assert_internal(bitmap_idx != NULL); + + // check initial trailing zeros + mi_bitmap_field_t* field = &bitmap[idx]; + size_t map = mi_atomic_load_relaxed(field); + const size_t initial = mi_clz(map); // count of initial zeros starting at idx + mi_assert_internal(initial <= MI_BITMAP_FIELD_BITS); + if (initial == 0) return false; + if (initial >= count) return _mi_bitmap_try_find_claim_field(bitmap, idx, count, bitmap_idx); // no need to cross fields (this case won't happen for us) + if (_mi_divide_up(count - initial, MI_BITMAP_FIELD_BITS) >= (bitmap_fields - idx)) return false; // not enough entries + + // scan ahead + size_t found = initial; + size_t mask = 0; // mask bits for the final field + while(found < count) { + field++; + map = mi_atomic_load_relaxed(field); + const size_t mask_bits = (found + MI_BITMAP_FIELD_BITS <= count ? 
MI_BITMAP_FIELD_BITS : (count - found)); + mi_assert_internal(mask_bits > 0 && mask_bits <= MI_BITMAP_FIELD_BITS); + mask = mi_bitmap_mask_(mask_bits, 0); + if ((map & mask) != 0) return false; // some part is already claimed + found += mask_bits; + } + mi_assert_internal(field < &bitmap[bitmap_fields]); + + // we found a range of contiguous zeros up to the final field; mask contains mask in the final field + // now try to claim the range atomically + mi_bitmap_field_t* const final_field = field; + const size_t final_mask = mask; + mi_bitmap_field_t* const initial_field = &bitmap[idx]; + const size_t initial_idx = MI_BITMAP_FIELD_BITS - initial; + const size_t initial_mask = mi_bitmap_mask_(initial, initial_idx); + + // initial field + size_t newmap; + field = initial_field; + map = mi_atomic_load_relaxed(field); + do { + newmap = (map | initial_mask); + if ((map & initial_mask) != 0) { goto rollback; }; + } while (!mi_atomic_cas_strong_acq_rel(field, &map, newmap)); + + // intermediate fields + while (++field < final_field) { + newmap = MI_BITMAP_FIELD_FULL; + map = 0; + if (!mi_atomic_cas_strong_acq_rel(field, &map, newmap)) { goto rollback; } + } + + // final field + mi_assert_internal(field == final_field); + map = mi_atomic_load_relaxed(field); + do { + newmap = (map | final_mask); + if ((map & final_mask) != 0) { goto rollback; } + } while (!mi_atomic_cas_strong_acq_rel(field, &map, newmap)); + + // claimed! 
+ mi_stat_counter_increase(stats->arena_crossover_count,1); + *bitmap_idx = mi_bitmap_index_create(idx, initial_idx); + return true; + +rollback: + // roll back intermediate fields + // (we just failed to claim `field` so decrement first) + while (--field > initial_field) { + newmap = 0; + map = MI_BITMAP_FIELD_FULL; + mi_assert_internal(mi_atomic_load_relaxed(field) == map); + mi_atomic_store_release(field, newmap); + } + if (field == initial_field) { // (if we failed on the initial field, `field + 1 == initial_field`) + map = mi_atomic_load_relaxed(field); + do { + mi_assert_internal((map & initial_mask) == initial_mask); + newmap = (map & ~initial_mask); + } while (!mi_atomic_cas_strong_acq_rel(field, &map, newmap)); + } + mi_stat_counter_increase(stats->arena_rollback_count,1); + // retry? (we make a recursive call instead of goto to be able to use const declarations) + if (retries <= 2) { + return mi_bitmap_try_find_claim_field_across(bitmap, bitmap_fields, idx, count, retries+1, bitmap_idx, stats); + } + else { + return false; + } +} + + +// Find `count` bits of zeros and set them to 1 atomically; returns `true` on success. +// Starts at idx, and wraps around to search in all `bitmap_fields` fields. 
+bool _mi_bitmap_try_find_from_claim_across(mi_bitmap_t bitmap, const size_t bitmap_fields, const size_t start_field_idx, const size_t count, mi_bitmap_index_t* bitmap_idx, mi_stats_t* stats) { + mi_assert_internal(count > 0); + if (count <= 2) { + // we don't bother with crossover fields for small counts + return _mi_bitmap_try_find_from_claim(bitmap, bitmap_fields, start_field_idx, count, bitmap_idx); + } + + // visit the fields + size_t idx = start_field_idx; + for (size_t visited = 0; visited < bitmap_fields; visited++, idx++) { + if (idx >= bitmap_fields) { idx = 0; } // wrap + // first try to claim inside a field + /* + if (count <= MI_BITMAP_FIELD_BITS) { + if (_mi_bitmap_try_find_claim_field(bitmap, idx, count, bitmap_idx)) { + return true; + } + } + */ + // if that fails, then try to claim across fields + if (mi_bitmap_try_find_claim_field_across(bitmap, bitmap_fields, idx, count, 0, bitmap_idx, stats)) { + return true; + } + } + return false; +} + +// Helper for masks across fields; returns the mid count, post_mask may be 0 +static size_t mi_bitmap_mask_across(mi_bitmap_index_t bitmap_idx, size_t bitmap_fields, size_t count, size_t* pre_mask, size_t* mid_mask, size_t* post_mask) { + MI_UNUSED(bitmap_fields); + const size_t bitidx = mi_bitmap_index_bit_in_field(bitmap_idx); + if mi_likely(bitidx + count <= MI_BITMAP_FIELD_BITS) { + *pre_mask = mi_bitmap_mask_(count, bitidx); + *mid_mask = 0; + *post_mask = 0; + mi_assert_internal(mi_bitmap_index_field(bitmap_idx) < bitmap_fields); + return 0; + } + else { + const size_t pre_bits = MI_BITMAP_FIELD_BITS - bitidx; + mi_assert_internal(pre_bits < count); + *pre_mask = mi_bitmap_mask_(pre_bits, bitidx); + count -= pre_bits; + const size_t mid_count = (count / MI_BITMAP_FIELD_BITS); + *mid_mask = MI_BITMAP_FIELD_FULL; + count %= MI_BITMAP_FIELD_BITS; + *post_mask = (count==0 ? 0 : mi_bitmap_mask_(count, 0)); + mi_assert_internal(mi_bitmap_index_field(bitmap_idx) + mid_count + (count==0 ? 
0 : 1) < bitmap_fields); + return mid_count; + } +} + +// Set `count` bits at `bitmap_idx` to 0 atomically +// Returns `true` if all `count` bits were 1 previously. +bool _mi_bitmap_unclaim_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx) { + size_t idx = mi_bitmap_index_field(bitmap_idx); + size_t pre_mask; + size_t mid_mask; + size_t post_mask; + size_t mid_count = mi_bitmap_mask_across(bitmap_idx, bitmap_fields, count, &pre_mask, &mid_mask, &post_mask); + bool all_one = true; + mi_bitmap_field_t* field = &bitmap[idx]; + size_t prev = mi_atomic_and_acq_rel(field++, ~pre_mask); // clear first part + if ((prev & pre_mask) != pre_mask) all_one = false; + while(mid_count-- > 0) { + prev = mi_atomic_and_acq_rel(field++, ~mid_mask); // clear mid part + if ((prev & mid_mask) != mid_mask) all_one = false; + } + if (post_mask!=0) { + prev = mi_atomic_and_acq_rel(field, ~post_mask); // clear end part + if ((prev & post_mask) != post_mask) all_one = false; + } + return all_one; +} + +// Set `count` bits at `bitmap_idx` to 1 atomically +// Returns `true` if all `count` bits were 0 previously. `any_zero` is `true` if there was at least one zero bit. 
+bool _mi_bitmap_claim_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, bool* pany_zero) { + size_t idx = mi_bitmap_index_field(bitmap_idx); + size_t pre_mask; + size_t mid_mask; + size_t post_mask; + size_t mid_count = mi_bitmap_mask_across(bitmap_idx, bitmap_fields, count, &pre_mask, &mid_mask, &post_mask); + bool all_zero = true; + bool any_zero = false; + _Atomic(size_t)*field = &bitmap[idx]; + size_t prev = mi_atomic_or_acq_rel(field++, pre_mask); + if ((prev & pre_mask) != 0) all_zero = false; + if ((prev & pre_mask) != pre_mask) any_zero = true; + while (mid_count-- > 0) { + prev = mi_atomic_or_acq_rel(field++, mid_mask); + if ((prev & mid_mask) != 0) all_zero = false; + if ((prev & mid_mask) != mid_mask) any_zero = true; + } + if (post_mask!=0) { + prev = mi_atomic_or_acq_rel(field, post_mask); + if ((prev & post_mask) != 0) all_zero = false; + if ((prev & post_mask) != post_mask) any_zero = true; + } + if (pany_zero != NULL) { *pany_zero = any_zero; } + return all_zero; +} + + +// Returns `true` if all `count` bits were 1. +// `any_ones` is `true` if there was at least one bit set to one. 
+static bool mi_bitmap_is_claimedx_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, bool* pany_ones) { + size_t idx = mi_bitmap_index_field(bitmap_idx); + size_t pre_mask; + size_t mid_mask; + size_t post_mask; + size_t mid_count = mi_bitmap_mask_across(bitmap_idx, bitmap_fields, count, &pre_mask, &mid_mask, &post_mask); + bool all_ones = true; + bool any_ones = false; + mi_bitmap_field_t* field = &bitmap[idx]; + size_t prev = mi_atomic_load_relaxed(field++); + if ((prev & pre_mask) != pre_mask) all_ones = false; + if ((prev & pre_mask) != 0) any_ones = true; + while (mid_count-- > 0) { + prev = mi_atomic_load_relaxed(field++); + if ((prev & mid_mask) != mid_mask) all_ones = false; + if ((prev & mid_mask) != 0) any_ones = true; + } + if (post_mask!=0) { + prev = mi_atomic_load_relaxed(field); + if ((prev & post_mask) != post_mask) all_ones = false; + if ((prev & post_mask) != 0) any_ones = true; + } + if (pany_ones != NULL) { *pany_ones = any_ones; } + return all_ones; +} + +bool _mi_bitmap_is_claimed_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx) { + return mi_bitmap_is_claimedx_across(bitmap, bitmap_fields, count, bitmap_idx, NULL); +} + +bool _mi_bitmap_is_any_claimed_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx) { + bool any_ones; + mi_bitmap_is_claimedx_across(bitmap, bitmap_fields, count, bitmap_idx, &any_ones); + return any_ones; +} diff --git a/yass/third_party/mimalloc/src/bitmap.h b/yass/third_party/mimalloc/src/bitmap.h new file mode 100644 index 0000000000..d8316b83f4 --- /dev/null +++ b/yass/third_party/mimalloc/src/bitmap.h @@ -0,0 +1,115 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2019-2023 Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. 
A copy of the license can be found in the file +"LICENSE" at the root of this distribution. +-----------------------------------------------------------------------------*/ + +/* ---------------------------------------------------------------------------- +Concurrent bitmap that can set/reset sequences of bits atomically, +represented as an array of fields where each field is a machine word (`size_t`) + +There are two api's; the standard one cannot have sequences that cross +between the bitmap fields (and a sequence must be <= MI_BITMAP_FIELD_BITS). +(this is used in region allocation) + +The `_across` postfixed functions do allow sequences that can cross over +between the fields. (This is used in arena allocation) +---------------------------------------------------------------------------- */ +#pragma once +#ifndef MI_BITMAP_H +#define MI_BITMAP_H + +/* ----------------------------------------------------------- + Bitmap definition +----------------------------------------------------------- */ + +#define MI_BITMAP_FIELD_BITS (8*MI_SIZE_SIZE) +#define MI_BITMAP_FIELD_FULL (~((size_t)0)) // all bits set + +// An atomic bitmap of `size_t` fields +typedef _Atomic(size_t) mi_bitmap_field_t; +typedef mi_bitmap_field_t* mi_bitmap_t; + +// A bitmap index is the index of the bit in a bitmap. +typedef size_t mi_bitmap_index_t; + +// Create a bit index. +static inline mi_bitmap_index_t mi_bitmap_index_create(size_t idx, size_t bitidx) { + mi_assert_internal(bitidx < MI_BITMAP_FIELD_BITS); + return (idx*MI_BITMAP_FIELD_BITS) + bitidx; +} + +// Create a bit index. +static inline mi_bitmap_index_t mi_bitmap_index_create_from_bit(size_t full_bitidx) { + return mi_bitmap_index_create(full_bitidx / MI_BITMAP_FIELD_BITS, full_bitidx % MI_BITMAP_FIELD_BITS); +} + +// Get the field index from a bit index. 
+static inline size_t mi_bitmap_index_field(mi_bitmap_index_t bitmap_idx) { + return (bitmap_idx / MI_BITMAP_FIELD_BITS); +} + +// Get the bit index in a bitmap field +static inline size_t mi_bitmap_index_bit_in_field(mi_bitmap_index_t bitmap_idx) { + return (bitmap_idx % MI_BITMAP_FIELD_BITS); +} + +// Get the full bit index +static inline size_t mi_bitmap_index_bit(mi_bitmap_index_t bitmap_idx) { + return bitmap_idx; +} + +/* ----------------------------------------------------------- + Claim a bit sequence atomically +----------------------------------------------------------- */ + +// Try to atomically claim a sequence of `count` bits in a single +// field at `idx` in `bitmap`. Returns `true` on success. +bool _mi_bitmap_try_find_claim_field(mi_bitmap_t bitmap, size_t idx, const size_t count, mi_bitmap_index_t* bitmap_idx); + +// Starts at idx, and wraps around to search in all `bitmap_fields` fields. +// For now, `count` can be at most MI_BITMAP_FIELD_BITS and will never cross fields. +bool _mi_bitmap_try_find_from_claim(mi_bitmap_t bitmap, const size_t bitmap_fields, const size_t start_field_idx, const size_t count, mi_bitmap_index_t* bitmap_idx); + +// Like _mi_bitmap_try_find_from_claim but with an extra predicate that must be fullfilled +typedef bool (mi_cdecl *mi_bitmap_pred_fun_t)(mi_bitmap_index_t bitmap_idx, void* pred_arg); +bool _mi_bitmap_try_find_from_claim_pred(mi_bitmap_t bitmap, const size_t bitmap_fields, const size_t start_field_idx, const size_t count, mi_bitmap_pred_fun_t pred_fun, void* pred_arg, mi_bitmap_index_t* bitmap_idx); + +// Set `count` bits at `bitmap_idx` to 0 atomically +// Returns `true` if all `count` bits were 1 previously. +bool _mi_bitmap_unclaim(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx); + +// Try to set `count` bits at `bitmap_idx` from 0 to 1 atomically. +// Returns `true` if successful when all previous `count` bits were 0. 
+bool _mi_bitmap_try_claim(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx); + +// Set `count` bits at `bitmap_idx` to 1 atomically +// Returns `true` if all `count` bits were 0 previously. `any_zero` is `true` if there was at least one zero bit. +bool _mi_bitmap_claim(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, bool* any_zero); + +bool _mi_bitmap_is_claimed(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx); +bool _mi_bitmap_is_any_claimed(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx); + + +//-------------------------------------------------------------------------- +// the `_across` functions work on bitmaps where sequences can cross over +// between the fields. This is used in arena allocation +//-------------------------------------------------------------------------- + +// Find `count` bits of zeros and set them to 1 atomically; returns `true` on success. +// Starts at idx, and wraps around to search in all `bitmap_fields` fields. +bool _mi_bitmap_try_find_from_claim_across(mi_bitmap_t bitmap, const size_t bitmap_fields, const size_t start_field_idx, const size_t count, mi_bitmap_index_t* bitmap_idx, mi_stats_t* stats); + +// Set `count` bits at `bitmap_idx` to 0 atomically +// Returns `true` if all `count` bits were 1 previously. +bool _mi_bitmap_unclaim_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx); + +// Set `count` bits at `bitmap_idx` to 1 atomically +// Returns `true` if all `count` bits were 0 previously. `any_zero` is `true` if there was at least one zero bit. 
+bool _mi_bitmap_claim_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, bool* pany_zero); + +bool _mi_bitmap_is_claimed_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx); +bool _mi_bitmap_is_any_claimed_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx); + +#endif diff --git a/yass/third_party/mimalloc/src/free.c b/yass/third_party/mimalloc/src/free.c new file mode 100644 index 0000000000..b9cb634616 --- /dev/null +++ b/yass/third_party/mimalloc/src/free.c @@ -0,0 +1,530 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2018-2024, Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. +-----------------------------------------------------------------------------*/ +#if !defined(MI_IN_ALLOC_C) +#error "this file should be included from 'alloc.c' (so aliases can work from alloc-override)" +// add includes help an IDE +#include "mimalloc.h" +#include "mimalloc/internal.h" +#include "mimalloc/atomic.h" +#include "mimalloc/prim.h" // _mi_prim_thread_id() +#endif + +// forward declarations +static void mi_check_padding(const mi_page_t* page, const mi_block_t* block); +static bool mi_check_is_double_free(const mi_page_t* page, const mi_block_t* block); +static size_t mi_page_usable_size_of(const mi_page_t* page, const mi_block_t* block); +static void mi_stat_free(const mi_page_t* page, const mi_block_t* block); + + +// ------------------------------------------------------ +// Free +// ------------------------------------------------------ + +// forward declaration of multi-threaded free (`_mt`) (or free in huge block if compiled with MI_HUGE_PAGE_ABANDON) +static mi_decl_noinline void mi_free_block_mt(mi_page_t* page, 
mi_segment_t* segment, mi_block_t* block); + +// regular free of a (thread local) block pointer +// fast path written carefully to prevent spilling on the stack +static inline void mi_free_block_local(mi_page_t* page, mi_block_t* block, bool track_stats, bool check_full) +{ + // checks + if mi_unlikely(mi_check_is_double_free(page, block)) return; + mi_check_padding(page, block); + if (track_stats) { mi_stat_free(page, block); } + #if (MI_DEBUG>0) && !MI_TRACK_ENABLED && !MI_TSAN + if (!mi_page_is_huge(page)) { // huge page content may be already decommitted + memset(block, MI_DEBUG_FREED, mi_page_block_size(page)); + } + #endif + if (track_stats) { mi_track_free_size(block, mi_page_usable_size_of(page, block)); } // faster then mi_usable_size as we already know the page and that p is unaligned + + // actual free: push on the local free list + mi_block_set_next(page, block, page->local_free); + page->local_free = block; + if mi_unlikely(--page->used == 0) { + _mi_page_retire(page); + } + else if mi_unlikely(check_full && mi_page_is_in_full(page)) { + _mi_page_unfull(page); + } +} + +// Adjust a block that was allocated aligned, to the actual start of the block in the page. +// note: this can be called from `mi_free_generic_mt` where a non-owning thread accesses the +// `page_start` and `block_size` fields; however these are constant and the page won't be +// deallocated (as the block we are freeing keeps it alive) and thus safe to read concurrently. 
+mi_block_t* _mi_page_ptr_unalign(const mi_page_t* page, const void* p) { + mi_assert_internal(page!=NULL && p!=NULL); + + size_t diff = (uint8_t*)p - page->page_start; + size_t adjust; + if mi_likely(page->block_size_shift != 0) { + adjust = diff & (((size_t)1 << page->block_size_shift) - 1); + } + else { + adjust = diff % mi_page_block_size(page); + } + + return (mi_block_t*)((uintptr_t)p - adjust); +} + +// free a local pointer (page parameter comes first for better codegen) +static void mi_decl_noinline mi_free_generic_local(mi_page_t* page, mi_segment_t* segment, void* p) mi_attr_noexcept { + MI_UNUSED(segment); + mi_block_t* const block = (mi_page_has_aligned(page) ? _mi_page_ptr_unalign(page, p) : (mi_block_t*)p); + mi_free_block_local(page, block, true /* track stats */, true /* check for a full page */); +} + +// free a pointer owned by another thread (page parameter comes first for better codegen) +static void mi_decl_noinline mi_free_generic_mt(mi_page_t* page, mi_segment_t* segment, void* p) mi_attr_noexcept { + mi_block_t* const block = _mi_page_ptr_unalign(page, p); // don't check `has_aligned` flag to avoid a race (issue #865) + mi_free_block_mt(page, segment, block); +} + +// generic free (for runtime integration) +void mi_decl_noinline _mi_free_generic(mi_segment_t* segment, mi_page_t* page, bool is_local, void* p) mi_attr_noexcept { + if (is_local) mi_free_generic_local(page,segment,p); + else mi_free_generic_mt(page,segment,p); +} + +// Get the segment data belonging to a pointer +// This is just a single `and` in release mode but does further checks in debug mode +// (and secure mode) to see if this was a valid pointer. 
+static inline mi_segment_t* mi_checked_ptr_segment(const void* p, const char* msg) +{ + MI_UNUSED(msg); + +#if (MI_DEBUG>0) + if mi_unlikely(((uintptr_t)p & (MI_INTPTR_SIZE - 1)) != 0) { + _mi_error_message(EINVAL, "%s: invalid (unaligned) pointer: %p\n", msg, p); + return NULL; + } +#endif + + mi_segment_t* const segment = _mi_ptr_segment(p); + if mi_unlikely(segment==NULL) return segment; + +#if (MI_DEBUG>0) + if mi_unlikely(!mi_is_in_heap_region(p)) { + #if (MI_INTPTR_SIZE == 8 && defined(__linux__)) + if (((uintptr_t)p >> 40) != 0x7F) { // linux tends to align large blocks above 0x7F000000000 (issue #640) + #else + { + #endif + _mi_warning_message("%s: pointer might not point to a valid heap region: %p\n" + "(this may still be a valid very large allocation (over 64MiB))\n", msg, p); + if mi_likely(_mi_ptr_cookie(segment) == segment->cookie) { + _mi_warning_message("(yes, the previous pointer %p was valid after all)\n", p); + } + } + } +#endif +#if (MI_DEBUG>0 || MI_SECURE>=4) + if mi_unlikely(_mi_ptr_cookie(segment) != segment->cookie) { + _mi_error_message(EINVAL, "%s: pointer does not point to a valid heap space: %p\n", msg, p); + return NULL; + } +#endif + + return segment; +} + +// Free a block +// Fast path written carefully to prevent register spilling on the stack +void mi_free(void* p) mi_attr_noexcept +{ + mi_segment_t* const segment = mi_checked_ptr_segment(p,"mi_free"); + if mi_unlikely(segment==NULL) return; + + const bool is_local = (_mi_prim_thread_id() == mi_atomic_load_relaxed(&segment->thread_id)); + mi_page_t* const page = _mi_segment_page_of(segment, p); + + if mi_likely(is_local) { // thread-local free? 
+  // (review) The lines below are the tail of `mi_free` (header in an earlier hunk):
+  // thread-local fast path vs. generic / multi-threaded slow path.
+    if mi_likely(page->flags.full_aligned == 0) {  // and it is not a full page (full pages need to move from the full bin), nor has aligned blocks (aligned blocks need to be unaligned)
+      // thread-local, aligned, and not a full page
+      mi_block_t* const block = (mi_block_t*)p;
+      mi_free_block_local(page, block, true /* track stats */, false /* no need to check if the page is full */);
+    }
+    else {
+      // page is full or contains (inner) aligned blocks; use generic path
+      mi_free_generic_local(page, segment, p);
+    }
+  }
+  else {
+    // not thread-local; use generic path
+    mi_free_generic_mt(page, segment, p);
+  }
+}
+
+// return true if successful
+bool _mi_free_delayed_block(mi_block_t* block) {
+  // get segment and page
+  mi_assert_internal(block!=NULL);
+  const mi_segment_t* const segment = _mi_ptr_segment(block);
+  mi_assert_internal(_mi_ptr_cookie(segment) == segment->cookie);
+  mi_assert_internal(_mi_thread_id() == segment->thread_id);
+  mi_page_t* const page = _mi_segment_page_of(segment, block);
+
+  // Clear the no-delayed flag so delayed freeing is used again for this page.
+  // This must be done before collecting the free lists on this page -- otherwise
+  // some blocks may end up in the page `thread_free` list with no blocks in the
+  // heap `thread_delayed_free` list which may cause the page to be never freed!
+  // (it would only be freed if we happen to scan it in `mi_page_queue_find_free_ex`)
+  if (!_mi_page_try_use_delayed_free(page, MI_USE_DELAYED_FREE, false /* dont overwrite never delayed */)) {
+    return false;
+  }
+
+  // collect all other non-local frees (move from `thread_free` to `free`) to ensure up-to-date `used` count
+  _mi_page_free_collect(page, false);
+
+  // and free the block (possibly freeing the page as well since `used` is updated)
+  mi_free_block_local(page, block, false /* stats have already been adjusted */, true /* check for a full page */);
+  return true;
+}
+
+// ------------------------------------------------------
+// Multi-threaded Free (`_mt`)
+// ------------------------------------------------------
+
+// Push a block that is owned by another thread on its page-local thread free
+// list or it's heap delayed free list. Such blocks are later collected by
+// the owning thread in `_mi_free_delayed_block`.
+static void mi_decl_noinline mi_free_block_delayed_mt( mi_page_t* page, mi_block_t* block )
+{
+  // Try to put the block on either the page-local thread free list,
+  // or the heap delayed free list (if this is the first non-local free in that page)
+  mi_thread_free_t tfreex;
+  bool use_delayed;
+  mi_thread_free_t tfree = mi_atomic_load_relaxed(&page->xthread_free);
+  do {
+    use_delayed = (mi_tf_delayed(tfree) == MI_USE_DELAYED_FREE);
+    if mi_unlikely(use_delayed) {
+      // unlikely: this only happens on the first concurrent free in a page that is in the full list
+      tfreex = mi_tf_set_delayed(tfree,MI_DELAYED_FREEING);
+    }
+    else {
+      // usual: directly add to page thread_free list
+      mi_block_set_next(page, block, mi_tf_block(tfree));
+      tfreex = mi_tf_set_block(tfree,block);
+    }
+  } while (!mi_atomic_cas_weak_release(&page->xthread_free, &tfree, tfreex));
+
+  // If this was the first non-local free, we need to push it on the heap delayed free list instead
+  if mi_unlikely(use_delayed) {
+    // racy read on `heap`, but ok because MI_DELAYED_FREEING is set (see `mi_heap_delete` and `mi_heap_collect_abandon`)
+    mi_heap_t* const heap = (mi_heap_t*)(mi_atomic_load_acquire(&page->xheap)); //mi_page_heap(page);
+    mi_assert_internal(heap != NULL);
+    if (heap != NULL) {
+      // add to the delayed free list of this heap. (do this atomically as the lock only protects heap memory validity)
+      mi_block_t* dfree = mi_atomic_load_ptr_relaxed(mi_block_t, &heap->thread_delayed_free);
+      do {
+        mi_block_set_nextx(heap,block,dfree, heap->keys);
+      } while (!mi_atomic_cas_ptr_weak_release(mi_block_t,&heap->thread_delayed_free, &dfree, block));
+    }
+
+    // and reset the MI_DELAYED_FREEING flag
+    tfree = mi_atomic_load_relaxed(&page->xthread_free);
+    do {
+      tfreex = tfree;
+      mi_assert_internal(mi_tf_delayed(tfree) == MI_DELAYED_FREEING);
+      tfreex = mi_tf_set_delayed(tfree,MI_NO_DELAYED_FREE);
+    } while (!mi_atomic_cas_weak_release(&page->xthread_free, &tfree, tfreex));
+  }
+}
+
+// Multi-threaded free (`_mt`) (or free in huge block if compiled with MI_HUGE_PAGE_ABANDON)
+static void mi_decl_noinline mi_free_block_mt(mi_page_t* page, mi_segment_t* segment, mi_block_t* block)
+{
+  // first see if the segment was abandoned and if we can reclaim it into our thread
+  if (mi_option_is_enabled(mi_option_abandoned_reclaim_on_free) &&
+      #if MI_HUGE_PAGE_ABANDON
+      segment->page_kind != MI_PAGE_HUGE &&
+      #endif
+      mi_atomic_load_relaxed(&segment->thread_id) == 0)
+  {
+    // the segment is abandoned, try to reclaim it into our heap
+    if (_mi_segment_attempt_reclaim(mi_heap_get_default(), segment)) {
+      mi_assert_internal(_mi_prim_thread_id() == mi_atomic_load_relaxed(&segment->thread_id));
+      mi_free(block);  // recursively free as now it will be a local free in our heap
+      return;
+    }
+  }
+
+  // The padding check may access the non-thread-owned page for the key values.
+  // that is safe as these are constant and the page won't be freed (as the block is not freed yet).
+  mi_check_padding(page, block);
+
+  // adjust stats (after padding check and potentially recursive `mi_free` above)
+  mi_stat_free(page, block);  // stat_free may access the padding
+  mi_track_free_size(block, mi_page_usable_size_of(page,block));
+
+  // for small size, ensure we can fit the delayed thread pointers without triggering overflow detection
+  _mi_padding_shrink(page, block, sizeof(mi_block_t));
+
+  if (segment->kind == MI_SEGMENT_HUGE) {
+    #if MI_HUGE_PAGE_ABANDON
+    // huge page segments are always abandoned and can be freed immediately
+    _mi_segment_huge_page_free(segment, page, block);
+    return;
+    #else
+    // huge pages are special as they occupy the entire segment
+    // as these are large we reset the memory occupied by the page so it is available to other threads
+    // (as the owning thread needs to actually free the memory later).
+    _mi_segment_huge_page_reset(segment, page, block);
+    #endif
+  }
+  else {
+    #if (MI_DEBUG>0) && !MI_TRACK_ENABLED && !MI_TSAN  // note: when tracking, cannot use mi_usable_size with multi-threading
+    memset(block, MI_DEBUG_FREED, mi_usable_size(block));
+    #endif
+  }
+
+  // and finally free the actual block by pushing it on the owning heap
+  // thread_delayed free list (or heap delayed free list)
+  mi_free_block_delayed_mt(page,block);
+}
+
+
+// ------------------------------------------------------
+// Usable size
+// ------------------------------------------------------
+
+// Bytes available in a block
+static size_t mi_decl_noinline mi_page_usable_aligned_size_of(const mi_page_t* page, const void* p) mi_attr_noexcept {
+  const mi_block_t* block = _mi_page_ptr_unalign(page, p);
+  const size_t size = mi_page_usable_size_of(page, block);
+  const ptrdiff_t adjust = (uint8_t*)p - (uint8_t*)block;
+  mi_assert_internal(adjust >= 0 && (size_t)adjust <= size);
+  return (size - adjust);
+}
+
+// Usable size for pointer `p`; returns 0 if `p` does not belong to a valid segment.
+static inline size_t _mi_usable_size(const void* p, const char* msg) mi_attr_noexcept {
+  const mi_segment_t* const segment = mi_checked_ptr_segment(p, msg);
+  if mi_unlikely(segment==NULL) return 0;
+  const mi_page_t* const page = _mi_segment_page_of(segment, p);
+  if mi_likely(!mi_page_has_aligned(page)) {
+    const mi_block_t* block = (const mi_block_t*)p;
+    return mi_page_usable_size_of(page, block);
+  }
+  else {
+    // split out to separate routine for improved code generation
+    return mi_page_usable_aligned_size_of(page, p);
+  }
+}
+
+mi_decl_nodiscard size_t mi_usable_size(const void* p) mi_attr_noexcept {
+  return _mi_usable_size(p, "mi_usable_size");
+}
+
+
+// ------------------------------------------------------
+// Free variants
+// ------------------------------------------------------
+
+void mi_free_size(void* p, size_t size) mi_attr_noexcept {
+  MI_UNUSED_RELEASE(size);
+  mi_assert(p == NULL || size <= _mi_usable_size(p,"mi_free_size"));
+  mi_free(p);
+}
+
+void mi_free_size_aligned(void* p, size_t size, size_t alignment) mi_attr_noexcept {
+  MI_UNUSED_RELEASE(alignment);
+  mi_assert(((uintptr_t)p % alignment) == 0);
+  mi_free_size(p,size);
+}
+
+void mi_free_aligned(void* p, size_t alignment) mi_attr_noexcept {
+  MI_UNUSED_RELEASE(alignment);
+  mi_assert(((uintptr_t)p % alignment) == 0);
+  mi_free(p);
+}
+
+
+// ------------------------------------------------------
+// Check for double free in secure and debug mode
+// This is somewhat expensive so only enabled for secure mode 4
+// ------------------------------------------------------
+
+#if (MI_ENCODE_FREELIST && (MI_SECURE>=4 || MI_DEBUG!=0))
+// linear check if the free list contains a specific element
+static bool mi_list_contains(const mi_page_t* page, const mi_block_t* list, const mi_block_t* elem) {
+  while (list != NULL) {
+    if (elem==list) return true;
+    list = mi_block_next(page, list);
+  }
+  return false;
+}
+
+static mi_decl_noinline bool mi_check_is_double_freex(const mi_page_t* page, const mi_block_t* block) {
+  // The decoded value is in the same page (or NULL).
+  // Walk the free lists to verify positively if it is already freed
+  if (mi_list_contains(page, page->free, block) ||
+      mi_list_contains(page, page->local_free, block) ||
+      mi_list_contains(page, mi_page_thread_free(page), block))
+  {
+    _mi_error_message(EAGAIN, "double free detected of block %p with size %zu\n", block, mi_page_block_size(page));
+    return true;
+  }
+  return false;
+}
+
+#define mi_track_page(page,access)  { size_t psize; void* pstart = _mi_page_start(_mi_page_segment(page),page,&psize); mi_track_mem_##access( pstart, psize); }
+
+static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block_t* block) {
+  bool is_double_free = false;
+  mi_block_t* n = mi_block_nextx(page, block, page->keys); // pretend it is freed, and get the decoded first field
+  if (((uintptr_t)n & (MI_INTPTR_SIZE-1))==0 &&   // quick check: aligned pointer?
+      (n==NULL || mi_is_in_same_page(block, n)))  // quick check: in same page or NULL?
+  {
+    // Suspicious: decoded value a in block is in the same page (or NULL) -- maybe a double free?
+    // (continue in separate function to improve code generation)
+    is_double_free = mi_check_is_double_freex(page, block);
+  }
+  return is_double_free;
+}
+#else
+static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block_t* block) {
+  MI_UNUSED(page);
+  MI_UNUSED(block);
+  return false;
+}
+#endif
+
+
+// ---------------------------------------------------------------------------
+// Check for heap block overflow by setting up padding at the end of the block
+// ---------------------------------------------------------------------------
+
+#if MI_PADDING // && !MI_TRACK_ENABLED
+// Decode the padding record stored directly after a block; `ok` is false when
+// the canary does not match the page keys (i.e. the padding was overwritten).
+static bool mi_page_decode_padding(const mi_page_t* page, const mi_block_t* block, size_t* delta, size_t* bsize) {
+  *bsize = mi_page_usable_block_size(page);
+  const mi_padding_t* const padding = (mi_padding_t*)((uint8_t*)block + *bsize);
+  mi_track_mem_defined(padding,sizeof(mi_padding_t));
+  *delta = padding->delta;
+  uint32_t canary = padding->canary;
+  uintptr_t keys[2];
+  keys[0] = page->keys[0];
+  keys[1] = page->keys[1];
+  bool ok = ((uint32_t)mi_ptr_encode(page,block,keys) == canary && *delta <= *bsize);
+  mi_track_mem_noaccess(padding,sizeof(mi_padding_t));
+  return ok;
+}
+
+// Return the exact usable size of a block.
+static size_t mi_page_usable_size_of(const mi_page_t* page, const mi_block_t* block) {
+  size_t bsize;
+  size_t delta;
+  bool ok = mi_page_decode_padding(page, block, &delta, &bsize);
+  mi_assert_internal(ok); mi_assert_internal(delta <= bsize);
+  return (ok ? bsize - delta : 0);
+}
+
+// When a non-thread-local block is freed, it becomes part of the thread delayed free
+// list that is freed later by the owning heap. If the exact usable size is too small to
+// contain the pointer for the delayed list, then shrink the padding (by decreasing delta)
+// so it will later not trigger an overflow error in `mi_free_block`.
+void _mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, const size_t min_size) { + size_t bsize; + size_t delta; + bool ok = mi_page_decode_padding(page, block, &delta, &bsize); + mi_assert_internal(ok); + if (!ok || (bsize - delta) >= min_size) return; // usually already enough space + mi_assert_internal(bsize >= min_size); + if (bsize < min_size) return; // should never happen + size_t new_delta = (bsize - min_size); + mi_assert_internal(new_delta < bsize); + mi_padding_t* padding = (mi_padding_t*)((uint8_t*)block + bsize); + mi_track_mem_defined(padding,sizeof(mi_padding_t)); + padding->delta = (uint32_t)new_delta; + mi_track_mem_noaccess(padding,sizeof(mi_padding_t)); +} +#else +static size_t mi_page_usable_size_of(const mi_page_t* page, const mi_block_t* block) { + MI_UNUSED(block); + return mi_page_usable_block_size(page); +} + +void _mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, const size_t min_size) { + MI_UNUSED(page); + MI_UNUSED(block); + MI_UNUSED(min_size); +} +#endif + +#if MI_PADDING && MI_PADDING_CHECK + +static bool mi_verify_padding(const mi_page_t* page, const mi_block_t* block, size_t* size, size_t* wrong) { + size_t bsize; + size_t delta; + bool ok = mi_page_decode_padding(page, block, &delta, &bsize); + *size = *wrong = bsize; + if (!ok) return false; + mi_assert_internal(bsize >= delta); + *size = bsize - delta; + if (!mi_page_is_huge(page)) { + uint8_t* fill = (uint8_t*)block + bsize - delta; + const size_t maxpad = (delta > MI_MAX_ALIGN_SIZE ? 
MI_MAX_ALIGN_SIZE : delta); // check at most the first N padding bytes + mi_track_mem_defined(fill, maxpad); + for (size_t i = 0; i < maxpad; i++) { + if (fill[i] != MI_DEBUG_PADDING) { + *wrong = bsize - delta + i; + ok = false; + break; + } + } + mi_track_mem_noaccess(fill, maxpad); + } + return ok; +} + +static void mi_check_padding(const mi_page_t* page, const mi_block_t* block) { + size_t size; + size_t wrong; + if (!mi_verify_padding(page,block,&size,&wrong)) { + _mi_error_message(EFAULT, "buffer overflow in heap block %p of size %zu: write after %zu bytes\n", block, size, wrong ); + } +} + +#else + +static void mi_check_padding(const mi_page_t* page, const mi_block_t* block) { + MI_UNUSED(page); + MI_UNUSED(block); +} + +#endif + +// only maintain stats for smaller objects if requested +#if (MI_STAT>0) +static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) { + #if (MI_STAT < 2) + MI_UNUSED(block); + #endif + mi_heap_t* const heap = mi_heap_get_default(); + const size_t bsize = mi_page_usable_block_size(page); + #if (MI_STAT>1) + const size_t usize = mi_page_usable_size_of(page, block); + mi_heap_stat_decrease(heap, malloc, usize); + #endif + if (bsize <= MI_MEDIUM_OBJ_SIZE_MAX) { + mi_heap_stat_decrease(heap, normal, bsize); + #if (MI_STAT > 1) + mi_heap_stat_decrease(heap, normal_bins[_mi_bin(bsize)], 1); + #endif + } + else if (bsize <= MI_LARGE_OBJ_SIZE_MAX) { + mi_heap_stat_decrease(heap, large, bsize); + } + else { + mi_heap_stat_decrease(heap, huge, bsize); + } +} +#else +static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) { + MI_UNUSED(page); MI_UNUSED(block); +} +#endif diff --git a/yass/third_party/mimalloc/src/heap.c b/yass/third_party/mimalloc/src/heap.c new file mode 100644 index 0000000000..e498fdb209 --- /dev/null +++ b/yass/third_party/mimalloc/src/heap.c @@ -0,0 +1,653 @@ +/*---------------------------------------------------------------------------- +Copyright (c) 2018-2021, Microsoft Research, Daan 
Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. +-----------------------------------------------------------------------------*/ + +#include "mimalloc.h" +#include "mimalloc/internal.h" +#include "mimalloc/atomic.h" +#include "mimalloc/prim.h" // mi_prim_get_default_heap + +#include // memset, memcpy + +#if defined(_MSC_VER) && (_MSC_VER < 1920) +#pragma warning(disable:4204) // non-constant aggregate initializer +#endif + +/* ----------------------------------------------------------- + Helpers +----------------------------------------------------------- */ + +// return `true` if ok, `false` to break +typedef bool (heap_page_visitor_fun)(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg1, void* arg2); + +// Visit all pages in a heap; returns `false` if break was called. +static bool mi_heap_visit_pages(mi_heap_t* heap, heap_page_visitor_fun* fn, void* arg1, void* arg2) +{ + if (heap==NULL || heap->page_count==0) return 0; + + // visit all pages + #if MI_DEBUG>1 + size_t total = heap->page_count; + size_t count = 0; + #endif + + for (size_t i = 0; i <= MI_BIN_FULL; i++) { + mi_page_queue_t* pq = &heap->pages[i]; + mi_page_t* page = pq->first; + while(page != NULL) { + mi_page_t* next = page->next; // save next in case the page gets removed from the queue + mi_assert_internal(mi_page_heap(page) == heap); + #if MI_DEBUG>1 + count++; + #endif + if (!fn(heap, pq, page, arg1, arg2)) return false; + page = next; // and continue + } + } + mi_assert_internal(count == total); + return true; +} + + +#if MI_DEBUG>=2 +static bool mi_heap_page_is_valid(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg1, void* arg2) { + MI_UNUSED(arg1); + MI_UNUSED(arg2); + MI_UNUSED(pq); + mi_assert_internal(mi_page_heap(page) == heap); + mi_segment_t* segment = _mi_page_segment(page); + 
mi_assert_internal(segment->thread_id == heap->thread_id); + mi_assert_expensive(_mi_page_is_valid(page)); + return true; +} +#endif +#if MI_DEBUG>=3 +static bool mi_heap_is_valid(mi_heap_t* heap) { + mi_assert_internal(heap!=NULL); + mi_heap_visit_pages(heap, &mi_heap_page_is_valid, NULL, NULL); + return true; +} +#endif + + + + +/* ----------------------------------------------------------- + "Collect" pages by migrating `local_free` and `thread_free` + lists and freeing empty pages. This is done when a thread + stops (and in that case abandons pages if there are still + blocks alive) +----------------------------------------------------------- */ + +typedef enum mi_collect_e { + MI_NORMAL, + MI_FORCE, + MI_ABANDON +} mi_collect_t; + + +static bool mi_heap_page_collect(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg_collect, void* arg2 ) { + MI_UNUSED(arg2); + MI_UNUSED(heap); + mi_assert_internal(mi_heap_page_is_valid(heap, pq, page, NULL, NULL)); + mi_collect_t collect = *((mi_collect_t*)arg_collect); + _mi_page_free_collect(page, collect >= MI_FORCE); + if (collect == MI_FORCE) { + // note: call before a potential `_mi_page_free` as the segment may be freed if this was the last used page in that segment. + mi_segment_t* segment = _mi_page_segment(page); + _mi_segment_collect(segment, true /* force? */, &heap->tld->segments); + } + if (mi_page_all_free(page)) { + // no more used blocks, free the page. + // note: this will free retired pages as well. 
+ _mi_page_free(page, pq, collect >= MI_FORCE); + } + else if (collect == MI_ABANDON) { + // still used blocks but the thread is done; abandon the page + _mi_page_abandon(page, pq); + } + return true; // don't break +} + +static bool mi_heap_page_never_delayed_free(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg1, void* arg2) { + MI_UNUSED(arg1); + MI_UNUSED(arg2); + MI_UNUSED(heap); + MI_UNUSED(pq); + _mi_page_use_delayed_free(page, MI_NEVER_DELAYED_FREE, false); + return true; // don't break +} + +static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect) +{ + if (heap==NULL || !mi_heap_is_initialized(heap)) return; + + const bool force = (collect >= MI_FORCE); + _mi_deferred_free(heap, force); + + // python/cpython#112532: we may be called from a thread that is not the owner of the heap + const bool is_main_thread = (_mi_is_main_thread() && heap->thread_id == _mi_thread_id()); + + // note: never reclaim on collect but leave it to threads that need storage to reclaim + const bool force_main = + #ifdef NDEBUG + collect == MI_FORCE + #else + collect >= MI_FORCE + #endif + && is_main_thread && mi_heap_is_backing(heap) && !heap->no_reclaim; + + if (force_main) { + // the main thread is abandoned (end-of-program), try to reclaim all abandoned segments. + // if all memory is freed by now, all segments should be freed. + _mi_abandoned_reclaim_all(heap, &heap->tld->segments); + } + + // if abandoning, mark all pages to no longer add to delayed_free + if (collect == MI_ABANDON) { + mi_heap_visit_pages(heap, &mi_heap_page_never_delayed_free, NULL, NULL); + } + + // free all current thread delayed blocks. + // (if abandoning, after this there are no more thread-delayed references into the pages.) 
+ _mi_heap_delayed_free_all(heap); + + // collect retired pages + _mi_heap_collect_retired(heap, force); + + // collect all pages owned by this thread + mi_heap_visit_pages(heap, &mi_heap_page_collect, &collect, NULL); + mi_assert_internal( collect != MI_ABANDON || mi_atomic_load_ptr_acquire(mi_block_t,&heap->thread_delayed_free) == NULL ); + + // collect abandoned segments (in particular, purge expired parts of segments in the abandoned segment list) + // note: forced purge can be quite expensive if many threads are created/destroyed so we do not force on abandonment + _mi_abandoned_collect(heap, collect == MI_FORCE /* force? */, &heap->tld->segments); + + // if forced, collect thread data cache on program-exit (or shared library unload) + if (force && is_main_thread && mi_heap_is_backing(heap)) { + _mi_thread_data_collect(); // collect thread data cache + } + + // collect arenas (this is program wide so don't force purges on abandonment of threads) + _mi_arenas_collect(collect == MI_FORCE /* force purge? */, &heap->tld->stats); +} + +void _mi_heap_collect_abandon(mi_heap_t* heap) { + mi_heap_collect_ex(heap, MI_ABANDON); +} + +void mi_heap_collect(mi_heap_t* heap, bool force) mi_attr_noexcept { + mi_heap_collect_ex(heap, (force ? 
MI_FORCE : MI_NORMAL)); +} + +void mi_collect(bool force) mi_attr_noexcept { + mi_heap_collect(mi_prim_get_default_heap(), force); +} + + +/* ----------------------------------------------------------- + Heap new +----------------------------------------------------------- */ + +mi_heap_t* mi_heap_get_default(void) { + mi_thread_init(); + return mi_prim_get_default_heap(); +} + +static bool mi_heap_is_default(const mi_heap_t* heap) { + return (heap == mi_prim_get_default_heap()); +} + + +mi_heap_t* mi_heap_get_backing(void) { + mi_heap_t* heap = mi_heap_get_default(); + mi_assert_internal(heap!=NULL); + mi_heap_t* bheap = heap->tld->heap_backing; + mi_assert_internal(bheap!=NULL); + mi_assert_internal(bheap->thread_id == _mi_thread_id()); + return bheap; +} + +void _mi_heap_init(mi_heap_t* heap, mi_tld_t* tld, mi_arena_id_t arena_id, bool noreclaim, uint8_t tag) { + _mi_memcpy_aligned(heap, &_mi_heap_empty, sizeof(mi_heap_t)); + heap->tld = tld; + heap->thread_id = _mi_thread_id(); + heap->arena_id = arena_id; + heap->no_reclaim = noreclaim; + heap->tag = tag; + if (heap == tld->heap_backing) { + _mi_random_init(&heap->random); + } + else { + _mi_random_split(&tld->heap_backing->random, &heap->random); + } + heap->cookie = _mi_heap_random_next(heap) | 1; + heap->keys[0] = _mi_heap_random_next(heap); + heap->keys[1] = _mi_heap_random_next(heap); + // push on the thread local heaps list + heap->next = heap->tld->heaps; + heap->tld->heaps = heap; +} + +mi_decl_nodiscard mi_heap_t* mi_heap_new_in_arena(mi_arena_id_t arena_id) { + mi_heap_t* bheap = mi_heap_get_backing(); + mi_heap_t* heap = mi_heap_malloc_tp(bheap, mi_heap_t); // todo: OS allocate in secure mode? 
+ if (heap == NULL) return NULL; + // don't reclaim abandoned pages or otherwise destroy is unsafe + _mi_heap_init(heap, bheap->tld, arena_id, true /* no reclaim */, 0 /* default tag */); + return heap; +} + +mi_decl_nodiscard mi_heap_t* mi_heap_new(void) { + return mi_heap_new_in_arena(_mi_arena_id_none()); +} + +bool _mi_heap_memid_is_suitable(mi_heap_t* heap, mi_memid_t memid) { + return _mi_arena_memid_is_suitable(memid, heap->arena_id); +} + +uintptr_t _mi_heap_random_next(mi_heap_t* heap) { + return _mi_random_next(&heap->random); +} + +// zero out the page queues +static void mi_heap_reset_pages(mi_heap_t* heap) { + mi_assert_internal(heap != NULL); + mi_assert_internal(mi_heap_is_initialized(heap)); + // TODO: copy full empty heap instead? + memset(&heap->pages_free_direct, 0, sizeof(heap->pages_free_direct)); + _mi_memcpy_aligned(&heap->pages, &_mi_heap_empty.pages, sizeof(heap->pages)); + heap->thread_delayed_free = NULL; + heap->page_count = 0; +} + +// called from `mi_heap_destroy` and `mi_heap_delete` to free the internal heap resources. 
+static void mi_heap_free(mi_heap_t* heap) {
+  mi_assert(heap != NULL);
+  mi_assert_internal(mi_heap_is_initialized(heap));
+  if (heap==NULL || !mi_heap_is_initialized(heap)) return;
+  if (mi_heap_is_backing(heap)) return; // dont free the backing heap
+
+  // reset default
+  if (mi_heap_is_default(heap)) {
+    _mi_heap_set_default_direct(heap->tld->heap_backing);
+  }
+
+  // remove ourselves from the thread local heaps list
+  // linear search but we expect the number of heaps to be relatively small
+  mi_heap_t* prev = NULL;
+  mi_heap_t* curr = heap->tld->heaps;
+  while (curr != heap && curr != NULL) {
+    prev = curr;
+    curr = curr->next;
+  }
+  mi_assert_internal(curr == heap);
+  if (curr == heap) {
+    if (prev != NULL) { prev->next = heap->next; }
+    else { heap->tld->heaps = heap->next; }
+  }
+  mi_assert_internal(heap->tld->heaps != NULL);
+
+  // and free the used memory
+  mi_free(heap);
+}
+
+// return a heap on the same thread as `heap` specialized for the specified tag (if it exists)
+mi_heap_t* _mi_heap_by_tag(mi_heap_t* heap, uint8_t tag) {
+  if (heap->tag == tag) {
+    return heap;
+  }
+  for (mi_heap_t *curr = heap->tld->heaps; curr != NULL; curr = curr->next) {
+    if (curr->tag == tag) {
+      return curr;
+    }
+  }
+  return NULL;
+}
+
+/* -----------------------------------------------------------
+  Heap destroy
+----------------------------------------------------------- */
+
+static bool _mi_heap_page_destroy(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg1, void* arg2) {
+  MI_UNUSED(arg1);
+  MI_UNUSED(arg2);
+  // (review) note: `heap` is marked MI_UNUSED below, yet it is still referenced by the
+  // mi_heap_stat_decrease uses further down — presumably the macro may expand to nothing
+  // in some configurations; confirm against the stats macro definitions.
+  MI_UNUSED(heap);
+  MI_UNUSED(pq);
+
+  // ensure no more thread_delayed_free will be added
+  _mi_page_use_delayed_free(page, MI_NEVER_DELAYED_FREE, false);
+
+  // stats
+  const size_t bsize = mi_page_block_size(page);
+  if (bsize > MI_MEDIUM_OBJ_SIZE_MAX) {
+    if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
+      mi_heap_stat_decrease(heap, large, bsize);
+    }
+    else {
+      mi_heap_stat_decrease(heap, huge, bsize);
+    }
+  }
+#if (MI_STAT)
+  _mi_page_free_collect(page, false);  // update used count
+  const size_t inuse = page->used;
+  if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
+    mi_heap_stat_decrease(heap, normal, bsize * inuse);
+#if (MI_STAT>1)
+    mi_heap_stat_decrease(heap, normal_bins[_mi_bin(bsize)], inuse);
+#endif
+  }
+  mi_heap_stat_decrease(heap, malloc, bsize * inuse); // todo: off for aligned blocks...
+#endif
+
+  /// pretend it is all free now
+  mi_assert_internal(mi_page_thread_free(page) == NULL);
+  page->used = 0;
+
+  // and free the page
+  // mi_page_free(page,false);
+  page->next = NULL;
+  page->prev = NULL;
+  _mi_segment_page_free(page,false /* no force? */, &heap->tld->segments);
+
+  return true; // keep going
+}
+
+void _mi_heap_destroy_pages(mi_heap_t* heap) {
+  mi_heap_visit_pages(heap, &_mi_heap_page_destroy, NULL, NULL);
+  mi_heap_reset_pages(heap);
+}
+
+#if MI_TRACK_HEAP_DESTROY
+static bool mi_cdecl mi_heap_track_block_free(const mi_heap_t* heap, const mi_heap_area_t* area, void* block, size_t block_size, void* arg) {
+  MI_UNUSED(heap); MI_UNUSED(area); MI_UNUSED(arg); MI_UNUSED(block_size);
+  mi_track_free_size(block,mi_usable_size(block));
+  return true;
+}
+#endif
+
+void mi_heap_destroy(mi_heap_t* heap) {
+  mi_assert(heap != NULL);
+  mi_assert(mi_heap_is_initialized(heap));
+  mi_assert(heap->no_reclaim);
+  mi_assert_expensive(mi_heap_is_valid(heap));
+  if (heap==NULL || !mi_heap_is_initialized(heap)) return;
+  if (!heap->no_reclaim) {
+    // don't free in case it may contain reclaimed pages
+    mi_heap_delete(heap);
+  }
+  else {
+    // track all blocks as freed
+    #if MI_TRACK_HEAP_DESTROY
+    mi_heap_visit_blocks(heap, true, mi_heap_track_block_free, NULL);
+    #endif
+    // free all pages
+    _mi_heap_destroy_pages(heap);
+    mi_heap_free(heap);
+  }
+}
+
+// forcefully destroy all heaps in the current thread
+void _mi_heap_unsafe_destroy_all(void) {
+  mi_heap_t* bheap = mi_heap_get_backing();
+  mi_heap_t* curr = bheap->tld->heaps;
+  while (curr != NULL) {
+    mi_heap_t* next = curr->next;
+    if (curr->no_reclaim) {
+      mi_heap_destroy(curr);
+    }
+    else {
+      _mi_heap_destroy_pages(curr);
+    }
+    curr = next;
+  }
+}
+
+/* -----------------------------------------------------------
+  Safe Heap delete
+----------------------------------------------------------- */
+
+// Transfer the pages from one heap to the other
+static void mi_heap_absorb(mi_heap_t* heap, mi_heap_t* from) {
+  mi_assert_internal(heap!=NULL);
+  if (from==NULL || from->page_count == 0) return;
+
+  // reduce the size of the delayed frees
+  _mi_heap_delayed_free_partial(from);
+
+  // transfer all pages by appending the queues; this will set a new heap field
+  // so threads may do delayed frees in either heap for a while.
+  // note: appending waits for each page to not be in the `MI_DELAYED_FREEING` state
+  // so after this only the new heap will get delayed frees
+  for (size_t i = 0; i <= MI_BIN_FULL; i++) {
+    mi_page_queue_t* pq = &heap->pages[i];
+    mi_page_queue_t* append = &from->pages[i];
+    size_t pcount = _mi_page_queue_append(heap, pq, append);
+    heap->page_count += pcount;
+    from->page_count -= pcount;
+  }
+  mi_assert_internal(from->page_count == 0);
+
+  // and do outstanding delayed frees in the `from` heap
+  // note: be careful here as the `heap` field in all those pages no longer point to `from`,
+  // turns out to be ok as `_mi_heap_delayed_free` only visits the list and calls a
+  // the regular `_mi_free_delayed_block` which is safe.
+  _mi_heap_delayed_free_all(from);
+  #if !defined(_MSC_VER) || (_MSC_VER > 1900) // somehow the following line gives an error in VS2015, issue #353
+  mi_assert_internal(mi_atomic_load_ptr_relaxed(mi_block_t,&from->thread_delayed_free) == NULL);
+  #endif
+
+  // and reset the `from` heap
+  mi_heap_reset_pages(from);
+}
+
+// Safe delete a heap without freeing any still allocated blocks in that heap.
+void mi_heap_delete(mi_heap_t* heap)
+{
+  mi_assert(heap != NULL);
+  mi_assert(mi_heap_is_initialized(heap));
+  mi_assert_expensive(mi_heap_is_valid(heap));
+  if (heap==NULL || !mi_heap_is_initialized(heap)) return;
+
+  if (!mi_heap_is_backing(heap)) {
+    // transfer still used pages to the backing heap
+    mi_heap_absorb(heap->tld->heap_backing, heap);
+  }
+  else {
+    // the backing heap abandons its pages
+    _mi_heap_collect_abandon(heap);
+  }
+  mi_assert_internal(heap->page_count==0);
+  mi_heap_free(heap);
+}
+
+// Make `heap` the default heap of this thread and return the previous default.
+mi_heap_t* mi_heap_set_default(mi_heap_t* heap) {
+  mi_assert(heap != NULL);
+  mi_assert(mi_heap_is_initialized(heap));
+  if (heap==NULL || !mi_heap_is_initialized(heap)) return NULL;
+  mi_assert_expensive(mi_heap_is_valid(heap));
+  mi_heap_t* old = mi_prim_get_default_heap();
+  _mi_heap_set_default_direct(heap);
+  return old;
+}
+
+
+
+
+/* -----------------------------------------------------------
+  Analysis
+----------------------------------------------------------- */
+
+// static since it is not thread safe to access heaps from other threads.
+static mi_heap_t* mi_heap_of_block(const void* p) {
+  if (p == NULL) return NULL;
+  mi_segment_t* segment = _mi_ptr_segment(p);
+  bool valid = (_mi_ptr_cookie(segment) == segment->cookie);
+  mi_assert_internal(valid);
+  if mi_unlikely(!valid) return NULL;
+  return mi_page_heap(_mi_segment_page_of(segment,p));
+}
+
+bool mi_heap_contains_block(mi_heap_t* heap, const void* p) {
+  mi_assert(heap != NULL);
+  if (heap==NULL || !mi_heap_is_initialized(heap)) return false;
+  return (heap == mi_heap_of_block(p));
+}
+
+
+static bool mi_heap_page_check_owned(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* p, void* vfound) {
+  MI_UNUSED(heap);
+  MI_UNUSED(pq);
+  bool* found = (bool*)vfound;
+  void* start = mi_page_start(page);
+  void* end = (uint8_t*)start + (page->capacity * mi_page_block_size(page));
+  *found = (p >= start && p < end);
+  return (!*found); // continue if not found
+}
+
+bool mi_heap_check_owned(mi_heap_t* heap, const void* p) {
+  mi_assert(heap != NULL);
+  if (heap==NULL || !mi_heap_is_initialized(heap)) return false;
+  if (((uintptr_t)p & (MI_INTPTR_SIZE - 1)) != 0) return false; // only aligned pointers
+  bool found = false;
+  mi_heap_visit_pages(heap, &mi_heap_page_check_owned, (void*)p, &found);
+  return found;
+}
+
+bool mi_check_owned(const void* p) {
+  return mi_heap_check_owned(mi_prim_get_default_heap(), p);
+}
+
+/* -----------------------------------------------------------
+  Visit all heap blocks and areas
+  Todo: enable visiting abandoned pages, and
+        enable visiting all blocks of all heaps across threads
+----------------------------------------------------------- */
+
+// Separate struct to keep `mi_page_t` out of the public interface
+typedef struct mi_heap_area_ex_s {
+  mi_heap_area_t area;
+  mi_page_t* page;
+} mi_heap_area_ex_t;
+
+static bool mi_heap_area_visit_blocks(const mi_heap_area_ex_t* xarea, mi_block_visit_fun* visitor, void* arg) {
+  mi_assert(xarea != NULL);
+  if (xarea==NULL) return true;
+  const mi_heap_area_t* area = &xarea->area;
+  mi_page_t* page = xarea->page;
+  mi_assert(page != NULL);
+  if (page == NULL) return true;
+
+  _mi_page_free_collect(page,true);
+  mi_assert_internal(page->local_free == NULL);
+  if (page->used == 0) return true;
+
+  const size_t bsize = mi_page_block_size(page);
+  const size_t ubsize = mi_page_usable_block_size(page); // without padding
+  size_t psize;
+  uint8_t* pstart = _mi_segment_page_start(_mi_page_segment(page), page, &psize);
+
+  if (page->capacity == 1) {
+    // optimize page with one block
+    mi_assert_internal(page->used == 1 && page->free == NULL);
+    return visitor(mi_page_heap(page), area, pstart, ubsize, arg);
+  }
+
+  // create a bitmap of free blocks.
+  // (review) the packing below divides by sizeof(uintptr_t) (bytes) rather than
+  // bits-per-word, so only the low 8 bits of each word are ever set on 64-bit
+  // targets and the `m == UINTPTR_MAX` run-skip can then never trigger; the
+  // free_map array is sized consistently so this looks harmless, but worth
+  // confirming against upstream.
+  #define MI_MAX_BLOCKS (MI_SMALL_PAGE_SIZE / sizeof(void*))
+  uintptr_t free_map[MI_MAX_BLOCKS / sizeof(uintptr_t)];
+  memset(free_map, 0, sizeof(free_map));
+
+  #if MI_DEBUG>1
+  size_t free_count = 0;
+  #endif
+  for (mi_block_t* block = page->free; block != NULL; block = mi_block_next(page,block)) {
+    #if MI_DEBUG>1
+    free_count++;
+    #endif
+    mi_assert_internal((uint8_t*)block >= pstart && (uint8_t*)block < (pstart + psize));
+    size_t offset = (uint8_t*)block - pstart;
+    mi_assert_internal(offset % bsize == 0);
+    size_t blockidx = offset / bsize; // Todo: avoid division?
+    mi_assert_internal( blockidx < MI_MAX_BLOCKS);
+    size_t bitidx = (blockidx / sizeof(uintptr_t));
+    size_t bit = blockidx - (bitidx * sizeof(uintptr_t));
+    free_map[bitidx] |= ((uintptr_t)1 << bit);
+  }
+  mi_assert_internal(page->capacity == (free_count + page->used));
+
+  // walk through all blocks skipping the free ones
+  #if MI_DEBUG>1
+  size_t used_count = 0;
+  #endif
+  for (size_t i = 0; i < page->capacity; i++) {
+    size_t bitidx = (i / sizeof(uintptr_t));
+    size_t bit = i - (bitidx * sizeof(uintptr_t));
+    uintptr_t m = free_map[bitidx];
+    if (bit == 0 && m == UINTPTR_MAX) {
+      i += (sizeof(uintptr_t) - 1); // skip a run of free blocks
+    }
+    else if ((m & ((uintptr_t)1 << bit)) == 0) {
+      #if MI_DEBUG>1
+      used_count++;
+      #endif
+      uint8_t* block = pstart + (i * bsize);
+      if (!visitor(mi_page_heap(page), area, block, ubsize, arg)) return false;
+    }
+  }
+  mi_assert_internal(page->used == used_count);
+  return true;
+}
+
+typedef bool (mi_heap_area_visit_fun)(const mi_heap_t* heap, const mi_heap_area_ex_t* area, void* arg);
+
+
+static bool mi_heap_visit_areas_page(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* vfun, void* arg) {
+  MI_UNUSED(heap);
+  MI_UNUSED(pq);
+  mi_heap_area_visit_fun* fun = (mi_heap_area_visit_fun*)vfun;
+  mi_heap_area_ex_t xarea;
+  const size_t bsize = mi_page_block_size(page);
+  const size_t ubsize = mi_page_usable_block_size(page);
+  xarea.page = page;
+  xarea.area.reserved = page->reserved * bsize;
+  xarea.area.committed = page->capacity * bsize;
+  xarea.area.blocks = mi_page_start(page);
+  xarea.area.used = page->used; // number of blocks in use (#553)
+  xarea.area.block_size = ubsize;
+  xarea.area.full_block_size = bsize;
+  return fun(heap, &xarea, arg);
+}
+
+// Visit all heap pages as areas
+static bool mi_heap_visit_areas(const mi_heap_t* heap, mi_heap_area_visit_fun* visitor, void* arg) {
+  if (visitor == NULL) return false;
+  return mi_heap_visit_pages((mi_heap_t*)heap, &mi_heap_visit_areas_page, (void*)(visitor), arg); // note: function pointer to void* :-{
+}
+
+// Just to pass arguments
+typedef struct mi_visit_blocks_args_s {
+  bool visit_blocks;
+  mi_block_visit_fun* visitor;
+  void* arg;
+} mi_visit_blocks_args_t;
+
+static bool mi_heap_area_visitor(const mi_heap_t* heap, const mi_heap_area_ex_t* xarea, void* arg) {
+  mi_visit_blocks_args_t* args = (mi_visit_blocks_args_t*)arg;
+  if (!args->visitor(heap, &xarea->area, NULL, xarea->area.block_size, args->arg)) return false;
+  if (args->visit_blocks) {
+    return mi_heap_area_visit_blocks(xarea, args->visitor, args->arg);
+  }
+  else {
+    return true;
+  }
+}
+
+// Visit all blocks in a heap
+bool mi_heap_visit_blocks(const mi_heap_t* heap, bool visit_blocks, mi_block_visit_fun* visitor, void* arg) {
+  mi_visit_blocks_args_t args = { visit_blocks, visitor, arg };
+  return mi_heap_visit_areas(heap, &mi_heap_area_visitor, &args);
+}
diff --git a/yass/third_party/mimalloc/src/init.c b/yass/third_party/mimalloc/src/init.c
new file mode 100644
index 0000000000..6f51ca8923
--- /dev/null
+++ b/yass/third_party/mimalloc/src/init.c
@@ -0,0 +1,714 @@
+/* ----------------------------------------------------------------------------
+Copyright (c) 2018-2022, Microsoft Research, Daan Leijen
+This is free software; you can redistribute it and/or modify it under the
+terms of the MIT license. A copy of the license can be found in the file
+"LICENSE" at the root of this distribution.
+-----------------------------------------------------------------------------*/ +#include "mimalloc.h" +#include "mimalloc/internal.h" +#include "mimalloc/prim.h" + +#include // memcpy, memset +#include // atexit + + +// Empty page used to initialize the small free pages array +const mi_page_t _mi_page_empty = { + 0, + false, false, false, false, + 0, // capacity + 0, // reserved capacity + { 0 }, // flags + false, // is_zero + 0, // retire_expire + NULL, // free + NULL, // local_free + 0, // used + 0, // block size shift + 0, // heap tag + 0, // block_size + NULL, // page_start + #if (MI_PADDING || MI_ENCODE_FREELIST) + { 0, 0 }, + #endif + MI_ATOMIC_VAR_INIT(0), // xthread_free + MI_ATOMIC_VAR_INIT(0), // xheap + NULL, NULL + , { 0 } // padding +}; + +#define MI_PAGE_EMPTY() ((mi_page_t*)&_mi_page_empty) + +#if (MI_SMALL_WSIZE_MAX==128) +#if (MI_PADDING>0) && (MI_INTPTR_SIZE >= 8) +#define MI_SMALL_PAGES_EMPTY { MI_INIT128(MI_PAGE_EMPTY), MI_PAGE_EMPTY(), MI_PAGE_EMPTY() } +#elif (MI_PADDING>0) +#define MI_SMALL_PAGES_EMPTY { MI_INIT128(MI_PAGE_EMPTY), MI_PAGE_EMPTY(), MI_PAGE_EMPTY(), MI_PAGE_EMPTY() } +#else +#define MI_SMALL_PAGES_EMPTY { MI_INIT128(MI_PAGE_EMPTY), MI_PAGE_EMPTY() } +#endif +#else +#error "define right initialization sizes corresponding to MI_SMALL_WSIZE_MAX" +#endif + +// Empty page queues for every bin +#define QNULL(sz) { NULL, NULL, (sz)*sizeof(uintptr_t) } +#define MI_PAGE_QUEUES_EMPTY \ + { QNULL(1), \ + QNULL( 1), QNULL( 2), QNULL( 3), QNULL( 4), QNULL( 5), QNULL( 6), QNULL( 7), QNULL( 8), /* 8 */ \ + QNULL( 10), QNULL( 12), QNULL( 14), QNULL( 16), QNULL( 20), QNULL( 24), QNULL( 28), QNULL( 32), /* 16 */ \ + QNULL( 40), QNULL( 48), QNULL( 56), QNULL( 64), QNULL( 80), QNULL( 96), QNULL( 112), QNULL( 128), /* 24 */ \ + QNULL( 160), QNULL( 192), QNULL( 224), QNULL( 256), QNULL( 320), QNULL( 384), QNULL( 448), QNULL( 512), /* 32 */ \ + QNULL( 640), QNULL( 768), QNULL( 896), QNULL( 1024), QNULL( 1280), QNULL( 1536), QNULL( 1792), QNULL( 
2048), /* 40 */ \ + QNULL( 2560), QNULL( 3072), QNULL( 3584), QNULL( 4096), QNULL( 5120), QNULL( 6144), QNULL( 7168), QNULL( 8192), /* 48 */ \ + QNULL( 10240), QNULL( 12288), QNULL( 14336), QNULL( 16384), QNULL( 20480), QNULL( 24576), QNULL( 28672), QNULL( 32768), /* 56 */ \ + QNULL( 40960), QNULL( 49152), QNULL( 57344), QNULL( 65536), QNULL( 81920), QNULL( 98304), QNULL(114688), QNULL(131072), /* 64 */ \ + QNULL(163840), QNULL(196608), QNULL(229376), QNULL(262144), QNULL(327680), QNULL(393216), QNULL(458752), QNULL(524288), /* 72 */ \ + QNULL(MI_MEDIUM_OBJ_WSIZE_MAX + 1 /* 655360, Huge queue */), \ + QNULL(MI_MEDIUM_OBJ_WSIZE_MAX + 2) /* Full queue */ } + +#define MI_STAT_COUNT_NULL() {0,0,0,0} + +// Empty statistics +#if MI_STAT>1 +#define MI_STAT_COUNT_END_NULL() , { MI_STAT_COUNT_NULL(), MI_INIT32(MI_STAT_COUNT_NULL) } +#else +#define MI_STAT_COUNT_END_NULL() +#endif + +#define MI_STATS_NULL \ + MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \ + MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \ + MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \ + MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \ + MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \ + MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \ + MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \ + MI_STAT_COUNT_NULL(), \ + { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, \ + { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, \ + { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, \ + { 0, 0 } \ + MI_STAT_COUNT_END_NULL() + + +// Empty slice span queues for every bin +#define SQNULL(sz) { NULL, NULL, sz } +#define MI_SEGMENT_SPAN_QUEUES_EMPTY \ + { SQNULL(1), \ + SQNULL( 1), SQNULL( 2), SQNULL( 3), SQNULL( 4), SQNULL( 5), SQNULL( 6), SQNULL( 7), SQNULL( 10), /* 8 */ \ + SQNULL( 12), SQNULL( 14), SQNULL( 16), SQNULL( 20), SQNULL( 24), SQNULL( 28), SQNULL( 32), SQNULL( 40), /* 16 */ \ + SQNULL( 48), SQNULL( 56), SQNULL( 64), SQNULL( 80), SQNULL( 96), SQNULL( 112), SQNULL( 128), SQNULL( 160), /* 24 */ \ + SQNULL( 192), SQNULL( 224), SQNULL( 256), SQNULL( 320), SQNULL( 
384), SQNULL( 448), SQNULL( 512), SQNULL( 640), /* 32 */ \ + SQNULL( 768), SQNULL( 896), SQNULL( 1024) /* 35 */ } + + +// -------------------------------------------------------- +// Statically allocate an empty heap as the initial +// thread local value for the default heap, +// and statically allocate the backing heap for the main +// thread so it can function without doing any allocation +// itself (as accessing a thread local for the first time +// may lead to allocation itself on some platforms) +// -------------------------------------------------------- + +mi_decl_cache_align const mi_heap_t _mi_heap_empty = { + NULL, + MI_ATOMIC_VAR_INIT(NULL), + 0, // tid + 0, // cookie + 0, // arena id + { 0, 0 }, // keys + { {0}, {0}, 0, true }, // random + 0, // page count + MI_BIN_FULL, 0, // page retired min/max + NULL, // next + false, // can reclaim + 0, // tag + MI_SMALL_PAGES_EMPTY, + MI_PAGE_QUEUES_EMPTY +}; + +#define tld_empty_stats ((mi_stats_t*)((uint8_t*)&tld_empty + offsetof(mi_tld_t,stats))) +#define tld_empty_os ((mi_os_tld_t*)((uint8_t*)&tld_empty + offsetof(mi_tld_t,os))) + +mi_decl_cache_align static const mi_tld_t tld_empty = { + 0, + false, + NULL, NULL, + { MI_SEGMENT_SPAN_QUEUES_EMPTY, 0, 0, 0, 0, 0, tld_empty_stats, tld_empty_os }, // segments + { 0, tld_empty_stats }, // os + { MI_STATS_NULL } // stats +}; + +mi_threadid_t _mi_thread_id(void) mi_attr_noexcept { + return _mi_prim_thread_id(); +} + +// the thread-local default heap for allocation +mi_decl_thread mi_heap_t* _mi_heap_default = (mi_heap_t*)&_mi_heap_empty; + +extern mi_heap_t _mi_heap_main; + +static mi_tld_t tld_main = { + 0, false, + &_mi_heap_main, & _mi_heap_main, + { MI_SEGMENT_SPAN_QUEUES_EMPTY, 0, 0, 0, 0, 0, &tld_main.stats, &tld_main.os }, // segments + { 0, &tld_main.stats }, // os + { MI_STATS_NULL } // stats +}; + +mi_heap_t _mi_heap_main = { + &tld_main, + MI_ATOMIC_VAR_INIT(NULL), + 0, // thread id + 0, // initial cookie + 0, // arena id + { 0, 0 }, // the key of the 
main heap can be fixed (unlike page keys that need to be secure!) + { {0x846ca68b}, {0}, 0, true }, // random + 0, // page count + MI_BIN_FULL, 0, // page retired min/max + NULL, // next heap + false, // can reclaim + 0, // tag + MI_SMALL_PAGES_EMPTY, + MI_PAGE_QUEUES_EMPTY +}; + +bool _mi_process_is_initialized = false; // set to `true` in `mi_process_init`. + +mi_stats_t _mi_stats_main = { MI_STATS_NULL }; + + +static void mi_heap_main_init(void) { + if (_mi_heap_main.cookie == 0) { + _mi_heap_main.thread_id = _mi_thread_id(); + _mi_heap_main.cookie = 1; + #if defined(_WIN32) && !defined(MI_SHARED_LIB) + _mi_random_init_weak(&_mi_heap_main.random); // prevent allocation failure during bcrypt dll initialization with static linking + #else + _mi_random_init(&_mi_heap_main.random); + #endif + _mi_heap_main.cookie = _mi_heap_random_next(&_mi_heap_main); + _mi_heap_main.keys[0] = _mi_heap_random_next(&_mi_heap_main); + _mi_heap_main.keys[1] = _mi_heap_random_next(&_mi_heap_main); + } +} + +mi_heap_t* _mi_heap_main_get(void) { + mi_heap_main_init(); + return &_mi_heap_main; +} + + +/* ----------------------------------------------------------- + Initialization and freeing of the thread local heaps +----------------------------------------------------------- */ + +// note: in x64 in release build `sizeof(mi_thread_data_t)` is under 4KiB (= OS page size). +typedef struct mi_thread_data_s { + mi_heap_t heap; // must come first due to cast in `_mi_heap_done` + mi_tld_t tld; + mi_memid_t memid; // must come last due to zero'ing +} mi_thread_data_t; + + +// Thread meta-data is allocated directly from the OS. For +// some programs that do not use thread pools and allocate and +// destroy many OS threads, this may causes too much overhead +// per thread so we maintain a small cache of recently freed metadata. 
+ +#define TD_CACHE_SIZE (16) +static _Atomic(mi_thread_data_t*) td_cache[TD_CACHE_SIZE]; + +static mi_thread_data_t* mi_thread_data_zalloc(void) { + // try to find thread metadata in the cache + bool is_zero = false; + mi_thread_data_t* td = NULL; + for (int i = 0; i < TD_CACHE_SIZE; i++) { + td = mi_atomic_load_ptr_relaxed(mi_thread_data_t, &td_cache[i]); + if (td != NULL) { + // found cached allocation, try use it + td = mi_atomic_exchange_ptr_acq_rel(mi_thread_data_t, &td_cache[i], NULL); + if (td != NULL) { + break; + } + } + } + + // if that fails, allocate as meta data + if (td == NULL) { + mi_memid_t memid; + td = (mi_thread_data_t*)_mi_os_alloc(sizeof(mi_thread_data_t), &memid, &_mi_stats_main); + if (td == NULL) { + // if this fails, try once more. (issue #257) + td = (mi_thread_data_t*)_mi_os_alloc(sizeof(mi_thread_data_t), &memid, &_mi_stats_main); + if (td == NULL) { + // really out of memory + _mi_error_message(ENOMEM, "unable to allocate thread local heap metadata (%zu bytes)\n", sizeof(mi_thread_data_t)); + } + } + if (td != NULL) { + td->memid = memid; + is_zero = memid.initially_zero; + } + } + + if (td != NULL && !is_zero) { + _mi_memzero_aligned(td, offsetof(mi_thread_data_t,memid)); + } + return td; +} + +static void mi_thread_data_free( mi_thread_data_t* tdfree ) { + // try to add the thread metadata to the cache + for (int i = 0; i < TD_CACHE_SIZE; i++) { + mi_thread_data_t* td = mi_atomic_load_ptr_relaxed(mi_thread_data_t, &td_cache[i]); + if (td == NULL) { + mi_thread_data_t* expected = NULL; + if (mi_atomic_cas_ptr_weak_acq_rel(mi_thread_data_t, &td_cache[i], &expected, tdfree)) { + return; + } + } + } + // if that fails, just free it directly + _mi_os_free(tdfree, sizeof(mi_thread_data_t), tdfree->memid, &_mi_stats_main); +} + +void _mi_thread_data_collect(void) { + // free all thread metadata from the cache + for (int i = 0; i < TD_CACHE_SIZE; i++) { + mi_thread_data_t* td = mi_atomic_load_ptr_relaxed(mi_thread_data_t, &td_cache[i]); + 
if (td != NULL) { + td = mi_atomic_exchange_ptr_acq_rel(mi_thread_data_t, &td_cache[i], NULL); + if (td != NULL) { + _mi_os_free(td, sizeof(mi_thread_data_t), td->memid, &_mi_stats_main); + } + } + } +} + +// Initialize the thread local default heap, called from `mi_thread_init` +static bool _mi_thread_heap_init(void) { + if (mi_heap_is_initialized(mi_prim_get_default_heap())) return true; + if (_mi_is_main_thread()) { + // mi_assert_internal(_mi_heap_main.thread_id != 0); // can happen on freeBSD where alloc is called before any initialization + // the main heap is statically allocated + mi_heap_main_init(); + _mi_heap_set_default_direct(&_mi_heap_main); + //mi_assert_internal(_mi_heap_default->tld->heap_backing == mi_prim_get_default_heap()); + } + else { + // use `_mi_os_alloc` to allocate directly from the OS + mi_thread_data_t* td = mi_thread_data_zalloc(); + if (td == NULL) return false; + + mi_tld_t* tld = &td->tld; + mi_heap_t* heap = &td->heap; + _mi_tld_init(tld, heap); // must be before `_mi_heap_init` + _mi_heap_init(heap, tld, _mi_arena_id_none(), false /* can reclaim */, 0 /* default tag */); + _mi_heap_set_default_direct(heap); + } + return false; +} + +// initialize thread local data +void _mi_tld_init(mi_tld_t* tld, mi_heap_t* bheap) { + _mi_memcpy_aligned(tld, &tld_empty, sizeof(mi_tld_t)); + tld->heap_backing = bheap; + tld->heaps = NULL; + tld->segments.stats = &tld->stats; + tld->segments.os = &tld->os; + tld->os.stats = &tld->stats; +} + +// Free the thread local default heap (called from `mi_thread_done`) +static bool _mi_thread_heap_done(mi_heap_t* heap) { + if (!mi_heap_is_initialized(heap)) return true; + + // reset default heap + _mi_heap_set_default_direct(_mi_is_main_thread() ? 
&_mi_heap_main : (mi_heap_t*)&_mi_heap_empty); + + // switch to backing heap + heap = heap->tld->heap_backing; + if (!mi_heap_is_initialized(heap)) return false; + + // delete all non-backing heaps in this thread + mi_heap_t* curr = heap->tld->heaps; + while (curr != NULL) { + mi_heap_t* next = curr->next; // save `next` as `curr` will be freed + if (curr != heap) { + mi_assert_internal(!mi_heap_is_backing(curr)); + mi_heap_delete(curr); + } + curr = next; + } + mi_assert_internal(heap->tld->heaps == heap && heap->next == NULL); + mi_assert_internal(mi_heap_is_backing(heap)); + + // collect if not the main thread + if (heap != &_mi_heap_main) { + _mi_heap_collect_abandon(heap); + } + + // merge stats + _mi_stats_done(&heap->tld->stats); + + // free if not the main thread + if (heap != &_mi_heap_main) { + // the following assertion does not always hold for huge segments as those are always treated + // as abondened: one may allocate it in one thread, but deallocate in another in which case + // the count can be too large or negative. todo: perhaps not count huge segments? see issue #363 + // mi_assert_internal(heap->tld->segments.count == 0 || heap->thread_id != _mi_thread_id()); + mi_thread_data_free((mi_thread_data_t*)heap); + } + else { + #if 0 + // never free the main thread even in debug mode; if a dll is linked statically with mimalloc, + // there may still be delete/free calls after the mi_fls_done is called. Issue #207 + _mi_heap_destroy_pages(heap); + mi_assert_internal(heap->tld->heap_backing == &_mi_heap_main); + #endif + } + return false; +} + + + +// -------------------------------------------------------- +// Try to run `mi_thread_done()` automatically so any memory +// owned by the thread but not yet released can be abandoned +// and re-owned by another thread. +// +// 1. windows dynamic library: +// call from DllMain on DLL_THREAD_DETACH +// 2. windows static library: +// use `FlsAlloc` to call a destructor when the thread is done +// 3. 
unix, pthreads: +// use a pthread key to call a destructor when a pthread is done +// +// In the last two cases we also need to call `mi_process_init` +// to set up the thread local keys. +// -------------------------------------------------------- + +// Set up handlers so `mi_thread_done` is called automatically +static void mi_process_setup_auto_thread_done(void) { + static bool tls_initialized = false; // fine if it races + if (tls_initialized) return; + tls_initialized = true; + _mi_prim_thread_init_auto_done(); + _mi_heap_set_default_direct(&_mi_heap_main); +} + + +bool _mi_is_main_thread(void) { + return (_mi_heap_main.thread_id==0 || _mi_heap_main.thread_id == _mi_thread_id()); +} + +static _Atomic(size_t) thread_count = MI_ATOMIC_VAR_INIT(1); + +size_t _mi_current_thread_count(void) { + return mi_atomic_load_relaxed(&thread_count); +} + +// This is called from the `mi_malloc_generic` +void mi_thread_init(void) mi_attr_noexcept +{ + // ensure our process has started already + mi_process_init(); + + // initialize the thread local default heap + // (this will call `_mi_heap_set_default_direct` and thus set the + // fiber/pthread key to a non-zero value, ensuring `_mi_thread_done` is called) + if (_mi_thread_heap_init()) return; // returns true if already initialized + + _mi_stat_increase(&_mi_stats_main.threads, 1); + mi_atomic_increment_relaxed(&thread_count); + //_mi_verbose_message("thread init: 0x%zx\n", _mi_thread_id()); +} + +void mi_thread_done(void) mi_attr_noexcept { + _mi_thread_done(NULL); +} + +void _mi_thread_done(mi_heap_t* heap) +{ + // calling with NULL implies using the default heap + if (heap == NULL) { + heap = mi_prim_get_default_heap(); + if (heap == NULL) return; + } + + // prevent re-entrancy through heap_done/heap_set_default_direct (issue #699) + if (!mi_heap_is_initialized(heap)) { + return; + } + + // adjust stats + mi_atomic_decrement_relaxed(&thread_count); + _mi_stat_decrease(&_mi_stats_main.threads, 1); + + // check thread-id as 
on Windows shutdown with FLS the main (exit) thread may call this on thread-local heaps... + if (heap->thread_id != _mi_thread_id()) return; + + // abandon the thread local heap + if (_mi_thread_heap_done(heap)) return; // returns true if already ran +} + +void _mi_heap_set_default_direct(mi_heap_t* heap) { + mi_assert_internal(heap != NULL); + #if defined(MI_TLS_SLOT) + mi_prim_tls_slot_set(MI_TLS_SLOT,heap); + #elif defined(MI_TLS_PTHREAD_SLOT_OFS) + *mi_prim_tls_pthread_heap_slot() = heap; + #elif defined(MI_TLS_PTHREAD) + // we use _mi_heap_default_key + #else + _mi_heap_default = heap; + #endif + + // ensure the default heap is passed to `_mi_thread_done` + // setting to a non-NULL value also ensures `mi_thread_done` is called. + _mi_prim_thread_associate_default_heap(heap); +} + + +// -------------------------------------------------------- +// Run functions on process init/done, and thread init/done +// -------------------------------------------------------- +static void mi_cdecl mi_process_done(void); + +static bool os_preloading = true; // true until this module is initialized +static bool mi_redirected = false; // true if malloc redirects to mi_malloc + +// Returns true if this module has not been initialized; Don't use C runtime routines until it returns false. 
+bool mi_decl_noinline _mi_preloading(void) { + return os_preloading; +} + +mi_decl_nodiscard bool mi_is_redirected(void) mi_attr_noexcept { + return mi_redirected; +} + +// Communicate with the redirection module on Windows +#if defined(_WIN32) && defined(MI_SHARED_LIB) && !defined(MI_WIN_NOREDIRECT) +#ifdef __cplusplus +extern "C" { +#endif +mi_decl_export void _mi_redirect_entry(DWORD reason) { + // called on redirection; careful as this may be called before DllMain + if (reason == DLL_PROCESS_ATTACH) { + mi_redirected = true; + } + else if (reason == DLL_PROCESS_DETACH) { + mi_redirected = false; + } + else if (reason == DLL_THREAD_DETACH) { + mi_thread_done(); + } +} +__declspec(dllimport) bool mi_cdecl mi_allocator_init(const char** message); +__declspec(dllimport) void mi_cdecl mi_allocator_done(void); +#ifdef __cplusplus +} +#endif +#else +static bool mi_allocator_init(const char** message) { + if (message != NULL) *message = NULL; + return true; +} +static void mi_allocator_done(void) { + // nothing to do +} +#endif + +// Called once by the process loader +static void mi_process_load(void) { + mi_heap_main_init(); + #if defined(__APPLE__) || defined(MI_TLS_RECURSE_GUARD) + volatile mi_heap_t* dummy = _mi_heap_default; // access TLS to allocate it before setting tls_initialized to true; + if (dummy == NULL) return; // use dummy or otherwise the access may get optimized away (issue #697) + #endif + os_preloading = false; + mi_assert_internal(_mi_is_main_thread()); + #if !(defined(_WIN32) && defined(MI_SHARED_LIB)) // use Dll process detach (see below) instead of atexit (issue #521) + atexit(&mi_process_done); + #endif + _mi_options_init(); + mi_process_setup_auto_thread_done(); + mi_process_init(); + if (mi_redirected) _mi_verbose_message("malloc is redirected.\n"); + + // show message from the redirector (if present) + const char* msg = NULL; + mi_allocator_init(&msg); + if (msg != NULL && (mi_option_is_enabled(mi_option_verbose) || 
mi_option_is_enabled(mi_option_show_errors))) { + _mi_fputs(NULL,NULL,NULL,msg); + } + + // reseed random + _mi_random_reinit_if_weak(&_mi_heap_main.random); +} + +#if defined(_WIN32) && (defined(_M_IX86) || defined(_M_X64)) +#include +mi_decl_cache_align bool _mi_cpu_has_fsrm = false; + +static void mi_detect_cpu_features(void) { + // FSRM for fast rep movsb support (AMD Zen3+ (~2020) or Intel Ice Lake+ (~2017)) + int32_t cpu_info[4]; + __cpuid(cpu_info, 7); + _mi_cpu_has_fsrm = ((cpu_info[3] & (1 << 4)) != 0); // bit 4 of EDX : see +} +#else +static void mi_detect_cpu_features(void) { + // nothing +} +#endif + +// Initialize the process; called by thread_init or the process loader +void mi_process_init(void) mi_attr_noexcept { + // ensure we are called once + static mi_atomic_once_t process_init; + #if _MSC_VER < 1920 + mi_heap_main_init(); // vs2017 can dynamically re-initialize _mi_heap_main + #endif + if (!mi_atomic_once(&process_init)) return; + _mi_process_is_initialized = true; + _mi_verbose_message("process init: 0x%zx\n", _mi_thread_id()); + mi_process_setup_auto_thread_done(); + + mi_detect_cpu_features(); + _mi_os_init(); + mi_heap_main_init(); + #if MI_DEBUG + _mi_verbose_message("debug level : %d\n", MI_DEBUG); + #endif + _mi_verbose_message("secure level: %d\n", MI_SECURE); + _mi_verbose_message("mem tracking: %s\n", MI_TRACK_TOOL); + #if MI_TSAN + _mi_verbose_message("thread santizer enabled\n"); + #endif + mi_thread_init(); + + #if defined(_WIN32) + // On windows, when building as a static lib the FLS cleanup happens to early for the main thread. + // To avoid this, set the FLS value for the main thread to NULL so the fls cleanup + // will not call _mi_thread_done on the (still executing) main thread. See issue #508. 
+ _mi_prim_thread_associate_default_heap(NULL); + #endif + + mi_stats_reset(); // only call stat reset *after* thread init (or the heap tld == NULL) + mi_track_init(); + + if (mi_option_is_enabled(mi_option_reserve_huge_os_pages)) { + size_t pages = mi_option_get_clamp(mi_option_reserve_huge_os_pages, 0, 128*1024); + long reserve_at = mi_option_get(mi_option_reserve_huge_os_pages_at); + if (reserve_at != -1) { + mi_reserve_huge_os_pages_at(pages, reserve_at, pages*500); + } else { + mi_reserve_huge_os_pages_interleave(pages, 0, pages*500); + } + } + if (mi_option_is_enabled(mi_option_reserve_os_memory)) { + long ksize = mi_option_get(mi_option_reserve_os_memory); + if (ksize > 0) { + mi_reserve_os_memory((size_t)ksize*MI_KiB, true /* commit? */, true /* allow large pages? */); + } + } +} + +// Called when the process is done (through `at_exit`) +static void mi_cdecl mi_process_done(void) { + // only shutdown if we were initialized + if (!_mi_process_is_initialized) return; + // ensure we are called once + static bool process_done = false; + if (process_done) return; + process_done = true; + + // release any thread specific resources and ensure _mi_thread_done is called on all but the main thread + _mi_prim_thread_done_auto_done(); + + #ifndef MI_SKIP_COLLECT_ON_EXIT + #if (MI_DEBUG || !defined(MI_SHARED_LIB)) + // free all memory if possible on process exit. This is not needed for a stand-alone process + // but should be done if mimalloc is statically linked into another shared library which + // is repeatedly loaded/unloaded, see issue #281. + mi_collect(true /* force */ ); + #endif + #endif + + // Forcefully release all retained memory; this can be dangerous in general if overriding regular malloc/free + // since after process_done there might still be other code running that calls `free` (like at_exit routines, + // or C-runtime termination code. 
+ if (mi_option_is_enabled(mi_option_destroy_on_exit)) { + mi_collect(true /* force */); + _mi_heap_unsafe_destroy_all(); // forcefully release all memory held by all heaps (of this thread only!) + _mi_arena_unsafe_destroy_all(& _mi_heap_main_get()->tld->stats); + } + + if (mi_option_is_enabled(mi_option_show_stats) || mi_option_is_enabled(mi_option_verbose)) { + mi_stats_print(NULL); + } + mi_allocator_done(); + _mi_verbose_message("process done: 0x%zx\n", _mi_heap_main.thread_id); + os_preloading = true; // don't call the C runtime anymore +} + + + +#if defined(_WIN32) && defined(MI_SHARED_LIB) + // Windows DLL: easy to hook into process_init and thread_done + __declspec(dllexport) BOOL WINAPI DllMain(HINSTANCE inst, DWORD reason, LPVOID reserved) { + MI_UNUSED(reserved); + MI_UNUSED(inst); + if (reason==DLL_PROCESS_ATTACH) { + mi_process_load(); + } + else if (reason==DLL_PROCESS_DETACH) { + mi_process_done(); + } + else if (reason==DLL_THREAD_DETACH) { + if (!mi_is_redirected()) { + mi_thread_done(); + } + } + return TRUE; + } + +#elif defined(_MSC_VER) + // MSVC: use data section magic for static libraries + // See + static int _mi_process_init(void) { + mi_process_load(); + return 0; + } + typedef int(*_mi_crt_callback_t)(void); + #if defined(_M_X64) || defined(_M_ARM64) + __pragma(comment(linker, "/include:" "_mi_msvc_initu")) + #pragma section(".CRT$XIU", long, read) + #else + __pragma(comment(linker, "/include:" "__mi_msvc_initu")) + #endif + #pragma data_seg(".CRT$XIU") + mi_decl_externc _mi_crt_callback_t _mi_msvc_initu[] = { &_mi_process_init }; + #pragma data_seg() + +#elif defined(__cplusplus) + // C++: use static initialization to detect process start + static bool _mi_process_init(void) { + mi_process_load(); + return (_mi_heap_main.thread_id != 0); + } + static bool mi_initialized = _mi_process_init(); + +#elif defined(__GNUC__) || defined(__clang__) + // GCC,Clang: use the constructor attribute + static void __attribute__((constructor)) 
_mi_process_init(void) { + mi_process_load(); + } + +#else +#pragma message("define a way to call mi_process_load on your platform") +#endif diff --git a/yass/third_party/mimalloc/src/libc.c b/yass/third_party/mimalloc/src/libc.c new file mode 100644 index 0000000000..dd6b400737 --- /dev/null +++ b/yass/third_party/mimalloc/src/libc.c @@ -0,0 +1,273 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2018-2023, Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. +-----------------------------------------------------------------------------*/ + +// -------------------------------------------------------- +// This module defines various std libc functions to reduce +// the dependency on libc, and also prevent errors caused +// by some libc implementations when called before `main` +// executes (due to malloc redirection) +// -------------------------------------------------------- + +#include "mimalloc.h" +#include "mimalloc/internal.h" +#include "mimalloc/prim.h" // mi_prim_getenv + +char _mi_toupper(char c) { + if (c >= 'a' && c <= 'z') return (c - 'a' + 'A'); + else return c; +} + +int _mi_strnicmp(const char* s, const char* t, size_t n) { + if (n == 0) return 0; + for (; *s != 0 && *t != 0 && n > 0; s++, t++, n--) { + if (_mi_toupper(*s) != _mi_toupper(*t)) break; + } + return (n == 0 ? 
0 : *s - *t); +} + +void _mi_strlcpy(char* dest, const char* src, size_t dest_size) { + if (dest==NULL || src==NULL || dest_size == 0) return; + // copy until end of src, or when dest is (almost) full + while (*src != 0 && dest_size > 1) { + *dest++ = *src++; + dest_size--; + } + // always zero terminate + *dest = 0; +} + +void _mi_strlcat(char* dest, const char* src, size_t dest_size) { + if (dest==NULL || src==NULL || dest_size == 0) return; + // find end of string in the dest buffer + while (*dest != 0 && dest_size > 1) { + dest++; + dest_size--; + } + // and catenate + _mi_strlcpy(dest, src, dest_size); +} + +size_t _mi_strlen(const char* s) { + if (s==NULL) return 0; + size_t len = 0; + while(s[len] != 0) { len++; } + return len; +} + +size_t _mi_strnlen(const char* s, size_t max_len) { + if (s==NULL) return 0; + size_t len = 0; + while(s[len] != 0 && len < max_len) { len++; } + return len; +} + +#ifdef MI_NO_GETENV +bool _mi_getenv(const char* name, char* result, size_t result_size) { + MI_UNUSED(name); + MI_UNUSED(result); + MI_UNUSED(result_size); + return false; +} +#else +bool _mi_getenv(const char* name, char* result, size_t result_size) { + if (name==NULL || result == NULL || result_size < 64) return false; + return _mi_prim_getenv(name,result,result_size); +} +#endif + +// -------------------------------------------------------- +// Define our own limited `_mi_vsnprintf` and `_mi_snprintf` +// This is mostly to avoid calling these when libc is not yet +// initialized (and to reduce dependencies) +// +// format: d i, p x u, s +// prec: z l ll L +// width: 10 +// align-left: - +// fill: 0 +// plus: + +// -------------------------------------------------------- + +static void mi_outc(char c, char** out, char* end) { + char* p = *out; + if (p >= end) return; + *p = c; + *out = p + 1; +} + +static void mi_outs(const char* s, char** out, char* end) { + if (s == NULL) return; + char* p = *out; + while (*s != 0 && p < end) { + *p++ = *s++; + } + *out = p; +} + 
// --------------------------------------------------------------------------
// Allocation-free printf-style formatting.
// All helpers write through a moving cursor `*out` bounded by `end` and
// silently truncate at `end`; `mi_outc`/`mi_outs` are defined earlier in
// this file.
// --------------------------------------------------------------------------

// Append `len` copies of `fill`, stopping at the buffer end.
static void mi_out_fill(char fill, size_t len, char** out, char* end) {
  char* p = *out;
  for (size_t i = 0; i < len && p < end; i++) {
    *p++ = fill;
  }
  *out = p;
}

// Shift the `len` characters at `start` right by `extra` positions and pad
// the vacated prefix with `fill` (used to right-align a padded field).
// Does nothing when the shifted field would not fit before `end`.
static void mi_out_alignright(char fill, char* start, size_t len, size_t extra, char* end) {
  if (len == 0 || extra == 0) return;
  if (start + len + extra >= end) return;
  // move `len` characters to the right (in reverse since it can overlap)
  for (size_t i = 1; i <= len; i++) {
    start[len + extra - i] = start[len - i];
  }
  // and fill the start
  for (size_t i = 0; i < extra; i++) {
    start[i] = fill;
  }
}

// Emit `x` in the given base (2..16), optionally preceded by a one-character
// `prefix` (a sign). Digits are produced least-significant first and the
// whole run (prefix included) is reversed in place afterwards.
static void mi_out_num(uintptr_t x, size_t base, char prefix, char** out, char* end)
{
  if (x == 0 || base == 0 || base > 16) {
    if (prefix != 0) { mi_outc(prefix, out, end); }
    mi_outc('0',out,end);
  }
  else {
    // output digits in reverse
    char* start = *out;
    while (x > 0) {
      char digit = (char)(x % base);
      mi_outc((digit <= 9 ? '0' + digit : 'A' + digit - 10),out,end);
      x = x / base;
    }
    if (prefix != 0) {
      mi_outc(prefix, out, end);
    }
    size_t len = *out - start;
    // and reverse in-place
    for (size_t i = 0; i < (len / 2); i++) {
      char c = start[len - i - 1];
      start[len - i - 1] = start[i];
      start[i] = c;
    }
  }
}

// Fetch the next format character; breaks out of the enclosing loop on NUL.
#define MI_NEXTC()  c = *in; if (c==0) break; in++;

// Restricted vsnprintf: supports %s, %p, %x, %u, %i/%d with the l/ll/z/t/L
// length modifiers, '+'/' ' sign flags, '-' left-align, '0' fill, and a
// numeric field width. Always NUL-terminates `buf` (when bufsize > 0).
// NOTE(review): plain `%u`/`%x` (no length modifier) reads an `unsigned long`
// argument, not `unsigned int` — internal callers must pass long-sized values.
void _mi_vsnprintf(char* buf, size_t bufsize, const char* fmt, va_list args) {
  if (buf == NULL || bufsize == 0 || fmt == NULL) return;
  buf[bufsize - 1] = 0;
  char* const end = buf + (bufsize - 1);
  const char* in = fmt;
  char* out = buf;
  while (true) {
    if (out >= end) break;
    char c;
    MI_NEXTC();
    if (c != '%') {
      if ((c >= ' ' && c <= '~') || c=='\n' || c=='\r' || c=='\t') { // output visible ascii or standard control only
        mi_outc(c, &out, end);
      }
    }
    else {
      MI_NEXTC();
      char  fill = ' ';
      size_t width = 0;
      char  numtype = 'd';
      char  numplus = 0;
      bool  alignright = true;
      if (c == '+' || c == ' ') { numplus = c; MI_NEXTC(); }
      if (c == '-') { alignright = false; MI_NEXTC(); }
      if (c == '0') { fill = '0'; MI_NEXTC(); }
      if (c >= '1' && c <= '9') {
        width = (c - '0'); MI_NEXTC();
        while (c >= '0' && c <= '9') {
          width = (10 * width) + (c - '0'); MI_NEXTC();
        }
        if (c == 0) break;  // extra check due to while
      }
      if (c == 'z' || c == 't' || c == 'L') { numtype = c; MI_NEXTC(); }
      else if (c == 'l') {
        numtype = c; MI_NEXTC();
        if (c == 'l') { numtype = 'L'; MI_NEXTC(); }
      }

      char* start = out;
      if (c == 's') {
        // string
        const char* s = va_arg(args, const char*);
        mi_outs(s, &out, end);
      }
      else if (c == 'p' || c == 'x' || c == 'u') {
        // unsigned
        uintptr_t x = 0;
        if (c == 'x' || c == 'u') {
          if (numtype == 'z')      x = va_arg(args, size_t);
          else if (numtype == 't') x = va_arg(args, uintptr_t); // unsigned ptrdiff_t
          else if (numtype == 'L') x = (uintptr_t)va_arg(args, unsigned long long);
          else                     x = va_arg(args, unsigned long);
        }
        else if (c == 'p') {
          x = va_arg(args, uintptr_t);
          mi_outs("0x", &out, end);
          start = out;
          width = (width >= 2 ? width - 2 : 0);  // the "0x" already used 2 columns
        }
        if (width == 0 && (c == 'x' || c == 'p')) {
          // default width for %p: enough hex digits for the pointer magnitude
          if (c == 'p') { width = 2 * (x <= UINT32_MAX ? 4 : ((x >> 16) <= UINT32_MAX ? 6 : sizeof(void*))); }
          if (width == 0) { width = 2; }
          fill = '0';
        }
        mi_out_num(x, (c == 'x' || c == 'p' ? 16 : 10), numplus, &out, end);
      }
      else if (c == 'i' || c == 'd') {
        // signed
        intptr_t x = 0;
        if (numtype == 'z')      x = va_arg(args, intptr_t );
        else if (numtype == 't') x = va_arg(args, ptrdiff_t);
        else if (numtype == 'L') x = (intptr_t)va_arg(args, long long);
        else                     x = va_arg(args, long);
        char pre = 0;
        if (x < 0) {
          pre = '-';
          // INTPTR_MIN itself is left negative: the cast to uintptr_t below
          // still yields its correct magnitude
          if (x > INTPTR_MIN) { x = -x; }
        }
        else if (numplus != 0) {
          pre = numplus;
        }
        mi_out_num((uintptr_t)x, 10, pre, &out, end);
      }
      else if (c >= ' ' && c <= '~') {
        // unknown format: echo it verbatim
        mi_outc('%', &out, end);
        mi_outc(c, &out, end);
      }

      // fill & align the just-written field
      mi_assert_internal(out <= end);
      mi_assert_internal(out >= start);
      const size_t len = out - start;
      if (len < width) {
        mi_out_fill(fill, width - len, &out, end);
        if (alignright && out <= end) {
          mi_out_alignright(fill, start, len, width - len, end);
        }
      }
    }
  }
  mi_assert_internal(out <= end);
  *out = 0;
}

// printf-style variant of `_mi_vsnprintf`.
void _mi_snprintf(char* buf, size_t buflen, const char* fmt, ...) {
  va_list args;
  va_start(args, fmt);
  _mi_vsnprintf(buf, buflen, fmt, args);
  va_end(args);
}
diff --git a/yass/third_party/mimalloc/src/options.c b/yass/third_party/mimalloc/src/options.c
new file mode 100644
index 0000000000..a62727dd69
--- /dev/null
+++ b/yass/third_party/mimalloc/src/options.c
@@ -0,0 +1,526 @@
/* ----------------------------------------------------------------------------
Copyright (c) 2018-2021, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
+-----------------------------------------------------------------------------*/ +#include "mimalloc.h" +#include "mimalloc/internal.h" +#include "mimalloc/atomic.h" +#include "mimalloc/prim.h" // mi_prim_out_stderr + +#include // stdin/stdout +#include // abort + + + +static long mi_max_error_count = 16; // stop outputting errors after this (use < 0 for no limit) +static long mi_max_warning_count = 16; // stop outputting warnings after this (use < 0 for no limit) + +static void mi_add_stderr_output(void); + +int mi_version(void) mi_attr_noexcept { + return MI_MALLOC_VERSION; +} + + +// -------------------------------------------------------- +// Options +// These can be accessed by multiple threads and may be +// concurrently initialized, but an initializing data race +// is ok since they resolve to the same value. +// -------------------------------------------------------- +typedef enum mi_init_e { + UNINIT, // not yet initialized + DEFAULTED, // not found in the environment, use default value + INITIALIZED // found in environment or set explicitly +} mi_init_t; + +typedef struct mi_option_desc_s { + long value; // the value + mi_init_t init; // is it initialized yet? (from the environment) + mi_option_t option; // for debugging: the option index should match the option + const char* name; // option name without `mimalloc_` prefix + const char* legacy_name; // potential legacy option name +} mi_option_desc_t; + +#define MI_OPTION(opt) mi_option_##opt, #opt, NULL +#define MI_OPTION_LEGACY(opt,legacy) mi_option_##opt, #opt, #legacy + +static mi_option_desc_t options[_mi_option_last] = +{ + // stable options + #if MI_DEBUG || defined(MI_SHOW_ERRORS) + { 1, UNINIT, MI_OPTION(show_errors) }, + #else + { 0, UNINIT, MI_OPTION(show_errors) }, + #endif + { 0, UNINIT, MI_OPTION(show_stats) }, + { 0, UNINIT, MI_OPTION(verbose) }, + + // the following options are experimental and not all combinations make sense. 
+ { 1, UNINIT, MI_OPTION(eager_commit) }, // commit per segment directly (4MiB) (but see also `eager_commit_delay`) + { 2, UNINIT, MI_OPTION_LEGACY(arena_eager_commit,eager_region_commit) }, // eager commit arena's? 2 is used to enable this only on an OS that has overcommit (i.e. linux) + { 1, UNINIT, MI_OPTION_LEGACY(purge_decommits,reset_decommits) }, // purge decommits memory (instead of reset) (note: on linux this uses MADV_DONTNEED for decommit) + { 0, UNINIT, MI_OPTION_LEGACY(allow_large_os_pages,large_os_pages) }, // use large OS pages, use only with eager commit to prevent fragmentation of VMA's + { 0, UNINIT, MI_OPTION(reserve_huge_os_pages) }, // per 1GiB huge pages + {-1, UNINIT, MI_OPTION(reserve_huge_os_pages_at) }, // reserve huge pages at node N + { 0, UNINIT, MI_OPTION(reserve_os_memory) }, // reserve N KiB OS memory in advance (use `option_get_size`) + { 0, UNINIT, MI_OPTION(deprecated_segment_cache) }, // cache N segments per thread + { 0, UNINIT, MI_OPTION(deprecated_page_reset) }, // reset page memory on free + { 0, UNINIT, MI_OPTION_LEGACY(abandoned_page_purge,abandoned_page_reset) }, // reset free page memory when a thread terminates + { 0, UNINIT, MI_OPTION(deprecated_segment_reset) }, // reset segment memory on free (needs eager commit) +#if defined(__NetBSD__) + { 0, UNINIT, MI_OPTION(eager_commit_delay) }, // the first N segments per thread are not eagerly committed +#else + { 1, UNINIT, MI_OPTION(eager_commit_delay) }, // the first N segments per thread are not eagerly committed (but per page in the segment on demand) +#endif + { 10, UNINIT, MI_OPTION_LEGACY(purge_delay,reset_delay) }, // purge delay in milli-seconds + { 0, UNINIT, MI_OPTION(use_numa_nodes) }, // 0 = use available numa nodes, otherwise use at most N nodes. 
+ { 0, UNINIT, MI_OPTION_LEGACY(disallow_os_alloc,limit_os_alloc) }, // 1 = do not use OS memory for allocation (but only reserved arenas) + { 100, UNINIT, MI_OPTION(os_tag) }, // only apple specific for now but might serve more or less related purpose + { 32, UNINIT, MI_OPTION(max_errors) }, // maximum errors that are output + { 32, UNINIT, MI_OPTION(max_warnings) }, // maximum warnings that are output + { 10, UNINIT, MI_OPTION(max_segment_reclaim)}, // max. percentage of the abandoned segments to be reclaimed per try. + { 0, UNINIT, MI_OPTION(destroy_on_exit)}, // release all OS memory on process exit; careful with dangling pointer or after-exit frees! + #if (MI_INTPTR_SIZE>4) + { 1024L*1024L, UNINIT, MI_OPTION(arena_reserve) }, // reserve memory N KiB at a time (=1GiB) (use `option_get_size`) + #else + { 128L*1024L, UNINIT, MI_OPTION(arena_reserve) }, // =128MiB on 32-bit + #endif + { 10, UNINIT, MI_OPTION(arena_purge_mult) }, // purge delay multiplier for arena's + { 1, UNINIT, MI_OPTION_LEGACY(purge_extend_delay, decommit_extend_delay) }, + { 1, UNINIT, MI_OPTION(abandoned_reclaim_on_free) },// reclaim an abandoned segment on a free + { 0, UNINIT, MI_OPTION(disallow_arena_alloc) }, // 1 = do not use arena's for allocation (except if using specific arena id's) + { 400, UNINIT, MI_OPTION(retry_on_oom) }, // windows only: retry on out-of-memory for N milli seconds (=400), set to 0 to disable retries. +}; + +static void mi_option_init(mi_option_desc_t* desc); + +static bool mi_option_has_size_in_kib(mi_option_t option) { + return (option == mi_option_reserve_os_memory || option == mi_option_arena_reserve); +} + +void _mi_options_init(void) { + // called on process load; should not be called before the CRT is initialized! + // (e.g. 
do not call this from process_init as that may run before CRT initialization) + mi_add_stderr_output(); // now it safe to use stderr for output + for(int i = 0; i < _mi_option_last; i++ ) { + mi_option_t option = (mi_option_t)i; + long l = mi_option_get(option); MI_UNUSED(l); // initialize + // if (option != mi_option_verbose) + { + mi_option_desc_t* desc = &options[option]; + _mi_verbose_message("option '%s': %ld %s\n", desc->name, desc->value, (mi_option_has_size_in_kib(option) ? "KiB" : "")); + } + } + mi_max_error_count = mi_option_get(mi_option_max_errors); + mi_max_warning_count = mi_option_get(mi_option_max_warnings); +} + +mi_decl_nodiscard long mi_option_get(mi_option_t option) { + mi_assert(option >= 0 && option < _mi_option_last); + if (option < 0 || option >= _mi_option_last) return 0; + mi_option_desc_t* desc = &options[option]; + mi_assert(desc->option == option); // index should match the option + if mi_unlikely(desc->init == UNINIT) { + mi_option_init(desc); + } + return desc->value; +} + +mi_decl_nodiscard long mi_option_get_clamp(mi_option_t option, long min, long max) { + long x = mi_option_get(option); + return (x < min ? min : (x > max ? max : x)); +} + +mi_decl_nodiscard size_t mi_option_get_size(mi_option_t option) { + mi_assert_internal(mi_option_has_size_in_kib(option)); + const long x = mi_option_get(option); + size_t size = (x < 0 ? 
0 : (size_t)x); + if (mi_option_has_size_in_kib(option)) { + size *= MI_KiB; + } + return size; +} + +void mi_option_set(mi_option_t option, long value) { + mi_assert(option >= 0 && option < _mi_option_last); + if (option < 0 || option >= _mi_option_last) return; + mi_option_desc_t* desc = &options[option]; + mi_assert(desc->option == option); // index should match the option + desc->value = value; + desc->init = INITIALIZED; +} + +void mi_option_set_default(mi_option_t option, long value) { + mi_assert(option >= 0 && option < _mi_option_last); + if (option < 0 || option >= _mi_option_last) return; + mi_option_desc_t* desc = &options[option]; + if (desc->init != INITIALIZED) { + desc->value = value; + } +} + +mi_decl_nodiscard bool mi_option_is_enabled(mi_option_t option) { + return (mi_option_get(option) != 0); +} + +void mi_option_set_enabled(mi_option_t option, bool enable) { + mi_option_set(option, (enable ? 1 : 0)); +} + +void mi_option_set_enabled_default(mi_option_t option, bool enable) { + mi_option_set_default(option, (enable ? 1 : 0)); +} + +void mi_option_enable(mi_option_t option) { + mi_option_set_enabled(option,true); +} + +void mi_option_disable(mi_option_t option) { + mi_option_set_enabled(option,false); +} + +static void mi_cdecl mi_out_stderr(const char* msg, void* arg) { + MI_UNUSED(arg); + if (msg != NULL && msg[0] != 0) { + _mi_prim_out_stderr(msg); + } +} + +// Since an output function can be registered earliest in the `main` +// function we also buffer output that happens earlier. When +// an output function is registered it is called immediately with +// the output up to that point. 
// Delayed-output buffer: collects messages emitted before any output
// function is registered. Space in `out_buf` is claimed lock-free by
// atomically advancing `out_len`.
#ifndef MI_MAX_DELAY_OUTPUT
#define MI_MAX_DELAY_OUTPUT ((size_t)(32*1024))
#endif
static char out_buf[MI_MAX_DELAY_OUTPUT+1];  // +1 for the terminating NUL
static _Atomic(size_t) out_len;

// Append `msg` to the delayed buffer; silently truncates once full.
static void mi_cdecl mi_out_buf(const char* msg, void* arg) {
  MI_UNUSED(arg);
  if (msg==NULL) return;
  if (mi_atomic_load_relaxed(&out_len)>=MI_MAX_DELAY_OUTPUT) return;
  size_t n = _mi_strlen(msg);
  if (n==0) return;
  // claim space
  size_t start = mi_atomic_add_acq_rel(&out_len, n);
  if (start >= MI_MAX_DELAY_OUTPUT) return;
  // check bound
  if (start+n >= MI_MAX_DELAY_OUTPUT) {
    n = MI_MAX_DELAY_OUTPUT-start-1;  // truncate to the remaining space
  }
  _mi_memcpy(&out_buf[start], msg, n);
}

// Flush the delayed buffer to `out`. When `no_more_buf` the length is pushed
// past the maximum so no further messages can be appended afterwards.
static void mi_out_buf_flush(mi_output_fun* out, bool no_more_buf, void* arg) {
  if (out==NULL) return;
  // claim (if `no_more_buf == true`, no more output will be added after this point)
  size_t count = mi_atomic_add_acq_rel(&out_len, (no_more_buf ? MI_MAX_DELAY_OUTPUT : 1));
  // and output the current contents
  if (count>MI_MAX_DELAY_OUTPUT) count = MI_MAX_DELAY_OUTPUT;
  out_buf[count] = 0;               // temporarily NUL-terminate for the callback
  out(out_buf,arg);
  if (!no_more_buf) {
    out_buf[count] = '\n';          // if continue with the buffer, insert a newline
  }
}


// Once this module is loaded, switch to this routine
// which outputs to stderr and the delayed output buffer.
static void mi_cdecl mi_out_buf_stderr(const char* msg, void* arg) {
  mi_out_stderr(msg,arg);
  mi_out_buf(msg,arg);
}



// --------------------------------------------------------
// Default output handler
// --------------------------------------------------------

// Should be atomic but gives errors on many platforms as generally we cannot cast a function pointer to a uintptr_t.
// For now, don't register output from multiple threads.
static mi_output_fun* volatile mi_out_default; // = NULL
static _Atomic(void*) mi_out_arg;              // = NULL

// Return the registered output function (and its user argument via `parg`),
// falling back to the delayed buffer while none is registered.
static mi_output_fun* mi_out_get_default(void** parg) {
  if (parg != NULL) { *parg = mi_atomic_load_ptr_acquire(void,&mi_out_arg); }
  mi_output_fun* out = mi_out_default;
  return (out == NULL ? &mi_out_buf : out);
}

// Register a user output function; passing NULL reverts to plain stderr.
// Any output buffered so far is replayed to the new function.
void mi_register_output(mi_output_fun* out, void* arg) mi_attr_noexcept {
  mi_out_default = (out == NULL ? &mi_out_stderr : out); // stop using the delayed output buffer
  mi_atomic_store_ptr_release(void,&mi_out_arg, arg);
  if (out!=NULL) mi_out_buf_flush(out,true,arg);         // output all the delayed output now
}

// add stderr to the delayed output after the module is loaded
static void mi_add_stderr_output(void) {
  mi_assert_internal(mi_out_default == NULL);
  mi_out_buf_flush(&mi_out_stderr, false, NULL); // flush current contents to stderr
  mi_out_default = &mi_out_buf_stderr;           // and add stderr to the delayed output
}

// --------------------------------------------------------
// Messages, all end up calling `_mi_fputs`.
// --------------------------------------------------------
static _Atomic(size_t) error_count;   // = 0;  // when >= max_error_count stop emitting errors
static _Atomic(size_t) warning_count; // = 0;  // when >= max_warning_count stop emitting warnings

// When overriding malloc, we may recurse into mi_vfprintf if an allocation
// inside the C runtime causes another message.
// In some cases (like on macOS) the loader already allocates which
// calls into mimalloc; if we then access thread locals (like `recurse`)
// this may crash as the access may call _tlv_bootstrap that tries to
// (recursively) invoke malloc again to allocate space for the thread local
// variables on demand. This is why we use a _mi_preloading test on such
// platforms. However, C code generator may move the initial thread local address
// load before the `if` and we therefore split it out in a separate function.
+static mi_decl_thread bool recurse = false; + +static mi_decl_noinline bool mi_recurse_enter_prim(void) { + if (recurse) return false; + recurse = true; + return true; +} + +static mi_decl_noinline void mi_recurse_exit_prim(void) { + recurse = false; +} + +static bool mi_recurse_enter(void) { + #if defined(__APPLE__) || defined(MI_TLS_RECURSE_GUARD) + if (_mi_preloading()) return false; + #endif + return mi_recurse_enter_prim(); +} + +static void mi_recurse_exit(void) { + #if defined(__APPLE__) || defined(MI_TLS_RECURSE_GUARD) + if (_mi_preloading()) return; + #endif + mi_recurse_exit_prim(); +} + +void _mi_fputs(mi_output_fun* out, void* arg, const char* prefix, const char* message) { + if (out==NULL || (void*)out==(void*)stdout || (void*)out==(void*)stderr) { // TODO: use mi_out_stderr for stderr? + if (!mi_recurse_enter()) return; + out = mi_out_get_default(&arg); + if (prefix != NULL) out(prefix, arg); + out(message, arg); + mi_recurse_exit(); + } + else { + if (prefix != NULL) out(prefix, arg); + out(message, arg); + } +} + +// Define our own limited `fprintf` that avoids memory allocation. +// We do this using `_mi_vsnprintf` with a limited buffer. +static void mi_vfprintf( mi_output_fun* out, void* arg, const char* prefix, const char* fmt, va_list args ) { + char buf[512]; + if (fmt==NULL) return; + if (!mi_recurse_enter()) return; + _mi_vsnprintf(buf, sizeof(buf)-1, fmt, args); + mi_recurse_exit(); + _mi_fputs(out,arg,prefix,buf); +} + +void _mi_fprintf( mi_output_fun* out, void* arg, const char* fmt, ... 
) { + va_list args; + va_start(args,fmt); + mi_vfprintf(out,arg,NULL,fmt,args); + va_end(args); +} + +static void mi_vfprintf_thread(mi_output_fun* out, void* arg, const char* prefix, const char* fmt, va_list args) { + if (prefix != NULL && _mi_strnlen(prefix,33) <= 32 && !_mi_is_main_thread()) { + char tprefix[64]; + _mi_snprintf(tprefix, sizeof(tprefix), "%sthread 0x%tx: ", prefix, (uintptr_t)_mi_thread_id()); + mi_vfprintf(out, arg, tprefix, fmt, args); + } + else { + mi_vfprintf(out, arg, prefix, fmt, args); + } +} + +void _mi_trace_message(const char* fmt, ...) { + if (mi_option_get(mi_option_verbose) <= 1) return; // only with verbose level 2 or higher + va_list args; + va_start(args, fmt); + mi_vfprintf_thread(NULL, NULL, "mimalloc: ", fmt, args); + va_end(args); +} + +void _mi_verbose_message(const char* fmt, ...) { + if (!mi_option_is_enabled(mi_option_verbose)) return; + va_list args; + va_start(args,fmt); + mi_vfprintf(NULL, NULL, "mimalloc: ", fmt, args); + va_end(args); +} + +static void mi_show_error_message(const char* fmt, va_list args) { + if (!mi_option_is_enabled(mi_option_verbose)) { + if (!mi_option_is_enabled(mi_option_show_errors)) return; + if (mi_max_error_count >= 0 && (long)mi_atomic_increment_acq_rel(&error_count) > mi_max_error_count) return; + } + mi_vfprintf_thread(NULL, NULL, "mimalloc: error: ", fmt, args); +} + +void _mi_warning_message(const char* fmt, ...) 
{ + if (!mi_option_is_enabled(mi_option_verbose)) { + if (!mi_option_is_enabled(mi_option_show_errors)) return; + if (mi_max_warning_count >= 0 && (long)mi_atomic_increment_acq_rel(&warning_count) > mi_max_warning_count) return; + } + va_list args; + va_start(args,fmt); + mi_vfprintf_thread(NULL, NULL, "mimalloc: warning: ", fmt, args); + va_end(args); +} + + +#if MI_DEBUG +void _mi_assert_fail(const char* assertion, const char* fname, unsigned line, const char* func ) { + _mi_fprintf(NULL, NULL, "mimalloc: assertion failed: at \"%s\":%u, %s\n assertion: \"%s\"\n", fname, line, (func==NULL?"":func), assertion); + abort(); +} +#endif + +// -------------------------------------------------------- +// Errors +// -------------------------------------------------------- + +static mi_error_fun* volatile mi_error_handler; // = NULL +static _Atomic(void*) mi_error_arg; // = NULL + +static void mi_error_default(int err) { + MI_UNUSED(err); +#if (MI_DEBUG>0) + if (err==EFAULT) { + #ifdef _MSC_VER + __debugbreak(); + #endif + abort(); + } +#endif +#if (MI_SECURE>0) + if (err==EFAULT) { // abort on serious errors in secure mode (corrupted meta-data) + abort(); + } +#endif +#if defined(MI_XMALLOC) + if (err==ENOMEM || err==EOVERFLOW) { // abort on memory allocation fails in xmalloc mode + abort(); + } +#endif +} + +void mi_register_error(mi_error_fun* fun, void* arg) { + mi_error_handler = fun; // can be NULL + mi_atomic_store_ptr_release(void,&mi_error_arg, arg); +} + +void _mi_error_message(int err, const char* fmt, ...) 
{ + // show detailed error message + va_list args; + va_start(args, fmt); + mi_show_error_message(fmt, args); + va_end(args); + // and call the error handler which may abort (or return normally) + if (mi_error_handler != NULL) { + mi_error_handler(err, mi_atomic_load_ptr_acquire(void,&mi_error_arg)); + } + else { + mi_error_default(err); + } +} + +// -------------------------------------------------------- +// Initialize options by checking the environment +// -------------------------------------------------------- + +// TODO: implement ourselves to reduce dependencies on the C runtime +#include // strtol +#include // strstr + + +static void mi_option_init(mi_option_desc_t* desc) { + // Read option value from the environment + char s[64 + 1]; + char buf[64+1]; + _mi_strlcpy(buf, "mimalloc_", sizeof(buf)); + _mi_strlcat(buf, desc->name, sizeof(buf)); + bool found = _mi_getenv(buf, s, sizeof(s)); + if (!found && desc->legacy_name != NULL) { + _mi_strlcpy(buf, "mimalloc_", sizeof(buf)); + _mi_strlcat(buf, desc->legacy_name, sizeof(buf)); + found = _mi_getenv(buf, s, sizeof(s)); + if (found) { + _mi_warning_message("environment option \"mimalloc_%s\" is deprecated -- use \"mimalloc_%s\" instead.\n", desc->legacy_name, desc->name); + } + } + + if (found) { + size_t len = _mi_strnlen(s, sizeof(buf) - 1); + for (size_t i = 0; i < len; i++) { + buf[i] = _mi_toupper(s[i]); + } + buf[len] = 0; + if (buf[0] == 0 || strstr("1;TRUE;YES;ON", buf) != NULL) { + desc->value = 1; + desc->init = INITIALIZED; + } + else if (strstr("0;FALSE;NO;OFF", buf) != NULL) { + desc->value = 0; + desc->init = INITIALIZED; + } + else { + char* end = buf; + long value = strtol(buf, &end, 10); + if (mi_option_has_size_in_kib(desc->option)) { + // this option is interpreted in KiB to prevent overflow of `long` for large allocations + // (long is 32-bit on 64-bit windows, which allows for 4TiB max.) + size_t size = (value < 0 ? 
0 : (size_t)value); + bool overflow = false; + if (*end == 'K') { end++; } + else if (*end == 'M') { overflow = mi_mul_overflow(size,MI_KiB,&size); end++; } + else if (*end == 'G') { overflow = mi_mul_overflow(size,MI_MiB,&size); end++; } + else if (*end == 'T') { overflow = mi_mul_overflow(size,MI_GiB,&size); end++; } + else { size = (size + MI_KiB - 1) / MI_KiB; } + if (end[0] == 'I' && end[1] == 'B') { end += 2; } // KiB, MiB, GiB, TiB + else if (*end == 'B') { end++; } // Kb, Mb, Gb, Tb + if (overflow || size > MI_MAX_ALLOC_SIZE) { size = (MI_MAX_ALLOC_SIZE / MI_KiB); } + value = (size > LONG_MAX ? LONG_MAX : (long)size); + } + if (*end == 0) { + desc->value = value; + desc->init = INITIALIZED; + } + else { + // set `init` first to avoid recursion through _mi_warning_message on mimalloc_verbose. + desc->init = DEFAULTED; + if (desc->option == mi_option_verbose && desc->value == 0) { + // if the 'mimalloc_verbose' env var has a bogus value we'd never know + // (since the value defaults to 'off') so in that case briefly enable verbose + desc->value = 1; + _mi_warning_message("environment option mimalloc_%s has an invalid value.\n", desc->name); + desc->value = 0; + } + else { + _mi_warning_message("environment option mimalloc_%s has an invalid value.\n", desc->name); + } + } + } + mi_assert_internal(desc->init != UNINIT); + } + else if (!_mi_preloading()) { + desc->init = DEFAULTED; + } +} diff --git a/yass/third_party/mimalloc/src/os.c b/yass/third_party/mimalloc/src/os.c new file mode 100644 index 0000000000..ce104273bf --- /dev/null +++ b/yass/third_party/mimalloc/src/os.c @@ -0,0 +1,678 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2018-2023, Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. 
-----------------------------------------------------------------------------*/
#include "mimalloc.h"
#include "mimalloc/internal.h"
#include "mimalloc/atomic.h"
#include "mimalloc/prim.h"


/* -----------------------------------------------------------
  Initialization.
----------------------------------------------------------- */

// Defaults; overwritten by `_mi_prim_mem_init` with actual OS values.
static mi_os_mem_config_t mi_os_mem_config = {
  4096,   // page size
  0,      // large page size (usually 2MiB)
  4096,   // allocation granularity
  true,   // has overcommit?  (if true we use MAP_NORESERVE on mmap systems)
  false,  // can we partially free allocated blocks? (on mmap systems we can free anywhere in a mapped range, but on Windows we must free the entire span)
  true    // has virtual reserve? (if true we can reserve virtual address space without using commit or physical memory)
};

bool _mi_os_has_overcommit(void) {
  return mi_os_mem_config.has_overcommit;
}

bool _mi_os_has_virtual_reserve(void) {
  return mi_os_mem_config.has_virtual_reserve;
}


// OS (small) page size
size_t _mi_os_page_size(void) {
  return mi_os_mem_config.page_size;
}

// if large OS pages are supported (2 or 4MiB), then return the size, otherwise return the small page size (4KiB)
size_t _mi_os_large_page_size(void) {
  return (mi_os_mem_config.large_page_size != 0 ? mi_os_mem_config.large_page_size : _mi_os_page_size());
}

// True when large pages are enabled and both size and alignment are multiples
// of the large page size.
bool _mi_os_use_large_page(size_t size, size_t alignment) {
  // if we have access, check the size and alignment requirements
  if (mi_os_mem_config.large_page_size == 0 || !mi_option_is_enabled(mi_option_allow_large_os_pages)) return false;
  return ((size % mi_os_mem_config.large_page_size) == 0 && (alignment % mi_os_mem_config.large_page_size) == 0);
}

// round to a good OS allocation size (bounded by max 12.5% waste)
size_t _mi_os_good_alloc_size(size_t size) {
  size_t align_size;
  if (size < 512*MI_KiB) align_size = _mi_os_page_size();
  else if (size < 2*MI_MiB) align_size = 64*MI_KiB;
  else if (size < 8*MI_MiB) align_size = 256*MI_KiB;
  else if (size < 32*MI_MiB) align_size = 1*MI_MiB;
  else align_size = 4*MI_MiB;
  if mi_unlikely(size >= (SIZE_MAX - align_size)) return size; // possible overflow?
  return _mi_align_up(size, align_size);
}

void _mi_os_init(void) {
  _mi_prim_mem_init(&mi_os_mem_config);
}


/* -----------------------------------------------------------
  Util
-------------------------------------------------------------- */
bool _mi_os_decommit(void* addr, size_t size, mi_stats_t* stats);
bool _mi_os_commit(void* addr, size_t size, bool* is_zero, mi_stats_t* tld_stats);


/* -----------------------------------------------------------
  aligned hinting
-------------------------------------------------------------- */

// On 64-bit systems, we can do efficient aligned allocation by using
// the 2TiB to 30TiB area to allocate those.
#if (MI_INTPTR_SIZE >= 8)
static mi_decl_cache_align _Atomic(uintptr_t)aligned_base;

// Return a MI_SEGMENT_SIZE aligned address that is probably available.
// If this returns NULL, the OS will determine the address but on some OS's that may not be
// properly aligned which can be more costly as it needs to be adjusted afterwards.
// For a size > 1GiB this always returns NULL in order to guarantee good ASLR randomization;
// (otherwise an initial large allocation of say 2TiB has a 50% chance to include (known) addresses
// in the middle of the 2TiB - 6TiB address range (see issue #372))

#define MI_HINT_BASE ((uintptr_t)2 << 40)  // 2TiB start
#define MI_HINT_AREA ((uintptr_t)4 << 40)  // upto 6TiB   (since before win8 there is "only" 8TiB available to processes)
#define MI_HINT_MAX  ((uintptr_t)30 << 40) // wrap after 30TiB (area after 32TiB is used for huge OS pages)

void* _mi_os_get_aligned_hint(size_t try_alignment, size_t size)
{
  if (try_alignment <= 1 || try_alignment > MI_SEGMENT_SIZE) return NULL;
  size = _mi_align_up(size, MI_SEGMENT_SIZE);
  if (size > 1*MI_GiB) return NULL;  // guarantee the chance of fixed valid address is at most 1/(MI_HINT_AREA / 1<<30) = 1/4096.
  #if (MI_SECURE>0)
  size += MI_SEGMENT_SIZE;  // put in `MI_SEGMENT_SIZE` virtual gaps between hinted blocks; this splits VLA's but increases guarded areas.
  #endif

  // bump the shared cursor; wrap (and optionally randomize) when exhausted
  uintptr_t hint = mi_atomic_add_acq_rel(&aligned_base, size);
  if (hint == 0 || hint > MI_HINT_MAX) {  // wrap or initialize
    uintptr_t init = MI_HINT_BASE;
    #if (MI_SECURE>0 || MI_DEBUG==0)      // security: randomize start of aligned allocations unless in debug mode
    uintptr_t r = _mi_heap_random_next(mi_prim_get_default_heap());
    init = init + ((MI_SEGMENT_SIZE * ((r>>17) & 0xFFFFF)) % MI_HINT_AREA);  // (randomly 20 bits)*4MiB == 0 to 4TiB
    #endif
    uintptr_t expected = hint + size;
    mi_atomic_cas_strong_acq_rel(&aligned_base, &expected, init);
    hint = mi_atomic_add_acq_rel(&aligned_base, size); // this may still give 0 or > MI_HINT_MAX but that is ok, it is a hint after all
  }
  if (hint%try_alignment != 0) return NULL;
  return (void*)hint;
}
#else
void* _mi_os_get_aligned_hint(size_t try_alignment, size_t size) {
  MI_UNUSED(try_alignment); MI_UNUSED(size);
  return NULL;
}
#endif


/* -----------------------------------------------------------
  Free memory
-------------------------------------------------------------- */

static void mi_os_free_huge_os_pages(void* p, size_t size, mi_stats_t* stats);

// Release `size` bytes at `addr` back to the OS and update statistics.
static void mi_os_prim_free(void* addr, size_t size, bool still_committed, mi_stats_t* tld_stats) {
  MI_UNUSED(tld_stats);
  mi_stats_t* stats = &_mi_stats_main;
  mi_assert_internal((size % _mi_os_page_size()) == 0);
  if (addr == NULL || size == 0) return; // || _mi_os_is_huge_reserved(addr)
  int err = _mi_prim_free(addr, size);
  if (err != 0) {
    _mi_warning_message("unable to free OS memory (error: %d (0x%x), size: 0x%zx bytes, address: %p)\n", err, err, size, addr);
  }
  if (still_committed) { _mi_stat_decrease(&stats->committed, size); }
  _mi_stat_decrease(&stats->reserved, size);
}

// Free OS memory described by `memid`; uses the recorded base pointer when
// the returned pointer was adjusted for alignment.
void _mi_os_free_ex(void* addr, size_t size, bool still_committed, mi_memid_t memid, mi_stats_t* tld_stats) {
  if (mi_memkind_is_os(memid.memkind)) {
    size_t csize = _mi_os_good_alloc_size(size);
    void* base = addr;
    // different base? (due to alignment)
    if (memid.mem.os.base != NULL) {
      mi_assert(memid.mem.os.base <= addr);
      mi_assert((uint8_t*)memid.mem.os.base + memid.mem.os.alignment >= (uint8_t*)addr);
      base = memid.mem.os.base;
      csize += ((uint8_t*)addr - (uint8_t*)memid.mem.os.base);
    }
    // free it
    if (memid.memkind == MI_MEM_OS_HUGE) {
      mi_assert(memid.is_pinned);
      mi_os_free_huge_os_pages(base, csize, tld_stats);
    }
    else {
      mi_os_prim_free(base, csize, still_committed, tld_stats);
    }
  }
  else {
    // nothing to do
    mi_assert(memid.memkind < MI_MEM_OS);
  }
}

void _mi_os_free(void* p, size_t size, mi_memid_t memid, mi_stats_t* tld_stats) {
  _mi_os_free_ex(p, size, true, memid, tld_stats);
}


/* -----------------------------------------------------------
   Primitive allocation from the OS.
-------------------------------------------------------------- */

// Note: the `try_alignment` is just a hint and the returned pointer is not guaranteed to be aligned.
static void* mi_os_prim_alloc(size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero, mi_stats_t* tld_stats) {
  mi_assert_internal(size > 0 && (size % _mi_os_page_size()) == 0);
  mi_assert_internal(is_zero != NULL);
  mi_assert_internal(is_large != NULL);
  if (size == 0) return NULL;
  if (!commit) { allow_large = false; }
  if (try_alignment == 0) { try_alignment = 1; } // avoid 0 to ensure there will be no divide by zero when aligning
  *is_zero = false;
  void* p = NULL;
  int err = _mi_prim_alloc(size, try_alignment, commit, allow_large, is_large, is_zero, &p);
  if (err != 0) {
    _mi_warning_message("unable to allocate OS memory (error: %d (0x%x), size: 0x%zx bytes, align: 0x%zx, commit: %d, allow large: %d)\n", err, err, size, try_alignment, commit, allow_large);
  }

  MI_UNUSED(tld_stats);
  mi_stats_t* stats = &_mi_stats_main;
  mi_stat_counter_increase(stats->mmap_calls, 1);
  if (p != NULL) {
    _mi_stat_increase(&stats->reserved, size);
    if (commit) {
      _mi_stat_increase(&stats->committed, size);
      // seems needed for asan (or `mimalloc-test-api` fails)
      #ifdef MI_TRACK_ASAN
      if (*is_zero) { mi_track_mem_defined(p,size); }
      else { mi_track_mem_undefined(p,size); }
      #endif
    }
  }
  return p;
}


// Primitive aligned allocation from the OS.
// This function guarantees the allocated memory is aligned.
static void* mi_os_prim_alloc_aligned(size_t size, size_t alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero, void** base, mi_stats_t* stats) {
  mi_assert_internal(alignment >= _mi_os_page_size() && ((alignment & (alignment - 1)) == 0));
  mi_assert_internal(size > 0 && (size % _mi_os_page_size()) == 0);
  mi_assert_internal(is_large != NULL);
  mi_assert_internal(is_zero != NULL);
  mi_assert_internal(base != NULL);
  if (!commit) allow_large = false;
  if (!(alignment >= _mi_os_page_size() && ((alignment & (alignment - 1)) == 0))) return NULL;
  size = _mi_align_up(size, _mi_os_page_size());

  // try first with a hint (this will be aligned directly on Win 10+ or BSD)
  void* p = mi_os_prim_alloc(size, alignment, commit, allow_large, is_large, is_zero, stats);
  if (p == NULL) return NULL;

  // aligned already?
  if (((uintptr_t)p % alignment) == 0) {
    *base = p;
  }
  else {
    // if not aligned, free it, overallocate, and unmap around it
    _mi_warning_message("unable to allocate aligned OS memory directly, fall back to over-allocation (size: 0x%zx bytes, address: %p, alignment: 0x%zx, commit: %d)\n", size, p, alignment, commit);
    mi_os_prim_free(p, size, commit, stats);
    if (size >= (SIZE_MAX - alignment)) return NULL; // overflow
    const size_t over_size = size + alignment;

    if (!mi_os_mem_config.has_partial_free) {  // win32 virtualAlloc cannot free parts of an allocated block
      // over-allocate uncommitted (virtual) memory
      p = mi_os_prim_alloc(over_size, 1 /*alignment*/, false /* commit? */, false /* allow_large */, is_large, is_zero, stats);
      if (p == NULL) return NULL;

      // set p to the aligned part in the full region
      // note: this is dangerous on Windows as VirtualFree needs the actual base pointer
      // this is handled though by having the `base` field in the memid's
      *base = p; // remember the base
      p = mi_align_up_ptr(p, alignment);

      // explicitly commit only the aligned part
      if (commit) {
        _mi_os_commit(p, size, NULL, stats);
      }
    }
    else { // mmap can free inside an allocation
      // overallocate...
      p = mi_os_prim_alloc(over_size, 1, commit, false, is_large, is_zero, stats);
      if (p == NULL) return NULL;

      // and selectively unmap parts around the over-allocated area.
      void* aligned_p = mi_align_up_ptr(p, alignment);
      size_t pre_size = (uint8_t*)aligned_p - (uint8_t*)p;
      size_t mid_size = _mi_align_up(size, _mi_os_page_size());
      size_t post_size = over_size - pre_size - mid_size;
      mi_assert_internal(pre_size < over_size && post_size < over_size && mid_size >= size);
      if (pre_size > 0)  { mi_os_prim_free(p, pre_size, commit, stats); }
      if (post_size > 0) { mi_os_prim_free((uint8_t*)aligned_p + mid_size, post_size, commit, stats); }
      // we can return the aligned pointer on `mmap` systems
      p = aligned_p;
      *base = aligned_p; // since we freed the pre part, `*base == p`.
    }
  }

  mi_assert_internal(p == NULL || (p != NULL && *base != NULL && ((uintptr_t)p % alignment) == 0));
  return p;
}


/* -----------------------------------------------------------
  OS API: alloc and alloc_aligned
----------------------------------------------------------- */

// Allocate committed OS memory; records provenance in `memid`.
void* _mi_os_alloc(size_t size, mi_memid_t* memid, mi_stats_t* stats) {
  *memid = _mi_memid_none();
  if (size == 0) return NULL;
  size = _mi_os_good_alloc_size(size);
  bool os_is_large = false;
  bool os_is_zero  = false;
  void* p = mi_os_prim_alloc(size, 0, true, false, &os_is_large, &os_is_zero, stats);
  if (p != NULL) {
    *memid = _mi_memid_create_os(true, os_is_zero, os_is_large);
  }
  return p;
}

// Allocate aligned OS memory; the (possibly different) actual base pointer
// and alignment are recorded in `memid` so the memory can be freed correctly.
void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool allow_large, mi_memid_t* memid, mi_stats_t* stats)
{
  MI_UNUSED(&_mi_os_get_aligned_hint); // suppress unused warnings
  *memid = _mi_memid_none();
  if (size == 0) return NULL;
  size = _mi_os_good_alloc_size(size);
  alignment = _mi_align_up(alignment, _mi_os_page_size());

  bool os_is_large = false;
  bool os_is_zero  = false;
  void* os_base = NULL;
  void* p = mi_os_prim_alloc_aligned(size, alignment, commit, allow_large, &os_is_large, &os_is_zero, &os_base, stats );
  if (p != NULL) {
    *memid = _mi_memid_create_os(commit, os_is_zero, os_is_large);
    memid->mem.os.base = os_base;
    memid->mem.os.alignment = alignment;
  }
  return p;
}

/* -----------------------------------------------------------
  OS aligned allocation with an offset. This is used
  for large alignments > MI_BLOCK_ALIGNMENT_MAX. We use a large mimalloc
  page where the object can be aligned at an offset from the start of the segment.
  As we may need to overallocate, we need to free such pointers using `mi_free_aligned`
  to use the actual start of the memory region.
+----------------------------------------------------------- */ + +void* _mi_os_alloc_aligned_at_offset(size_t size, size_t alignment, size_t offset, bool commit, bool allow_large, mi_memid_t* memid, mi_stats_t* stats) { + mi_assert(offset <= MI_SEGMENT_SIZE); + mi_assert(offset <= size); + mi_assert((alignment % _mi_os_page_size()) == 0); + *memid = _mi_memid_none(); + if (offset > MI_SEGMENT_SIZE) return NULL; + if (offset == 0) { + // regular aligned allocation + return _mi_os_alloc_aligned(size, alignment, commit, allow_large, memid, stats); + } + else { + // overallocate to align at an offset + const size_t extra = _mi_align_up(offset, alignment) - offset; + const size_t oversize = size + extra; + void* const start = _mi_os_alloc_aligned(oversize, alignment, commit, allow_large, memid, stats); + if (start == NULL) return NULL; + + void* const p = (uint8_t*)start + extra; + mi_assert(_mi_is_aligned((uint8_t*)p + offset, alignment)); + // decommit the overallocation at the start + if (commit && extra > _mi_os_page_size()) { + _mi_os_decommit(start, extra, stats); + } + return p; + } +} + +/* ----------------------------------------------------------- + OS memory API: reset, commit, decommit, protect, unprotect. +----------------------------------------------------------- */ + +// OS page align within a given area, either conservative (pages inside the area only), +// or not (straddling pages outside the area is possible) +static void* mi_os_page_align_areax(bool conservative, void* addr, size_t size, size_t* newsize) { + mi_assert(addr != NULL && size > 0); + if (newsize != NULL) *newsize = 0; + if (size == 0 || addr == NULL) return NULL; + + // page align conservatively within the range + void* start = (conservative ? mi_align_up_ptr(addr, _mi_os_page_size()) + : mi_align_down_ptr(addr, _mi_os_page_size())); + void* end = (conservative ? 
mi_align_down_ptr((uint8_t*)addr + size, _mi_os_page_size()) + : mi_align_up_ptr((uint8_t*)addr + size, _mi_os_page_size())); + ptrdiff_t diff = (uint8_t*)end - (uint8_t*)start; + if (diff <= 0) return NULL; + + mi_assert_internal((conservative && (size_t)diff <= size) || (!conservative && (size_t)diff >= size)); + if (newsize != NULL) *newsize = (size_t)diff; + return start; +} + +static void* mi_os_page_align_area_conservative(void* addr, size_t size, size_t* newsize) { + return mi_os_page_align_areax(true, addr, size, newsize); +} + +bool _mi_os_commit(void* addr, size_t size, bool* is_zero, mi_stats_t* tld_stats) { + MI_UNUSED(tld_stats); + mi_stats_t* stats = &_mi_stats_main; + if (is_zero != NULL) { *is_zero = false; } + _mi_stat_increase(&stats->committed, size); // use size for precise commit vs. decommit + _mi_stat_counter_increase(&stats->commit_calls, 1); + + // page align range + size_t csize; + void* start = mi_os_page_align_areax(false /* conservative? */, addr, size, &csize); + if (csize == 0) return true; + + // commit + bool os_is_zero = false; + int err = _mi_prim_commit(start, csize, &os_is_zero); + if (err != 0) { + _mi_warning_message("cannot commit OS memory (error: %d (0x%x), address: %p, size: 0x%zx bytes)\n", err, err, start, csize); + return false; + } + if (os_is_zero && is_zero != NULL) { + *is_zero = true; + mi_assert_expensive(mi_mem_is_zero(start, csize)); + } + // note: the following seems required for asan (otherwise `mimalloc-test-stress` fails) + #ifdef MI_TRACK_ASAN + if (os_is_zero) { mi_track_mem_defined(start,csize); } + else { mi_track_mem_undefined(start,csize); } + #endif + return true; +} + +static bool mi_os_decommit_ex(void* addr, size_t size, bool* needs_recommit, mi_stats_t* tld_stats) { + MI_UNUSED(tld_stats); + mi_stats_t* stats = &_mi_stats_main; + mi_assert_internal(needs_recommit!=NULL); + _mi_stat_decrease(&stats->committed, size); + + // page align + size_t csize; + void* start = 
mi_os_page_align_area_conservative(addr, size, &csize); + if (csize == 0) return true; + + // decommit + *needs_recommit = true; + int err = _mi_prim_decommit(start,csize,needs_recommit); + if (err != 0) { + _mi_warning_message("cannot decommit OS memory (error: %d (0x%x), address: %p, size: 0x%zx bytes)\n", err, err, start, csize); + } + mi_assert_internal(err == 0); + return (err == 0); +} + +bool _mi_os_decommit(void* addr, size_t size, mi_stats_t* tld_stats) { + bool needs_recommit; + return mi_os_decommit_ex(addr, size, &needs_recommit, tld_stats); +} + + +// Signal to the OS that the address range is no longer in use +// but may be used later again. This will release physical memory +// pages and reduce swapping while keeping the memory committed. +// We page align to a conservative area inside the range to reset. +bool _mi_os_reset(void* addr, size_t size, mi_stats_t* stats) { + // page align conservatively within the range + size_t csize; + void* start = mi_os_page_align_area_conservative(addr, size, &csize); + if (csize == 0) return true; // || _mi_os_is_huge_reserved(addr) + _mi_stat_increase(&stats->reset, csize); + _mi_stat_counter_increase(&stats->reset_calls, 1); + + #if (MI_DEBUG>1) && !MI_SECURE && !MI_TRACK_ENABLED // && !MI_TSAN + memset(start, 0, csize); // pretend it is eagerly reset + #endif + + int err = _mi_prim_reset(start, csize); + if (err != 0) { + _mi_warning_message("cannot reset OS memory (error: %d (0x%x), address: %p, size: 0x%zx bytes)\n", err, err, start, csize); + } + return (err == 0); +} + + +// either resets or decommits memory, returns true if the memory needs +// to be recommitted if it is to be re-used later on. +bool _mi_os_purge_ex(void* p, size_t size, bool allow_reset, mi_stats_t* stats) +{ + if (mi_option_get(mi_option_purge_delay) < 0) return false; // is purging allowed? 
+ _mi_stat_counter_increase(&stats->purge_calls, 1); + _mi_stat_increase(&stats->purged, size); + + if (mi_option_is_enabled(mi_option_purge_decommits) && // should decommit? + !_mi_preloading()) // don't decommit during preloading (unsafe) + { + bool needs_recommit = true; + mi_os_decommit_ex(p, size, &needs_recommit, stats); + return needs_recommit; + } + else { + if (allow_reset) { // this can sometimes be not allowed if the range is not fully committed + _mi_os_reset(p, size, stats); + } + return false; // needs no recommit + } +} + +// either resets or decommits memory, returns true if the memory needs +// to be recommitted if it is to be re-used later on. +bool _mi_os_purge(void* p, size_t size, mi_stats_t * stats) { + return _mi_os_purge_ex(p, size, true, stats); +} + +// Protect a region in memory to be not accessible. +static bool mi_os_protectx(void* addr, size_t size, bool protect) { + // page align conservatively within the range + size_t csize = 0; + void* start = mi_os_page_align_area_conservative(addr, size, &csize); + if (csize == 0) return false; + /* + if (_mi_os_is_huge_reserved(addr)) { + _mi_warning_message("cannot mprotect memory allocated in huge OS pages\n"); + } + */ + int err = _mi_prim_protect(start,csize,protect); + if (err != 0) { + _mi_warning_message("cannot %s OS memory (error: %d (0x%x), address: %p, size: 0x%zx bytes)\n", (protect ? "protect" : "unprotect"), err, err, start, csize); + } + return (err == 0); +} + +bool _mi_os_protect(void* addr, size_t size) { + return mi_os_protectx(addr, size, true); +} + +bool _mi_os_unprotect(void* addr, size_t size) { + return mi_os_protectx(addr, size, false); +} + + + +/* ---------------------------------------------------------------------------- +Support for allocating huge OS pages (1Gib) that are reserved up-front +and possibly associated with a specific NUMA node. 
(use `numa_node>=0`) +-----------------------------------------------------------------------------*/ +#define MI_HUGE_OS_PAGE_SIZE (MI_GiB) + + +#if (MI_INTPTR_SIZE >= 8) +// To ensure proper alignment, use our own area for huge OS pages +static mi_decl_cache_align _Atomic(uintptr_t) mi_huge_start; // = 0 + +// Claim an aligned address range for huge pages +static uint8_t* mi_os_claim_huge_pages(size_t pages, size_t* total_size) { + if (total_size != NULL) *total_size = 0; + const size_t size = pages * MI_HUGE_OS_PAGE_SIZE; + + uintptr_t start = 0; + uintptr_t end = 0; + uintptr_t huge_start = mi_atomic_load_relaxed(&mi_huge_start); + do { + start = huge_start; + if (start == 0) { + // Initialize the start address after the 32TiB area + start = ((uintptr_t)32 << 40); // 32TiB virtual start address + #if (MI_SECURE>0 || MI_DEBUG==0) // security: randomize start of huge pages unless in debug mode + uintptr_t r = _mi_heap_random_next(mi_prim_get_default_heap()); + start = start + ((uintptr_t)MI_HUGE_OS_PAGE_SIZE * ((r>>17) & 0x0FFF)); // (randomly 12bits)*1GiB == between 0 to 4TiB + #endif + } + end = start + size; + mi_assert_internal(end % MI_SEGMENT_SIZE == 0); + } while (!mi_atomic_cas_strong_acq_rel(&mi_huge_start, &huge_start, end)); + + if (total_size != NULL) *total_size = size; + return (uint8_t*)start; +} +#else +static uint8_t* mi_os_claim_huge_pages(size_t pages, size_t* total_size) { + MI_UNUSED(pages); + if (total_size != NULL) *total_size = 0; + return NULL; +} +#endif + +// Allocate MI_SEGMENT_SIZE aligned huge pages +void* _mi_os_alloc_huge_os_pages(size_t pages, int numa_node, mi_msecs_t max_msecs, size_t* pages_reserved, size_t* psize, mi_memid_t* memid) { + *memid = _mi_memid_none(); + if (psize != NULL) *psize = 0; + if (pages_reserved != NULL) *pages_reserved = 0; + size_t size = 0; + uint8_t* start = mi_os_claim_huge_pages(pages, &size); + if (start == NULL) return NULL; // or 32-bit systems + + // Allocate one page at the time but try to place 
them contiguously + // We allocate one page at the time to be able to abort if it takes too long + // or to at least allocate as many as available on the system. + mi_msecs_t start_t = _mi_clock_start(); + size_t page = 0; + bool all_zero = true; + while (page < pages) { + // allocate a page + bool is_zero = false; + void* addr = start + (page * MI_HUGE_OS_PAGE_SIZE); + void* p = NULL; + int err = _mi_prim_alloc_huge_os_pages(addr, MI_HUGE_OS_PAGE_SIZE, numa_node, &is_zero, &p); + if (!is_zero) { all_zero = false; } + if (err != 0) { + _mi_warning_message("unable to allocate huge OS page (error: %d (0x%x), address: %p, size: %zx bytes)\n", err, err, addr, MI_HUGE_OS_PAGE_SIZE); + break; + } + + // Did we succeed at a contiguous address? + if (p != addr) { + // no success, issue a warning and break + if (p != NULL) { + _mi_warning_message("could not allocate contiguous huge OS page %zu at %p\n", page, addr); + mi_os_prim_free(p, MI_HUGE_OS_PAGE_SIZE, true, &_mi_stats_main); + } + break; + } + + // success, record it + page++; // increase before timeout check (see issue #711) + _mi_stat_increase(&_mi_stats_main.committed, MI_HUGE_OS_PAGE_SIZE); + _mi_stat_increase(&_mi_stats_main.reserved, MI_HUGE_OS_PAGE_SIZE); + + // check for timeout + if (max_msecs > 0) { + mi_msecs_t elapsed = _mi_clock_end(start_t); + if (page >= 1) { + mi_msecs_t estimate = ((elapsed / (page+1)) * pages); + if (estimate > 2*max_msecs) { // seems like we are going to timeout, break + elapsed = max_msecs + 1; + } + } + if (elapsed > max_msecs) { + _mi_warning_message("huge OS page allocation timed out (after allocating %zu page(s))\n", page); + break; + } + } + } + mi_assert_internal(page*MI_HUGE_OS_PAGE_SIZE <= size); + if (pages_reserved != NULL) { *pages_reserved = page; } + if (psize != NULL) { *psize = page * MI_HUGE_OS_PAGE_SIZE; } + if (page != 0) { + mi_assert(start != NULL); + *memid = _mi_memid_create_os(true /* is committed */, all_zero, true /* is_large */); + memid->memkind = 
MI_MEM_OS_HUGE; + mi_assert(memid->is_pinned); + #ifdef MI_TRACK_ASAN + if (all_zero) { mi_track_mem_defined(start,size); } + #endif + } + return (page == 0 ? NULL : start); +} + +// free every huge page in a range individually (as we allocated per page) +// note: needed with VirtualAlloc but could potentially be done in one go on mmap'd systems. +static void mi_os_free_huge_os_pages(void* p, size_t size, mi_stats_t* stats) { + if (p==NULL || size==0) return; + uint8_t* base = (uint8_t*)p; + while (size >= MI_HUGE_OS_PAGE_SIZE) { + mi_os_prim_free(base, MI_HUGE_OS_PAGE_SIZE, true, stats); + size -= MI_HUGE_OS_PAGE_SIZE; + base += MI_HUGE_OS_PAGE_SIZE; + } +} + +/* ---------------------------------------------------------------------------- +Support NUMA aware allocation +-----------------------------------------------------------------------------*/ + +_Atomic(size_t) _mi_numa_node_count; // = 0 // cache the node count + +size_t _mi_os_numa_node_count_get(void) { + size_t count = mi_atomic_load_acquire(&_mi_numa_node_count); + if (count <= 0) { + long ncount = mi_option_get(mi_option_use_numa_nodes); // given explicitly? 
+ if (ncount > 0) { + count = (size_t)ncount; + } + else { + count = _mi_prim_numa_node_count(); // or detect dynamically + if (count == 0) count = 1; + } + mi_atomic_store_release(&_mi_numa_node_count, count); // save it + _mi_verbose_message("using %zd numa regions\n", count); + } + return count; +} + +int _mi_os_numa_node_get(mi_os_tld_t* tld) { + MI_UNUSED(tld); + size_t numa_count = _mi_os_numa_node_count(); + if (numa_count<=1) return 0; // optimize on single numa node systems: always node 0 + // never more than the node count and >= 0 + size_t numa_node = _mi_prim_numa_node(); + if (numa_node >= numa_count) { numa_node = numa_node % numa_count; } + return (int)numa_node; +} diff --git a/yass/third_party/mimalloc/src/page-queue.c b/yass/third_party/mimalloc/src/page-queue.c new file mode 100644 index 0000000000..ceea91ee4d --- /dev/null +++ b/yass/third_party/mimalloc/src/page-queue.c @@ -0,0 +1,343 @@ +/*---------------------------------------------------------------------------- +Copyright (c) 2018-2024, Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. +-----------------------------------------------------------------------------*/ + +/* ----------------------------------------------------------- + Definition of page queues for each block size +----------------------------------------------------------- */ + +#ifndef MI_IN_PAGE_C +#error "this file should be included from 'page.c'" +// include to help an IDE +#include "mimalloc.h" +#include "mimalloc/internal.h" +#include "mimalloc/atomic.h" +#endif + +/* ----------------------------------------------------------- + Minimal alignment in machine words (i.e. 
`sizeof(void*)`) +----------------------------------------------------------- */ + +#if (MI_MAX_ALIGN_SIZE > 4*MI_INTPTR_SIZE) + #error "define alignment for more than 4x word size for this platform" +#elif (MI_MAX_ALIGN_SIZE > 2*MI_INTPTR_SIZE) + #define MI_ALIGN4W // 4 machine words minimal alignment +#elif (MI_MAX_ALIGN_SIZE > MI_INTPTR_SIZE) + #define MI_ALIGN2W // 2 machine words minimal alignment +#else + // ok, default alignment is 1 word +#endif + + +/* ----------------------------------------------------------- + Queue query +----------------------------------------------------------- */ + + +static inline bool mi_page_queue_is_huge(const mi_page_queue_t* pq) { + return (pq->block_size == (MI_MEDIUM_OBJ_SIZE_MAX+sizeof(uintptr_t))); +} + +static inline bool mi_page_queue_is_full(const mi_page_queue_t* pq) { + return (pq->block_size == (MI_MEDIUM_OBJ_SIZE_MAX+(2*sizeof(uintptr_t)))); +} + +static inline bool mi_page_queue_is_special(const mi_page_queue_t* pq) { + return (pq->block_size > MI_MEDIUM_OBJ_SIZE_MAX); +} + +/* ----------------------------------------------------------- + Bins +----------------------------------------------------------- */ + +// Return the bin for a given field size. +// Returns MI_BIN_HUGE if the size is too large. +// We use `wsize` for the size in "machine word sizes", +// i.e. byte size == `wsize*sizeof(void*)`. 
+static inline uint8_t mi_bin(size_t size) { + size_t wsize = _mi_wsize_from_size(size); + uint8_t bin; + if (wsize <= 1) { + bin = 1; + } + #if defined(MI_ALIGN4W) + else if (wsize <= 4) { + bin = (uint8_t)((wsize+1)&~1); // round to double word sizes + } + #elif defined(MI_ALIGN2W) + else if (wsize <= 8) { + bin = (uint8_t)((wsize+1)&~1); // round to double word sizes + } + #else + else if (wsize <= 8) { + bin = (uint8_t)wsize; + } + #endif + else if (wsize > MI_MEDIUM_OBJ_WSIZE_MAX) { + bin = MI_BIN_HUGE; + } + else { + #if defined(MI_ALIGN4W) + if (wsize <= 16) { wsize = (wsize+3)&~3; } // round to 4x word sizes + #endif + wsize--; + // find the highest bit + uint8_t b = (uint8_t)mi_bsr(wsize); // note: wsize != 0 + // and use the top 3 bits to determine the bin (~12.5% worst internal fragmentation). + // - adjust with 3 because we use do not round the first 8 sizes + // which each get an exact bin + bin = ((b << 2) + (uint8_t)((wsize >> (b - 2)) & 0x03)) - 3; + mi_assert_internal(bin < MI_BIN_HUGE); + } + mi_assert_internal(bin > 0 && bin <= MI_BIN_HUGE); + return bin; +} + + + +/* ----------------------------------------------------------- + Queue of pages with free blocks +----------------------------------------------------------- */ + +uint8_t _mi_bin(size_t size) { + return mi_bin(size); +} + +size_t _mi_bin_size(uint8_t bin) { + return _mi_heap_empty.pages[bin].block_size; +} + +// Good size for allocation +size_t mi_good_size(size_t size) mi_attr_noexcept { + if (size <= MI_MEDIUM_OBJ_SIZE_MAX) { + return _mi_bin_size(mi_bin(size + MI_PADDING_SIZE)); + } + else { + return _mi_align_up(size + MI_PADDING_SIZE,_mi_os_page_size()); + } +} + +#if (MI_DEBUG>1) +static bool mi_page_queue_contains(mi_page_queue_t* queue, const mi_page_t* page) { + mi_assert_internal(page != NULL); + mi_page_t* list = queue->first; + while (list != NULL) { + mi_assert_internal(list->next == NULL || list->next->prev == list); + mi_assert_internal(list->prev == NULL || 
list->prev->next == list); + if (list == page) break; + list = list->next; + } + return (list == page); +} + +#endif + +#if (MI_DEBUG>1) +static bool mi_heap_contains_queue(const mi_heap_t* heap, const mi_page_queue_t* pq) { + return (pq >= &heap->pages[0] && pq <= &heap->pages[MI_BIN_FULL]); +} +#endif + +static inline bool mi_page_is_large_or_huge(const mi_page_t* page) { + return (mi_page_block_size(page) > MI_MEDIUM_OBJ_SIZE_MAX || mi_page_is_huge(page)); +} + +static mi_page_queue_t* mi_heap_page_queue_of(mi_heap_t* heap, const mi_page_t* page) { + mi_assert_internal(heap!=NULL); + uint8_t bin = (mi_page_is_in_full(page) ? MI_BIN_FULL : (mi_page_is_huge(page) ? MI_BIN_HUGE : mi_bin(mi_page_block_size(page)))); + mi_assert_internal(bin <= MI_BIN_FULL); + mi_page_queue_t* pq = &heap->pages[bin]; + mi_assert_internal((mi_page_block_size(page) == pq->block_size) || + (mi_page_is_large_or_huge(page) && mi_page_queue_is_huge(pq)) || + (mi_page_is_in_full(page) && mi_page_queue_is_full(pq))); + return pq; +} + +static mi_page_queue_t* mi_page_queue_of(const mi_page_t* page) { + mi_heap_t* heap = mi_page_heap(page); + mi_page_queue_t* pq = mi_heap_page_queue_of(heap, page); + mi_assert_expensive(mi_page_queue_contains(pq, page)); + return pq; +} + +// The current small page array is for efficiency and for each +// small size (up to 256) it points directly to the page for that +// size without having to compute the bin. This means when the +// current free page queue is updated for a small bin, we need to update a +// range of entries in `_mi_page_small_free`. 
+static inline void mi_heap_queue_first_update(mi_heap_t* heap, const mi_page_queue_t* pq) { + mi_assert_internal(mi_heap_contains_queue(heap,pq)); + size_t size = pq->block_size; + if (size > MI_SMALL_SIZE_MAX) return; + + mi_page_t* page = pq->first; + if (pq->first == NULL) page = (mi_page_t*)&_mi_page_empty; + + // find index in the right direct page array + size_t start; + size_t idx = _mi_wsize_from_size(size); + mi_page_t** pages_free = heap->pages_free_direct; + + if (pages_free[idx] == page) return; // already set + + // find start slot + if (idx<=1) { + start = 0; + } + else { + // find previous size; due to minimal alignment upto 3 previous bins may need to be skipped + uint8_t bin = mi_bin(size); + const mi_page_queue_t* prev = pq - 1; + while( bin == mi_bin(prev->block_size) && prev > &heap->pages[0]) { + prev--; + } + start = 1 + _mi_wsize_from_size(prev->block_size); + if (start > idx) start = idx; + } + + // set size range to the right page + mi_assert(start <= idx); + for (size_t sz = start; sz <= idx; sz++) { + pages_free[sz] = page; + } +} + +/* +static bool mi_page_queue_is_empty(mi_page_queue_t* queue) { + return (queue->first == NULL); +} +*/ + +static void mi_page_queue_remove(mi_page_queue_t* queue, mi_page_t* page) { + mi_assert_internal(page != NULL); + mi_assert_expensive(mi_page_queue_contains(queue, page)); + mi_assert_internal(mi_page_block_size(page) == queue->block_size || + (mi_page_is_large_or_huge(page) && mi_page_queue_is_huge(queue)) || + (mi_page_is_in_full(page) && mi_page_queue_is_full(queue))); + mi_heap_t* heap = mi_page_heap(page); + + if (page->prev != NULL) page->prev->next = page->next; + if (page->next != NULL) page->next->prev = page->prev; + if (page == queue->last) queue->last = page->prev; + if (page == queue->first) { + queue->first = page->next; + // update first + mi_assert_internal(mi_heap_contains_queue(heap, queue)); + mi_heap_queue_first_update(heap,queue); + } + heap->page_count--; + page->next = NULL; + 
page->prev = NULL; + // mi_atomic_store_ptr_release(mi_atomic_cast(void*, &page->heap), NULL); + mi_page_set_in_full(page,false); +} + + +static void mi_page_queue_push(mi_heap_t* heap, mi_page_queue_t* queue, mi_page_t* page) { + mi_assert_internal(mi_page_heap(page) == heap); + mi_assert_internal(!mi_page_queue_contains(queue, page)); + #if MI_HUGE_PAGE_ABANDON + mi_assert_internal(_mi_page_segment(page)->kind != MI_SEGMENT_HUGE); + #endif + mi_assert_internal(mi_page_block_size(page) == queue->block_size || + (mi_page_is_large_or_huge(page) && mi_page_queue_is_huge(queue)) || + (mi_page_is_in_full(page) && mi_page_queue_is_full(queue))); + + mi_page_set_in_full(page, mi_page_queue_is_full(queue)); + // mi_atomic_store_ptr_release(mi_atomic_cast(void*, &page->heap), heap); + page->next = queue->first; + page->prev = NULL; + if (queue->first != NULL) { + mi_assert_internal(queue->first->prev == NULL); + queue->first->prev = page; + queue->first = page; + } + else { + queue->first = queue->last = page; + } + + // update direct + mi_heap_queue_first_update(heap, queue); + heap->page_count++; +} + + +static void mi_page_queue_enqueue_from(mi_page_queue_t* to, mi_page_queue_t* from, mi_page_t* page) { + mi_assert_internal(page != NULL); + mi_assert_expensive(mi_page_queue_contains(from, page)); + mi_assert_expensive(!mi_page_queue_contains(to, page)); + const size_t bsize = mi_page_block_size(page); + MI_UNUSED(bsize); + mi_assert_internal((bsize == to->block_size && bsize == from->block_size) || + (bsize == to->block_size && mi_page_queue_is_full(from)) || + (bsize == from->block_size && mi_page_queue_is_full(to)) || + (mi_page_is_large_or_huge(page) && mi_page_queue_is_huge(to)) || + (mi_page_is_large_or_huge(page) && mi_page_queue_is_full(to))); + + mi_heap_t* heap = mi_page_heap(page); + if (page->prev != NULL) page->prev->next = page->next; + if (page->next != NULL) page->next->prev = page->prev; + if (page == from->last) from->last = page->prev; + if (page == 
from->first) { + from->first = page->next; + // update first + mi_assert_internal(mi_heap_contains_queue(heap, from)); + mi_heap_queue_first_update(heap, from); + } + + page->prev = to->last; + page->next = NULL; + if (to->last != NULL) { + mi_assert_internal(heap == mi_page_heap(to->last)); + to->last->next = page; + to->last = page; + } + else { + to->first = page; + to->last = page; + mi_heap_queue_first_update(heap, to); + } + + mi_page_set_in_full(page, mi_page_queue_is_full(to)); +} + +// Only called from `mi_heap_absorb`. +size_t _mi_page_queue_append(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_queue_t* append) { + mi_assert_internal(mi_heap_contains_queue(heap,pq)); + mi_assert_internal(pq->block_size == append->block_size); + + if (append->first==NULL) return 0; + + // set append pages to new heap and count + size_t count = 0; + for (mi_page_t* page = append->first; page != NULL; page = page->next) { + // inline `mi_page_set_heap` to avoid wrong assertion during absorption; + // in this case it is ok to be delayed freeing since both "to" and "from" heap are still alive. + mi_atomic_store_release(&page->xheap, (uintptr_t)heap); + // set the flag to delayed free (not overriding NEVER_DELAYED_FREE) which has as a + // side effect that it spins until any DELAYED_FREEING is finished. This ensures + // that after appending only the new heap will be used for delayed free operations. 
+ _mi_page_use_delayed_free(page, MI_USE_DELAYED_FREE, false); + count++; + } + + if (pq->last==NULL) { + // take over afresh + mi_assert_internal(pq->first==NULL); + pq->first = append->first; + pq->last = append->last; + mi_heap_queue_first_update(heap, pq); + } + else { + // append to end + mi_assert_internal(pq->last!=NULL); + mi_assert_internal(append->first!=NULL); + pq->last->next = append->first; + append->first->prev = pq->last; + pq->last = append->last; + } + return count; +} diff --git a/yass/third_party/mimalloc/src/page.c b/yass/third_party/mimalloc/src/page.c new file mode 100644 index 0000000000..871ed21514 --- /dev/null +++ b/yass/third_party/mimalloc/src/page.c @@ -0,0 +1,943 @@ +/*---------------------------------------------------------------------------- +Copyright (c) 2018-2024, Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. +-----------------------------------------------------------------------------*/ + +/* ----------------------------------------------------------- + The core of the allocator. Every segment contains + pages of a certain block size. The main function + exported is `mi_malloc_generic`. 
+----------------------------------------------------------- */ + +#include "mimalloc.h" +#include "mimalloc/internal.h" +#include "mimalloc/atomic.h" + +/* ----------------------------------------------------------- + Definition of page queues for each block size +----------------------------------------------------------- */ + +#define MI_IN_PAGE_C +#include "page-queue.c" +#undef MI_IN_PAGE_C + + +/* ----------------------------------------------------------- + Page helpers +----------------------------------------------------------- */ + +// Index a block in a page +static inline mi_block_t* mi_page_block_at(const mi_page_t* page, void* page_start, size_t block_size, size_t i) { + MI_UNUSED(page); + mi_assert_internal(page != NULL); + mi_assert_internal(i <= page->reserved); + return (mi_block_t*)((uint8_t*)page_start + (i * block_size)); +} + +static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t size, mi_tld_t* tld); +static void mi_page_extend_free(mi_heap_t* heap, mi_page_t* page, mi_tld_t* tld); + +#if (MI_DEBUG>=3) +static size_t mi_page_list_count(mi_page_t* page, mi_block_t* head) { + size_t count = 0; + while (head != NULL) { + mi_assert_internal(page == _mi_ptr_page(head)); + count++; + head = mi_block_next(page, head); + } + return count; +} + +/* +// Start of the page available memory +static inline uint8_t* mi_page_area(const mi_page_t* page) { + return _mi_page_start(_mi_page_segment(page), page, NULL); +} +*/ + +static bool mi_page_list_is_valid(mi_page_t* page, mi_block_t* p) { + size_t psize; + uint8_t* page_area = _mi_segment_page_start(_mi_page_segment(page), page, &psize); + mi_block_t* start = (mi_block_t*)page_area; + mi_block_t* end = (mi_block_t*)(page_area + psize); + while(p != NULL) { + if (p < start || p >= end) return false; + p = mi_block_next(page, p); + } +#if MI_DEBUG>3 // generally too expensive to check this + if (page->free_is_zero) { + const size_t ubsize = mi_page_usable_block_size(page); + for (mi_block_t* 
block = page->free; block != NULL; block = mi_block_next(page, block)) { + mi_assert_expensive(mi_mem_is_zero(block + 1, ubsize - sizeof(mi_block_t))); + } + } +#endif + return true; +} + +static bool mi_page_is_valid_init(mi_page_t* page) { + mi_assert_internal(mi_page_block_size(page) > 0); + mi_assert_internal(page->used <= page->capacity); + mi_assert_internal(page->capacity <= page->reserved); + + uint8_t* start = mi_page_start(page); + mi_assert_internal(start == _mi_segment_page_start(_mi_page_segment(page), page, NULL)); + mi_assert_internal(page->is_huge == (_mi_page_segment(page)->kind == MI_SEGMENT_HUGE)); + //mi_assert_internal(start + page->capacity*page->block_size == page->top); + + mi_assert_internal(mi_page_list_is_valid(page,page->free)); + mi_assert_internal(mi_page_list_is_valid(page,page->local_free)); + + #if MI_DEBUG>3 // generally too expensive to check this + if (page->free_is_zero) { + const size_t ubsize = mi_page_usable_block_size(page); + for(mi_block_t* block = page->free; block != NULL; block = mi_block_next(page,block)) { + mi_assert_expensive(mi_mem_is_zero(block + 1, ubsize - sizeof(mi_block_t))); + } + } + #endif + + #if !MI_TRACK_ENABLED && !MI_TSAN + mi_block_t* tfree = mi_page_thread_free(page); + mi_assert_internal(mi_page_list_is_valid(page, tfree)); + //size_t tfree_count = mi_page_list_count(page, tfree); + //mi_assert_internal(tfree_count <= page->thread_freed + 1); + #endif + + size_t free_count = mi_page_list_count(page, page->free) + mi_page_list_count(page, page->local_free); + mi_assert_internal(page->used + free_count == page->capacity); + + return true; +} + +extern bool _mi_process_is_initialized; // has mi_process_init been called? 
+ +bool _mi_page_is_valid(mi_page_t* page) { + mi_assert_internal(mi_page_is_valid_init(page)); + #if MI_SECURE + mi_assert_internal(page->keys[0] != 0); + #endif + if (mi_page_heap(page)!=NULL) { + mi_segment_t* segment = _mi_page_segment(page); + + mi_assert_internal(!_mi_process_is_initialized || segment->thread_id==0 || segment->thread_id == mi_page_heap(page)->thread_id); + #if MI_HUGE_PAGE_ABANDON + if (segment->kind != MI_SEGMENT_HUGE) + #endif + { + mi_page_queue_t* pq = mi_page_queue_of(page); + mi_assert_internal(mi_page_queue_contains(pq, page)); + mi_assert_internal(pq->block_size==mi_page_block_size(page) || mi_page_block_size(page) > MI_MEDIUM_OBJ_SIZE_MAX || mi_page_is_in_full(page)); + mi_assert_internal(mi_heap_contains_queue(mi_page_heap(page),pq)); + } + } + return true; +} +#endif + +void _mi_page_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool override_never) { + while (!_mi_page_try_use_delayed_free(page, delay, override_never)) { + mi_atomic_yield(); + } +} + +bool _mi_page_try_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool override_never) { + mi_thread_free_t tfreex; + mi_delayed_t old_delay; + mi_thread_free_t tfree; + size_t yield_count = 0; + do { + tfree = mi_atomic_load_acquire(&page->xthread_free); // note: must acquire as we can break/repeat this loop and not do a CAS; + tfreex = mi_tf_set_delayed(tfree, delay); + old_delay = mi_tf_delayed(tfree); + if mi_unlikely(old_delay == MI_DELAYED_FREEING) { + if (yield_count >= 4) return false; // give up after 4 tries + yield_count++; + mi_atomic_yield(); // delay until outstanding MI_DELAYED_FREEING are done. 
+ // tfree = mi_tf_set_delayed(tfree, MI_NO_DELAYED_FREE); // will cause CAS to busy fail + } + else if (delay == old_delay) { + break; // avoid atomic operation if already equal + } + else if (!override_never && old_delay == MI_NEVER_DELAYED_FREE) { + break; // leave never-delayed flag set + } + } while ((old_delay == MI_DELAYED_FREEING) || + !mi_atomic_cas_weak_release(&page->xthread_free, &tfree, tfreex)); + + return true; // success +} + +/* ----------------------------------------------------------- + Page collect the `local_free` and `thread_free` lists +----------------------------------------------------------- */ + +// Collect the local `thread_free` list using an atomic exchange. +// Note: The exchange must be done atomically as this is used right after +// moving to the full list in `mi_page_collect_ex` and we need to +// ensure that there was no race where the page became unfull just before the move. +static void _mi_page_thread_free_collect(mi_page_t* page) +{ + mi_block_t* head; + mi_thread_free_t tfreex; + mi_thread_free_t tfree = mi_atomic_load_relaxed(&page->xthread_free); + do { + head = mi_tf_block(tfree); + tfreex = mi_tf_set_block(tfree,NULL); + } while (!mi_atomic_cas_weak_acq_rel(&page->xthread_free, &tfree, tfreex)); + + // return if the list is empty + if (head == NULL) return; + + // find the tail -- also to get a proper count (without data races) + size_t max_count = page->capacity; // cannot collect more than capacity + size_t count = 1; + mi_block_t* tail = head; + mi_block_t* next; + while ((next = mi_block_next(page,tail)) != NULL && count <= max_count) { + count++; + tail = next; + } + // if `count > max_count` there was a memory corruption (possibly infinite list due to double multi-threaded free) + if (count > max_count) { + _mi_error_message(EFAULT, "corrupted thread-free list\n"); + return; // the thread-free items cannot be freed + } + + // and append the current local free list + mi_block_set_next(page,tail, page->local_free); 
+ page->local_free = head; + + // update counts now + page->used -= (uint16_t)count; +} + +void _mi_page_free_collect(mi_page_t* page, bool force) { + mi_assert_internal(page!=NULL); + + // collect the thread free list + if (force || mi_page_thread_free(page) != NULL) { // quick test to avoid an atomic operation + _mi_page_thread_free_collect(page); + } + + // and the local free list + if (page->local_free != NULL) { + if mi_likely(page->free == NULL) { + // usual case + page->free = page->local_free; + page->local_free = NULL; + page->free_is_zero = false; + } + else if (force) { + // append -- only on shutdown (force) as this is a linear operation + mi_block_t* tail = page->local_free; + mi_block_t* next; + while ((next = mi_block_next(page, tail)) != NULL) { + tail = next; + } + mi_block_set_next(page, tail, page->free); + page->free = page->local_free; + page->local_free = NULL; + page->free_is_zero = false; + } + } + + mi_assert_internal(!force || page->local_free == NULL); +} + + + +/* ----------------------------------------------------------- + Page fresh and retire +----------------------------------------------------------- */ + +// called from segments when reclaiming abandoned pages +void _mi_page_reclaim(mi_heap_t* heap, mi_page_t* page) { + mi_assert_expensive(mi_page_is_valid_init(page)); + + mi_assert_internal(mi_page_heap(page) == heap); + mi_assert_internal(mi_page_thread_free_flag(page) != MI_NEVER_DELAYED_FREE); + #if MI_HUGE_PAGE_ABANDON + mi_assert_internal(_mi_page_segment(page)->kind != MI_SEGMENT_HUGE); + #endif + + // TODO: push on full queue immediately if it is full? 
+ mi_page_queue_t* pq = mi_page_queue(heap, mi_page_block_size(page)); + mi_page_queue_push(heap, pq, page); + mi_assert_expensive(_mi_page_is_valid(page)); +} + +// allocate a fresh page from a segment +static mi_page_t* mi_page_fresh_alloc(mi_heap_t* heap, mi_page_queue_t* pq, size_t block_size, size_t page_alignment) { + #if !MI_HUGE_PAGE_ABANDON + mi_assert_internal(pq != NULL); + mi_assert_internal(mi_heap_contains_queue(heap, pq)); + mi_assert_internal(page_alignment > 0 || block_size > MI_MEDIUM_OBJ_SIZE_MAX || block_size == pq->block_size); + #endif + mi_page_t* page = _mi_segment_page_alloc(heap, block_size, page_alignment, &heap->tld->segments, &heap->tld->os); + if (page == NULL) { + // this may be out-of-memory, or an abandoned page was reclaimed (and in our queue) + return NULL; + } + #if MI_HUGE_PAGE_ABANDON + mi_assert_internal(pq==NULL || _mi_page_segment(page)->page_kind != MI_PAGE_HUGE); + #endif + mi_assert_internal(page_alignment >0 || block_size > MI_MEDIUM_OBJ_SIZE_MAX || _mi_page_segment(page)->kind != MI_SEGMENT_HUGE); + mi_assert_internal(pq!=NULL || mi_page_block_size(page) >= block_size); + // a fresh page was found, initialize it + const size_t full_block_size = (pq == NULL || mi_page_is_huge(page) ? 
mi_page_block_size(page) : block_size); // see also: mi_segment_huge_page_alloc + mi_assert_internal(full_block_size >= block_size); + mi_page_init(heap, page, full_block_size, heap->tld); + mi_heap_stat_increase(heap, pages, 1); + if (pq != NULL) { mi_page_queue_push(heap, pq, page); } + mi_assert_expensive(_mi_page_is_valid(page)); + return page; +} + +// Get a fresh page to use +static mi_page_t* mi_page_fresh(mi_heap_t* heap, mi_page_queue_t* pq) { + mi_assert_internal(mi_heap_contains_queue(heap, pq)); + mi_page_t* page = mi_page_fresh_alloc(heap, pq, pq->block_size, 0); + if (page==NULL) return NULL; + mi_assert_internal(pq->block_size==mi_page_block_size(page)); + mi_assert_internal(pq==mi_page_queue(heap, mi_page_block_size(page))); + return page; +} + +/* ----------------------------------------------------------- + Do any delayed frees + (put there by other threads if they deallocated in a full page) +----------------------------------------------------------- */ +void _mi_heap_delayed_free_all(mi_heap_t* heap) { + while (!_mi_heap_delayed_free_partial(heap)) { + mi_atomic_yield(); + } +} + +// returns true if all delayed frees were processed +bool _mi_heap_delayed_free_partial(mi_heap_t* heap) { + // take over the list (note: no atomic exchange since it is often NULL) + mi_block_t* block = mi_atomic_load_ptr_relaxed(mi_block_t, &heap->thread_delayed_free); + while (block != NULL && !mi_atomic_cas_ptr_weak_acq_rel(mi_block_t, &heap->thread_delayed_free, &block, NULL)) { /* nothing */ }; + bool all_freed = true; + + // and free them all + while(block != NULL) { + mi_block_t* next = mi_block_nextx(heap,block, heap->keys); + // use internal free instead of regular one to keep stats etc correct + if (!_mi_free_delayed_block(block)) { + // we might already start delayed freeing while another thread has not yet + // reset the delayed_freeing flag; in that case delay it further by reinserting the current block + // into the delayed free list + all_freed = false; 
+ mi_block_t* dfree = mi_atomic_load_ptr_relaxed(mi_block_t, &heap->thread_delayed_free); + do { + mi_block_set_nextx(heap, block, dfree, heap->keys); + } while (!mi_atomic_cas_ptr_weak_release(mi_block_t,&heap->thread_delayed_free, &dfree, block)); + } + block = next; + } + return all_freed; +} + +/* ----------------------------------------------------------- + Unfull, abandon, free and retire +----------------------------------------------------------- */ + +// Move a page from the full list back to a regular list +void _mi_page_unfull(mi_page_t* page) { + mi_assert_internal(page != NULL); + mi_assert_expensive(_mi_page_is_valid(page)); + mi_assert_internal(mi_page_is_in_full(page)); + if (!mi_page_is_in_full(page)) return; + + mi_heap_t* heap = mi_page_heap(page); + mi_page_queue_t* pqfull = &heap->pages[MI_BIN_FULL]; + mi_page_set_in_full(page, false); // to get the right queue + mi_page_queue_t* pq = mi_heap_page_queue_of(heap, page); + mi_page_set_in_full(page, true); + mi_page_queue_enqueue_from(pq, pqfull, page); +} + +static void mi_page_to_full(mi_page_t* page, mi_page_queue_t* pq) { + mi_assert_internal(pq == mi_page_queue_of(page)); + mi_assert_internal(!mi_page_immediate_available(page)); + mi_assert_internal(!mi_page_is_in_full(page)); + + if (mi_page_is_in_full(page)) return; + mi_page_queue_enqueue_from(&mi_page_heap(page)->pages[MI_BIN_FULL], pq, page); + _mi_page_free_collect(page,false); // try to collect right away in case another thread freed just before MI_USE_DELAYED_FREE was set +} + + +// Abandon a page with used blocks at the end of a thread. +// Note: only call if it is ensured that no references exist from +// the `page->heap->thread_delayed_free` into this page. +// Currently only called through `mi_heap_collect_ex` which ensures this. 
+void _mi_page_abandon(mi_page_t* page, mi_page_queue_t* pq) { + mi_assert_internal(page != NULL); + mi_assert_expensive(_mi_page_is_valid(page)); + mi_assert_internal(pq == mi_page_queue_of(page)); + mi_assert_internal(mi_page_heap(page) != NULL); + + mi_heap_t* pheap = mi_page_heap(page); + + // remove from our page list + mi_segments_tld_t* segments_tld = &pheap->tld->segments; + mi_page_queue_remove(pq, page); + + // page is no longer associated with our heap + mi_assert_internal(mi_page_thread_free_flag(page)==MI_NEVER_DELAYED_FREE); + mi_page_set_heap(page, NULL); + +#if (MI_DEBUG>1) && !MI_TRACK_ENABLED + // check there are no references left.. + for (mi_block_t* block = (mi_block_t*)pheap->thread_delayed_free; block != NULL; block = mi_block_nextx(pheap, block, pheap->keys)) { + mi_assert_internal(_mi_ptr_page(block) != page); + } +#endif + + // and abandon it + mi_assert_internal(mi_page_heap(page) == NULL); + _mi_segment_page_abandon(page,segments_tld); +} + + +// Free a page with no more free blocks +void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq, bool force) { + mi_assert_internal(page != NULL); + mi_assert_expensive(_mi_page_is_valid(page)); + mi_assert_internal(pq == mi_page_queue_of(page)); + mi_assert_internal(mi_page_all_free(page)); + mi_assert_internal(mi_page_thread_free_flag(page)!=MI_DELAYED_FREEING); + + // no more aligned blocks in here + mi_page_set_has_aligned(page, false); + + mi_heap_t* heap = mi_page_heap(page); + + // remove from the page list + // (no need to do _mi_heap_delayed_free first as all blocks are already free) + mi_segments_tld_t* segments_tld = &heap->tld->segments; + mi_page_queue_remove(pq, page); + + // and free it + mi_page_set_heap(page,NULL); + _mi_segment_page_free(page, force, segments_tld); +} + +#define MI_MAX_RETIRE_SIZE MI_MEDIUM_OBJ_SIZE_MAX // should be less than size for MI_BIN_HUGE +#define MI_RETIRE_CYCLES (16) + +// Retire a page with no more used blocks +// Important to not retire too quickly 
though as new +// allocations might coming. +// Note: called from `mi_free` and benchmarks often +// trigger this due to freeing everything and then +// allocating again so careful when changing this. +void _mi_page_retire(mi_page_t* page) mi_attr_noexcept { + mi_assert_internal(page != NULL); + mi_assert_expensive(_mi_page_is_valid(page)); + mi_assert_internal(mi_page_all_free(page)); + + mi_page_set_has_aligned(page, false); + + // don't retire too often.. + // (or we end up retiring and re-allocating most of the time) + // NOTE: refine this more: we should not retire if this + // is the only page left with free blocks. It is not clear + // how to check this efficiently though... + // for now, we don't retire if it is the only page left of this size class. + mi_page_queue_t* pq = mi_page_queue_of(page); + const size_t bsize = mi_page_block_size(page); + if mi_likely( /* bsize < MI_MAX_RETIRE_SIZE && */ !mi_page_queue_is_special(pq)) { // not full or huge queue? + if (pq->last==page && pq->first==page) { // the only page in the queue? + mi_stat_counter_increase(_mi_stats_main.page_no_retire,1); + page->retire_expire = (bsize <= MI_SMALL_OBJ_SIZE_MAX ? MI_RETIRE_CYCLES : MI_RETIRE_CYCLES/4); + mi_heap_t* heap = mi_page_heap(page); + mi_assert_internal(pq >= heap->pages); + const size_t index = pq - heap->pages; + mi_assert_internal(index < MI_BIN_FULL && index < MI_BIN_HUGE); + if (index < heap->page_retired_min) heap->page_retired_min = index; + if (index > heap->page_retired_max) heap->page_retired_max = index; + mi_assert_internal(mi_page_all_free(page)); + return; // don't free after all + } + } + _mi_page_free(page, pq, false); +} + +// free retired pages: we don't need to look at the entire queues +// since we only retire pages that are at the head position in a queue. 
+void _mi_heap_collect_retired(mi_heap_t* heap, bool force) { + size_t min = MI_BIN_FULL; + size_t max = 0; + for(size_t bin = heap->page_retired_min; bin <= heap->page_retired_max; bin++) { + mi_page_queue_t* pq = &heap->pages[bin]; + mi_page_t* page = pq->first; + if (page != NULL && page->retire_expire != 0) { + if (mi_page_all_free(page)) { + page->retire_expire--; + if (force || page->retire_expire == 0) { + _mi_page_free(pq->first, pq, force); + } + else { + // keep retired, update min/max + if (bin < min) min = bin; + if (bin > max) max = bin; + } + } + else { + page->retire_expire = 0; + } + } + } + heap->page_retired_min = min; + heap->page_retired_max = max; +} + + +/* ----------------------------------------------------------- + Initialize the initial free list in a page. + In secure mode we initialize a randomized list by + alternating between slices. +----------------------------------------------------------- */ + +#define MI_MAX_SLICE_SHIFT (6) // at most 64 slices +#define MI_MAX_SLICES (1UL << MI_MAX_SLICE_SHIFT) +#define MI_MIN_SLICES (2) + +static void mi_page_free_list_extend_secure(mi_heap_t* const heap, mi_page_t* const page, const size_t bsize, const size_t extend, mi_stats_t* const stats) { + MI_UNUSED(stats); + #if (MI_SECURE<=2) + mi_assert_internal(page->free == NULL); + mi_assert_internal(page->local_free == NULL); + #endif + mi_assert_internal(page->capacity + extend <= page->reserved); + mi_assert_internal(bsize == mi_page_block_size(page)); + void* const page_area = mi_page_start(page); + + // initialize a randomized free list + // set up `slice_count` slices to alternate between + size_t shift = MI_MAX_SLICE_SHIFT; + while ((extend >> shift) == 0) { + shift--; + } + const size_t slice_count = (size_t)1U << shift; + const size_t slice_extend = extend / slice_count; + mi_assert_internal(slice_extend >= 1); + mi_block_t* blocks[MI_MAX_SLICES]; // current start of the slice + size_t counts[MI_MAX_SLICES]; // available objects in the 
slice + for (size_t i = 0; i < slice_count; i++) { + blocks[i] = mi_page_block_at(page, page_area, bsize, page->capacity + i*slice_extend); + counts[i] = slice_extend; + } + counts[slice_count-1] += (extend % slice_count); // final slice holds the modulus too (todo: distribute evenly?) + + // and initialize the free list by randomly threading through them + // set up first element + const uintptr_t r = _mi_heap_random_next(heap); + size_t current = r % slice_count; + counts[current]--; + mi_block_t* const free_start = blocks[current]; + // and iterate through the rest; use `random_shuffle` for performance + uintptr_t rnd = _mi_random_shuffle(r|1); // ensure not 0 + for (size_t i = 1; i < extend; i++) { + // call random_shuffle only every INTPTR_SIZE rounds + const size_t round = i%MI_INTPTR_SIZE; + if (round == 0) rnd = _mi_random_shuffle(rnd); + // select a random next slice index + size_t next = ((rnd >> 8*round) & (slice_count-1)); + while (counts[next]==0) { // ensure it still has space + next++; + if (next==slice_count) next = 0; + } + // and link the current block to it + counts[next]--; + mi_block_t* const block = blocks[current]; + blocks[current] = (mi_block_t*)((uint8_t*)block + bsize); // bump to the following block + mi_block_set_next(page, block, blocks[next]); // and set next; note: we may have `current == next` + current = next; + } + // prepend to the free list (usually NULL) + mi_block_set_next(page, blocks[current], page->free); // end of the list + page->free = free_start; +} + +static mi_decl_noinline void mi_page_free_list_extend( mi_page_t* const page, const size_t bsize, const size_t extend, mi_stats_t* const stats) +{ + MI_UNUSED(stats); + #if (MI_SECURE <= 2) + mi_assert_internal(page->free == NULL); + mi_assert_internal(page->local_free == NULL); + #endif + mi_assert_internal(page->capacity + extend <= page->reserved); + mi_assert_internal(bsize == mi_page_block_size(page)); + void* const page_area = mi_page_start(page); + + mi_block_t* 
const start = mi_page_block_at(page, page_area, bsize, page->capacity); + + // initialize a sequential free list + mi_block_t* const last = mi_page_block_at(page, page_area, bsize, page->capacity + extend - 1); + mi_block_t* block = start; + while(block <= last) { + mi_block_t* next = (mi_block_t*)((uint8_t*)block + bsize); + mi_block_set_next(page,block,next); + block = next; + } + // prepend to free list (usually `NULL`) + mi_block_set_next(page, last, page->free); + page->free = start; +} + +/* ----------------------------------------------------------- + Page initialize and extend the capacity +----------------------------------------------------------- */ + +#define MI_MAX_EXTEND_SIZE (4*1024) // heuristic, one OS page seems to work well. +#if (MI_SECURE>0) +#define MI_MIN_EXTEND (8*MI_SECURE) // extend at least by this many +#else +#define MI_MIN_EXTEND (4) +#endif + +// Extend the capacity (up to reserved) by initializing a free list +// We do at most `MI_MAX_EXTEND` to avoid touching too much memory +// Note: we also experimented with "bump" allocation on the first +// allocations but this did not speed up any benchmark (due to an +// extra test in malloc? or cache effects?) +static void mi_page_extend_free(mi_heap_t* heap, mi_page_t* page, mi_tld_t* tld) { + MI_UNUSED(tld); + mi_assert_expensive(mi_page_is_valid_init(page)); + #if (MI_SECURE<=2) + mi_assert(page->free == NULL); + mi_assert(page->local_free == NULL); + if (page->free != NULL) return; + #endif + if (page->capacity >= page->reserved) return; + + mi_stat_counter_increase(tld->stats.pages_extended, 1); + + // calculate the extend count + const size_t bsize = mi_page_block_size(page); + size_t extend = page->reserved - page->capacity; + mi_assert_internal(extend > 0); + + size_t max_extend = (bsize >= MI_MAX_EXTEND_SIZE ? 
MI_MIN_EXTEND : MI_MAX_EXTEND_SIZE/bsize); + if (max_extend < MI_MIN_EXTEND) { max_extend = MI_MIN_EXTEND; } + mi_assert_internal(max_extend > 0); + + if (extend > max_extend) { + // ensure we don't touch memory beyond the page to reduce page commit. + // the `lean` benchmark tests this. Going from 1 to 8 increases rss by 50%. + extend = max_extend; + } + + mi_assert_internal(extend > 0 && extend + page->capacity <= page->reserved); + mi_assert_internal(extend < (1UL<<16)); + + // and append the extend the free list + if (extend < MI_MIN_SLICES || MI_SECURE==0) { //!mi_option_is_enabled(mi_option_secure)) { + mi_page_free_list_extend(page, bsize, extend, &tld->stats ); + } + else { + mi_page_free_list_extend_secure(heap, page, bsize, extend, &tld->stats); + } + // enable the new free list + page->capacity += (uint16_t)extend; + mi_stat_increase(tld->stats.page_committed, extend * bsize); + mi_assert_expensive(mi_page_is_valid_init(page)); +} + +// Initialize a fresh page +static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi_tld_t* tld) { + mi_assert(page != NULL); + mi_segment_t* segment = _mi_page_segment(page); + mi_assert(segment != NULL); + mi_assert_internal(block_size > 0); + // set fields + mi_page_set_heap(page, heap); + page->block_size = block_size; + size_t page_size; + page->page_start = _mi_segment_page_start(segment, page, &page_size); + mi_track_mem_noaccess(page->page_start,page_size); + mi_assert_internal(mi_page_block_size(page) <= page_size); + mi_assert_internal(page_size <= page->slice_count*MI_SEGMENT_SLICE_SIZE); + mi_assert_internal(page_size / block_size < (1L<<16)); + page->reserved = (uint16_t)(page_size / block_size); + mi_assert_internal(page->reserved > 0); + #if (MI_PADDING || MI_ENCODE_FREELIST) + page->keys[0] = _mi_heap_random_next(heap); + page->keys[1] = _mi_heap_random_next(heap); + #endif + page->free_is_zero = page->is_zero_init; + #if MI_DEBUG>2 + if (page->is_zero_init) { + 
mi_track_mem_defined(page->page_start, page_size); + mi_assert_expensive(mi_mem_is_zero(page->page_start, page_size)); + } + #endif + mi_assert_internal(page->is_committed); + if (block_size > 0 && _mi_is_power_of_two(block_size)) { + page->block_size_shift = (uint8_t)(mi_ctz((uintptr_t)block_size)); + } + else { + page->block_size_shift = 0; + } + + mi_assert_internal(page->capacity == 0); + mi_assert_internal(page->free == NULL); + mi_assert_internal(page->used == 0); + mi_assert_internal(page->xthread_free == 0); + mi_assert_internal(page->next == NULL); + mi_assert_internal(page->prev == NULL); + mi_assert_internal(page->retire_expire == 0); + mi_assert_internal(!mi_page_has_aligned(page)); + #if (MI_PADDING || MI_ENCODE_FREELIST) + mi_assert_internal(page->keys[0] != 0); + mi_assert_internal(page->keys[1] != 0); + #endif + mi_assert_internal(page->block_size_shift == 0 || (block_size == ((size_t)1 << page->block_size_shift))); + mi_assert_expensive(mi_page_is_valid_init(page)); + + // initialize an initial free list + mi_page_extend_free(heap,page,tld); + mi_assert(mi_page_immediate_available(page)); +} + + +/* ----------------------------------------------------------- + Find pages with free blocks +-------------------------------------------------------------*/ + +// Find a page with free blocks of `page->block_size`. +static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* pq, bool first_try) +{ + // search through the pages in "next fit" order + #if MI_STAT + size_t count = 0; + #endif + mi_page_t* page = pq->first; + while (page != NULL) + { + mi_page_t* next = page->next; // remember next + #if MI_STAT + count++; + #endif + + // 0. collect freed blocks by us and other threads + _mi_page_free_collect(page, false); + + // 1. if the page contains free blocks, we are done + if (mi_page_immediate_available(page)) { + break; // pick this one + } + + // 2. 
Try to extend + if (page->capacity < page->reserved) { + mi_page_extend_free(heap, page, heap->tld); + mi_assert_internal(mi_page_immediate_available(page)); + break; + } + + // 3. If the page is completely full, move it to the `mi_pages_full` + // queue so we don't visit long-lived pages too often. + mi_assert_internal(!mi_page_is_in_full(page) && !mi_page_immediate_available(page)); + mi_page_to_full(page, pq); + + page = next; + } // for each page + + mi_heap_stat_counter_increase(heap, searches, count); + + if (page == NULL) { + _mi_heap_collect_retired(heap, false); // perhaps make a page available? + page = mi_page_fresh(heap, pq); + if (page == NULL && first_try) { + // out-of-memory _or_ an abandoned page with free blocks was reclaimed, try once again + page = mi_page_queue_find_free_ex(heap, pq, false); + } + } + else { + mi_assert(pq->first == page); + page->retire_expire = 0; + } + mi_assert_internal(page == NULL || mi_page_immediate_available(page)); + return page; +} + + + +// Find a page with free blocks of `size`. +static inline mi_page_t* mi_find_free_page(mi_heap_t* heap, size_t size) { + mi_page_queue_t* pq = mi_page_queue(heap,size); + mi_page_t* page = pq->first; + if (page != NULL) { + #if (MI_SECURE>=3) // in secure mode, we extend half the time to increase randomness + if (page->capacity < page->reserved && ((_mi_heap_random_next(heap) & 1) == 1)) { + mi_page_extend_free(heap, page, heap->tld); + mi_assert_internal(mi_page_immediate_available(page)); + } + else + #endif + { + _mi_page_free_collect(page,false); + } + + if (mi_page_immediate_available(page)) { + page->retire_expire = 0; + return page; // fast path + } + } + return mi_page_queue_find_free_ex(heap, pq, true); +} + + +/* ----------------------------------------------------------- + Users can register a deferred free function called + when the `free` list is empty. Since the `local_free` + is separate this is deterministically called after + a certain number of allocations. 
+----------------------------------------------------------- */ + +static mi_deferred_free_fun* volatile deferred_free = NULL; +static _Atomic(void*) deferred_arg; // = NULL + +void _mi_deferred_free(mi_heap_t* heap, bool force) { + heap->tld->heartbeat++; + if (deferred_free != NULL && !heap->tld->recurse) { + heap->tld->recurse = true; + deferred_free(force, heap->tld->heartbeat, mi_atomic_load_ptr_relaxed(void,&deferred_arg)); + heap->tld->recurse = false; + } +} + +void mi_register_deferred_free(mi_deferred_free_fun* fn, void* arg) mi_attr_noexcept { + deferred_free = fn; + mi_atomic_store_ptr_release(void,&deferred_arg, arg); +} + + +/* ----------------------------------------------------------- + General allocation +----------------------------------------------------------- */ + +// Large and huge page allocation. +// Huge pages contain just one block, and the segment contains just that page (as `MI_SEGMENT_HUGE`). +// Huge pages are also use if the requested alignment is very large (> MI_BLOCK_ALIGNMENT_MAX) +// so their size is not always `> MI_LARGE_OBJ_SIZE_MAX`. +static mi_page_t* mi_large_huge_page_alloc(mi_heap_t* heap, size_t size, size_t page_alignment) { + size_t block_size = _mi_os_good_alloc_size(size); + mi_assert_internal(mi_bin(block_size) == MI_BIN_HUGE || page_alignment > 0); + bool is_huge = (block_size > MI_LARGE_OBJ_SIZE_MAX || page_alignment > 0); + #if MI_HUGE_PAGE_ABANDON + mi_page_queue_t* pq = (is_huge ? NULL : mi_page_queue(heap, block_size)); + #else + mi_page_queue_t* pq = mi_page_queue(heap, is_huge ? 
MI_LARGE_OBJ_SIZE_MAX+1 : block_size); + mi_assert_internal(!is_huge || mi_page_queue_is_huge(pq)); + #endif + mi_page_t* page = mi_page_fresh_alloc(heap, pq, block_size, page_alignment); + if (page != NULL) { + mi_assert_internal(mi_page_immediate_available(page)); + + if (is_huge) { + mi_assert_internal(mi_page_is_huge(page)); + mi_assert_internal(_mi_page_segment(page)->kind == MI_SEGMENT_HUGE); + mi_assert_internal(_mi_page_segment(page)->used==1); + #if MI_HUGE_PAGE_ABANDON + mi_assert_internal(_mi_page_segment(page)->thread_id==0); // abandoned, not in the huge queue + mi_page_set_heap(page, NULL); + #endif + } + else { + mi_assert_internal(!mi_page_is_huge(page)); + } + + const size_t bsize = mi_page_usable_block_size(page); // note: not `mi_page_block_size` to account for padding + if (bsize <= MI_LARGE_OBJ_SIZE_MAX) { + mi_heap_stat_increase(heap, large, bsize); + mi_heap_stat_counter_increase(heap, large_count, 1); + } + else { + mi_heap_stat_increase(heap, huge, bsize); + mi_heap_stat_counter_increase(heap, huge_count, 1); + } + } + return page; +} + + +// Allocate a page +// Note: in debug mode the size includes MI_PADDING_SIZE and might have overflowed. +static mi_page_t* mi_find_page(mi_heap_t* heap, size_t size, size_t huge_alignment) mi_attr_noexcept { + // huge allocation? 
+ const size_t req_size = size - MI_PADDING_SIZE; // correct for padding_size in case of an overflow on `size` + if mi_unlikely(req_size > (MI_MEDIUM_OBJ_SIZE_MAX - MI_PADDING_SIZE) || huge_alignment > 0) { + if mi_unlikely(req_size > MI_MAX_ALLOC_SIZE) { + _mi_error_message(EOVERFLOW, "allocation request is too large (%zu bytes)\n", req_size); + return NULL; + } + else { + return mi_large_huge_page_alloc(heap,size,huge_alignment); + } + } + else { + // otherwise find a page with free blocks in our size segregated queues + #if MI_PADDING + mi_assert_internal(size >= MI_PADDING_SIZE); + #endif + return mi_find_free_page(heap, size); + } +} + +// Generic allocation routine if the fast path (`alloc.c:mi_page_malloc`) does not succeed. +// Note: in debug mode the size includes MI_PADDING_SIZE and might have overflowed. +// The `huge_alignment` is normally 0 but is set to a multiple of MI_SEGMENT_SIZE for +// very large requested alignments in which case we use a huge segment. +void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment) mi_attr_noexcept +{ + mi_assert_internal(heap != NULL); + + // initialize if necessary + if mi_unlikely(!mi_heap_is_initialized(heap)) { + heap = mi_heap_get_default(); // calls mi_thread_init + if mi_unlikely(!mi_heap_is_initialized(heap)) { return NULL; } + } + mi_assert_internal(mi_heap_is_initialized(heap)); + + // call potential deferred free routines + _mi_deferred_free(heap, false); + + // free delayed frees from other threads (but skip contended ones) + _mi_heap_delayed_free_partial(heap); + + // find (or allocate) a page of the right size + mi_page_t* page = mi_find_page(heap, size, huge_alignment); + if mi_unlikely(page == NULL) { // first time out of memory, try to collect and retry the allocation once more + mi_heap_collect(heap, true /* force */); + page = mi_find_page(heap, size, huge_alignment); + } + + if mi_unlikely(page == NULL) { // out of memory + const size_t req_size = size - 
MI_PADDING_SIZE; // correct for padding_size in case of an overflow on `size` + _mi_error_message(ENOMEM, "unable to allocate memory (%zu bytes)\n", req_size); + return NULL; + } + + mi_assert_internal(mi_page_immediate_available(page)); + mi_assert_internal(mi_page_block_size(page) >= size); + + // and try again, this time succeeding! (i.e. this should never recurse through _mi_page_malloc) + if mi_unlikely(zero && page->block_size == 0) { + // note: we cannot call _mi_page_malloc with zeroing for huge blocks; we zero it afterwards in that case. + void* p = _mi_page_malloc(heap, page, size); + mi_assert_internal(p != NULL); + _mi_memzero_aligned(p, mi_page_usable_block_size(page)); + return p; + } + else { + return _mi_page_malloc_zero(heap, page, size, zero); + } +} diff --git a/yass/third_party/mimalloc/src/prim/emscripten/prim.c b/yass/third_party/mimalloc/src/prim/emscripten/prim.c new file mode 100644 index 0000000000..f3797c9e66 --- /dev/null +++ b/yass/third_party/mimalloc/src/prim/emscripten/prim.c @@ -0,0 +1,244 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2018-2023, Microsoft Research, Daan Leijen, Alon Zakai +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. +-----------------------------------------------------------------------------*/ + +// This file is included in `src/prim/prim.c` + +#include "mimalloc.h" +#include "mimalloc/internal.h" +#include "mimalloc/atomic.h" +#include "mimalloc/prim.h" + +// Design +// ====== +// +// mimalloc is built on top of emmalloc. emmalloc is a minimal allocator on top +// of sbrk. The reason for having three layers here is that we want mimalloc to +// be able to allocate and release system memory properly, the same way it would +// when using VirtualAlloc on Windows or mmap on POSIX, and sbrk is too limited. 
+// Specifically, sbrk can only go up and down, and not "skip" over regions, and +// so we end up either never freeing memory to the system, or we can get stuck +// with holes. +// +// Atm wasm generally does *not* free memory back the system: once grown, we do +// not shrink back down (https://github.com/WebAssembly/design/issues/1397). +// However, that is expected to improve +// (https://github.com/WebAssembly/memory-control/blob/main/proposals/memory-control/Overview.md) +// and so we do not want to bake those limitations in here. +// +// Even without that issue, we want our system allocator to handle holes, that +// is, it should merge freed regions and allow allocating new content there of +// the full size, etc., so that we do not waste space. That means that the +// system allocator really does need to handle the general problem of allocating +// and freeing variable-sized chunks of memory in a random order, like malloc/ +// free do. And so it makes sense to layer mimalloc on top of such an +// implementation. +// +// emmalloc makes sense for the lower level because it is small and simple while +// still fully handling merging of holes etc. It is not the most efficient +// allocator, but our assumption is that mimalloc needs to be fast while the +// system allocator underneath it is called much less frequently. 
+// + +//--------------------------------------------- +// init +//--------------------------------------------- + +void _mi_prim_mem_init( mi_os_mem_config_t* config) { + config->page_size = 64*MI_KiB; // WebAssembly has a fixed page size: 64KiB + config->alloc_granularity = 16; + config->has_overcommit = false; + config->has_partial_free = false; + config->has_virtual_reserve = false; +} + +extern void emmalloc_free(void*); + +int _mi_prim_free(void* addr, size_t size) { + MI_UNUSED(size); + emmalloc_free(addr); + return 0; +} + + +//--------------------------------------------- +// Allocation +//--------------------------------------------- + +extern void* emmalloc_memalign(size_t alignment, size_t size); + +// Note: the `try_alignment` is just a hint and the returned pointer is not guaranteed to be aligned. +int _mi_prim_alloc(size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero, void** addr) { + MI_UNUSED(try_alignment); MI_UNUSED(allow_large); MI_UNUSED(commit); + *is_large = false; + // TODO: Track the highest address ever seen; first uses of it are zeroes. + // That assumes no one else uses sbrk but us (they could go up, + // scribble, and then down), but we could assert on that perhaps. + *is_zero = false; + // emmalloc has a minimum alignment size. + #define MIN_EMMALLOC_ALIGN 8 + if (try_alignment < MIN_EMMALLOC_ALIGN) { + try_alignment = MIN_EMMALLOC_ALIGN; + } + void* p = emmalloc_memalign(try_alignment, size); + *addr = p; + if (p == 0) { + return ENOMEM; + } + return 0; +} + + +//--------------------------------------------- +// Commit/Reset +//--------------------------------------------- + +int _mi_prim_commit(void* addr, size_t size, bool* is_zero) { + MI_UNUSED(addr); MI_UNUSED(size); + // See TODO above. 
+ *is_zero = false; + return 0; +} + +int _mi_prim_decommit(void* addr, size_t size, bool* needs_recommit) { + MI_UNUSED(addr); MI_UNUSED(size); + *needs_recommit = false; + return 0; +} + +int _mi_prim_reset(void* addr, size_t size) { + MI_UNUSED(addr); MI_UNUSED(size); + return 0; +} + +int _mi_prim_protect(void* addr, size_t size, bool protect) { + MI_UNUSED(addr); MI_UNUSED(size); MI_UNUSED(protect); + return 0; +} + + +//--------------------------------------------- +// Huge pages and NUMA nodes +//--------------------------------------------- + +int _mi_prim_alloc_huge_os_pages(void* hint_addr, size_t size, int numa_node, bool* is_zero, void** addr) { + MI_UNUSED(hint_addr); MI_UNUSED(size); MI_UNUSED(numa_node); + *is_zero = true; + *addr = NULL; + return ENOSYS; +} + +size_t _mi_prim_numa_node(void) { + return 0; +} + +size_t _mi_prim_numa_node_count(void) { + return 1; +} + + +//---------------------------------------------------------------- +// Clock +//---------------------------------------------------------------- + +#include + +mi_msecs_t _mi_prim_clock_now(void) { + return emscripten_date_now(); +} + + +//---------------------------------------------------------------- +// Process info +//---------------------------------------------------------------- + +void _mi_prim_process_info(mi_process_info_t* pinfo) +{ + // use defaults + MI_UNUSED(pinfo); +} + + +//---------------------------------------------------------------- +// Output +//---------------------------------------------------------------- + +#include + +void _mi_prim_out_stderr( const char* msg) { + emscripten_console_error(msg); +} + + +//---------------------------------------------------------------- +// Environment +//---------------------------------------------------------------- + +bool _mi_prim_getenv(const char* name, char* result, size_t result_size) { + // For code size reasons, do not support environ customization for now. 
+ MI_UNUSED(name); + MI_UNUSED(result); + MI_UNUSED(result_size); + return false; +} + + +//---------------------------------------------------------------- +// Random +//---------------------------------------------------------------- + +bool _mi_prim_random_buf(void* buf, size_t buf_len) { + int err = getentropy(buf, buf_len); + return !err; +} + + +//---------------------------------------------------------------- +// Thread init/done +//---------------------------------------------------------------- + +#ifdef __EMSCRIPTEN_SHARED_MEMORY__ + +// use pthread local storage keys to detect thread ending +// (and used with MI_TLS_PTHREADS for the default heap) +pthread_key_t _mi_heap_default_key = (pthread_key_t)(-1); + +static void mi_pthread_done(void* value) { + if (value!=NULL) { + _mi_thread_done((mi_heap_t*)value); + } +} + +void _mi_prim_thread_init_auto_done(void) { + mi_assert_internal(_mi_heap_default_key == (pthread_key_t)(-1)); + pthread_key_create(&_mi_heap_default_key, &mi_pthread_done); +} + +void _mi_prim_thread_done_auto_done(void) { + // nothing to do +} + +void _mi_prim_thread_associate_default_heap(mi_heap_t* heap) { + if (_mi_heap_default_key != (pthread_key_t)(-1)) { // can happen during recursive invocation on freeBSD + pthread_setspecific(_mi_heap_default_key, heap); + } +} + +#else + +void _mi_prim_thread_init_auto_done(void) { + // nothing +} + +void _mi_prim_thread_done_auto_done(void) { + // nothing +} + +void _mi_prim_thread_associate_default_heap(mi_heap_t* heap) { + MI_UNUSED(heap); + +} +#endif diff --git a/yass/third_party/mimalloc/src/prim/osx/alloc-override-zone.c b/yass/third_party/mimalloc/src/prim/osx/alloc-override-zone.c new file mode 100644 index 0000000000..1515b886b2 --- /dev/null +++ b/yass/third_party/mimalloc/src/prim/osx/alloc-override-zone.c @@ -0,0 +1,461 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2018-2022, Microsoft Research, Daan Leijen +This is free software; 
you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. +-----------------------------------------------------------------------------*/ + +#include "mimalloc.h" +#include "mimalloc/internal.h" + +#if defined(MI_MALLOC_OVERRIDE) + +#if !defined(__APPLE__) +#error "this file should only be included on macOS" +#endif + +/* ------------------------------------------------------ + Override system malloc on macOS + This is done through the malloc zone interface. + It seems to be most robust in combination with interposing + though or otherwise we may get zone errors as there are could + be allocations done by the time we take over the + zone. +------------------------------------------------------ */ + +#include +#include +#include // memset +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#if defined(MAC_OS_X_VERSION_10_6) && (MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_6) +// only available from OSX 10.6 +extern malloc_zone_t* malloc_default_purgeable_zone(void) __attribute__((weak_import)); +#endif + +/* ------------------------------------------------------ + malloc zone members +------------------------------------------------------ */ + +static size_t zone_size(malloc_zone_t* zone, const void* p) { + MI_UNUSED(zone); + if (!mi_is_in_heap_region(p)){ return 0; } // not our pointer, bail out + return mi_usable_size(p); +} + +static void* zone_malloc(malloc_zone_t* zone, size_t size) { + MI_UNUSED(zone); + return mi_malloc(size); +} + +static void* zone_calloc(malloc_zone_t* zone, size_t count, size_t size) { + MI_UNUSED(zone); + return mi_calloc(count, size); +} + +static void* zone_valloc(malloc_zone_t* zone, size_t size) { + MI_UNUSED(zone); + return mi_malloc_aligned(size, _mi_os_page_size()); +} + +static void zone_free(malloc_zone_t* zone, void* p) { + MI_UNUSED(zone); + mi_cfree(p); +} + +static void* zone_realloc(malloc_zone_t* 
zone, void* p, size_t newsize) { + MI_UNUSED(zone); + return mi_realloc(p, newsize); +} + +static void* zone_memalign(malloc_zone_t* zone, size_t alignment, size_t size) { + MI_UNUSED(zone); + return mi_malloc_aligned(size,alignment); +} + +static void zone_destroy(malloc_zone_t* zone) { + MI_UNUSED(zone); + // todo: ignore for now? +} + +static unsigned zone_batch_malloc(malloc_zone_t* zone, size_t size, void** ps, unsigned count) { + size_t i; + for (i = 0; i < count; i++) { + ps[i] = zone_malloc(zone, size); + if (ps[i] == NULL) break; + } + return i; +} + +static void zone_batch_free(malloc_zone_t* zone, void** ps, unsigned count) { + for(size_t i = 0; i < count; i++) { + zone_free(zone, ps[i]); + ps[i] = NULL; + } +} + +static size_t zone_pressure_relief(malloc_zone_t* zone, size_t size) { + MI_UNUSED(zone); MI_UNUSED(size); + mi_collect(false); + return 0; +} + +static void zone_free_definite_size(malloc_zone_t* zone, void* p, size_t size) { + MI_UNUSED(size); + zone_free(zone,p); +} + +static boolean_t zone_claimed_address(malloc_zone_t* zone, void* p) { + MI_UNUSED(zone); + return mi_is_in_heap_region(p); +} + + +/* ------------------------------------------------------ + Introspection members +------------------------------------------------------ */ + +static kern_return_t intro_enumerator(task_t task, void* p, + unsigned type_mask, vm_address_t zone_address, + memory_reader_t reader, + vm_range_recorder_t recorder) +{ + // todo: enumerate all memory + MI_UNUSED(task); MI_UNUSED(p); MI_UNUSED(type_mask); MI_UNUSED(zone_address); + MI_UNUSED(reader); MI_UNUSED(recorder); + return KERN_SUCCESS; +} + +static size_t intro_good_size(malloc_zone_t* zone, size_t size) { + MI_UNUSED(zone); + return mi_good_size(size); +} + +static boolean_t intro_check(malloc_zone_t* zone) { + MI_UNUSED(zone); + return true; +} + +static void intro_print(malloc_zone_t* zone, boolean_t verbose) { + MI_UNUSED(zone); MI_UNUSED(verbose); + mi_stats_print(NULL); +} + +static void 
intro_log(malloc_zone_t* zone, void* p) { + MI_UNUSED(zone); MI_UNUSED(p); + // todo? +} + +static void intro_force_lock(malloc_zone_t* zone) { + MI_UNUSED(zone); + // todo? +} + +static void intro_force_unlock(malloc_zone_t* zone) { + MI_UNUSED(zone); + // todo? +} + +static void intro_statistics(malloc_zone_t* zone, malloc_statistics_t* stats) { + MI_UNUSED(zone); + // todo... + stats->blocks_in_use = 0; + stats->size_in_use = 0; + stats->max_size_in_use = 0; + stats->size_allocated = 0; +} + +static boolean_t intro_zone_locked(malloc_zone_t* zone) { + MI_UNUSED(zone); + return false; +} + + +/* ------------------------------------------------------ + At process start, override the default allocator +------------------------------------------------------ */ + +#if defined(__GNUC__) && !defined(__clang__) +#pragma GCC diagnostic ignored "-Wmissing-field-initializers" +#endif + +#if defined(__clang__) +#pragma clang diagnostic ignored "-Wc99-extensions" +#endif + +static malloc_introspection_t mi_introspect = { + .enumerator = &intro_enumerator, + .good_size = &intro_good_size, + .check = &intro_check, + .print = &intro_print, + .log = &intro_log, + .force_lock = &intro_force_lock, + .force_unlock = &intro_force_unlock, +#if defined(MAC_OS_X_VERSION_10_6) && (MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_6) && !defined(__ppc__) + .statistics = &intro_statistics, + .zone_locked = &intro_zone_locked, +#endif +}; + +static malloc_zone_t mi_malloc_zone = { + // note: even with designators, the order is important for C++ compilation + //.reserved1 = NULL, + //.reserved2 = NULL, + .size = &zone_size, + .malloc = &zone_malloc, + .calloc = &zone_calloc, + .valloc = &zone_valloc, + .free = &zone_free, + .realloc = &zone_realloc, + .destroy = &zone_destroy, + .zone_name = "mimalloc", + .batch_malloc = &zone_batch_malloc, + .batch_free = &zone_batch_free, + .introspect = &mi_introspect, +#if defined(MAC_OS_X_VERSION_10_6) && (MAC_OS_X_VERSION_MAX_ALLOWED >= 
MAC_OS_X_VERSION_10_6) && !defined(__ppc__) + #if defined(MAC_OS_X_VERSION_10_14) && (MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_14) + .version = 10, + #else + .version = 9, + #endif + // switch to version 9+ on OSX 10.6 to support memalign. + .memalign = &zone_memalign, + .free_definite_size = &zone_free_definite_size, + #if defined(MAC_OS_X_VERSION_10_7) && (MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_7) + .pressure_relief = &zone_pressure_relief, + #endif + #if defined(MAC_OS_X_VERSION_10_14) && (MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_14) + .claimed_address = &zone_claimed_address, + #endif +#else + .version = 4, +#endif +}; + +#ifdef __cplusplus +} +#endif + + +#if defined(MI_OSX_INTERPOSE) && defined(MI_SHARED_LIB_EXPORT) + +// ------------------------------------------------------ +// Override malloc_xxx and malloc_zone_xxx api's to use only +// our mimalloc zone. Since even the loader uses malloc +// on macOS, this ensures that all allocations go through +// mimalloc (as all calls are interposed). +// The main `malloc`, `free`, etc calls are interposed in `alloc-override.c`, +// Here, we also override macOS specific API's like +// `malloc_zone_calloc` etc. 
see +// ------------------------------------------------------ + +static inline malloc_zone_t* mi_get_default_zone(void) +{ + static bool init; + if mi_unlikely(!init) { + init = true; + malloc_zone_register(&mi_malloc_zone); // by calling register we avoid a zone error on free (see ) + } + return &mi_malloc_zone; +} + +mi_decl_externc int malloc_jumpstart(uintptr_t cookie); +mi_decl_externc void _malloc_fork_prepare(void); +mi_decl_externc void _malloc_fork_parent(void); +mi_decl_externc void _malloc_fork_child(void); + + +static malloc_zone_t* mi_malloc_create_zone(vm_size_t size, unsigned flags) { + MI_UNUSED(size); MI_UNUSED(flags); + return mi_get_default_zone(); +} + +static malloc_zone_t* mi_malloc_default_zone (void) { + return mi_get_default_zone(); +} + +static malloc_zone_t* mi_malloc_default_purgeable_zone(void) { + return mi_get_default_zone(); +} + +static void mi_malloc_destroy_zone(malloc_zone_t* zone) { + MI_UNUSED(zone); + // nothing. +} + +static kern_return_t mi_malloc_get_all_zones (task_t task, memory_reader_t mr, vm_address_t** addresses, unsigned* count) { + MI_UNUSED(task); MI_UNUSED(mr); + if (addresses != NULL) *addresses = NULL; + if (count != NULL) *count = 0; + return KERN_SUCCESS; +} + +static const char* mi_malloc_get_zone_name(malloc_zone_t* zone) { + return (zone == NULL ? mi_malloc_zone.zone_name : zone->zone_name); +} + +static void mi_malloc_set_zone_name(malloc_zone_t* zone, const char* name) { + MI_UNUSED(zone); MI_UNUSED(name); +} + +static int mi_malloc_jumpstart(uintptr_t cookie) { + MI_UNUSED(cookie); + return 1; // or 0 for no error? +} + +static void mi__malloc_fork_prepare(void) { + // nothing +} +static void mi__malloc_fork_parent(void) { + // nothing +} +static void mi__malloc_fork_child(void) { + // nothing +} + +static void mi_malloc_printf(const char* fmt, ...) 
{ + MI_UNUSED(fmt); +} + +static bool zone_check(malloc_zone_t* zone) { + MI_UNUSED(zone); + return true; +} + +static malloc_zone_t* zone_from_ptr(const void* p) { + MI_UNUSED(p); + return mi_get_default_zone(); +} + +static void zone_log(malloc_zone_t* zone, void* p) { + MI_UNUSED(zone); MI_UNUSED(p); +} + +static void zone_print(malloc_zone_t* zone, bool b) { + MI_UNUSED(zone); MI_UNUSED(b); +} + +static void zone_print_ptr_info(void* p) { + MI_UNUSED(p); +} + +static void zone_register(malloc_zone_t* zone) { + MI_UNUSED(zone); +} + +static void zone_unregister(malloc_zone_t* zone) { + MI_UNUSED(zone); +} + +// use interposing so `DYLD_INSERT_LIBRARIES` works without `DYLD_FORCE_FLAT_NAMESPACE=1` +// See: +struct mi_interpose_s { + const void* replacement; + const void* target; +}; +#define MI_INTERPOSE_FUN(oldfun,newfun) { (const void*)&newfun, (const void*)&oldfun } +#define MI_INTERPOSE_MI(fun) MI_INTERPOSE_FUN(fun,mi_##fun) +#define MI_INTERPOSE_ZONE(fun) MI_INTERPOSE_FUN(malloc_##fun,fun) +__attribute__((used)) static const struct mi_interpose_s _mi_zone_interposes[] __attribute__((section("__DATA, __interpose"))) = +{ + + MI_INTERPOSE_MI(malloc_create_zone), + MI_INTERPOSE_MI(malloc_default_purgeable_zone), + MI_INTERPOSE_MI(malloc_default_zone), + MI_INTERPOSE_MI(malloc_destroy_zone), + MI_INTERPOSE_MI(malloc_get_all_zones), + MI_INTERPOSE_MI(malloc_get_zone_name), + MI_INTERPOSE_MI(malloc_jumpstart), + MI_INTERPOSE_MI(malloc_printf), + MI_INTERPOSE_MI(malloc_set_zone_name), + MI_INTERPOSE_MI(_malloc_fork_child), + MI_INTERPOSE_MI(_malloc_fork_parent), + MI_INTERPOSE_MI(_malloc_fork_prepare), + + MI_INTERPOSE_ZONE(zone_batch_free), + MI_INTERPOSE_ZONE(zone_batch_malloc), + MI_INTERPOSE_ZONE(zone_calloc), + MI_INTERPOSE_ZONE(zone_check), + MI_INTERPOSE_ZONE(zone_free), + MI_INTERPOSE_ZONE(zone_from_ptr), + MI_INTERPOSE_ZONE(zone_log), + MI_INTERPOSE_ZONE(zone_malloc), + MI_INTERPOSE_ZONE(zone_memalign), + MI_INTERPOSE_ZONE(zone_print), + 
MI_INTERPOSE_ZONE(zone_print_ptr_info), + MI_INTERPOSE_ZONE(zone_realloc), + MI_INTERPOSE_ZONE(zone_register), + MI_INTERPOSE_ZONE(zone_unregister), + MI_INTERPOSE_ZONE(zone_valloc) +}; + + +#else + +// ------------------------------------------------------ +// hook into the zone api's without interposing +// This is the official way of adding an allocator but +// it seems less robust than using interpose. +// ------------------------------------------------------ + +static inline malloc_zone_t* mi_get_default_zone(void) +{ + // The first returned zone is the real default + malloc_zone_t** zones = NULL; + unsigned count = 0; + kern_return_t ret = malloc_get_all_zones(0, NULL, (vm_address_t**)&zones, &count); + if (ret == KERN_SUCCESS && count > 0) { + return zones[0]; + } + else { + // fallback + return malloc_default_zone(); + } +} + +#if defined(__clang__) +__attribute__((constructor(0))) +#else +__attribute__((constructor)) // seems not supported by g++-11 on the M1 +#endif +__attribute__((used)) +static void _mi_macos_override_malloc(void) { + malloc_zone_t* purgeable_zone = NULL; + + #if defined(MAC_OS_X_VERSION_10_6) && (MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_6) + // force the purgeable zone to exist to avoid strange bugs + if (malloc_default_purgeable_zone) { + purgeable_zone = malloc_default_purgeable_zone(); + } + #endif + + // Register our zone. + // thomcc: I think this is still needed to put us in the zone list. + malloc_zone_register(&mi_malloc_zone); + // Unregister the default zone, this makes our zone the new default + // as that was the last registered. + malloc_zone_t *default_zone = mi_get_default_zone(); + // thomcc: Unsure if the next test is *always* false or just false in the + // cases I've tried. I'm also unsure if the code inside is needed. at all + if (default_zone != &mi_malloc_zone) { + malloc_zone_unregister(default_zone); + + // Reregister the default zone so free and realloc in that zone keep working. 
+ malloc_zone_register(default_zone); + } + + // Unregister, and re-register the purgeable_zone to avoid bugs if it occurs + // earlier than the default zone. + if (purgeable_zone != NULL) { + malloc_zone_unregister(purgeable_zone); + malloc_zone_register(purgeable_zone); + } + +} +#endif // MI_OSX_INTERPOSE + +#endif // MI_MALLOC_OVERRIDE diff --git a/yass/third_party/mimalloc/src/prim/osx/prim.c b/yass/third_party/mimalloc/src/prim/osx/prim.c new file mode 100644 index 0000000000..8a2f4e8aa4 --- /dev/null +++ b/yass/third_party/mimalloc/src/prim/osx/prim.c @@ -0,0 +1,9 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2018-2023, Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. +-----------------------------------------------------------------------------*/ + +// We use the unix/prim.c with the mmap API on macOSX +#include "../unix/prim.c" diff --git a/yass/third_party/mimalloc/src/prim/prim.c b/yass/third_party/mimalloc/src/prim/prim.c new file mode 100644 index 0000000000..3b7d373642 --- /dev/null +++ b/yass/third_party/mimalloc/src/prim/prim.c @@ -0,0 +1,27 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2018-2023, Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. +-----------------------------------------------------------------------------*/ + +// Select the implementation of the primitives +// depending on the OS. 
+ +#if defined(_WIN32) +#include "windows/prim.c" // VirtualAlloc (Windows) + +#elif defined(__APPLE__) +#include "osx/prim.c" // macOSX (actually defers to mmap in unix/prim.c) + +#elif defined(__wasi__) +#define MI_USE_SBRK +#include "wasi/prim.c" // memory-grow or sbrk (Wasm) + +#elif defined(__EMSCRIPTEN__) +#include "emscripten/prim.c" // emmalloc_*, + pthread support + +#else +#include "unix/prim.c" // mmap() (Linux, macOSX, BSD, Illumnos, Haiku, DragonFly, etc.) + +#endif diff --git a/yass/third_party/mimalloc/src/prim/readme.md b/yass/third_party/mimalloc/src/prim/readme.md new file mode 100644 index 0000000000..380dd3a717 --- /dev/null +++ b/yass/third_party/mimalloc/src/prim/readme.md @@ -0,0 +1,9 @@ +## Portability Primitives + +This is the portability layer where all primitives needed from the OS are defined. + +- `include/mimalloc/prim.h`: primitive portability API definition. +- `prim.c`: Selects one of `unix/prim.c`, `wasi/prim.c`, or `windows/prim.c` depending on the host platform + (and on macOS, `osx/prim.c` defers to `unix/prim.c`). + +Note: still work in progress, there may still be places in the sources that still depend on OS ifdef's. \ No newline at end of file diff --git a/yass/third_party/mimalloc/src/prim/unix/prim.c b/yass/third_party/mimalloc/src/prim/unix/prim.c new file mode 100644 index 0000000000..595ade0c47 --- /dev/null +++ b/yass/third_party/mimalloc/src/prim/unix/prim.c @@ -0,0 +1,879 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2018-2023, Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. 
+-----------------------------------------------------------------------------*/ + +// This file is included in `src/prim/prim.c` + +#ifndef _DEFAULT_SOURCE +#define _DEFAULT_SOURCE // ensure mmap flags and syscall are defined +#endif + +#if defined(__sun) +// illumos provides new mman.h api when any of these are defined +// otherwise the old api based on caddr_t which predates the void pointers one. +// stock solaris provides only the former, chose to atomically to discard those +// flags only here rather than project wide tough. +#undef _XOPEN_SOURCE +#undef _POSIX_C_SOURCE +#endif + +#include "mimalloc.h" +#include "mimalloc/internal.h" +#include "mimalloc/atomic.h" +#include "mimalloc/prim.h" + +#include // mmap +#include // sysconf +#include // open, close, read, access + +#if defined(__linux__) + #include + #if defined(MI_NO_THP) + #include + #endif + #if defined(__GLIBC__) + #include // linux mmap flags + #else + #include + #endif +#elif defined(__APPLE__) + #include + #include + #if !defined(TARGET_OS_OSX) || TARGET_OS_OSX // see issue #879, used to be (!TARGET_IOS_IPHONE && !TARGET_IOS_SIMULATOR) + #include // VM_MAKE_TAG, VM_FLAGS_SUPERPAGE_SIZE_2MB, etc. + #endif + #if !defined(MAC_OS_X_VERSION_10_7) + #define MAC_OS_X_VERSION_10_7 1070 + #endif +#elif defined(__FreeBSD__) || defined(__DragonFly__) + #include + #if __FreeBSD_version >= 1200000 + #include + #include + #endif + #include +#endif + +#if defined(__linux__) || defined(__FreeBSD__) + #define MI_HAS_SYSCALL_H + #include +#endif + + +//------------------------------------------------------------------------------------ +// Use syscalls for some primitives to allow for libraries that override open/read/close etc. +// and do allocation themselves; using syscalls prevents recursion when mimalloc is +// still initializing (issue #713) +// Declare inline to avoid unused function warnings. 
+//------------------------------------------------------------------------------------ + +#if defined(MI_HAS_SYSCALL_H) && defined(SYS_open) && defined(SYS_close) && defined(SYS_read) && defined(SYS_access) + +static inline int mi_prim_open(const char* fpath, int open_flags) { + return syscall(SYS_open,fpath,open_flags,0); +} +static inline ssize_t mi_prim_read(int fd, void* buf, size_t bufsize) { + return syscall(SYS_read,fd,buf,bufsize); +} +static inline int mi_prim_close(int fd) { + return syscall(SYS_close,fd); +} +static inline int mi_prim_access(const char *fpath, int mode) { + return syscall(SYS_access,fpath,mode); +} + +#else + +static inline int mi_prim_open(const char* fpath, int open_flags) { + return open(fpath,open_flags); +} +static inline ssize_t mi_prim_read(int fd, void* buf, size_t bufsize) { + return read(fd,buf,bufsize); +} +static inline int mi_prim_close(int fd) { + return close(fd); +} +static inline int mi_prim_access(const char *fpath, int mode) { + return access(fpath,mode); +} + +#endif + + + +//--------------------------------------------- +// init +//--------------------------------------------- + +static bool unix_detect_overcommit(void) { + bool os_overcommit = true; +#if defined(__linux__) + int fd = mi_prim_open("/proc/sys/vm/overcommit_memory", O_RDONLY); + if (fd >= 0) { + char buf[32]; + ssize_t nread = mi_prim_read(fd, &buf, sizeof(buf)); + mi_prim_close(fd); + // + // 0: heuristic overcommit, 1: always overcommit, 2: never overcommit (ignore NORESERVE) + if (nread >= 1) { + os_overcommit = (buf[0] == '0' || buf[0] == '1'); + } + } +#elif defined(__FreeBSD__) + int val = 0; + size_t olen = sizeof(val); + if (sysctlbyname("vm.overcommit", &val, &olen, NULL, 0) == 0) { + os_overcommit = (val != 0); + } +#else + // default: overcommit is true +#endif + return os_overcommit; +} + +void _mi_prim_mem_init( mi_os_mem_config_t* config ) +{ + long psize = sysconf(_SC_PAGESIZE); + if (psize > 0) { + config->page_size = (size_t)psize; + 
config->alloc_granularity = (size_t)psize; + } + config->large_page_size = 2*MI_MiB; // TODO: can we query the OS for this? + config->has_overcommit = unix_detect_overcommit(); + config->has_partial_free = true; // mmap can free in parts + config->has_virtual_reserve = true; // todo: check if this true for NetBSD? (for anonymous mmap with PROT_NONE) + + // disable transparent huge pages for this process? + #if (defined(__linux__) || defined(__ANDROID__)) && defined(PR_GET_THP_DISABLE) + #if defined(MI_NO_THP) + if (true) + #else + if (!mi_option_is_enabled(mi_option_allow_large_os_pages)) // disable THP also if large OS pages are not allowed in the options + #endif + { + int val = 0; + if (prctl(PR_GET_THP_DISABLE, &val, 0, 0, 0) != 0) { + // Most likely since distros often come with always/madvise settings. + val = 1; + // Disabling only for mimalloc process rather than touching system wide settings + (void)prctl(PR_SET_THP_DISABLE, &val, 0, 0, 0); + } + } + #endif +} + + +//--------------------------------------------- +// free +//--------------------------------------------- + +int _mi_prim_free(void* addr, size_t size ) { + bool err = (munmap(addr, size) == -1); + return (err ? 
errno : 0); +} + + +//--------------------------------------------- +// mmap +//--------------------------------------------- + +static int unix_madvise(void* addr, size_t size, int advice) { + #if defined(__sun) + return madvise((caddr_t)addr, size, advice); // Solaris needs cast (issue #520) + #else + return madvise(addr, size, advice); + #endif +} + +static void* unix_mmap_prim(void* addr, size_t size, size_t try_alignment, int protect_flags, int flags, int fd) { + MI_UNUSED(try_alignment); + void* p = NULL; + #if defined(MAP_ALIGNED) // BSD + if (addr == NULL && try_alignment > 1 && (try_alignment % _mi_os_page_size()) == 0) { + size_t n = mi_bsr(try_alignment); + if (((size_t)1 << n) == try_alignment && n >= 12 && n <= 30) { // alignment is a power of 2 and 4096 <= alignment <= 1GiB + p = mmap(addr, size, protect_flags, flags | MAP_ALIGNED(n), fd, 0); + if (p==MAP_FAILED || !_mi_is_aligned(p,try_alignment)) { + int err = errno; + _mi_trace_message("unable to directly request aligned OS memory (error: %d (0x%x), size: 0x%zx bytes, alignment: 0x%zx, hint address: %p)\n", err, err, size, try_alignment, addr); + } + if (p!=MAP_FAILED) return p; + // fall back to regular mmap + } + } + #elif defined(MAP_ALIGN) // Solaris + if (addr == NULL && try_alignment > 1 && (try_alignment % _mi_os_page_size()) == 0) { + p = mmap((void*)try_alignment, size, protect_flags, flags | MAP_ALIGN, fd, 0); // addr parameter is the required alignment + if (p!=MAP_FAILED) return p; + // fall back to regular mmap + } + #endif + #if (MI_INTPTR_SIZE >= 8) && !defined(MAP_ALIGNED) + // on 64-bit systems, use the virtual address area after 2TiB for 4MiB aligned allocations + if (addr == NULL) { + void* hint = _mi_os_get_aligned_hint(try_alignment, size); + if (hint != NULL) { + p = mmap(hint, size, protect_flags, flags, fd, 0); + if (p==MAP_FAILED || !_mi_is_aligned(p,try_alignment)) { + #if MI_TRACK_ENABLED // asan sometimes does not instrument errno correctly? 
+ int err = 0; + #else + int err = errno; + #endif + _mi_trace_message("unable to directly request hinted aligned OS memory (error: %d (0x%x), size: 0x%zx bytes, alignment: 0x%zx, hint address: %p)\n", err, err, size, try_alignment, hint); + } + if (p!=MAP_FAILED) return p; + // fall back to regular mmap + } + } + #endif + // regular mmap + p = mmap(addr, size, protect_flags, flags, fd, 0); + if (p!=MAP_FAILED) return p; + // failed to allocate + return NULL; +} + +static int unix_mmap_fd(void) { + #if defined(VM_MAKE_TAG) + // macOS: tracking anonymous page with a specific ID. (All up to 98 are taken officially but LLVM sanitizers had taken 99) + int os_tag = (int)mi_option_get(mi_option_os_tag); + if (os_tag < 100 || os_tag > 255) { os_tag = 100; } + return VM_MAKE_TAG(os_tag); + #else + return -1; + #endif +} + +static void* unix_mmap(void* addr, size_t size, size_t try_alignment, int protect_flags, bool large_only, bool allow_large, bool* is_large) { + #if !defined(MAP_ANONYMOUS) + #define MAP_ANONYMOUS MAP_ANON + #endif + #if !defined(MAP_NORESERVE) + #define MAP_NORESERVE 0 + #endif + void* p = NULL; + const int fd = unix_mmap_fd(); + int flags = MAP_PRIVATE | MAP_ANONYMOUS; + if (_mi_os_has_overcommit()) { + flags |= MAP_NORESERVE; + } + #if defined(PROT_MAX) + protect_flags |= PROT_MAX(PROT_READ | PROT_WRITE); // BSD + #endif + // huge page allocation + if ((large_only || _mi_os_use_large_page(size, try_alignment)) && allow_large) { + static _Atomic(size_t) large_page_try_ok; // = 0; + size_t try_ok = mi_atomic_load_acquire(&large_page_try_ok); + if (!large_only && try_ok > 0) { + // If the OS is not configured for large OS pages, or the user does not have + // enough permission, the `mmap` will always fail (but it might also fail for other reasons). + // Therefore, once a large page allocation failed, we don't try again for `large_page_try_ok` times + // to avoid too many failing calls to mmap. 
+ mi_atomic_cas_strong_acq_rel(&large_page_try_ok, &try_ok, try_ok - 1); + } + else { + int lflags = flags & ~MAP_NORESERVE; // using NORESERVE on huge pages seems to fail on Linux + int lfd = fd; + #ifdef MAP_ALIGNED_SUPER + lflags |= MAP_ALIGNED_SUPER; + #endif + #ifdef MAP_HUGETLB + lflags |= MAP_HUGETLB; + #endif + #ifdef MAP_HUGE_1GB + static bool mi_huge_pages_available = true; + if ((size % MI_GiB) == 0 && mi_huge_pages_available) { + lflags |= MAP_HUGE_1GB; + } + else + #endif + { + #ifdef MAP_HUGE_2MB + lflags |= MAP_HUGE_2MB; + #endif + } + #ifdef VM_FLAGS_SUPERPAGE_SIZE_2MB + lfd |= VM_FLAGS_SUPERPAGE_SIZE_2MB; + #endif + if (large_only || lflags != flags) { + // try large OS page allocation + *is_large = true; + p = unix_mmap_prim(addr, size, try_alignment, protect_flags, lflags, lfd); + #ifdef MAP_HUGE_1GB + if (p == NULL && (lflags & MAP_HUGE_1GB) == MAP_HUGE_1GB) { + mi_huge_pages_available = false; // don't try huge 1GiB pages again + _mi_warning_message("unable to allocate huge (1GiB) page, trying large (2MiB) pages instead (errno: %i)\n", errno); + lflags = ((lflags & ~MAP_HUGE_1GB) | MAP_HUGE_2MB); + p = unix_mmap_prim(addr, size, try_alignment, protect_flags, lflags, lfd); + } + #endif + if (large_only) return p; + if (p == NULL) { + mi_atomic_store_release(&large_page_try_ok, (size_t)8); // on error, don't try again for the next N allocations + } + } + } + } + // regular allocation + if (p == NULL) { + *is_large = false; + p = unix_mmap_prim(addr, size, try_alignment, protect_flags, flags, fd); + if (p != NULL) { + #if defined(MADV_HUGEPAGE) + // Many Linux systems don't allow MAP_HUGETLB but they support instead + // transparent huge pages (THP). Generally, it is not required to call `madvise` with MADV_HUGE + // though since properly aligned allocations will already use large pages if available + // in that case -- in particular for our large regions (in `memory.c`). 
+ // However, some systems only allow THP if called with explicit `madvise`, so + // when large OS pages are enabled for mimalloc, we call `madvise` anyways. + if (allow_large && _mi_os_use_large_page(size, try_alignment)) { + if (unix_madvise(p, size, MADV_HUGEPAGE) == 0) { + *is_large = true; // possibly + }; + } + #elif defined(__sun) + if (allow_large && _mi_os_use_large_page(size, try_alignment)) { + struct memcntl_mha cmd = {0}; + cmd.mha_pagesize = _mi_os_large_page_size(); + cmd.mha_cmd = MHA_MAPSIZE_VA; + if (memcntl((caddr_t)p, size, MC_HAT_ADVISE, (caddr_t)&cmd, 0, 0) == 0) { + *is_large = true; + } + } + #endif + } + } + return p; +} + +// Note: the `try_alignment` is just a hint and the returned pointer is not guaranteed to be aligned. +int _mi_prim_alloc(size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero, void** addr) { + mi_assert_internal(size > 0 && (size % _mi_os_page_size()) == 0); + mi_assert_internal(commit || !allow_large); + mi_assert_internal(try_alignment > 0); + + *is_zero = true; + int protect_flags = (commit ? (PROT_WRITE | PROT_READ) : PROT_NONE); + *addr = unix_mmap(NULL, size, try_alignment, protect_flags, false, allow_large, is_large); + return (*addr != NULL ? 
0 : errno); +} + + +//--------------------------------------------- +// Commit/Reset +//--------------------------------------------- + +static void unix_mprotect_hint(int err) { + #if defined(__linux__) && (MI_SECURE>=2) // guard page around every mimalloc page + if (err == ENOMEM) { + _mi_warning_message("The next warning may be caused by a low memory map limit.\n" + " On Linux this is controlled by the vm.max_map_count -- maybe increase it?\n" + " For example: sudo sysctl -w vm.max_map_count=262144\n"); + } + #else + MI_UNUSED(err); + #endif +} + +int _mi_prim_commit(void* start, size_t size, bool* is_zero) { + // commit: ensure we can access the area + // note: we may think that *is_zero can be true since the memory + // was either from mmap PROT_NONE, or from decommit MADV_DONTNEED, but + // we sometimes call commit on a range with still partially committed + // memory and `mprotect` does not zero the range. + *is_zero = false; + int err = mprotect(start, size, (PROT_READ | PROT_WRITE)); + if (err != 0) { + err = errno; + unix_mprotect_hint(err); + } + return err; +} + +int _mi_prim_decommit(void* start, size_t size, bool* needs_recommit) { + int err = 0; + // decommit: use MADV_DONTNEED as it decreases rss immediately (unlike MADV_FREE) + err = unix_madvise(start, size, MADV_DONTNEED); + #if !MI_DEBUG && !MI_SECURE + *needs_recommit = false; + #else + *needs_recommit = true; + mprotect(start, size, PROT_NONE); + #endif + /* + // decommit: use mmap with MAP_FIXED and PROT_NONE to discard the existing memory (and reduce rss) + *needs_recommit = true; + const int fd = unix_mmap_fd(); + void* p = mmap(start, size, PROT_NONE, (MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE), fd, 0); + if (p != start) { err = errno; } + */ + return err; +} + +int _mi_prim_reset(void* start, size_t size) { + // We try to use `MADV_FREE` as that is the fastest. 
A drawback though is that it + // will not reduce the `rss` stats in tools like `top` even though the memory is available + // to other processes. With the default `MIMALLOC_PURGE_DECOMMITS=1` we ensure that by + // default `MADV_DONTNEED` is used though. + #if defined(MADV_FREE) + static _Atomic(size_t) advice = MI_ATOMIC_VAR_INIT(MADV_FREE); + int oadvice = (int)mi_atomic_load_relaxed(&advice); + int err; + while ((err = unix_madvise(start, size, oadvice)) != 0 && errno == EAGAIN) { errno = 0; }; + if (err != 0 && errno == EINVAL && oadvice == MADV_FREE) { + // if MADV_FREE is not supported, fall back to MADV_DONTNEED from now on + mi_atomic_store_release(&advice, (size_t)MADV_DONTNEED); + err = unix_madvise(start, size, MADV_DONTNEED); + } + #else + int err = unix_madvise(start, size, MADV_DONTNEED); + #endif + return err; +} + +int _mi_prim_protect(void* start, size_t size, bool protect) { + int err = mprotect(start, size, protect ? PROT_NONE : (PROT_READ | PROT_WRITE)); + if (err != 0) { err = errno; } + unix_mprotect_hint(err); + return err; +} + + + +//--------------------------------------------- +// Huge page allocation +//--------------------------------------------- + +#if (MI_INTPTR_SIZE >= 8) && !defined(__HAIKU__) && !defined(__CYGWIN__) + +#ifndef MPOL_PREFERRED +#define MPOL_PREFERRED 1 +#endif + +#if defined(MI_HAS_SYSCALL_H) && defined(SYS_mbind) +static long mi_prim_mbind(void* start, unsigned long len, unsigned long mode, const unsigned long* nmask, unsigned long maxnode, unsigned flags) { + return syscall(SYS_mbind, start, len, mode, nmask, maxnode, flags); +} +#else +static long mi_prim_mbind(void* start, unsigned long len, unsigned long mode, const unsigned long* nmask, unsigned long maxnode, unsigned flags) { + MI_UNUSED(start); MI_UNUSED(len); MI_UNUSED(mode); MI_UNUSED(nmask); MI_UNUSED(maxnode); MI_UNUSED(flags); + return 0; +} +#endif + +int _mi_prim_alloc_huge_os_pages(void* hint_addr, size_t size, int numa_node, bool* is_zero, void** 
addr) { + bool is_large = true; + *is_zero = true; + *addr = unix_mmap(hint_addr, size, MI_SEGMENT_SIZE, PROT_READ | PROT_WRITE, true, true, &is_large); + if (*addr != NULL && numa_node >= 0 && numa_node < 8*MI_INTPTR_SIZE) { // at most 64 nodes + unsigned long numa_mask = (1UL << numa_node); + // TODO: does `mbind` work correctly for huge OS pages? should we + // use `set_mempolicy` before calling mmap instead? + // see: + long err = mi_prim_mbind(*addr, size, MPOL_PREFERRED, &numa_mask, 8*MI_INTPTR_SIZE, 0); + if (err != 0) { + err = errno; + _mi_warning_message("failed to bind huge (1GiB) pages to numa node %d (error: %d (0x%x))\n", numa_node, err, err); + } + } + return (*addr != NULL ? 0 : errno); +} + +#else + +int _mi_prim_alloc_huge_os_pages(void* hint_addr, size_t size, int numa_node, bool* is_zero, void** addr) { + MI_UNUSED(hint_addr); MI_UNUSED(size); MI_UNUSED(numa_node); + *is_zero = false; + *addr = NULL; + return ENOMEM; +} + +#endif + +//--------------------------------------------- +// NUMA nodes +//--------------------------------------------- + +#if defined(__linux__) + +size_t _mi_prim_numa_node(void) { + #if defined(MI_HAS_SYSCALL_H) && defined(SYS_getcpu) + unsigned long node = 0; + unsigned long ncpu = 0; + long err = syscall(SYS_getcpu, &ncpu, &node, NULL); + if (err != 0) return 0; + return node; + #else + return 0; + #endif +} + +size_t _mi_prim_numa_node_count(void) { + char buf[128]; + unsigned node = 0; + for(node = 0; node < 256; node++) { + // enumerate node entries -- todo: it there a more efficient way to do this? 
(but ensure there is no allocation) + _mi_snprintf(buf, 127, "/sys/devices/system/node/node%u", node + 1); + if (mi_prim_access(buf,R_OK) != 0) break; + } + return (node+1); +} + +#elif defined(__FreeBSD__) && __FreeBSD_version >= 1200000 + +size_t _mi_prim_numa_node(void) { + domainset_t dom; + size_t node; + int policy; + if (cpuset_getdomain(CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1, sizeof(dom), &dom, &policy) == -1) return 0ul; + for (node = 0; node < MAXMEMDOM; node++) { + if (DOMAINSET_ISSET(node, &dom)) return node; + } + return 0ul; +} + +size_t _mi_prim_numa_node_count(void) { + size_t ndomains = 0; + size_t len = sizeof(ndomains); + if (sysctlbyname("vm.ndomains", &ndomains, &len, NULL, 0) == -1) return 0ul; + return ndomains; +} + +#elif defined(__DragonFly__) + +size_t _mi_prim_numa_node(void) { + // TODO: DragonFly does not seem to provide any userland means to get this information. + return 0ul; +} + +size_t _mi_prim_numa_node_count(void) { + size_t ncpus = 0, nvirtcoresperphys = 0; + size_t len = sizeof(size_t); + if (sysctlbyname("hw.ncpu", &ncpus, &len, NULL, 0) == -1) return 0ul; + if (sysctlbyname("hw.cpu_topology_ht_ids", &nvirtcoresperphys, &len, NULL, 0) == -1) return 0ul; + return nvirtcoresperphys * ncpus; +} + +#else + +size_t _mi_prim_numa_node(void) { + return 0; +} + +size_t _mi_prim_numa_node_count(void) { + return 1; +} + +#endif + +// ---------------------------------------------------------------- +// Clock +// ---------------------------------------------------------------- + +#include + +#if defined(CLOCK_REALTIME) || defined(CLOCK_MONOTONIC) + +mi_msecs_t _mi_prim_clock_now(void) { + struct timespec t; + #ifdef CLOCK_MONOTONIC + clock_gettime(CLOCK_MONOTONIC, &t); + #else + clock_gettime(CLOCK_REALTIME, &t); + #endif + return ((mi_msecs_t)t.tv_sec * 1000) + ((mi_msecs_t)t.tv_nsec / 1000000); +} + +#else + +// low resolution timer +mi_msecs_t _mi_prim_clock_now(void) { + #if !defined(CLOCKS_PER_SEC) || (CLOCKS_PER_SEC == 1000) || 
(CLOCKS_PER_SEC == 0) + return (mi_msecs_t)clock(); + #elif (CLOCKS_PER_SEC < 1000) + return (mi_msecs_t)clock() * (1000 / (mi_msecs_t)CLOCKS_PER_SEC); + #else + return (mi_msecs_t)clock() / ((mi_msecs_t)CLOCKS_PER_SEC / 1000); + #endif +} + +#endif + + + + +//---------------------------------------------------------------- +// Process info +//---------------------------------------------------------------- + +#if defined(__unix__) || defined(__unix) || defined(unix) || defined(__APPLE__) || defined(__HAIKU__) +#include +#include +#include + +#if defined(__APPLE__) +#include +#endif + +#if defined(__HAIKU__) +#include +#endif + +static mi_msecs_t timeval_secs(const struct timeval* tv) { + return ((mi_msecs_t)tv->tv_sec * 1000L) + ((mi_msecs_t)tv->tv_usec / 1000L); +} + +void _mi_prim_process_info(mi_process_info_t* pinfo) +{ + struct rusage rusage; + getrusage(RUSAGE_SELF, &rusage); + pinfo->utime = timeval_secs(&rusage.ru_utime); + pinfo->stime = timeval_secs(&rusage.ru_stime); +#if !defined(__HAIKU__) + pinfo->page_faults = rusage.ru_majflt; +#endif +#if defined(__HAIKU__) + // Haiku does not have (yet?) 
a way to + // get these stats per process + thread_info tid; + area_info mem; + ssize_t c; + get_thread_info(find_thread(0), &tid); + while (get_next_area_info(tid.team, &c, &mem) == B_OK) { + pinfo->peak_rss += mem.ram_size; + } + pinfo->page_faults = 0; +#elif defined(__APPLE__) + pinfo->peak_rss = rusage.ru_maxrss; // macos reports in bytes + #ifdef MACH_TASK_BASIC_INFO + struct mach_task_basic_info info; + mach_msg_type_number_t infoCount = MACH_TASK_BASIC_INFO_COUNT; + if (task_info(mach_task_self(), MACH_TASK_BASIC_INFO, (task_info_t)&info, &infoCount) == KERN_SUCCESS) { + pinfo->current_rss = (size_t)info.resident_size; + } + #else + struct task_basic_info info; + mach_msg_type_number_t infoCount = TASK_BASIC_INFO_COUNT; + if (task_info(mach_task_self(), TASK_BASIC_INFO, (task_info_t)&info, &infoCount) == KERN_SUCCESS) { + pinfo->current_rss = (size_t)info.resident_size; + } + #endif +#else + pinfo->peak_rss = rusage.ru_maxrss * 1024; // Linux/BSD report in KiB +#endif + // use defaults for commit +} + +#else + +#ifndef __wasi__ +// WebAssembly instances are not processes +#pragma message("define a way to get process info") +#endif + +void _mi_prim_process_info(mi_process_info_t* pinfo) +{ + // use defaults + MI_UNUSED(pinfo); +} + +#endif + + +//---------------------------------------------------------------- +// Output +//---------------------------------------------------------------- + +void _mi_prim_out_stderr( const char* msg ) { + fputs(msg,stderr); +} + + +//---------------------------------------------------------------- +// Environment +//---------------------------------------------------------------- + +#if !defined(MI_USE_ENVIRON) || (MI_USE_ENVIRON!=0) +// On Posix systemsr use `environ` to access environment variables +// even before the C runtime is initialized. 
+#if defined(__APPLE__) && defined(__has_include) && __has_include() +#include +static char** mi_get_environ(void) { + return (*_NSGetEnviron()); +} +#else +extern char** environ; +static char** mi_get_environ(void) { + return environ; +} +#endif +bool _mi_prim_getenv(const char* name, char* result, size_t result_size) { + if (name==NULL) return false; + const size_t len = _mi_strlen(name); + if (len == 0) return false; + char** env = mi_get_environ(); + if (env == NULL) return false; + // compare up to 10000 entries + for (int i = 0; i < 10000 && env[i] != NULL; i++) { + const char* s = env[i]; + if (_mi_strnicmp(name, s, len) == 0 && s[len] == '=') { // case insensitive + // found it + _mi_strlcpy(result, s + len + 1, result_size); + return true; + } + } + return false; +} +#else +#include +// fallback: use standard C `getenv` but this cannot be used while initializing the C runtime +bool _mi_prim_getenv(const char* name, char* result, size_t result_size) { + // cannot call getenv() when still initializing the C runtime. + if (_mi_preloading()) return false; + const char* s = getenv(name); + if (s == NULL) { + // we check the upper case name too. + char buf[64+1]; + size_t len = _mi_strnlen(name,sizeof(buf)-1); + for (size_t i = 0; i < len; i++) { + buf[i] = _mi_toupper(name[i]); + } + buf[len] = 0; + s = getenv(buf); + } + if (s == NULL || _mi_strnlen(s,result_size) >= result_size) return false; + _mi_strlcpy(result, s, result_size); + return true; +} +#endif // !MI_USE_ENVIRON + + +//---------------------------------------------------------------- +// Random +//---------------------------------------------------------------- + +#if defined(__APPLE__) && defined(MAC_OS_X_VERSION_10_15) && (MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_15) +#include +#include + +bool _mi_prim_random_buf(void* buf, size_t buf_len) { + // We prefere CCRandomGenerateBytes as it returns an error code while arc4random_buf + // may fail silently on macOS. 
See PR #390, and + return (CCRandomGenerateBytes(buf, buf_len) == kCCSuccess); +} + +#elif defined(__ANDROID__) || defined(__DragonFly__) || \ + defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__) || \ + defined(__sun) || \ + (defined(__APPLE__) && (MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_7)) + +#include +bool _mi_prim_random_buf(void* buf, size_t buf_len) { + arc4random_buf(buf, buf_len); + return true; +} + +#elif defined(__APPLE__) || defined(__linux__) || defined(__HAIKU__) // also for old apple versions < 10.7 (issue #829) + +#include +#include +#include + +bool _mi_prim_random_buf(void* buf, size_t buf_len) { + // Modern Linux provides `getrandom` but different distributions either use `sys/random.h` or `linux/random.h` + // and for the latter the actual `getrandom` call is not always defined. + // (see ) + // We therefore use a syscall directly and fall back dynamically to /dev/urandom when needed. + #if defined(MI_HAS_SYSCALL_H) && defined(SYS_getrandom) + #ifndef GRND_NONBLOCK + #define GRND_NONBLOCK (1) + #endif + static _Atomic(uintptr_t) no_getrandom; // = 0 + if (mi_atomic_load_acquire(&no_getrandom)==0) { + ssize_t ret = syscall(SYS_getrandom, buf, buf_len, GRND_NONBLOCK); + if (ret >= 0) return (buf_len == (size_t)ret); + if (errno != ENOSYS) return false; + mi_atomic_store_release(&no_getrandom, (uintptr_t)1); // don't call again, and fall back to /dev/urandom + } + #endif + int flags = O_RDONLY; + #if defined(O_CLOEXEC) + flags |= O_CLOEXEC; + #endif + int fd = mi_prim_open("/dev/urandom", flags); + if (fd < 0) return false; + size_t count = 0; + while(count < buf_len) { + ssize_t ret = mi_prim_read(fd, (char*)buf + count, buf_len - count); + if (ret<=0) { + if (errno!=EAGAIN && errno!=EINTR) break; + } + else { + count += ret; + } + } + mi_prim_close(fd); + return (count==buf_len); +} + +#else + +bool _mi_prim_random_buf(void* buf, size_t buf_len) { + return false; +} + +#endif + + 
+//---------------------------------------------------------------- +// Thread init/done +//---------------------------------------------------------------- + +#if defined(MI_USE_PTHREADS) + +// use pthread local storage keys to detect thread ending +// (and used with MI_TLS_PTHREADS for the default heap) +pthread_key_t _mi_heap_default_key = (pthread_key_t)(-1); + +static void mi_pthread_done(void* value) { + if (value!=NULL) { + _mi_thread_done((mi_heap_t*)value); + } +} + +void _mi_prim_thread_init_auto_done(void) { + mi_assert_internal(_mi_heap_default_key == (pthread_key_t)(-1)); + pthread_key_create(&_mi_heap_default_key, &mi_pthread_done); +} + +void _mi_prim_thread_done_auto_done(void) { + if (_mi_heap_default_key != (pthread_key_t)(-1)) { // do not leak the key, see issue #809 + pthread_key_delete(_mi_heap_default_key); + } +} + +void _mi_prim_thread_associate_default_heap(mi_heap_t* heap) { + if (_mi_heap_default_key != (pthread_key_t)(-1)) { // can happen during recursive invocation on freeBSD + pthread_setspecific(_mi_heap_default_key, heap); + } +} + +#else + +void _mi_prim_thread_init_auto_done(void) { + // nothing +} + +void _mi_prim_thread_done_auto_done(void) { + // nothing +} + +void _mi_prim_thread_associate_default_heap(mi_heap_t* heap) { + MI_UNUSED(heap); +} + +#endif diff --git a/yass/third_party/mimalloc/src/prim/wasi/prim.c b/yass/third_party/mimalloc/src/prim/wasi/prim.c new file mode 100644 index 0000000000..e95f67f587 --- /dev/null +++ b/yass/third_party/mimalloc/src/prim/wasi/prim.c @@ -0,0 +1,280 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2018-2023, Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. 
+-----------------------------------------------------------------------------*/ + +// This file is included in `src/prim/prim.c` + +#include "mimalloc.h" +#include "mimalloc/internal.h" +#include "mimalloc/atomic.h" +#include "mimalloc/prim.h" + +#include // fputs +#include // getenv + +//--------------------------------------------- +// Initialize +//--------------------------------------------- + +void _mi_prim_mem_init( mi_os_mem_config_t* config ) { + config->page_size = 64*MI_KiB; // WebAssembly has a fixed page size: 64KiB + config->alloc_granularity = 16; + config->has_overcommit = false; + config->has_partial_free = false; + config->has_virtual_reserve = false; +} + +//--------------------------------------------- +// Free +//--------------------------------------------- + +int _mi_prim_free(void* addr, size_t size ) { + MI_UNUSED(addr); MI_UNUSED(size); + // wasi heap cannot be shrunk + return 0; +} + + +//--------------------------------------------- +// Allocation: sbrk or memory_grow +//--------------------------------------------- + +#if defined(MI_USE_SBRK) + #include // for sbrk + + static void* mi_memory_grow( size_t size ) { + void* p = sbrk(size); + if (p == (void*)(-1)) return NULL; + #if !defined(__wasi__) // on wasi this is always zero initialized already (?) + memset(p,0,size); + #endif + return p; + } +#elif defined(__wasi__) + static void* mi_memory_grow( size_t size ) { + size_t base = (size > 0 ? 
__builtin_wasm_memory_grow(0,_mi_divide_up(size, _mi_os_page_size())) + : __builtin_wasm_memory_size(0)); + if (base == SIZE_MAX) return NULL; + return (void*)(base * _mi_os_page_size()); + } +#endif + +#if defined(MI_USE_PTHREADS) +static pthread_mutex_t mi_heap_grow_mutex = PTHREAD_MUTEX_INITIALIZER; +#endif + +static void* mi_prim_mem_grow(size_t size, size_t try_alignment) { + void* p = NULL; + if (try_alignment <= 1) { + // `sbrk` is not thread safe in general so try to protect it (we could skip this on WASM but leave it in for now) + #if defined(MI_USE_PTHREADS) + pthread_mutex_lock(&mi_heap_grow_mutex); + #endif + p = mi_memory_grow(size); + #if defined(MI_USE_PTHREADS) + pthread_mutex_unlock(&mi_heap_grow_mutex); + #endif + } + else { + void* base = NULL; + size_t alloc_size = 0; + // to allocate aligned use a lock to try to avoid thread interaction + // between getting the current size and actual allocation + // (also, `sbrk` is not thread safe in general) + #if defined(MI_USE_PTHREADS) + pthread_mutex_lock(&mi_heap_grow_mutex); + #endif + { + void* current = mi_memory_grow(0); // get current size + if (current != NULL) { + void* aligned_current = mi_align_up_ptr(current, try_alignment); // and align from there to minimize wasted space + alloc_size = _mi_align_up( ((uint8_t*)aligned_current - (uint8_t*)current) + size, _mi_os_page_size()); + base = mi_memory_grow(alloc_size); + } + } + #if defined(MI_USE_PTHREADS) + pthread_mutex_unlock(&mi_heap_grow_mutex); + #endif + if (base != NULL) { + p = mi_align_up_ptr(base, try_alignment); + if ((uint8_t*)p + size > (uint8_t*)base + alloc_size) { + // another thread used wasm_memory_grow/sbrk in-between and we do not have enough + // space after alignment. 
Give up (and waste the space as we cannot shrink :-( ) + // (in `mi_os_mem_alloc_aligned` this will fall back to overallocation to align) + p = NULL; + } + } + } + /* + if (p == NULL) { + _mi_warning_message("unable to allocate sbrk/wasm_memory_grow OS memory (%zu bytes, %zu alignment)\n", size, try_alignment); + errno = ENOMEM; + return NULL; + } + */ + mi_assert_internal( p == NULL || try_alignment == 0 || (uintptr_t)p % try_alignment == 0 ); + return p; +} + +// Note: the `try_alignment` is just a hint and the returned pointer is not guaranteed to be aligned. +int _mi_prim_alloc(size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero, void** addr) { + MI_UNUSED(allow_large); MI_UNUSED(commit); + *is_large = false; + *is_zero = false; + *addr = mi_prim_mem_grow(size, try_alignment); + return (*addr != NULL ? 0 : ENOMEM); +} + + +//--------------------------------------------- +// Commit/Reset/Protect +//--------------------------------------------- + +int _mi_prim_commit(void* addr, size_t size, bool* is_zero) { + MI_UNUSED(addr); MI_UNUSED(size); + *is_zero = false; + return 0; +} + +int _mi_prim_decommit(void* addr, size_t size, bool* needs_recommit) { + MI_UNUSED(addr); MI_UNUSED(size); + *needs_recommit = false; + return 0; +} + +int _mi_prim_reset(void* addr, size_t size) { + MI_UNUSED(addr); MI_UNUSED(size); + return 0; +} + +int _mi_prim_protect(void* addr, size_t size, bool protect) { + MI_UNUSED(addr); MI_UNUSED(size); MI_UNUSED(protect); + return 0; +} + + +//--------------------------------------------- +// Huge pages and NUMA nodes +//--------------------------------------------- + +int _mi_prim_alloc_huge_os_pages(void* hint_addr, size_t size, int numa_node, bool* is_zero, void** addr) { + MI_UNUSED(hint_addr); MI_UNUSED(size); MI_UNUSED(numa_node); + *is_zero = true; + *addr = NULL; + return ENOSYS; +} + +size_t _mi_prim_numa_node(void) { + return 0; +} + +size_t _mi_prim_numa_node_count(void) { + return 1; +} 
+ + +//---------------------------------------------------------------- +// Clock +//---------------------------------------------------------------- + +#include + +#if defined(CLOCK_REALTIME) || defined(CLOCK_MONOTONIC) + +mi_msecs_t _mi_prim_clock_now(void) { + struct timespec t; + #ifdef CLOCK_MONOTONIC + clock_gettime(CLOCK_MONOTONIC, &t); + #else + clock_gettime(CLOCK_REALTIME, &t); + #endif + return ((mi_msecs_t)t.tv_sec * 1000) + ((mi_msecs_t)t.tv_nsec / 1000000); +} + +#else + +// low resolution timer +mi_msecs_t _mi_prim_clock_now(void) { + #if !defined(CLOCKS_PER_SEC) || (CLOCKS_PER_SEC == 1000) || (CLOCKS_PER_SEC == 0) + return (mi_msecs_t)clock(); + #elif (CLOCKS_PER_SEC < 1000) + return (mi_msecs_t)clock() * (1000 / (mi_msecs_t)CLOCKS_PER_SEC); + #else + return (mi_msecs_t)clock() / ((mi_msecs_t)CLOCKS_PER_SEC / 1000); + #endif +} + +#endif + + +//---------------------------------------------------------------- +// Process info +//---------------------------------------------------------------- + +void _mi_prim_process_info(mi_process_info_t* pinfo) +{ + // use defaults + MI_UNUSED(pinfo); +} + + +//---------------------------------------------------------------- +// Output +//---------------------------------------------------------------- + +void _mi_prim_out_stderr( const char* msg ) { + fputs(msg,stderr); +} + + +//---------------------------------------------------------------- +// Environment +//---------------------------------------------------------------- + +bool _mi_prim_getenv(const char* name, char* result, size_t result_size) { + // cannot call getenv() when still initializing the C runtime. + if (_mi_preloading()) return false; + const char* s = getenv(name); + if (s == NULL) { + // we check the upper case name too. 
+ char buf[64+1]; + size_t len = _mi_strnlen(name,sizeof(buf)-1); + for (size_t i = 0; i < len; i++) { + buf[i] = _mi_toupper(name[i]); + } + buf[len] = 0; + s = getenv(buf); + } + if (s == NULL || _mi_strnlen(s,result_size) >= result_size) return false; + _mi_strlcpy(result, s, result_size); + return true; +} + + +//---------------------------------------------------------------- +// Random +//---------------------------------------------------------------- + +bool _mi_prim_random_buf(void* buf, size_t buf_len) { + return false; +} + + +//---------------------------------------------------------------- +// Thread init/done +//---------------------------------------------------------------- + +void _mi_prim_thread_init_auto_done(void) { + // nothing +} + +void _mi_prim_thread_done_auto_done(void) { + // nothing +} + +void _mi_prim_thread_associate_default_heap(mi_heap_t* heap) { + MI_UNUSED(heap); +} diff --git a/yass/third_party/mimalloc/src/prim/windows/etw-mimalloc.wprp b/yass/third_party/mimalloc/src/prim/windows/etw-mimalloc.wprp new file mode 100644 index 0000000000..b00cd7adf2 --- /dev/null +++ b/yass/third_party/mimalloc/src/prim/windows/etw-mimalloc.wprp @@ -0,0 +1,61 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/yass/third_party/mimalloc/src/prim/windows/etw.h b/yass/third_party/mimalloc/src/prim/windows/etw.h new file mode 100644 index 0000000000..4e0a092a10 --- /dev/null +++ b/yass/third_party/mimalloc/src/prim/windows/etw.h @@ -0,0 +1,905 @@ +//**********************************************************************` +//* This is an include file generated by Message Compiler. *` +//* *` +//* Copyright (c) Microsoft Corporation. All Rights Reserved. 
*` +//**********************************************************************` +#pragma once + +//***************************************************************************** +// +// Notes on the ETW event code generated by MC: +// +// - Structures and arrays of structures are treated as an opaque binary blob. +// The caller is responsible for packing the data for the structure into a +// single region of memory, with no padding between values. The macro will +// have an extra parameter for the length of the blob. +// - Arrays of nul-terminated strings must be packed by the caller into a +// single binary blob containing the correct number of strings, with a nul +// after each string. The size of the blob is specified in characters, and +// includes the final nul. +// - Arrays of SID are treated as a single binary blob. The caller is +// responsible for packing the SID values into a single region of memory with +// no padding. +// - The length attribute on the data element in the manifest is significant +// for values with intype win:UnicodeString, win:AnsiString, or win:Binary. +// The length attribute must be specified for win:Binary, and is optional for +// win:UnicodeString and win:AnsiString (if no length is given, the strings +// are assumed to be nul-terminated). For win:UnicodeString, the length is +// measured in characters, not bytes. +// - For an array of win:UnicodeString, win:AnsiString, or win:Binary, the +// length attribute applies to every value in the array, so every value in +// the array must have the same length. The values in the array are provided +// to the macro via a single pointer -- the caller is responsible for packing +// all of the values into a single region of memory with no padding between +// values. +// - Values of type win:CountedUnicodeString, win:CountedAnsiString, and +// win:CountedBinary can be generated and collected on Vista or later. +// However, they may not decode properly without the Windows 10 2018 Fall +// Update. 
+// - Arrays of type win:CountedUnicodeString, win:CountedAnsiString, and +// win:CountedBinary must be packed by the caller into a single region of +// memory. The format for each item is a UINT16 byte-count followed by that +// many bytes of data. When providing the array to the generated macro, you +// must provide the total size of the packed array data, including the UINT16 +// sizes for each item. In the case of win:CountedUnicodeString, the data +// size is specified in WCHAR (16-bit) units. In the case of +// win:CountedAnsiString and win:CountedBinary, the data size is specified in +// bytes. +// +//***************************************************************************** + +#include +#include +#include + +#ifndef ETW_INLINE + #ifdef _ETW_KM_ + // In kernel mode, save stack space by never inlining templates. + #define ETW_INLINE DECLSPEC_NOINLINE __inline + #else + // In user mode, save code size by inlining templates as appropriate. + #define ETW_INLINE __inline + #endif +#endif // ETW_INLINE + +#if defined(__cplusplus) +extern "C" { +#endif + +// +// MCGEN_DISABLE_PROVIDER_CODE_GENERATION macro: +// Define this macro to have the compiler skip the generated functions in this +// header. +// +#ifndef MCGEN_DISABLE_PROVIDER_CODE_GENERATION + +// +// MCGEN_USE_KERNEL_MODE_APIS macro: +// Controls whether the generated code uses kernel-mode or user-mode APIs. +// - Set to 0 to use Windows user-mode APIs such as EventRegister. +// - Set to 1 to use Windows kernel-mode APIs such as EtwRegister. +// Default is based on whether the _ETW_KM_ macro is defined (i.e. by wdm.h). +// Note that the APIs can also be overridden directly, e.g. by setting the +// MCGEN_EVENTWRITETRANSFER or MCGEN_EVENTREGISTER macros. 
+// +#ifndef MCGEN_USE_KERNEL_MODE_APIS + #ifdef _ETW_KM_ + #define MCGEN_USE_KERNEL_MODE_APIS 1 + #else + #define MCGEN_USE_KERNEL_MODE_APIS 0 + #endif +#endif // MCGEN_USE_KERNEL_MODE_APIS + +// +// MCGEN_HAVE_EVENTSETINFORMATION macro: +// Controls how McGenEventSetInformation uses the EventSetInformation API. +// - Set to 0 to disable the use of EventSetInformation +// (McGenEventSetInformation will always return an error). +// - Set to 1 to directly invoke MCGEN_EVENTSETINFORMATION. +// - Set to 2 to to locate EventSetInformation at runtime via GetProcAddress +// (user-mode) or MmGetSystemRoutineAddress (kernel-mode). +// Default is determined as follows: +// - If MCGEN_EVENTSETINFORMATION has been customized, set to 1 +// (i.e. use MCGEN_EVENTSETINFORMATION). +// - Else if the target OS version has EventSetInformation, set to 1 +// (i.e. use MCGEN_EVENTSETINFORMATION). +// - Else set to 2 (i.e. try to dynamically locate EventSetInformation). +// Note that an McGenEventSetInformation function will only be generated if one +// or more provider in a manifest has provider traits. +// +#ifndef MCGEN_HAVE_EVENTSETINFORMATION + #ifdef MCGEN_EVENTSETINFORMATION // if MCGEN_EVENTSETINFORMATION has been customized, + #define MCGEN_HAVE_EVENTSETINFORMATION 1 // directly invoke MCGEN_EVENTSETINFORMATION(...). + #elif MCGEN_USE_KERNEL_MODE_APIS // else if using kernel-mode APIs, + #if NTDDI_VERSION >= 0x06040000 // if target OS is Windows 10 or later, + #define MCGEN_HAVE_EVENTSETINFORMATION 1 // directly invoke MCGEN_EVENTSETINFORMATION(...). + #else // else + #define MCGEN_HAVE_EVENTSETINFORMATION 2 // find "EtwSetInformation" via MmGetSystemRoutineAddress. + #endif // else (using user-mode APIs) + #else // if target OS and SDK is Windows 8 or later, + #if WINVER >= 0x0602 && defined(EVENT_FILTER_TYPE_SCHEMATIZED) + #define MCGEN_HAVE_EVENTSETINFORMATION 1 // directly invoke MCGEN_EVENTSETINFORMATION(...). 
+ #else // else + #define MCGEN_HAVE_EVENTSETINFORMATION 2 // find "EventSetInformation" via GetModuleHandleExW/GetProcAddress. + #endif + #endif +#endif // MCGEN_HAVE_EVENTSETINFORMATION + +// +// MCGEN Override Macros +// +// The following override macros may be defined before including this header +// to control the APIs used by this header: +// +// - MCGEN_EVENTREGISTER +// - MCGEN_EVENTUNREGISTER +// - MCGEN_EVENTSETINFORMATION +// - MCGEN_EVENTWRITETRANSFER +// +// If the the macro is undefined, the MC implementation will default to the +// corresponding ETW APIs. For example, if the MCGEN_EVENTREGISTER macro is +// undefined, the EventRegister[MyProviderName] macro will use EventRegister +// in user mode and will use EtwRegister in kernel mode. +// +// To prevent issues from conflicting definitions of these macros, the value +// of the override macro will be used as a suffix in certain internal function +// names. Because of this, the override macros must follow certain rules: +// +// - The macro must be defined before any MC-generated header is included and +// must not be undefined or redefined after any MC-generated header is +// included. Different translation units (i.e. different .c or .cpp files) +// may set the macros to different values, but within a translation unit +// (within a single .c or .cpp file), the macro must be set once and not +// changed. +// - The override must be an object-like macro, not a function-like macro +// (i.e. the override macro must not have a parameter list). +// - The override macro's value must be a simple identifier, i.e. must be +// something that starts with a letter or '_' and contains only letters, +// numbers, and '_' characters. +// - If the override macro's value is the name of a second object-like macro, +// the second object-like macro must follow the same rules. 
(The override +// macro's value can also be the name of a function-like macro, in which +// case the function-like macro does not need to follow the same rules.) +// +// For example, the following will cause compile errors: +// +// #define MCGEN_EVENTWRITETRANSFER MyNamespace::MyClass::MyFunction // Value has non-identifier characters (colon). +// #define MCGEN_EVENTWRITETRANSFER GetEventWriteFunctionPointer(7) // Value has non-identifier characters (parentheses). +// #define MCGEN_EVENTWRITETRANSFER(h,e,a,r,c,d) EventWrite(h,e,c,d) // Override is defined as a function-like macro. +// #define MY_OBJECT_LIKE_MACRO MyNamespace::MyClass::MyEventWriteFunction +// #define MCGEN_EVENTWRITETRANSFER MY_OBJECT_LIKE_MACRO // Evaluates to something with non-identifier characters (colon). +// +// The following would be ok: +// +// #define MCGEN_EVENTWRITETRANSFER MyEventWriteFunction1 // OK, suffix will be "MyEventWriteFunction1". +// #define MY_OBJECT_LIKE_MACRO MyEventWriteFunction2 +// #define MCGEN_EVENTWRITETRANSFER MY_OBJECT_LIKE_MACRO // OK, suffix will be "MyEventWriteFunction2". +// #define MY_FUNCTION_LIKE_MACRO(h,e,a,r,c,d) MyNamespace::MyClass::MyEventWriteFunction3(h,e,c,d) +// #define MCGEN_EVENTWRITETRANSFER MY_FUNCTION_LIKE_MACRO // OK, suffix will be "MY_FUNCTION_LIKE_MACRO". 
+// +#ifndef MCGEN_EVENTREGISTER + #if MCGEN_USE_KERNEL_MODE_APIS + #define MCGEN_EVENTREGISTER EtwRegister + #else + #define MCGEN_EVENTREGISTER EventRegister + #endif +#endif // MCGEN_EVENTREGISTER +#ifndef MCGEN_EVENTUNREGISTER + #if MCGEN_USE_KERNEL_MODE_APIS + #define MCGEN_EVENTUNREGISTER EtwUnregister + #else + #define MCGEN_EVENTUNREGISTER EventUnregister + #endif +#endif // MCGEN_EVENTUNREGISTER +#ifndef MCGEN_EVENTSETINFORMATION + #if MCGEN_USE_KERNEL_MODE_APIS + #define MCGEN_EVENTSETINFORMATION EtwSetInformation + #else + #define MCGEN_EVENTSETINFORMATION EventSetInformation + #endif +#endif // MCGEN_EVENTSETINFORMATION +#ifndef MCGEN_EVENTWRITETRANSFER + #if MCGEN_USE_KERNEL_MODE_APIS + #define MCGEN_EVENTWRITETRANSFER EtwWriteTransfer + #else + #define MCGEN_EVENTWRITETRANSFER EventWriteTransfer + #endif +#endif // MCGEN_EVENTWRITETRANSFER + +// +// MCGEN_EVENT_ENABLED macro: +// Override to control how the EventWrite[EventName] macros determine whether +// an event is enabled. The default behavior is for EventWrite[EventName] to +// use the EventEnabled[EventName] macros. +// +#ifndef MCGEN_EVENT_ENABLED +#define MCGEN_EVENT_ENABLED(EventName) EventEnabled##EventName() +#endif + +// +// MCGEN_EVENT_ENABLED_FORCONTEXT macro: +// Override to control how the EventWrite[EventName]_ForContext macros +// determine whether an event is enabled. The default behavior is for +// EventWrite[EventName]_ForContext to use the +// EventEnabled[EventName]_ForContext macros. +// +#ifndef MCGEN_EVENT_ENABLED_FORCONTEXT +#define MCGEN_EVENT_ENABLED_FORCONTEXT(pContext, EventName) EventEnabled##EventName##_ForContext(pContext) +#endif + +// +// MCGEN_ENABLE_CHECK macro: +// Determines whether the specified event would be considered as enabled +// based on the state of the specified context. Slightly faster than calling +// McGenEventEnabled directly. 
+// +#ifndef MCGEN_ENABLE_CHECK +#define MCGEN_ENABLE_CHECK(Context, Descriptor) (Context.IsEnabled && McGenEventEnabled(&Context, &Descriptor)) +#endif + +#if !defined(MCGEN_TRACE_CONTEXT_DEF) +#define MCGEN_TRACE_CONTEXT_DEF +// This structure is for use by MC-generated code and should not be used directly. +typedef struct _MCGEN_TRACE_CONTEXT +{ + TRACEHANDLE RegistrationHandle; + TRACEHANDLE Logger; // Used as pointer to provider traits. + ULONGLONG MatchAnyKeyword; + ULONGLONG MatchAllKeyword; + ULONG Flags; + ULONG IsEnabled; + UCHAR Level; + UCHAR Reserve; + USHORT EnableBitsCount; + PULONG EnableBitMask; + const ULONGLONG* EnableKeyWords; + const UCHAR* EnableLevel; +} MCGEN_TRACE_CONTEXT, *PMCGEN_TRACE_CONTEXT; +#endif // MCGEN_TRACE_CONTEXT_DEF + +#if !defined(MCGEN_LEVEL_KEYWORD_ENABLED_DEF) +#define MCGEN_LEVEL_KEYWORD_ENABLED_DEF +// +// Determines whether an event with a given Level and Keyword would be +// considered as enabled based on the state of the specified context. +// Note that you may want to use MCGEN_ENABLE_CHECK instead of calling this +// function directly. +// +FORCEINLINE +BOOLEAN +McGenLevelKeywordEnabled( + _In_ PMCGEN_TRACE_CONTEXT EnableInfo, + _In_ UCHAR Level, + _In_ ULONGLONG Keyword + ) +{ + // + // Check if the event Level is lower than the level at which + // the channel is enabled. + // If the event Level is 0 or the channel is enabled at level 0, + // all levels are enabled. + // + + if ((Level <= EnableInfo->Level) || // This also covers the case of Level == 0. 
+ (EnableInfo->Level == 0)) { + + // + // Check if Keyword is enabled + // + + if ((Keyword == (ULONGLONG)0) || + ((Keyword & EnableInfo->MatchAnyKeyword) && + ((Keyword & EnableInfo->MatchAllKeyword) == EnableInfo->MatchAllKeyword))) { + return TRUE; + } + } + + return FALSE; +} +#endif // MCGEN_LEVEL_KEYWORD_ENABLED_DEF + +#if !defined(MCGEN_EVENT_ENABLED_DEF) +#define MCGEN_EVENT_ENABLED_DEF +// +// Determines whether the specified event would be considered as enabled based +// on the state of the specified context. Note that you may want to use +// MCGEN_ENABLE_CHECK instead of calling this function directly. +// +FORCEINLINE +BOOLEAN +McGenEventEnabled( + _In_ PMCGEN_TRACE_CONTEXT EnableInfo, + _In_ PCEVENT_DESCRIPTOR EventDescriptor + ) +{ + return McGenLevelKeywordEnabled(EnableInfo, EventDescriptor->Level, EventDescriptor->Keyword); +} +#endif // MCGEN_EVENT_ENABLED_DEF + +#if !defined(MCGEN_CONTROL_CALLBACK) +#define MCGEN_CONTROL_CALLBACK + +// This function is for use by MC-generated code and should not be used directly. +DECLSPEC_NOINLINE __inline +VOID +__stdcall +McGenControlCallbackV2( + _In_ LPCGUID SourceId, + _In_ ULONG ControlCode, + _In_ UCHAR Level, + _In_ ULONGLONG MatchAnyKeyword, + _In_ ULONGLONG MatchAllKeyword, + _In_opt_ PEVENT_FILTER_DESCRIPTOR FilterData, + _Inout_opt_ PVOID CallbackContext + ) +/*++ + +Routine Description: + + This is the notification callback for Windows Vista and later. + +Arguments: + + SourceId - The GUID that identifies the session that enabled the provider. + + ControlCode - The parameter indicates whether the provider + is being enabled or disabled. + + Level - The level at which the event is enabled. + + MatchAnyKeyword - The bitmask of keywords that the provider uses to + determine the category of events that it writes. + + MatchAllKeyword - This bitmask additionally restricts the category + of events that the provider writes. + + FilterData - The provider-defined data. 
+ + CallbackContext - The context of the callback that is defined when the provider + called EtwRegister to register itself. + +Remarks: + + ETW calls this function to notify provider of enable/disable + +--*/ +{ + PMCGEN_TRACE_CONTEXT Ctx = (PMCGEN_TRACE_CONTEXT)CallbackContext; + ULONG Ix; +#ifndef MCGEN_PRIVATE_ENABLE_CALLBACK_V2 + UNREFERENCED_PARAMETER(SourceId); + UNREFERENCED_PARAMETER(FilterData); +#endif + + if (Ctx == NULL) { + return; + } + + switch (ControlCode) { + + case EVENT_CONTROL_CODE_ENABLE_PROVIDER: + Ctx->Level = Level; + Ctx->MatchAnyKeyword = MatchAnyKeyword; + Ctx->MatchAllKeyword = MatchAllKeyword; + Ctx->IsEnabled = EVENT_CONTROL_CODE_ENABLE_PROVIDER; + + for (Ix = 0; Ix < Ctx->EnableBitsCount; Ix += 1) { + if (McGenLevelKeywordEnabled(Ctx, Ctx->EnableLevel[Ix], Ctx->EnableKeyWords[Ix]) != FALSE) { + Ctx->EnableBitMask[Ix >> 5] |= (1 << (Ix % 32)); + } else { + Ctx->EnableBitMask[Ix >> 5] &= ~(1 << (Ix % 32)); + } + } + break; + + case EVENT_CONTROL_CODE_DISABLE_PROVIDER: + Ctx->IsEnabled = EVENT_CONTROL_CODE_DISABLE_PROVIDER; + Ctx->Level = 0; + Ctx->MatchAnyKeyword = 0; + Ctx->MatchAllKeyword = 0; + if (Ctx->EnableBitsCount > 0) { +#pragma warning(suppress: 26451) // Arithmetic overflow cannot occur, no matter the value of EnableBitCount + RtlZeroMemory(Ctx->EnableBitMask, (((Ctx->EnableBitsCount - 1) / 32) + 1) * sizeof(ULONG)); + } + break; + + default: + break; + } + +#ifdef MCGEN_PRIVATE_ENABLE_CALLBACK_V2 + // + // Call user defined callback + // + MCGEN_PRIVATE_ENABLE_CALLBACK_V2( + SourceId, + ControlCode, + Level, + MatchAnyKeyword, + MatchAllKeyword, + FilterData, + CallbackContext + ); +#endif // MCGEN_PRIVATE_ENABLE_CALLBACK_V2 + + return; +} + +#endif // MCGEN_CONTROL_CALLBACK + +#ifndef _mcgen_PENABLECALLBACK + #if MCGEN_USE_KERNEL_MODE_APIS + #define _mcgen_PENABLECALLBACK PETWENABLECALLBACK + #else + #define _mcgen_PENABLECALLBACK PENABLECALLBACK + #endif +#endif // _mcgen_PENABLECALLBACK + +#if !defined(_mcgen_PASTE2) 
+// This macro is for use by MC-generated code and should not be used directly. +#define _mcgen_PASTE2(a, b) _mcgen_PASTE2_imp(a, b) +#define _mcgen_PASTE2_imp(a, b) a##b +#endif // _mcgen_PASTE2 + +#if !defined(_mcgen_PASTE3) +// This macro is for use by MC-generated code and should not be used directly. +#define _mcgen_PASTE3(a, b, c) _mcgen_PASTE3_imp(a, b, c) +#define _mcgen_PASTE3_imp(a, b, c) a##b##_##c +#endif // _mcgen_PASTE3 + +// +// Macro validation +// + +// Validate MCGEN_EVENTREGISTER: + +// Trigger an error if MCGEN_EVENTREGISTER is not an unqualified (simple) identifier: +struct _mcgen_PASTE2(MCGEN_EVENTREGISTER_definition_must_be_an_unqualified_identifier_, MCGEN_EVENTREGISTER); + +// Trigger an error if MCGEN_EVENTREGISTER is redefined: +typedef struct _mcgen_PASTE2(MCGEN_EVENTREGISTER_definition_must_be_an_unqualified_identifier_, MCGEN_EVENTREGISTER) + MCGEN_EVENTREGISTER_must_not_be_redefined_between_headers; + +// Trigger an error if MCGEN_EVENTREGISTER is defined as a function-like macro: +typedef void MCGEN_EVENTREGISTER_must_not_be_a_functionLike_macro_MCGEN_EVENTREGISTER; +typedef int _mcgen_PASTE2(MCGEN_EVENTREGISTER_must_not_be_a_functionLike_macro_, MCGEN_EVENTREGISTER); + +// Validate MCGEN_EVENTUNREGISTER: + +// Trigger an error if MCGEN_EVENTUNREGISTER is not an unqualified (simple) identifier: +struct _mcgen_PASTE2(MCGEN_EVENTUNREGISTER_definition_must_be_an_unqualified_identifier_, MCGEN_EVENTUNREGISTER); + +// Trigger an error if MCGEN_EVENTUNREGISTER is redefined: +typedef struct _mcgen_PASTE2(MCGEN_EVENTUNREGISTER_definition_must_be_an_unqualified_identifier_, MCGEN_EVENTUNREGISTER) + MCGEN_EVENTUNREGISTER_must_not_be_redefined_between_headers; + +// Trigger an error if MCGEN_EVENTUNREGISTER is defined as a function-like macro: +typedef void MCGEN_EVENTUNREGISTER_must_not_be_a_functionLike_macro_MCGEN_EVENTUNREGISTER; +typedef int _mcgen_PASTE2(MCGEN_EVENTUNREGISTER_must_not_be_a_functionLike_macro_, MCGEN_EVENTUNREGISTER); + 
+// Validate MCGEN_EVENTSETINFORMATION: + +// Trigger an error if MCGEN_EVENTSETINFORMATION is not an unqualified (simple) identifier: +struct _mcgen_PASTE2(MCGEN_EVENTSETINFORMATION_definition_must_be_an_unqualified_identifier_, MCGEN_EVENTSETINFORMATION); + +// Trigger an error if MCGEN_EVENTSETINFORMATION is redefined: +typedef struct _mcgen_PASTE2(MCGEN_EVENTSETINFORMATION_definition_must_be_an_unqualified_identifier_, MCGEN_EVENTSETINFORMATION) + MCGEN_EVENTSETINFORMATION_must_not_be_redefined_between_headers; + +// Trigger an error if MCGEN_EVENTSETINFORMATION is defined as a function-like macro: +typedef void MCGEN_EVENTSETINFORMATION_must_not_be_a_functionLike_macro_MCGEN_EVENTSETINFORMATION; +typedef int _mcgen_PASTE2(MCGEN_EVENTSETINFORMATION_must_not_be_a_functionLike_macro_, MCGEN_EVENTSETINFORMATION); + +// Validate MCGEN_EVENTWRITETRANSFER: + +// Trigger an error if MCGEN_EVENTWRITETRANSFER is not an unqualified (simple) identifier: +struct _mcgen_PASTE2(MCGEN_EVENTWRITETRANSFER_definition_must_be_an_unqualified_identifier_, MCGEN_EVENTWRITETRANSFER); + +// Trigger an error if MCGEN_EVENTWRITETRANSFER is redefined: +typedef struct _mcgen_PASTE2(MCGEN_EVENTWRITETRANSFER_definition_must_be_an_unqualified_identifier_, MCGEN_EVENTWRITETRANSFER) + MCGEN_EVENTWRITETRANSFER_must_not_be_redefined_between_headers;; + +// Trigger an error if MCGEN_EVENTWRITETRANSFER is defined as a function-like macro: +typedef void MCGEN_EVENTWRITETRANSFER_must_not_be_a_functionLike_macro_MCGEN_EVENTWRITETRANSFER; +typedef int _mcgen_PASTE2(MCGEN_EVENTWRITETRANSFER_must_not_be_a_functionLike_macro_, MCGEN_EVENTWRITETRANSFER); + +#ifndef McGenEventWrite_def +#define McGenEventWrite_def + +// This macro is for use by MC-generated code and should not be used directly. +#define McGenEventWrite _mcgen_PASTE2(McGenEventWrite_, MCGEN_EVENTWRITETRANSFER) + +// This function is for use by MC-generated code and should not be used directly. 
+DECLSPEC_NOINLINE __inline +ULONG __stdcall +McGenEventWrite( + _In_ PMCGEN_TRACE_CONTEXT Context, + _In_ PCEVENT_DESCRIPTOR Descriptor, + _In_opt_ LPCGUID ActivityId, + _In_range_(1, 128) ULONG EventDataCount, + _Pre_cap_(EventDataCount) EVENT_DATA_DESCRIPTOR* EventData + ) +{ + const USHORT UNALIGNED* Traits; + + // Some customized MCGEN_EVENTWRITETRANSFER macros might ignore ActivityId. + UNREFERENCED_PARAMETER(ActivityId); + + Traits = (const USHORT UNALIGNED*)(UINT_PTR)Context->Logger; + + if (Traits == NULL) { + EventData[0].Ptr = 0; + EventData[0].Size = 0; + EventData[0].Reserved = 0; + } else { + EventData[0].Ptr = (ULONG_PTR)Traits; + EventData[0].Size = *Traits; + EventData[0].Reserved = 2; // EVENT_DATA_DESCRIPTOR_TYPE_PROVIDER_METADATA + } + + return MCGEN_EVENTWRITETRANSFER( + Context->RegistrationHandle, + Descriptor, + ActivityId, + NULL, + EventDataCount, + EventData); +} +#endif // McGenEventWrite_def + +#if !defined(McGenEventRegisterUnregister) +#define McGenEventRegisterUnregister + +// This macro is for use by MC-generated code and should not be used directly. +#define McGenEventRegister _mcgen_PASTE2(McGenEventRegister_, MCGEN_EVENTREGISTER) + +#pragma warning(push) +#pragma warning(disable:6103) +// This function is for use by MC-generated code and should not be used directly. +DECLSPEC_NOINLINE __inline +ULONG __stdcall +McGenEventRegister( + _In_ LPCGUID ProviderId, + _In_opt_ _mcgen_PENABLECALLBACK EnableCallback, + _In_opt_ PVOID CallbackContext, + _Inout_ PREGHANDLE RegHandle + ) +/*++ + +Routine Description: + + This function registers the provider with ETW. + +Arguments: + + ProviderId - Provider ID to register with ETW. + + EnableCallback - Callback to be used. + + CallbackContext - Context for the callback. + + RegHandle - Pointer to registration handle. + +Remarks: + + Should not be called if the provider is already registered (i.e. should not + be called if *RegHandle != 0). 
Repeatedly registering a provider is a bug + and may indicate a race condition. However, for compatibility with previous + behavior, this function will return SUCCESS in this case. + +--*/ +{ + ULONG Error; + + if (*RegHandle != 0) + { + Error = 0; // ERROR_SUCCESS + } + else + { + Error = MCGEN_EVENTREGISTER(ProviderId, EnableCallback, CallbackContext, RegHandle); + } + + return Error; +} +#pragma warning(pop) + +// This macro is for use by MC-generated code and should not be used directly. +#define McGenEventUnregister _mcgen_PASTE2(McGenEventUnregister_, MCGEN_EVENTUNREGISTER) + +// This function is for use by MC-generated code and should not be used directly. +DECLSPEC_NOINLINE __inline +ULONG __stdcall +McGenEventUnregister(_Inout_ PREGHANDLE RegHandle) +/*++ + +Routine Description: + + Unregister from ETW and set *RegHandle = 0. + +Arguments: + + RegHandle - the pointer to the provider registration handle + +Remarks: + + If provider has not been registered (i.e. if *RegHandle == 0), + return SUCCESS. It is safe to call McGenEventUnregister even if the + call to McGenEventRegister returned an error. + +--*/ +{ + ULONG Error; + + if(*RegHandle == 0) + { + Error = 0; // ERROR_SUCCESS + } + else + { + Error = MCGEN_EVENTUNREGISTER(*RegHandle); + *RegHandle = (REGHANDLE)0; + } + + return Error; +} + +#endif // McGenEventRegisterUnregister + +#ifndef _mcgen_EVENT_BIT_SET + #if defined(_M_IX86) || defined(_M_X64) + // This macro is for use by MC-generated code and should not be used directly. + #define _mcgen_EVENT_BIT_SET(EnableBits, BitPosition) ((((const unsigned char*)EnableBits)[BitPosition >> 3] & (1u << (BitPosition & 7))) != 0) + #else // CPU type + // This macro is for use by MC-generated code and should not be used directly. 
+ #define _mcgen_EVENT_BIT_SET(EnableBits, BitPosition) ((EnableBits[BitPosition >> 5] & (1u << (BitPosition & 31))) != 0) + #endif // CPU type +#endif // _mcgen_EVENT_BIT_SET + +#endif // MCGEN_DISABLE_PROVIDER_CODE_GENERATION + +//+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +// Provider "microsoft-windows-mimalloc" event count 2 +//+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +// Provider GUID = 138f4dbb-ee04-4899-aa0a-572ad4475779 +EXTERN_C __declspec(selectany) const GUID ETW_MI_Provider = {0x138f4dbb, 0xee04, 0x4899, {0xaa, 0x0a, 0x57, 0x2a, 0xd4, 0x47, 0x57, 0x79}}; + +#ifndef ETW_MI_Provider_Traits +#define ETW_MI_Provider_Traits NULL +#endif // ETW_MI_Provider_Traits + +// +// Event Descriptors +// +EXTERN_C __declspec(selectany) const EVENT_DESCRIPTOR ETW_MI_ALLOC = {0x64, 0x1, 0x0, 0x4, 0x0, 0x0, 0x0}; +#define ETW_MI_ALLOC_value 0x64 +EXTERN_C __declspec(selectany) const EVENT_DESCRIPTOR ETW_MI_FREE = {0x65, 0x1, 0x0, 0x4, 0x0, 0x0, 0x0}; +#define ETW_MI_FREE_value 0x65 + +// +// MCGEN_DISABLE_PROVIDER_CODE_GENERATION macro: +// Define this macro to have the compiler skip the generated functions in this +// header. +// +#ifndef MCGEN_DISABLE_PROVIDER_CODE_GENERATION + +// +// Event Enablement Bits +// These variables are for use by MC-generated code and should not be used directly. 
+// +EXTERN_C __declspec(selectany) DECLSPEC_CACHEALIGN ULONG microsoft_windows_mimallocEnableBits[1]; +EXTERN_C __declspec(selectany) const ULONGLONG microsoft_windows_mimallocKeywords[1] = {0x0}; +EXTERN_C __declspec(selectany) const unsigned char microsoft_windows_mimallocLevels[1] = {4}; + +// +// Provider context +// +EXTERN_C __declspec(selectany) MCGEN_TRACE_CONTEXT ETW_MI_Provider_Context = {0, (ULONG_PTR)ETW_MI_Provider_Traits, 0, 0, 0, 0, 0, 0, 1, microsoft_windows_mimallocEnableBits, microsoft_windows_mimallocKeywords, microsoft_windows_mimallocLevels}; + +// +// Provider REGHANDLE +// +#define microsoft_windows_mimallocHandle (ETW_MI_Provider_Context.RegistrationHandle) + +// +// This macro is set to 0, indicating that the EventWrite[Name] macros do not +// have an Activity parameter. This is controlled by the -km and -um options. +// +#define ETW_MI_Provider_EventWriteActivity 0 + +// +// Register with ETW using the control GUID specified in the manifest. +// Invoke this macro during module initialization (i.e. program startup, +// DLL process attach, or driver load) to initialize the provider. +// Note that if this function returns an error, the error means that +// will not work, but no action needs to be taken -- even if EventRegister +// returns an error, it is generally safe to use EventWrite and +// EventUnregister macros (they will be no-ops if EventRegister failed). +// +#ifndef EventRegistermicrosoft_windows_mimalloc +#define EventRegistermicrosoft_windows_mimalloc() McGenEventRegister(&ETW_MI_Provider, McGenControlCallbackV2, &ETW_MI_Provider_Context, µsoft_windows_mimallocHandle) +#endif + +// +// Register with ETW using a specific control GUID (i.e. a GUID other than what +// is specified in the manifest). Advanced scenarios only. 
+// +#ifndef EventRegisterByGuidmicrosoft_windows_mimalloc +#define EventRegisterByGuidmicrosoft_windows_mimalloc(Guid) McGenEventRegister(&(Guid), McGenControlCallbackV2, &ETW_MI_Provider_Context, µsoft_windows_mimallocHandle) +#endif + +// +// Unregister with ETW and close the provider. +// Invoke this macro during module shutdown (i.e. program exit, DLL process +// detach, or driver unload) to unregister the provider. +// Note that you MUST call EventUnregister before DLL or driver unload +// (not optional): failure to unregister a provider before DLL or driver unload +// will result in crashes. +// +#ifndef EventUnregistermicrosoft_windows_mimalloc +#define EventUnregistermicrosoft_windows_mimalloc() McGenEventUnregister(µsoft_windows_mimallocHandle) +#endif + +// +// MCGEN_ENABLE_FORCONTEXT_CODE_GENERATION macro: +// Define this macro to enable support for caller-allocated provider context. +// +#ifdef MCGEN_ENABLE_FORCONTEXT_CODE_GENERATION + +// +// Advanced scenarios: Caller-allocated provider context. +// Use when multiple differently-configured provider handles are needed, +// e.g. for container-aware drivers, one context per container. +// +// Usage: +// +// - Caller enables the feature before including this header, e.g. +// #define MCGEN_ENABLE_FORCONTEXT_CODE_GENERATION 1 +// - Caller allocates memory, e.g. pContext = malloc(sizeof(McGenContext_microsoft_windows_mimalloc)); +// - Caller registers the provider, e.g. EventRegistermicrosoft_windows_mimalloc_ForContext(pContext); +// - Caller writes events, e.g. EventWriteMyEvent_ForContext(pContext, ...); +// - Caller unregisters, e.g. EventUnregistermicrosoft_windows_mimalloc_ForContext(pContext); +// - Caller frees memory, e.g. free(pContext); +// + +typedef struct tagMcGenContext_microsoft_windows_mimalloc { + // The fields of this structure are subject to change and should + // not be accessed directly. To access the provider's REGHANDLE, + // use microsoft_windows_mimallocHandle_ForContext(pContext). 
+ MCGEN_TRACE_CONTEXT Context; + ULONG EnableBits[1]; +} McGenContext_microsoft_windows_mimalloc; + +#define EventRegistermicrosoft_windows_mimalloc_ForContext(pContext) _mcgen_PASTE2(_mcgen_RegisterForContext_microsoft_windows_mimalloc_, MCGEN_EVENTREGISTER)(&ETW_MI_Provider, pContext) +#define EventRegisterByGuidmicrosoft_windows_mimalloc_ForContext(Guid, pContext) _mcgen_PASTE2(_mcgen_RegisterForContext_microsoft_windows_mimalloc_, MCGEN_EVENTREGISTER)(&(Guid), pContext) +#define EventUnregistermicrosoft_windows_mimalloc_ForContext(pContext) McGenEventUnregister(&(pContext)->Context.RegistrationHandle) + +// +// Provider REGHANDLE for caller-allocated context. +// +#define microsoft_windows_mimallocHandle_ForContext(pContext) ((pContext)->Context.RegistrationHandle) + +// This function is for use by MC-generated code and should not be used directly. +// Initialize and register the caller-allocated context. +__inline +ULONG __stdcall +_mcgen_PASTE2(_mcgen_RegisterForContext_microsoft_windows_mimalloc_, MCGEN_EVENTREGISTER)( + _In_ LPCGUID pProviderId, + _Out_ McGenContext_microsoft_windows_mimalloc* pContext) +{ + RtlZeroMemory(pContext, sizeof(*pContext)); + pContext->Context.Logger = (ULONG_PTR)ETW_MI_Provider_Traits; + pContext->Context.EnableBitsCount = 1; + pContext->Context.EnableBitMask = pContext->EnableBits; + pContext->Context.EnableKeyWords = microsoft_windows_mimallocKeywords; + pContext->Context.EnableLevel = microsoft_windows_mimallocLevels; + return McGenEventRegister( + pProviderId, + McGenControlCallbackV2, + &pContext->Context, + &pContext->Context.RegistrationHandle); +} + +// This function is for use by MC-generated code and should not be used directly. +// Trigger a compile error if called with the wrong parameter type. 
+FORCEINLINE +_Ret_ McGenContext_microsoft_windows_mimalloc* +_mcgen_CheckContextType_microsoft_windows_mimalloc(_In_ McGenContext_microsoft_windows_mimalloc* pContext) +{ + return pContext; +} + +#endif // MCGEN_ENABLE_FORCONTEXT_CODE_GENERATION + +// +// Enablement check macro for event "ETW_MI_ALLOC" +// +#define EventEnabledETW_MI_ALLOC() _mcgen_EVENT_BIT_SET(microsoft_windows_mimallocEnableBits, 0) +#define EventEnabledETW_MI_ALLOC_ForContext(pContext) _mcgen_EVENT_BIT_SET(_mcgen_CheckContextType_microsoft_windows_mimalloc(pContext)->EnableBits, 0) + +// +// Event write macros for event "ETW_MI_ALLOC" +// +#define EventWriteETW_MI_ALLOC(Address, Size) \ + MCGEN_EVENT_ENABLED(ETW_MI_ALLOC) \ + ? _mcgen_TEMPLATE_FOR_ETW_MI_ALLOC(&ETW_MI_Provider_Context, &ETW_MI_ALLOC, Address, Size) : 0 +#define EventWriteETW_MI_ALLOC_AssumeEnabled(Address, Size) \ + _mcgen_TEMPLATE_FOR_ETW_MI_ALLOC(&ETW_MI_Provider_Context, &ETW_MI_ALLOC, Address, Size) +#define EventWriteETW_MI_ALLOC_ForContext(pContext, Address, Size) \ + MCGEN_EVENT_ENABLED_FORCONTEXT(pContext, ETW_MI_ALLOC) \ + ? _mcgen_TEMPLATE_FOR_ETW_MI_ALLOC(&(pContext)->Context, &ETW_MI_ALLOC, Address, Size) : 0 +#define EventWriteETW_MI_ALLOC_ForContextAssumeEnabled(pContext, Address, Size) \ + _mcgen_TEMPLATE_FOR_ETW_MI_ALLOC(&_mcgen_CheckContextType_microsoft_windows_mimalloc(pContext)->Context, &ETW_MI_ALLOC, Address, Size) + +// This macro is for use by MC-generated code and should not be used directly. 
+#define _mcgen_TEMPLATE_FOR_ETW_MI_ALLOC _mcgen_PASTE2(McTemplateU0xx_, MCGEN_EVENTWRITETRANSFER) + +// +// Enablement check macro for event "ETW_MI_FREE" +// +#define EventEnabledETW_MI_FREE() _mcgen_EVENT_BIT_SET(microsoft_windows_mimallocEnableBits, 0) +#define EventEnabledETW_MI_FREE_ForContext(pContext) _mcgen_EVENT_BIT_SET(_mcgen_CheckContextType_microsoft_windows_mimalloc(pContext)->EnableBits, 0) + +// +// Event write macros for event "ETW_MI_FREE" +// +#define EventWriteETW_MI_FREE(Address, Size) \ + MCGEN_EVENT_ENABLED(ETW_MI_FREE) \ + ? _mcgen_TEMPLATE_FOR_ETW_MI_FREE(&ETW_MI_Provider_Context, &ETW_MI_FREE, Address, Size) : 0 +#define EventWriteETW_MI_FREE_AssumeEnabled(Address, Size) \ + _mcgen_TEMPLATE_FOR_ETW_MI_FREE(&ETW_MI_Provider_Context, &ETW_MI_FREE, Address, Size) +#define EventWriteETW_MI_FREE_ForContext(pContext, Address, Size) \ + MCGEN_EVENT_ENABLED_FORCONTEXT(pContext, ETW_MI_FREE) \ + ? _mcgen_TEMPLATE_FOR_ETW_MI_FREE(&(pContext)->Context, &ETW_MI_FREE, Address, Size) : 0 +#define EventWriteETW_MI_FREE_ForContextAssumeEnabled(pContext, Address, Size) \ + _mcgen_TEMPLATE_FOR_ETW_MI_FREE(&_mcgen_CheckContextType_microsoft_windows_mimalloc(pContext)->Context, &ETW_MI_FREE, Address, Size) + +// This macro is for use by MC-generated code and should not be used directly. +#define _mcgen_TEMPLATE_FOR_ETW_MI_FREE _mcgen_PASTE2(McTemplateU0xx_, MCGEN_EVENTWRITETRANSFER) + +#endif // MCGEN_DISABLE_PROVIDER_CODE_GENERATION + +// +// MCGEN_DISABLE_PROVIDER_CODE_GENERATION macro: +// Define this macro to have the compiler skip the generated functions in this +// header. +// +#ifndef MCGEN_DISABLE_PROVIDER_CODE_GENERATION + +// +// Template Functions +// + +// +// Function for template "ETW_CUSTOM_HEAP_ALLOC_DATA" (and possibly others). +// This function is for use by MC-generated code and should not be used directly. 
+// +#ifndef McTemplateU0xx_def +#define McTemplateU0xx_def +ETW_INLINE +ULONG +_mcgen_PASTE2(McTemplateU0xx_, MCGEN_EVENTWRITETRANSFER)( + _In_ PMCGEN_TRACE_CONTEXT Context, + _In_ PCEVENT_DESCRIPTOR Descriptor, + _In_ const unsigned __int64 _Arg0, + _In_ const unsigned __int64 _Arg1 + ) +{ +#define McTemplateU0xx_ARGCOUNT 2 + + EVENT_DATA_DESCRIPTOR EventData[McTemplateU0xx_ARGCOUNT + 1]; + + EventDataDescCreate(&EventData[1],&_Arg0, sizeof(const unsigned __int64) ); + + EventDataDescCreate(&EventData[2],&_Arg1, sizeof(const unsigned __int64) ); + + return McGenEventWrite(Context, Descriptor, NULL, McTemplateU0xx_ARGCOUNT + 1, EventData); +} +#endif // McTemplateU0xx_def + +#endif // MCGEN_DISABLE_PROVIDER_CODE_GENERATION + +#if defined(__cplusplus) +} +#endif diff --git a/yass/third_party/mimalloc/src/prim/windows/etw.man b/yass/third_party/mimalloc/src/prim/windows/etw.man new file mode 100644 index 0000000000000000000000000000000000000000..cfd1f8a9eaacd50af63f1e28f9540aa88c20f90c GIT binary patch literal 3926 zcmeH~T~8B16o${WiT`2c+NGc<*i;EYh$d8xl<0*CS-Mb~vPP(R>hsQY*nU!q z$b})3?##}d?{nTW+uy%xwr*doYaNU1!Vc}sa%7F%g+hVAmL$hwL?4dodnmuAKhUXm0)X9|WHj>XRahh@~SWDIkbduX;B#u6^Q>@U= zDO8U+KXa0*th&%f*z^Udh4ol@uE=Q&`emUsh_CA`FOXgI{i-`XZ9C#bR1yBm=PJ*p z9kVN$J6O;h;8HY>p)RnhY8A#Hb?z)_!y(Iaen(I)@-9CrSSp(;_Jn9I*$S&ATjP1? 
zVxB>pV@LVsy;^jZr7r$HNAm06TcUiI`l@~F$Mt$E%Shfd3O+h1vFhR9a8yQZ@wpne zr3bI-p=VEdo{)#uWxSVJeYQF|-5tnq>~f+CP~A0&{v=(up=ngEDl>5!$EDwPRaNjW zXj|wbG$OwmwaW-hMvBK%pbm3wpic71O^dzbxT%*13+SP9h- zI~rA5hapTVnk|qmiIVYy{__+x9f7OV4j3`g4;{{8_SWnLBSu2PUc%~`t%Ae^>J`SS zdtZg-r<0xAH*_ALtK;Nv(d9nbKK1jK=Ld)I(jQrKhBjgToR#Wm8{0a}@6ZuEO(lvG=y=Jh{M!4!-$(E)!vYUrf47 znf4W$e&QFOovV_$>J%X*KN>oXI@jt%BJms>IU}I$<7Hr{PChchs%+mx{% zt(fa_PM4r63?1h#YOk~;byc5`>%q>sLHA1gohNq{qOREhxu<y~`}YVhGe0?R_ceQ8vt^BpSNp6kErj_0hUNFyWQ1IOZ|GEgWBPwYFLgFu Oo!*uqEBu%Ae18C_t1cS= literal 0 HcmV?d00001 diff --git a/yass/third_party/mimalloc/src/prim/windows/prim.c b/yass/third_party/mimalloc/src/prim/windows/prim.c new file mode 100644 index 0000000000..5074ad4cbd --- /dev/null +++ b/yass/third_party/mimalloc/src/prim/windows/prim.c @@ -0,0 +1,663 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2018-2023, Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. +-----------------------------------------------------------------------------*/ + +// This file is included in `src/prim/prim.c` + +#include "mimalloc.h" +#include "mimalloc/internal.h" +#include "mimalloc/atomic.h" +#include "mimalloc/prim.h" +#include // fputs, stderr + + +//--------------------------------------------- +// Dynamically bind Windows API points for portability +//--------------------------------------------- + +// We use VirtualAlloc2 for aligned allocation, but it is only supported on Windows 10 and Windows Server 2016. +// So, we need to look it up dynamically to run on older systems. (use __stdcall for 32-bit compatibility) +// NtAllocateVirtualAllocEx is used for huge OS page allocation (1GiB) +// We define a minimal MEM_EXTENDED_PARAMETER ourselves in order to be able to compile with older SDK's. 
+typedef enum MI_MEM_EXTENDED_PARAMETER_TYPE_E { + MiMemExtendedParameterInvalidType = 0, + MiMemExtendedParameterAddressRequirements, + MiMemExtendedParameterNumaNode, + MiMemExtendedParameterPartitionHandle, + MiMemExtendedParameterUserPhysicalHandle, + MiMemExtendedParameterAttributeFlags, + MiMemExtendedParameterMax +} MI_MEM_EXTENDED_PARAMETER_TYPE; + +typedef struct DECLSPEC_ALIGN(8) MI_MEM_EXTENDED_PARAMETER_S { + struct { DWORD64 Type : 8; DWORD64 Reserved : 56; } Type; + union { DWORD64 ULong64; PVOID Pointer; SIZE_T Size; HANDLE Handle; DWORD ULong; } Arg; +} MI_MEM_EXTENDED_PARAMETER; + +typedef struct MI_MEM_ADDRESS_REQUIREMENTS_S { + PVOID LowestStartingAddress; + PVOID HighestEndingAddress; + SIZE_T Alignment; +} MI_MEM_ADDRESS_REQUIREMENTS; + +#define MI_MEM_EXTENDED_PARAMETER_NONPAGED_HUGE 0x00000010 + +#include +typedef PVOID (__stdcall *PVirtualAlloc2)(HANDLE, PVOID, SIZE_T, ULONG, ULONG, MI_MEM_EXTENDED_PARAMETER*, ULONG); +typedef NTSTATUS (__stdcall *PNtAllocateVirtualMemoryEx)(HANDLE, PVOID*, SIZE_T*, ULONG, ULONG, MI_MEM_EXTENDED_PARAMETER*, ULONG); +static PVirtualAlloc2 pVirtualAlloc2 = NULL; +static PNtAllocateVirtualMemoryEx pNtAllocateVirtualMemoryEx = NULL; + +// Similarly, GetNumaProcesorNodeEx is only supported since Windows 7 +typedef struct MI_PROCESSOR_NUMBER_S { WORD Group; BYTE Number; BYTE Reserved; } MI_PROCESSOR_NUMBER; + +typedef VOID (__stdcall *PGetCurrentProcessorNumberEx)(MI_PROCESSOR_NUMBER* ProcNumber); +typedef BOOL (__stdcall *PGetNumaProcessorNodeEx)(MI_PROCESSOR_NUMBER* Processor, PUSHORT NodeNumber); +typedef BOOL (__stdcall* PGetNumaNodeProcessorMaskEx)(USHORT Node, PGROUP_AFFINITY ProcessorMask); +typedef BOOL (__stdcall *PGetNumaProcessorNode)(UCHAR Processor, PUCHAR NodeNumber); +static PGetCurrentProcessorNumberEx pGetCurrentProcessorNumberEx = NULL; +static PGetNumaProcessorNodeEx pGetNumaProcessorNodeEx = NULL; +static PGetNumaNodeProcessorMaskEx pGetNumaNodeProcessorMaskEx = NULL; +static 
PGetNumaProcessorNode pGetNumaProcessorNode = NULL; + +//--------------------------------------------- +// Enable large page support dynamically (if possible) +//--------------------------------------------- + +static bool win_enable_large_os_pages(size_t* large_page_size) +{ + static bool large_initialized = false; + if (large_initialized) return (_mi_os_large_page_size() > 0); + large_initialized = true; + + // Try to see if large OS pages are supported + // To use large pages on Windows, we first need access permission + // Set "Lock pages in memory" permission in the group policy editor + // + unsigned long err = 0; + HANDLE token = NULL; + BOOL ok = OpenProcessToken(GetCurrentProcess(), TOKEN_ADJUST_PRIVILEGES | TOKEN_QUERY, &token); + if (ok) { + TOKEN_PRIVILEGES tp; + ok = LookupPrivilegeValue(NULL, TEXT("SeLockMemoryPrivilege"), &tp.Privileges[0].Luid); + if (ok) { + tp.PrivilegeCount = 1; + tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED; + ok = AdjustTokenPrivileges(token, FALSE, &tp, 0, (PTOKEN_PRIVILEGES)NULL, 0); + if (ok) { + err = GetLastError(); + ok = (err == ERROR_SUCCESS); + if (ok && large_page_size != NULL) { + *large_page_size = GetLargePageMinimum(); + } + } + } + CloseHandle(token); + } + if (!ok) { + if (err == 0) err = GetLastError(); + _mi_warning_message("cannot enable large OS page support, error %lu\n", err); + } + return (ok!=0); +} + + +//--------------------------------------------- +// Initialize +//--------------------------------------------- + +void _mi_prim_mem_init( mi_os_mem_config_t* config ) +{ + config->has_overcommit = false; + config->has_partial_free = false; + config->has_virtual_reserve = true; + // get the page size + SYSTEM_INFO si; + GetSystemInfo(&si); + if (si.dwPageSize > 0) { config->page_size = si.dwPageSize; } + if (si.dwAllocationGranularity > 0) { config->alloc_granularity = si.dwAllocationGranularity; } + // get the VirtualAlloc2 function + HINSTANCE hDll; + hDll = LoadLibrary(TEXT("kernelbase.dll")); + 
if (hDll != NULL) { + // use VirtualAlloc2FromApp if possible as it is available to Windows store apps + pVirtualAlloc2 = (PVirtualAlloc2)(void (*)(void))GetProcAddress(hDll, "VirtualAlloc2FromApp"); + if (pVirtualAlloc2==NULL) pVirtualAlloc2 = (PVirtualAlloc2)(void (*)(void))GetProcAddress(hDll, "VirtualAlloc2"); + FreeLibrary(hDll); + } + // NtAllocateVirtualMemoryEx is used for huge page allocation + hDll = LoadLibrary(TEXT("ntdll.dll")); + if (hDll != NULL) { + pNtAllocateVirtualMemoryEx = (PNtAllocateVirtualMemoryEx)(void (*)(void))GetProcAddress(hDll, "NtAllocateVirtualMemoryEx"); + FreeLibrary(hDll); + } + // Try to use Win7+ numa API + hDll = LoadLibrary(TEXT("kernel32.dll")); + if (hDll != NULL) { + pGetCurrentProcessorNumberEx = (PGetCurrentProcessorNumberEx)(void (*)(void))GetProcAddress(hDll, "GetCurrentProcessorNumberEx"); + pGetNumaProcessorNodeEx = (PGetNumaProcessorNodeEx)(void (*)(void))GetProcAddress(hDll, "GetNumaProcessorNodeEx"); + pGetNumaNodeProcessorMaskEx = (PGetNumaNodeProcessorMaskEx)(void (*)(void))GetProcAddress(hDll, "GetNumaNodeProcessorMaskEx"); + pGetNumaProcessorNode = (PGetNumaProcessorNode)(void (*)(void))GetProcAddress(hDll, "GetNumaProcessorNode"); + FreeLibrary(hDll); + } + if (mi_option_is_enabled(mi_option_allow_large_os_pages) || mi_option_is_enabled(mi_option_reserve_huge_os_pages)) { + win_enable_large_os_pages(&config->large_page_size); + } +} + + +//--------------------------------------------- +// Free +//--------------------------------------------- + +int _mi_prim_free(void* addr, size_t size ) { + MI_UNUSED(size); + DWORD errcode = 0; + bool err = (VirtualFree(addr, 0, MEM_RELEASE) == 0); + if (err) { errcode = GetLastError(); } + if (errcode == ERROR_INVALID_ADDRESS) { + // In mi_os_mem_alloc_aligned the fallback path may have returned a pointer inside + // the memory region returned by VirtualAlloc; in that case we need to free using + // the start of the region. 
+ MEMORY_BASIC_INFORMATION info = { 0 }; + VirtualQuery(addr, &info, sizeof(info)); + if (info.AllocationBase < addr && ((uint8_t*)addr - (uint8_t*)info.AllocationBase) < (ptrdiff_t)MI_SEGMENT_SIZE) { + errcode = 0; + err = (VirtualFree(info.AllocationBase, 0, MEM_RELEASE) == 0); + if (err) { errcode = GetLastError(); } + } + } + return (int)errcode; +} + + +//--------------------------------------------- +// VirtualAlloc +//--------------------------------------------- + +static void* win_virtual_alloc_prim_once(void* addr, size_t size, size_t try_alignment, DWORD flags) { + #if (MI_INTPTR_SIZE >= 8) + // on 64-bit systems, try to use the virtual address area after 2TiB for 4MiB aligned allocations + if (addr == NULL) { + void* hint = _mi_os_get_aligned_hint(try_alignment,size); + if (hint != NULL) { + void* p = VirtualAlloc(hint, size, flags, PAGE_READWRITE); + if (p != NULL) return p; + _mi_verbose_message("warning: unable to allocate hinted aligned OS memory (%zu bytes, error code: 0x%x, address: %p, alignment: %zu, flags: 0x%x)\n", size, GetLastError(), hint, try_alignment, flags); + // fall through on error + } + } + #endif + // on modern Windows try use VirtualAlloc2 for aligned allocation + if (try_alignment > 1 && (try_alignment % _mi_os_page_size()) == 0 && pVirtualAlloc2 != NULL) { + MI_MEM_ADDRESS_REQUIREMENTS reqs = { 0, 0, 0 }; + reqs.Alignment = try_alignment; + MI_MEM_EXTENDED_PARAMETER param = { {0, 0}, {0} }; + param.Type.Type = MiMemExtendedParameterAddressRequirements; + param.Arg.Pointer = &reqs; + void* p = (*pVirtualAlloc2)(GetCurrentProcess(), addr, size, flags, PAGE_READWRITE, ¶m, 1); + if (p != NULL) return p; + _mi_warning_message("unable to allocate aligned OS memory (0x%zx bytes, error code: 0x%x, address: %p, alignment: 0x%zx, flags: 0x%x)\n", size, GetLastError(), addr, try_alignment, flags); + // fall through on error + } + // last resort + return VirtualAlloc(addr, size, flags, PAGE_READWRITE); +} + +static bool 
win_is_out_of_memory_error(DWORD err) { + switch (err) { + case ERROR_COMMITMENT_MINIMUM: + case ERROR_COMMITMENT_LIMIT: + case ERROR_PAGEFILE_QUOTA: + case ERROR_NOT_ENOUGH_MEMORY: + return true; + default: + return false; + } +} + +static void* win_virtual_alloc_prim(void* addr, size_t size, size_t try_alignment, DWORD flags) { + long max_retry_msecs = mi_option_get_clamp(mi_option_retry_on_oom, 0, 2000); // at most 2 seconds + if (max_retry_msecs == 1) { max_retry_msecs = 100; } // if one sets the option to "true" + for (long tries = 1; tries <= 10; tries++) { // try at most 10 times (=2200ms) + void* p = win_virtual_alloc_prim_once(addr, size, try_alignment, flags); + if (p != NULL) { + // success, return the address + return p; + } + else if (max_retry_msecs > 0 && (try_alignment <= 2*MI_SEGMENT_ALIGN) && + (flags&MEM_COMMIT) != 0 && (flags&MEM_LARGE_PAGES) == 0 && + win_is_out_of_memory_error(GetLastError())) { + // if committing regular memory and being out-of-memory, + // keep trying for a bit in case memory frees up after all. See issue #894 + _mi_warning_message("out-of-memory on OS allocation, try again... (attempt %lu, 0x%zx bytes, error code: 0x%x, address: %p, alignment: 0x%zx, flags: 0x%x)\n", tries, size, GetLastError(), addr, try_alignment, flags); + long sleep_msecs = tries*40; // increasing waits + if (sleep_msecs > max_retry_msecs) { sleep_msecs = max_retry_msecs; } + max_retry_msecs -= sleep_msecs; + Sleep(sleep_msecs); + } + else { + // otherwise return with an error + break; + } + } + return NULL; +} + +static void* win_virtual_alloc(void* addr, size_t size, size_t try_alignment, DWORD flags, bool large_only, bool allow_large, bool* is_large) { + mi_assert_internal(!(large_only && !allow_large)); + static _Atomic(size_t) large_page_try_ok; // = 0; + void* p = NULL; + // Try to allocate large OS pages (2MiB) if allowed or required. 
+ if ((large_only || _mi_os_use_large_page(size, try_alignment)) + && allow_large && (flags&MEM_COMMIT)!=0 && (flags&MEM_RESERVE)!=0) { + size_t try_ok = mi_atomic_load_acquire(&large_page_try_ok); + if (!large_only && try_ok > 0) { + // if a large page allocation fails, it seems the calls to VirtualAlloc get very expensive. + // therefore, once a large page allocation failed, we don't try again for `large_page_try_ok` times. + mi_atomic_cas_strong_acq_rel(&large_page_try_ok, &try_ok, try_ok - 1); + } + else { + // large OS pages must always reserve and commit. + *is_large = true; + p = win_virtual_alloc_prim(addr, size, try_alignment, flags | MEM_LARGE_PAGES); + if (large_only) return p; + // fall back to non-large page allocation on error (`p == NULL`). + if (p == NULL) { + mi_atomic_store_release(&large_page_try_ok,10UL); // on error, don't try again for the next N allocations + } + } + } + // Fall back to regular page allocation + if (p == NULL) { + *is_large = ((flags&MEM_LARGE_PAGES) != 0); + p = win_virtual_alloc_prim(addr, size, try_alignment, flags); + } + //if (p == NULL) { _mi_warning_message("unable to allocate OS memory (%zu bytes, error code: 0x%x, address: %p, alignment: %zu, flags: 0x%x, large only: %d, allow large: %d)\n", size, GetLastError(), addr, try_alignment, flags, large_only, allow_large); } + return p; +} + +int _mi_prim_alloc(size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero, void** addr) { + mi_assert_internal(size > 0 && (size % _mi_os_page_size()) == 0); + mi_assert_internal(commit || !allow_large); + mi_assert_internal(try_alignment > 0); + *is_zero = true; + int flags = MEM_RESERVE; + if (commit) { flags |= MEM_COMMIT; } + *addr = win_virtual_alloc(NULL, size, try_alignment, flags, false, allow_large, is_large); + return (*addr != NULL ? 
0 : (int)GetLastError()); +} + + +//--------------------------------------------- +// Commit/Reset/Protect +//--------------------------------------------- +#ifdef _MSC_VER +#pragma warning(disable:6250) // suppress warning calling VirtualFree without MEM_RELEASE (for decommit) +#endif + +int _mi_prim_commit(void* addr, size_t size, bool* is_zero) { + *is_zero = false; + /* + // zero'ing only happens on an initial commit... but checking upfront seems expensive.. + _MEMORY_BASIC_INFORMATION meminfo; _mi_memzero_var(meminfo); + if (VirtualQuery(addr, &meminfo, size) > 0) { + if ((meminfo.State & MEM_COMMIT) == 0) { + *is_zero = true; + } + } + */ + // commit + void* p = VirtualAlloc(addr, size, MEM_COMMIT, PAGE_READWRITE); + if (p == NULL) return (int)GetLastError(); + return 0; +} + +int _mi_prim_decommit(void* addr, size_t size, bool* needs_recommit) { + BOOL ok = VirtualFree(addr, size, MEM_DECOMMIT); + *needs_recommit = true; // for safety, assume always decommitted even in the case of an error. + return (ok ? 0 : (int)GetLastError()); +} + +int _mi_prim_reset(void* addr, size_t size) { + void* p = VirtualAlloc(addr, size, MEM_RESET, PAGE_READWRITE); + mi_assert_internal(p == addr); + #if 0 + if (p != NULL) { + VirtualUnlock(addr,size); // VirtualUnlock after MEM_RESET removes the memory directly from the working set + } + #endif + return (p != NULL ? 0 : (int)GetLastError()); +} + +int _mi_prim_protect(void* addr, size_t size, bool protect) { + DWORD oldprotect = 0; + BOOL ok = VirtualProtect(addr, size, protect ? PAGE_NOACCESS : PAGE_READWRITE, &oldprotect); + return (ok ? 
0 : (int)GetLastError()); +} + + +//--------------------------------------------- +// Huge page allocation +//--------------------------------------------- + +static void* _mi_prim_alloc_huge_os_pagesx(void* hint_addr, size_t size, int numa_node) +{ + const DWORD flags = MEM_LARGE_PAGES | MEM_COMMIT | MEM_RESERVE; + + win_enable_large_os_pages(NULL); + + MI_MEM_EXTENDED_PARAMETER params[3] = { {{0,0},{0}},{{0,0},{0}},{{0,0},{0}} }; + // on modern Windows try use NtAllocateVirtualMemoryEx for 1GiB huge pages + static bool mi_huge_pages_available = true; + if (pNtAllocateVirtualMemoryEx != NULL && mi_huge_pages_available) { + params[0].Type.Type = MiMemExtendedParameterAttributeFlags; + params[0].Arg.ULong64 = MI_MEM_EXTENDED_PARAMETER_NONPAGED_HUGE; + ULONG param_count = 1; + if (numa_node >= 0) { + param_count++; + params[1].Type.Type = MiMemExtendedParameterNumaNode; + params[1].Arg.ULong = (unsigned)numa_node; + } + SIZE_T psize = size; + void* base = hint_addr; + NTSTATUS err = (*pNtAllocateVirtualMemoryEx)(GetCurrentProcess(), &base, &psize, flags, PAGE_READWRITE, params, param_count); + if (err == 0 && base != NULL) { + return base; + } + else { + // fall back to regular large pages + mi_huge_pages_available = false; // don't try further huge pages + _mi_warning_message("unable to allocate using huge (1GiB) pages, trying large (2MiB) pages instead (status 0x%lx)\n", err); + } + } + // on modern Windows try use VirtualAlloc2 for numa aware large OS page allocation + if (pVirtualAlloc2 != NULL && numa_node >= 0) { + params[0].Type.Type = MiMemExtendedParameterNumaNode; + params[0].Arg.ULong = (unsigned)numa_node; + return (*pVirtualAlloc2)(GetCurrentProcess(), hint_addr, size, flags, PAGE_READWRITE, params, 1); + } + + // otherwise use regular virtual alloc on older windows + return VirtualAlloc(hint_addr, size, flags, PAGE_READWRITE); +} + +int _mi_prim_alloc_huge_os_pages(void* hint_addr, size_t size, int numa_node, bool* is_zero, void** addr) { + *is_zero = 
true; + *addr = _mi_prim_alloc_huge_os_pagesx(hint_addr,size,numa_node); + return (*addr != NULL ? 0 : (int)GetLastError()); +} + + +//--------------------------------------------- +// Numa nodes +//--------------------------------------------- + +size_t _mi_prim_numa_node(void) { + USHORT numa_node = 0; + if (pGetCurrentProcessorNumberEx != NULL && pGetNumaProcessorNodeEx != NULL) { + // Extended API is supported + MI_PROCESSOR_NUMBER pnum; + (*pGetCurrentProcessorNumberEx)(&pnum); + USHORT nnode = 0; + BOOL ok = (*pGetNumaProcessorNodeEx)(&pnum, &nnode); + if (ok) { numa_node = nnode; } + } + else if (pGetNumaProcessorNode != NULL) { + // Vista or earlier, use older API that is limited to 64 processors. Issue #277 + DWORD pnum = GetCurrentProcessorNumber(); + UCHAR nnode = 0; + BOOL ok = pGetNumaProcessorNode((UCHAR)pnum, &nnode); + if (ok) { numa_node = nnode; } + } + return numa_node; +} + +size_t _mi_prim_numa_node_count(void) { + ULONG numa_max = 0; + GetNumaHighestNodeNumber(&numa_max); + // find the highest node number that has actual processors assigned to it. Issue #282 + while(numa_max > 0) { + if (pGetNumaNodeProcessorMaskEx != NULL) { + // Extended API is supported + GROUP_AFFINITY affinity; + if ((*pGetNumaNodeProcessorMaskEx)((USHORT)numa_max, &affinity)) { + if (affinity.Mask != 0) break; // found the maximum non-empty node + } + } + else { + // Vista or earlier, use older API that is limited to 64 processors. 
+ ULONGLONG mask; + if (GetNumaNodeProcessorMask((UCHAR)numa_max, &mask)) { + if (mask != 0) break; // found the maximum non-empty node + }; + } + // max node was invalid or had no processor assigned, try again + numa_max--; + } + return ((size_t)numa_max + 1); +} + + +//---------------------------------------------------------------- +// Clock +//---------------------------------------------------------------- + +static mi_msecs_t mi_to_msecs(LARGE_INTEGER t) { + static LARGE_INTEGER mfreq; // = 0 + if (mfreq.QuadPart == 0LL) { + LARGE_INTEGER f; + QueryPerformanceFrequency(&f); + mfreq.QuadPart = f.QuadPart/1000LL; + if (mfreq.QuadPart == 0) mfreq.QuadPart = 1; + } + return (mi_msecs_t)(t.QuadPart / mfreq.QuadPart); +} + +mi_msecs_t _mi_prim_clock_now(void) { + LARGE_INTEGER t; + QueryPerformanceCounter(&t); + return mi_to_msecs(t); +} + + +//---------------------------------------------------------------- +// Process Info +//---------------------------------------------------------------- + +#include +#include + +static mi_msecs_t filetime_msecs(const FILETIME* ftime) { + ULARGE_INTEGER i; + i.LowPart = ftime->dwLowDateTime; + i.HighPart = ftime->dwHighDateTime; + mi_msecs_t msecs = (i.QuadPart / 10000); // FILETIME is in 100 nano seconds + return msecs; +} + +typedef BOOL (WINAPI *PGetProcessMemoryInfo)(HANDLE, PPROCESS_MEMORY_COUNTERS, DWORD); +static PGetProcessMemoryInfo pGetProcessMemoryInfo = NULL; + +void _mi_prim_process_info(mi_process_info_t* pinfo) +{ + FILETIME ct; + FILETIME ut; + FILETIME st; + FILETIME et; + GetProcessTimes(GetCurrentProcess(), &ct, &et, &st, &ut); + pinfo->utime = filetime_msecs(&ut); + pinfo->stime = filetime_msecs(&st); + + // load psapi on demand + if (pGetProcessMemoryInfo == NULL) { + HINSTANCE hDll = LoadLibrary(TEXT("psapi.dll")); + if (hDll != NULL) { + pGetProcessMemoryInfo = (PGetProcessMemoryInfo)(void (*)(void))GetProcAddress(hDll, "GetProcessMemoryInfo"); + } + } + + // get process info + PROCESS_MEMORY_COUNTERS 
info; + memset(&info, 0, sizeof(info)); + if (pGetProcessMemoryInfo != NULL) { + pGetProcessMemoryInfo(GetCurrentProcess(), &info, sizeof(info)); + } + pinfo->current_rss = (size_t)info.WorkingSetSize; + pinfo->peak_rss = (size_t)info.PeakWorkingSetSize; + pinfo->current_commit = (size_t)info.PagefileUsage; + pinfo->peak_commit = (size_t)info.PeakPagefileUsage; + pinfo->page_faults = (size_t)info.PageFaultCount; +} + +//---------------------------------------------------------------- +// Output +//---------------------------------------------------------------- + +void _mi_prim_out_stderr( const char* msg ) +{ + // on windows with redirection, the C runtime cannot handle locale dependent output + // after the main thread closes so we use direct console output. + if (!_mi_preloading()) { + // _cputs(msg); // _cputs cannot be used as it aborts when failing to lock the console + static HANDLE hcon = INVALID_HANDLE_VALUE; + static bool hconIsConsole; + if (hcon == INVALID_HANDLE_VALUE) { + CONSOLE_SCREEN_BUFFER_INFO sbi; + hcon = GetStdHandle(STD_ERROR_HANDLE); + hconIsConsole = ((hcon != INVALID_HANDLE_VALUE) && GetConsoleScreenBufferInfo(hcon, &sbi)); + } + const size_t len = _mi_strlen(msg); + if (len > 0 && len < UINT32_MAX) { + DWORD written = 0; + if (hconIsConsole) { + WriteConsoleA(hcon, msg, (DWORD)len, &written, NULL); + } + else if (hcon != INVALID_HANDLE_VALUE) { + // use direct write if stderr was redirected + WriteFile(hcon, msg, (DWORD)len, &written, NULL); + } + else { + // finally fall back to fputs after all + fputs(msg, stderr); + } + } + } +} + + +//---------------------------------------------------------------- +// Environment +//---------------------------------------------------------------- + +// On Windows use GetEnvironmentVariable instead of getenv to work +// reliably even when this is invoked before the C runtime is initialized. +// i.e. when `_mi_preloading() == true`. +// Note: on windows, environment names are not case sensitive. 
+bool _mi_prim_getenv(const char* name, char* result, size_t result_size) { + result[0] = 0; + size_t len = GetEnvironmentVariableA(name, result, (DWORD)result_size); + return (len > 0 && len < result_size); +} + + + +//---------------------------------------------------------------- +// Random +//---------------------------------------------------------------- + +#if defined(MI_USE_RTLGENRANDOM) // || defined(__cplusplus) +// We prefer to use BCryptGenRandom instead of (the unofficial) RtlGenRandom but when using +// dynamic overriding, we observed it can raise an exception when compiled with C++, and +// sometimes deadlocks when also running under the VS debugger. +// In contrast, issue #623 implies that on Windows Server 2019 we need to use BCryptGenRandom. +// To be continued.. +#pragma comment (lib,"advapi32.lib") +#define RtlGenRandom SystemFunction036 +mi_decl_externc BOOLEAN NTAPI RtlGenRandom(PVOID RandomBuffer, ULONG RandomBufferLength); + +bool _mi_prim_random_buf(void* buf, size_t buf_len) { + return (RtlGenRandom(buf, (ULONG)buf_len) != 0); +} + +#else + +#ifndef BCRYPT_USE_SYSTEM_PREFERRED_RNG +#define BCRYPT_USE_SYSTEM_PREFERRED_RNG 0x00000002 +#endif + +typedef LONG (NTAPI *PBCryptGenRandom)(HANDLE, PUCHAR, ULONG, ULONG); +static PBCryptGenRandom pBCryptGenRandom = NULL; + +bool _mi_prim_random_buf(void* buf, size_t buf_len) { + if (pBCryptGenRandom == NULL) { + HINSTANCE hDll = LoadLibrary(TEXT("bcrypt.dll")); + if (hDll != NULL) { + pBCryptGenRandom = (PBCryptGenRandom)(void (*)(void))GetProcAddress(hDll, "BCryptGenRandom"); + } + if (pBCryptGenRandom == NULL) return false; + } + return (pBCryptGenRandom(NULL, (PUCHAR)buf, (ULONG)buf_len, BCRYPT_USE_SYSTEM_PREFERRED_RNG) >= 0); +} + +#endif // MI_USE_RTLGENRANDOM + +//---------------------------------------------------------------- +// Thread init/done +//---------------------------------------------------------------- + +#if !defined(MI_SHARED_LIB) + +// use thread local storage keys to detect 
thread ending +// note: another design could be to use special linker sections (see issue #869) +#include +#if (_WIN32_WINNT < 0x600) // before Windows Vista +WINBASEAPI DWORD WINAPI FlsAlloc( _In_opt_ PFLS_CALLBACK_FUNCTION lpCallback ); +WINBASEAPI PVOID WINAPI FlsGetValue( _In_ DWORD dwFlsIndex ); +WINBASEAPI BOOL WINAPI FlsSetValue( _In_ DWORD dwFlsIndex, _In_opt_ PVOID lpFlsData ); +WINBASEAPI BOOL WINAPI FlsFree(_In_ DWORD dwFlsIndex); +#endif + +static DWORD mi_fls_key = (DWORD)(-1); + +static void NTAPI mi_fls_done(PVOID value) { + mi_heap_t* heap = (mi_heap_t*)value; + if (heap != NULL) { + _mi_thread_done(heap); + FlsSetValue(mi_fls_key, NULL); // prevent recursion as _mi_thread_done may set it back to the main heap, issue #672 + } +} + +void _mi_prim_thread_init_auto_done(void) { + mi_fls_key = FlsAlloc(&mi_fls_done); +} + +void _mi_prim_thread_done_auto_done(void) { + // call thread-done on all threads (except the main thread) to prevent + // dangling callback pointer if statically linked with a DLL; Issue #208 + FlsFree(mi_fls_key); +} + +void _mi_prim_thread_associate_default_heap(mi_heap_t* heap) { + mi_assert_internal(mi_fls_key != (DWORD)(-1)); + FlsSetValue(mi_fls_key, heap); +} + +#else + +// Dll; nothing to do as in that case thread_done is handled through the DLL_THREAD_DETACH event. + +void _mi_prim_thread_init_auto_done(void) { +} + +void _mi_prim_thread_done_auto_done(void) { +} + +void _mi_prim_thread_associate_default_heap(mi_heap_t* heap) { + MI_UNUSED(heap); +} + +#endif diff --git a/yass/third_party/mimalloc/src/prim/windows/readme.md b/yass/third_party/mimalloc/src/prim/windows/readme.md new file mode 100644 index 0000000000..217c3d174d --- /dev/null +++ b/yass/third_party/mimalloc/src/prim/windows/readme.md @@ -0,0 +1,17 @@ +## Primitives: + +- `prim.c` contains Windows primitives for OS allocation. + +## Event Tracing for Windows (ETW) + +- `etw.h` is generated from `etw.man` which contains the manifest for mimalloc events. 
+ (100 is an allocation, 101 is for a free) + +- `etw-mimalloc.wprp` is a profile for the Windows Performance Recorder (WPR). + In an admin prompt, you can use: + ``` + > wpr -start src\prim\windows\etw-mimalloc.wprp -filemode + > + > wpr -stop test.etl + ``` + and then open `test.etl` in the Windows Performance Analyzer (WPA). \ No newline at end of file diff --git a/yass/third_party/mimalloc/src/random.c b/yass/third_party/mimalloc/src/random.c new file mode 100644 index 0000000000..4fc8b2f8fb --- /dev/null +++ b/yass/third_party/mimalloc/src/random.c @@ -0,0 +1,254 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2019-2021, Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. +-----------------------------------------------------------------------------*/ +#include "mimalloc.h" +#include "mimalloc/internal.h" +#include "mimalloc/prim.h" // _mi_prim_random_buf +#include // memset + +/* ---------------------------------------------------------------------------- +We use our own PRNG to keep predictable performance of random number generation +and to avoid implementations that use a lock. We only use the OS provided +random source to initialize the initial seeds. Since we do not need ultimate +performance but we do rely on the security (for secret cookies in secure mode) +we use a cryptographically secure generator (chacha20). +-----------------------------------------------------------------------------*/ + +#define MI_CHACHA_ROUNDS (20) // perhaps use 12 for better performance? 
+ + +/* ---------------------------------------------------------------------------- +Chacha20 implementation as the original algorithm with a 64-bit nonce +and counter: https://en.wikipedia.org/wiki/Salsa20 +The input matrix has sixteen 32-bit values: +Position 0 to 3: constant key +Position 4 to 11: the key +Position 12 to 13: the counter. +Position 14 to 15: the nonce. + +The implementation uses regular C code which compiles very well on modern compilers. +(gcc x64 has no register spills, and clang 6+ uses SSE instructions) +-----------------------------------------------------------------------------*/ + +static inline uint32_t rotl(uint32_t x, uint32_t shift) { + return (x << shift) | (x >> (32 - shift)); +} + +static inline void qround(uint32_t x[16], size_t a, size_t b, size_t c, size_t d) { + x[a] += x[b]; x[d] = rotl(x[d] ^ x[a], 16); + x[c] += x[d]; x[b] = rotl(x[b] ^ x[c], 12); + x[a] += x[b]; x[d] = rotl(x[d] ^ x[a], 8); + x[c] += x[d]; x[b] = rotl(x[b] ^ x[c], 7); +} + +static void chacha_block(mi_random_ctx_t* ctx) +{ + // scramble into `x` + uint32_t x[16]; + for (size_t i = 0; i < 16; i++) { + x[i] = ctx->input[i]; + } + for (size_t i = 0; i < MI_CHACHA_ROUNDS; i += 2) { + qround(x, 0, 4, 8, 12); + qround(x, 1, 5, 9, 13); + qround(x, 2, 6, 10, 14); + qround(x, 3, 7, 11, 15); + qround(x, 0, 5, 10, 15); + qround(x, 1, 6, 11, 12); + qround(x, 2, 7, 8, 13); + qround(x, 3, 4, 9, 14); + } + + // add scrambled data to the initial state + for (size_t i = 0; i < 16; i++) { + ctx->output[i] = x[i] + ctx->input[i]; + } + ctx->output_available = 16; + + // increment the counter for the next round + ctx->input[12] += 1; + if (ctx->input[12] == 0) { + ctx->input[13] += 1; + if (ctx->input[13] == 0) { // and keep increasing into the nonce + ctx->input[14] += 1; + } + } +} + +static uint32_t chacha_next32(mi_random_ctx_t* ctx) { + if (ctx->output_available <= 0) { + chacha_block(ctx); + ctx->output_available = 16; // (assign again to suppress static analysis 
warning) + } + const uint32_t x = ctx->output[16 - ctx->output_available]; + ctx->output[16 - ctx->output_available] = 0; // reset once the data is handed out + ctx->output_available--; + return x; +} + +static inline uint32_t read32(const uint8_t* p, size_t idx32) { + const size_t i = 4*idx32; + return ((uint32_t)p[i+0] | (uint32_t)p[i+1] << 8 | (uint32_t)p[i+2] << 16 | (uint32_t)p[i+3] << 24); +} + +static void chacha_init(mi_random_ctx_t* ctx, const uint8_t key[32], uint64_t nonce) +{ + // since we only use chacha for randomness (and not encryption) we + // do not _need_ to read 32-bit values as little endian but we do anyways + // just for being compatible :-) + memset(ctx, 0, sizeof(*ctx)); + for (size_t i = 0; i < 4; i++) { + const uint8_t* sigma = (uint8_t*)"expand 32-byte k"; + ctx->input[i] = read32(sigma,i); + } + for (size_t i = 0; i < 8; i++) { + ctx->input[i + 4] = read32(key,i); + } + ctx->input[12] = 0; + ctx->input[13] = 0; + ctx->input[14] = (uint32_t)nonce; + ctx->input[15] = (uint32_t)(nonce >> 32); +} + +static void chacha_split(mi_random_ctx_t* ctx, uint64_t nonce, mi_random_ctx_t* ctx_new) { + memset(ctx_new, 0, sizeof(*ctx_new)); + _mi_memcpy(ctx_new->input, ctx->input, sizeof(ctx_new->input)); + ctx_new->input[12] = 0; + ctx_new->input[13] = 0; + ctx_new->input[14] = (uint32_t)nonce; + ctx_new->input[15] = (uint32_t)(nonce >> 32); + mi_assert_internal(ctx->input[14] != ctx_new->input[14] || ctx->input[15] != ctx_new->input[15]); // do not reuse nonces! 
+ chacha_block(ctx_new); +} + + +/* ---------------------------------------------------------------------------- +Random interface +-----------------------------------------------------------------------------*/ + +#if MI_DEBUG>1 +static bool mi_random_is_initialized(mi_random_ctx_t* ctx) { + return (ctx != NULL && ctx->input[0] != 0); +} +#endif + +void _mi_random_split(mi_random_ctx_t* ctx, mi_random_ctx_t* ctx_new) { + mi_assert_internal(mi_random_is_initialized(ctx)); + mi_assert_internal(ctx != ctx_new); + chacha_split(ctx, (uintptr_t)ctx_new /*nonce*/, ctx_new); +} + +uintptr_t _mi_random_next(mi_random_ctx_t* ctx) { + mi_assert_internal(mi_random_is_initialized(ctx)); + #if MI_INTPTR_SIZE <= 4 + return chacha_next32(ctx); + #elif MI_INTPTR_SIZE == 8 + return (((uintptr_t)chacha_next32(ctx) << 32) | chacha_next32(ctx)); + #else + # error "define mi_random_next for this platform" + #endif +} + + +/* ---------------------------------------------------------------------------- +To initialize a fresh random context. +If we cannot get good randomness, we fall back to weak randomness based on a timer and ASLR. 
+-----------------------------------------------------------------------------*/ + +uintptr_t _mi_os_random_weak(uintptr_t extra_seed) { + uintptr_t x = (uintptr_t)&_mi_os_random_weak ^ extra_seed; // ASLR makes the address random + x ^= _mi_prim_clock_now(); + // and do a few randomization steps + uintptr_t max = ((x ^ (x >> 17)) & 0x0F) + 1; + for (uintptr_t i = 0; i < max; i++) { + x = _mi_random_shuffle(x); + } + mi_assert_internal(x != 0); + return x; +} + +static void mi_random_init_ex(mi_random_ctx_t* ctx, bool use_weak) { + uint8_t key[32]; + if (use_weak || !_mi_prim_random_buf(key, sizeof(key))) { + // if we fail to get random data from the OS, we fall back to a + // weak random source based on the current time + #if !defined(__wasi__) + if (!use_weak) { _mi_warning_message("unable to use secure randomness\n"); } + #endif + uintptr_t x = _mi_os_random_weak(0); + for (size_t i = 0; i < 8; i++) { // key is eight 32-bit words. + x = _mi_random_shuffle(x); + ((uint32_t*)key)[i] = (uint32_t)x; + } + ctx->weak = true; + } + else { + ctx->weak = false; + } + chacha_init(ctx, key, (uintptr_t)ctx /*nonce*/ ); +} + +void _mi_random_init(mi_random_ctx_t* ctx) { + mi_random_init_ex(ctx, false); +} + +void _mi_random_init_weak(mi_random_ctx_t * ctx) { + mi_random_init_ex(ctx, true); +} + +void _mi_random_reinit_if_weak(mi_random_ctx_t * ctx) { + if (ctx->weak) { + _mi_random_init(ctx); + } +} + +/* -------------------------------------------------------- +test vectors from +----------------------------------------------------------- */ +/* +static bool array_equals(uint32_t* x, uint32_t* y, size_t n) { + for (size_t i = 0; i < n; i++) { + if (x[i] != y[i]) return false; + } + return true; +} +static void chacha_test(void) +{ + uint32_t x[4] = { 0x11111111, 0x01020304, 0x9b8d6f43, 0x01234567 }; + uint32_t x_out[4] = { 0xea2a92f4, 0xcb1cf8ce, 0x4581472e, 0x5881c4bb }; + qround(x, 0, 1, 2, 3); + mi_assert_internal(array_equals(x, x_out, 4)); + + uint32_t y[16] = { + 
0x879531e0, 0xc5ecf37d, 0x516461b1, 0xc9a62f8a, + 0x44c20ef3, 0x3390af7f, 0xd9fc690b, 0x2a5f714c, + 0x53372767, 0xb00a5631, 0x974c541a, 0x359e9963, + 0x5c971061, 0x3d631689, 0x2098d9d6, 0x91dbd320 }; + uint32_t y_out[16] = { + 0x879531e0, 0xc5ecf37d, 0xbdb886dc, 0xc9a62f8a, + 0x44c20ef3, 0x3390af7f, 0xd9fc690b, 0xcfacafd2, + 0xe46bea80, 0xb00a5631, 0x974c541a, 0x359e9963, + 0x5c971061, 0xccc07c79, 0x2098d9d6, 0x91dbd320 }; + qround(y, 2, 7, 8, 13); + mi_assert_internal(array_equals(y, y_out, 16)); + + mi_random_ctx_t r = { + { 0x61707865, 0x3320646e, 0x79622d32, 0x6b206574, + 0x03020100, 0x07060504, 0x0b0a0908, 0x0f0e0d0c, + 0x13121110, 0x17161514, 0x1b1a1918, 0x1f1e1d1c, + 0x00000001, 0x09000000, 0x4a000000, 0x00000000 }, + {0}, + 0 + }; + uint32_t r_out[16] = { + 0xe4e7f110, 0x15593bd1, 0x1fdd0f50, 0xc47120a3, + 0xc7f4d1c7, 0x0368c033, 0x9aaa2204, 0x4e6cd4c3, + 0x466482d2, 0x09aa9f07, 0x05d7c214, 0xa2028bd9, + 0xd19c12b5, 0xb94e16de, 0xe883d0cb, 0x4e3c50a2 }; + chacha_block(&r); + mi_assert_internal(array_equals(r.output, r_out, 16)); +} +*/ diff --git a/yass/third_party/mimalloc/src/segment-map.c b/yass/third_party/mimalloc/src/segment-map.c new file mode 100644 index 0000000000..1efb1e2360 --- /dev/null +++ b/yass/third_party/mimalloc/src/segment-map.c @@ -0,0 +1,155 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2019-2023, Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. +-----------------------------------------------------------------------------*/ + +/* ----------------------------------------------------------- + The following functions are to reliably find the segment or + block that encompasses any pointer p (or NULL if it is not + in any of our segments). 
+ We maintain a bitmap of all memory with 1 bit per MI_SEGMENT_SIZE (64MiB) + set to 1 if it contains the segment meta data. +----------------------------------------------------------- */ +#include "mimalloc.h" +#include "mimalloc/internal.h" +#include "mimalloc/atomic.h" + +#if (MI_INTPTR_SIZE>=8) && MI_TRACK_ASAN +#define MI_MAX_ADDRESS ((size_t)140 << 40) // 140TB (see issue #881) +#elif (MI_INTPTR_SIZE >= 8) +#define MI_MAX_ADDRESS ((size_t)40 << 40) // 40TB (to include huge page areas) +#else +#define MI_MAX_ADDRESS ((size_t)2 << 30) // 2Gb +#endif + +#define MI_SEGMENT_MAP_BITS (MI_MAX_ADDRESS / MI_SEGMENT_SIZE) +#define MI_SEGMENT_MAP_SIZE (MI_SEGMENT_MAP_BITS / 8) +#define MI_SEGMENT_MAP_WSIZE (MI_SEGMENT_MAP_SIZE / MI_INTPTR_SIZE) + +static _Atomic(uintptr_t) mi_segment_map[MI_SEGMENT_MAP_WSIZE + 1]; // 2KiB per TB with 64MiB segments + +static size_t mi_segment_map_index_of(const mi_segment_t* segment, size_t* bitidx) { + // note: segment can be invalid or NULL. + mi_assert_internal(_mi_ptr_segment(segment + 1) == segment); // is it aligned on MI_SEGMENT_SIZE? 
+ if ((uintptr_t)segment >= MI_MAX_ADDRESS) { + *bitidx = 0; + return MI_SEGMENT_MAP_WSIZE; + } + else { + const uintptr_t segindex = ((uintptr_t)segment) / MI_SEGMENT_SIZE; + *bitidx = segindex % MI_INTPTR_BITS; + const size_t mapindex = segindex / MI_INTPTR_BITS; + mi_assert_internal(mapindex < MI_SEGMENT_MAP_WSIZE); + return mapindex; + } +} + +void _mi_segment_map_allocated_at(const mi_segment_t* segment) { + size_t bitidx; + size_t index = mi_segment_map_index_of(segment, &bitidx); + mi_assert_internal(index <= MI_SEGMENT_MAP_WSIZE); + if (index==MI_SEGMENT_MAP_WSIZE) return; + uintptr_t mask = mi_atomic_load_relaxed(&mi_segment_map[index]); + uintptr_t newmask; + do { + newmask = (mask | ((uintptr_t)1 << bitidx)); + } while (!mi_atomic_cas_weak_release(&mi_segment_map[index], &mask, newmask)); +} + +void _mi_segment_map_freed_at(const mi_segment_t* segment) { + size_t bitidx; + size_t index = mi_segment_map_index_of(segment, &bitidx); + mi_assert_internal(index <= MI_SEGMENT_MAP_WSIZE); + if (index == MI_SEGMENT_MAP_WSIZE) return; + uintptr_t mask = mi_atomic_load_relaxed(&mi_segment_map[index]); + uintptr_t newmask; + do { + newmask = (mask & ~((uintptr_t)1 << bitidx)); + } while (!mi_atomic_cas_weak_release(&mi_segment_map[index], &mask, newmask)); +} + +// Determine the segment belonging to a pointer or NULL if it is not in a valid segment. 
+static mi_segment_t* _mi_segment_of(const void* p) { + if (p == NULL) return NULL; + mi_segment_t* segment = _mi_ptr_segment(p); // segment can be NULL + size_t bitidx; + size_t index = mi_segment_map_index_of(segment, &bitidx); + // fast path: for any pointer to valid small/medium/large object or first MI_SEGMENT_SIZE in huge + const uintptr_t mask = mi_atomic_load_relaxed(&mi_segment_map[index]); + if mi_likely((mask & ((uintptr_t)1 << bitidx)) != 0) { + return segment; // yes, allocated by us + } + if (index==MI_SEGMENT_MAP_WSIZE) return NULL; + + // TODO: maintain max/min allocated range for efficiency for more efficient rejection of invalid pointers? + + // search downwards for the first segment in case it is an interior pointer + // could be slow but searches in MI_INTPTR_SIZE * MI_SEGMENT_SIZE (512MiB) steps trough + // valid huge objects + // note: we could maintain a lowest index to speed up the path for invalid pointers? + size_t lobitidx; + size_t loindex; + uintptr_t lobits = mask & (((uintptr_t)1 << bitidx) - 1); + if (lobits != 0) { + loindex = index; + lobitidx = mi_bsr(lobits); // lobits != 0 + } + else if (index == 0) { + return NULL; + } + else { + mi_assert_internal(index > 0); + uintptr_t lomask = mask; + loindex = index; + do { + loindex--; + lomask = mi_atomic_load_relaxed(&mi_segment_map[loindex]); + } while (lomask != 0 && loindex > 0); + if (lomask == 0) return NULL; + lobitidx = mi_bsr(lomask); // lomask != 0 + } + mi_assert_internal(loindex < MI_SEGMENT_MAP_WSIZE); + // take difference as the addresses could be larger than the MAX_ADDRESS space. 
+ size_t diff = (((index - loindex) * (8*MI_INTPTR_SIZE)) + bitidx - lobitidx) * MI_SEGMENT_SIZE; + segment = (mi_segment_t*)((uint8_t*)segment - diff); + + if (segment == NULL) return NULL; + mi_assert_internal((void*)segment < p); + bool cookie_ok = (_mi_ptr_cookie(segment) == segment->cookie); + mi_assert_internal(cookie_ok); + if mi_unlikely(!cookie_ok) return NULL; + if (((uint8_t*)segment + mi_segment_size(segment)) <= (uint8_t*)p) return NULL; // outside the range + mi_assert_internal(p >= (void*)segment && (uint8_t*)p < (uint8_t*)segment + mi_segment_size(segment)); + return segment; +} + +// Is this a valid pointer in our heap? +static bool mi_is_valid_pointer(const void* p) { + return ((_mi_segment_of(p) != NULL) || (_mi_arena_contains(p))); +} + +mi_decl_nodiscard mi_decl_export bool mi_is_in_heap_region(const void* p) mi_attr_noexcept { + return mi_is_valid_pointer(p); +} + +/* +// Return the full segment range belonging to a pointer +static void* mi_segment_range_of(const void* p, size_t* size) { + mi_segment_t* segment = _mi_segment_of(p); + if (segment == NULL) { + if (size != NULL) *size = 0; + return NULL; + } + else { + if (size != NULL) *size = segment->segment_size; + return segment; + } + mi_assert_expensive(page == NULL || mi_segment_is_valid(_mi_page_segment(page),tld)); + mi_assert_internal(page == NULL || (mi_segment_page_size(_mi_page_segment(page)) - (MI_SECURE == 0 ? 
0 : _mi_os_page_size())) >= block_size); + mi_reset_delayed(tld); + mi_assert_internal(page == NULL || mi_page_not_in_queue(page, tld)); + return page; +} +*/ diff --git a/yass/third_party/mimalloc/src/segment.c b/yass/third_party/mimalloc/src/segment.c new file mode 100644 index 0000000000..4e4dcb80ee --- /dev/null +++ b/yass/third_party/mimalloc/src/segment.c @@ -0,0 +1,1524 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2018-2024, Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. +-----------------------------------------------------------------------------*/ +#include "mimalloc.h" +#include "mimalloc/internal.h" +#include "mimalloc/atomic.h" + +#include // memset +#include + +// ------------------------------------------------------------------- +// Segments +// mimalloc pages reside in segments. See `mi_segment_valid` for invariants. 
+// ------------------------------------------------------------------- + + +static void mi_segment_try_purge(mi_segment_t* segment, bool force, mi_stats_t* stats); + + +// ------------------------------------------------------------------- +// commit mask +// ------------------------------------------------------------------- + +static bool mi_commit_mask_all_set(const mi_commit_mask_t* commit, const mi_commit_mask_t* cm) { + for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) { + if ((commit->mask[i] & cm->mask[i]) != cm->mask[i]) return false; + } + return true; +} + +static bool mi_commit_mask_any_set(const mi_commit_mask_t* commit, const mi_commit_mask_t* cm) { + for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) { + if ((commit->mask[i] & cm->mask[i]) != 0) return true; + } + return false; +} + +static void mi_commit_mask_create_intersect(const mi_commit_mask_t* commit, const mi_commit_mask_t* cm, mi_commit_mask_t* res) { + for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) { + res->mask[i] = (commit->mask[i] & cm->mask[i]); + } +} + +static void mi_commit_mask_clear(mi_commit_mask_t* res, const mi_commit_mask_t* cm) { + for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) { + res->mask[i] &= ~(cm->mask[i]); + } +} + +static void mi_commit_mask_set(mi_commit_mask_t* res, const mi_commit_mask_t* cm) { + for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) { + res->mask[i] |= cm->mask[i]; + } +} + +static void mi_commit_mask_create(size_t bitidx, size_t bitcount, mi_commit_mask_t* cm) { + mi_assert_internal(bitidx < MI_COMMIT_MASK_BITS); + mi_assert_internal((bitidx + bitcount) <= MI_COMMIT_MASK_BITS); + if (bitcount == MI_COMMIT_MASK_BITS) { + mi_assert_internal(bitidx==0); + mi_commit_mask_create_full(cm); + } + else if (bitcount == 0) { + mi_commit_mask_create_empty(cm); + } + else { + mi_commit_mask_create_empty(cm); + size_t i = bitidx / MI_COMMIT_MASK_FIELD_BITS; + size_t ofs = bitidx % MI_COMMIT_MASK_FIELD_BITS; + while (bitcount > 0) 
{ + mi_assert_internal(i < MI_COMMIT_MASK_FIELD_COUNT); + size_t avail = MI_COMMIT_MASK_FIELD_BITS - ofs; + size_t count = (bitcount > avail ? avail : bitcount); + size_t mask = (count >= MI_COMMIT_MASK_FIELD_BITS ? ~((size_t)0) : (((size_t)1 << count) - 1) << ofs); + cm->mask[i] = mask; + bitcount -= count; + ofs = 0; + i++; + } + } +} + +size_t _mi_commit_mask_committed_size(const mi_commit_mask_t* cm, size_t total) { + mi_assert_internal((total%MI_COMMIT_MASK_BITS)==0); + size_t count = 0; + for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) { + size_t mask = cm->mask[i]; + if (~mask == 0) { + count += MI_COMMIT_MASK_FIELD_BITS; + } + else { + for (; mask != 0; mask >>= 1) { // todo: use popcount + if ((mask&1)!=0) count++; + } + } + } + // we use total since for huge segments each commit bit may represent a larger size + return ((total / MI_COMMIT_MASK_BITS) * count); +} + + +size_t _mi_commit_mask_next_run(const mi_commit_mask_t* cm, size_t* idx) { + size_t i = (*idx) / MI_COMMIT_MASK_FIELD_BITS; + size_t ofs = (*idx) % MI_COMMIT_MASK_FIELD_BITS; + size_t mask = 0; + // find first ones + while (i < MI_COMMIT_MASK_FIELD_COUNT) { + mask = cm->mask[i]; + mask >>= ofs; + if (mask != 0) { + while ((mask&1) == 0) { + mask >>= 1; + ofs++; + } + break; + } + i++; + ofs = 0; + } + if (i >= MI_COMMIT_MASK_FIELD_COUNT) { + // not found + *idx = MI_COMMIT_MASK_BITS; + return 0; + } + else { + // found, count ones + size_t count = 0; + *idx = (i*MI_COMMIT_MASK_FIELD_BITS) + ofs; + do { + mi_assert_internal(ofs < MI_COMMIT_MASK_FIELD_BITS && (mask&1) == 1); + do { + count++; + mask >>= 1; + } while ((mask&1) == 1); + if ((((*idx + count) % MI_COMMIT_MASK_FIELD_BITS) == 0)) { + i++; + if (i >= MI_COMMIT_MASK_FIELD_COUNT) break; + mask = cm->mask[i]; + ofs = 0; + } + } while ((mask&1) == 1); + mi_assert_internal(count > 0); + return count; + } +} + + +/* -------------------------------------------------------------------------------- + Segment allocation 
+-------------------------------------------------------------------------------- */ + + +/* ----------------------------------------------------------- + Slices +----------------------------------------------------------- */ + + +static const mi_slice_t* mi_segment_slices_end(const mi_segment_t* segment) { + return &segment->slices[segment->slice_entries]; +} + +static uint8_t* mi_slice_start(const mi_slice_t* slice) { + mi_segment_t* segment = _mi_ptr_segment(slice); + mi_assert_internal(slice >= segment->slices && slice < mi_segment_slices_end(segment)); + return ((uint8_t*)segment + ((slice - segment->slices)*MI_SEGMENT_SLICE_SIZE)); +} + + +/* ----------------------------------------------------------- + Bins +----------------------------------------------------------- */ +// Use bit scan forward to quickly find the first zero bit if it is available + +static inline size_t mi_slice_bin8(size_t slice_count) { + if (slice_count<=1) return slice_count; + mi_assert_internal(slice_count <= MI_SLICES_PER_SEGMENT); + slice_count--; + size_t s = mi_bsr(slice_count); // slice_count > 1 + if (s <= 2) return slice_count + 1; + size_t bin = ((s << 2) | ((slice_count >> (s - 2))&0x03)) - 4; + return bin; +} + +static inline size_t mi_slice_bin(size_t slice_count) { + mi_assert_internal(slice_count*MI_SEGMENT_SLICE_SIZE <= MI_SEGMENT_SIZE); + mi_assert_internal(mi_slice_bin8(MI_SLICES_PER_SEGMENT) <= MI_SEGMENT_BIN_MAX); + size_t bin = mi_slice_bin8(slice_count); + mi_assert_internal(bin <= MI_SEGMENT_BIN_MAX); + return bin; +} + +static inline size_t mi_slice_index(const mi_slice_t* slice) { + mi_segment_t* segment = _mi_ptr_segment(slice); + ptrdiff_t index = slice - segment->slices; + mi_assert_internal(index >= 0 && index < (ptrdiff_t)segment->slice_entries); + return index; +} + + +/* ----------------------------------------------------------- + Slice span queues +----------------------------------------------------------- */ + +static void 
mi_span_queue_push(mi_span_queue_t* sq, mi_slice_t* slice) { + // todo: or push to the end? + mi_assert_internal(slice->prev == NULL && slice->next==NULL); + slice->prev = NULL; // paranoia + slice->next = sq->first; + sq->first = slice; + if (slice->next != NULL) slice->next->prev = slice; + else sq->last = slice; + slice->block_size = 0; // free +} + +static mi_span_queue_t* mi_span_queue_for(size_t slice_count, mi_segments_tld_t* tld) { + size_t bin = mi_slice_bin(slice_count); + mi_span_queue_t* sq = &tld->spans[bin]; + mi_assert_internal(sq->slice_count >= slice_count); + return sq; +} + +static void mi_span_queue_delete(mi_span_queue_t* sq, mi_slice_t* slice) { + mi_assert_internal(slice->block_size==0 && slice->slice_count>0 && slice->slice_offset==0); + // should work too if the queue does not contain slice (which can happen during reclaim) + if (slice->prev != NULL) slice->prev->next = slice->next; + if (slice == sq->first) sq->first = slice->next; + if (slice->next != NULL) slice->next->prev = slice->prev; + if (slice == sq->last) sq->last = slice->prev; + slice->prev = NULL; + slice->next = NULL; + slice->block_size = 1; // no more free +} + + +/* ----------------------------------------------------------- + Invariant checking +----------------------------------------------------------- */ + +static bool mi_slice_is_used(const mi_slice_t* slice) { + return (slice->block_size > 0); +} + + +#if (MI_DEBUG>=3) +static bool mi_span_queue_contains(mi_span_queue_t* sq, mi_slice_t* slice) { + for (mi_slice_t* s = sq->first; s != NULL; s = s->next) { + if (s==slice) return true; + } + return false; +} + +static bool mi_segment_is_valid(mi_segment_t* segment, mi_segments_tld_t* tld) { + mi_assert_internal(segment != NULL); + mi_assert_internal(_mi_ptr_cookie(segment) == segment->cookie); + mi_assert_internal(segment->abandoned <= segment->used); + mi_assert_internal(segment->thread_id == 0 || segment->thread_id == _mi_thread_id()); + 
mi_assert_internal(mi_commit_mask_all_set(&segment->commit_mask, &segment->purge_mask)); // can only decommit committed blocks + //mi_assert_internal(segment->segment_info_size % MI_SEGMENT_SLICE_SIZE == 0); + mi_slice_t* slice = &segment->slices[0]; + const mi_slice_t* end = mi_segment_slices_end(segment); + size_t used_count = 0; + mi_span_queue_t* sq; + while(slice < end) { + mi_assert_internal(slice->slice_count > 0); + mi_assert_internal(slice->slice_offset == 0); + size_t index = mi_slice_index(slice); + size_t maxindex = (index + slice->slice_count >= segment->slice_entries ? segment->slice_entries : index + slice->slice_count) - 1; + if (mi_slice_is_used(slice)) { // a page in use, we need at least MAX_SLICE_OFFSET_COUNT valid back offsets + used_count++; + mi_assert_internal(slice->is_huge == (segment->kind == MI_SEGMENT_HUGE)); + for (size_t i = 0; i <= MI_MAX_SLICE_OFFSET_COUNT && index + i <= maxindex; i++) { + mi_assert_internal(segment->slices[index + i].slice_offset == i*sizeof(mi_slice_t)); + mi_assert_internal(i==0 || segment->slices[index + i].slice_count == 0); + mi_assert_internal(i==0 || segment->slices[index + i].block_size == 1); + } + // and the last entry as well (for coalescing) + const mi_slice_t* last = slice + slice->slice_count - 1; + if (last > slice && last < mi_segment_slices_end(segment)) { + mi_assert_internal(last->slice_offset == (slice->slice_count-1)*sizeof(mi_slice_t)); + mi_assert_internal(last->slice_count == 0); + mi_assert_internal(last->block_size == 1); + } + } + else { // free range of slices; only last slice needs a valid back offset + mi_slice_t* last = &segment->slices[maxindex]; + if (segment->kind != MI_SEGMENT_HUGE || slice->slice_count <= (segment->slice_entries - segment->segment_info_slices)) { + mi_assert_internal((uint8_t*)slice == (uint8_t*)last - last->slice_offset); + } + mi_assert_internal(slice == last || last->slice_count == 0 ); + mi_assert_internal(last->block_size == 0 || 
(segment->kind==MI_SEGMENT_HUGE && last->block_size==1)); + if (segment->kind != MI_SEGMENT_HUGE && segment->thread_id != 0) { // segment is not huge or abandoned + sq = mi_span_queue_for(slice->slice_count,tld); + mi_assert_internal(mi_span_queue_contains(sq,slice)); + } + } + slice = &segment->slices[maxindex+1]; + } + mi_assert_internal(slice == end); + mi_assert_internal(used_count == segment->used + 1); + return true; +} +#endif + +/* ----------------------------------------------------------- + Segment size calculations +----------------------------------------------------------- */ + +static size_t mi_segment_info_size(mi_segment_t* segment) { + return segment->segment_info_slices * MI_SEGMENT_SLICE_SIZE; +} + +static uint8_t* _mi_segment_page_start_from_slice(const mi_segment_t* segment, const mi_slice_t* slice, size_t block_size, size_t* page_size) +{ + const ptrdiff_t idx = slice - segment->slices; + const size_t psize = (size_t)slice->slice_count * MI_SEGMENT_SLICE_SIZE; + uint8_t* const pstart = (uint8_t*)segment + (idx*MI_SEGMENT_SLICE_SIZE); + // make the start not OS page aligned for smaller blocks to avoid page/cache effects + // note: the offset must always be a block_size multiple since we assume small allocations + // are aligned (see `mi_heap_malloc_aligned`). 
+ size_t start_offset = 0; + if (block_size > 0 && block_size <= MI_MAX_ALIGN_GUARANTEE) { + // for small objects, ensure the page start is aligned with the block size (PR#66 by kickunderscore) + const size_t adjust = block_size - ((uintptr_t)pstart % block_size); + if (adjust < block_size && psize >= block_size + adjust) { + start_offset += adjust; + } + } + if (block_size >= MI_INTPTR_SIZE) { + if (block_size <= 64) { start_offset += 3*block_size; } + else if (block_size <= 512) { start_offset += block_size; } + } + if (page_size != NULL) { *page_size = psize - start_offset; } + return (pstart + start_offset); +} + +// Start of the page available memory; can be used on uninitialized pages +uint8_t* _mi_segment_page_start(const mi_segment_t* segment, const mi_page_t* page, size_t* page_size) +{ + const mi_slice_t* slice = mi_page_to_slice((mi_page_t*)page); + uint8_t* p = _mi_segment_page_start_from_slice(segment, slice, mi_page_block_size(page), page_size); + mi_assert_internal(mi_page_block_size(page) > 0 || _mi_ptr_page(p) == page); + mi_assert_internal(_mi_ptr_segment(p) == segment); + return p; +} + + +static size_t mi_segment_calculate_slices(size_t required, size_t* info_slices) { + size_t page_size = _mi_os_page_size(); + size_t isize = _mi_align_up(sizeof(mi_segment_t), page_size); + size_t guardsize = 0; + + if (MI_SECURE>0) { + // in secure mode, we set up a protected page in between the segment info + // and the page data (and one at the end of the segment) + guardsize = page_size; + if (required > 0) { + required = _mi_align_up(required, MI_SEGMENT_SLICE_SIZE) + page_size; + } + } + + isize = _mi_align_up(isize + guardsize, MI_SEGMENT_SLICE_SIZE); + if (info_slices != NULL) *info_slices = isize / MI_SEGMENT_SLICE_SIZE; + size_t segment_size = (required==0 ? 
MI_SEGMENT_SIZE : _mi_align_up( required + isize + guardsize, MI_SEGMENT_SLICE_SIZE) ); + mi_assert_internal(segment_size % MI_SEGMENT_SLICE_SIZE == 0); + return (segment_size / MI_SEGMENT_SLICE_SIZE); +} + + +/* ---------------------------------------------------------------------------- +Segment caches +We keep a small segment cache per thread to increase local +reuse and avoid setting/clearing guard pages in secure mode. +------------------------------------------------------------------------------- */ + +static void mi_segments_track_size(long segment_size, mi_segments_tld_t* tld) { + if (segment_size>=0) _mi_stat_increase(&tld->stats->segments,1); + else _mi_stat_decrease(&tld->stats->segments,1); + tld->count += (segment_size >= 0 ? 1 : -1); + if (tld->count > tld->peak_count) tld->peak_count = tld->count; + tld->current_size += segment_size; + if (tld->current_size > tld->peak_size) tld->peak_size = tld->current_size; +} + +static void mi_segment_os_free(mi_segment_t* segment, mi_segments_tld_t* tld) { + segment->thread_id = 0; + _mi_segment_map_freed_at(segment); + mi_segments_track_size(-((long)mi_segment_size(segment)),tld); + if (segment->was_reclaimed) { + tld->reclaim_count--; + segment->was_reclaimed = false; + } + if (MI_SECURE>0) { + // _mi_os_unprotect(segment, mi_segment_size(segment)); // ensure no more guard pages are set + // unprotect the guard pages; we cannot just unprotect the whole segment size as part may be decommitted + size_t os_pagesize = _mi_os_page_size(); + _mi_os_unprotect((uint8_t*)segment + mi_segment_info_size(segment) - os_pagesize, os_pagesize); + uint8_t* end = (uint8_t*)segment + mi_segment_size(segment) - os_pagesize; + _mi_os_unprotect(end, os_pagesize); + } + + // purge delayed decommits now? 
(no, leave it to the arena) + // mi_segment_try_purge(segment,true,tld->stats); + + const size_t size = mi_segment_size(segment); + const size_t csize = _mi_commit_mask_committed_size(&segment->commit_mask, size); + + _mi_abandoned_await_readers(); // wait until safe to free + _mi_arena_free(segment, mi_segment_size(segment), csize, segment->memid, tld->stats); +} + +/* ----------------------------------------------------------- + Commit/Decommit ranges +----------------------------------------------------------- */ + +static void mi_segment_commit_mask(mi_segment_t* segment, bool conservative, uint8_t* p, size_t size, uint8_t** start_p, size_t* full_size, mi_commit_mask_t* cm) { + mi_assert_internal(_mi_ptr_segment(p + 1) == segment); + mi_assert_internal(segment->kind != MI_SEGMENT_HUGE); + mi_commit_mask_create_empty(cm); + if (size == 0 || size > MI_SEGMENT_SIZE || segment->kind == MI_SEGMENT_HUGE) return; + const size_t segstart = mi_segment_info_size(segment); + const size_t segsize = mi_segment_size(segment); + if (p >= (uint8_t*)segment + segsize) return; + + size_t pstart = (p - (uint8_t*)segment); + mi_assert_internal(pstart + size <= segsize); + + size_t start; + size_t end; + if (conservative) { + // decommit conservative + start = _mi_align_up(pstart, MI_COMMIT_SIZE); + end = _mi_align_down(pstart + size, MI_COMMIT_SIZE); + mi_assert_internal(start >= segstart); + mi_assert_internal(end <= segsize); + } + else { + // commit liberal + start = _mi_align_down(pstart, MI_MINIMAL_COMMIT_SIZE); + end = _mi_align_up(pstart + size, MI_MINIMAL_COMMIT_SIZE); + } + if (pstart >= segstart && start < segstart) { // note: the mask is also calculated for an initial commit of the info area + start = segstart; + } + if (end > segsize) { + end = segsize; + } + + mi_assert_internal(start <= pstart && (pstart + size) <= end); + mi_assert_internal(start % MI_COMMIT_SIZE==0 && end % MI_COMMIT_SIZE == 0); + *start_p = (uint8_t*)segment + start; + *full_size = (end > start ? 
end - start : 0); + if (*full_size == 0) return; + + size_t bitidx = start / MI_COMMIT_SIZE; + mi_assert_internal(bitidx < MI_COMMIT_MASK_BITS); + + size_t bitcount = *full_size / MI_COMMIT_SIZE; // can be 0 + if (bitidx + bitcount > MI_COMMIT_MASK_BITS) { + _mi_warning_message("commit mask overflow: idx=%zu count=%zu start=%zx end=%zx p=0x%p size=%zu fullsize=%zu\n", bitidx, bitcount, start, end, p, size, *full_size); + } + mi_assert_internal((bitidx + bitcount) <= MI_COMMIT_MASK_BITS); + mi_commit_mask_create(bitidx, bitcount, cm); +} + +static bool mi_segment_commit(mi_segment_t* segment, uint8_t* p, size_t size, mi_stats_t* stats) { + mi_assert_internal(mi_commit_mask_all_set(&segment->commit_mask, &segment->purge_mask)); + + // commit liberal + uint8_t* start = NULL; + size_t full_size = 0; + mi_commit_mask_t mask; + mi_segment_commit_mask(segment, false /* conservative? */, p, size, &start, &full_size, &mask); + if (mi_commit_mask_is_empty(&mask) || full_size == 0) return true; + + if (!mi_commit_mask_all_set(&segment->commit_mask, &mask)) { + // committing + bool is_zero = false; + mi_commit_mask_t cmask; + mi_commit_mask_create_intersect(&segment->commit_mask, &mask, &cmask); + _mi_stat_decrease(&_mi_stats_main.committed, _mi_commit_mask_committed_size(&cmask, MI_SEGMENT_SIZE)); // adjust for overlap + if (!_mi_os_commit(start, full_size, &is_zero, stats)) return false; + mi_commit_mask_set(&segment->commit_mask, &mask); + } + + // increase purge expiration when using part of delayed purges -- we assume more allocations are coming soon. 
+ if (mi_commit_mask_any_set(&segment->purge_mask, &mask)) { + segment->purge_expire = _mi_clock_now() + mi_option_get(mi_option_purge_delay); + } + + // always clear any delayed purges in our range (as they are either committed now) + mi_commit_mask_clear(&segment->purge_mask, &mask); + return true; +} + +static bool mi_segment_ensure_committed(mi_segment_t* segment, uint8_t* p, size_t size, mi_stats_t* stats) { + mi_assert_internal(mi_commit_mask_all_set(&segment->commit_mask, &segment->purge_mask)); + // note: assumes commit_mask is always full for huge segments as otherwise the commit mask bits can overflow + if (mi_commit_mask_is_full(&segment->commit_mask) && mi_commit_mask_is_empty(&segment->purge_mask)) return true; // fully committed + mi_assert_internal(segment->kind != MI_SEGMENT_HUGE); + return mi_segment_commit(segment, p, size, stats); +} + +static bool mi_segment_purge(mi_segment_t* segment, uint8_t* p, size_t size, mi_stats_t* stats) { + mi_assert_internal(mi_commit_mask_all_set(&segment->commit_mask, &segment->purge_mask)); + if (!segment->allow_purge) return true; + + // purge conservative + uint8_t* start = NULL; + size_t full_size = 0; + mi_commit_mask_t mask; + mi_segment_commit_mask(segment, true /* conservative? 
*/, p, size, &start, &full_size, &mask); + if (mi_commit_mask_is_empty(&mask) || full_size==0) return true; + + if (mi_commit_mask_any_set(&segment->commit_mask, &mask)) { + // purging + mi_assert_internal((void*)start != (void*)segment); + mi_assert_internal(segment->allow_decommit); + const bool decommitted = _mi_os_purge(start, full_size, stats); // reset or decommit + if (decommitted) { + mi_commit_mask_t cmask; + mi_commit_mask_create_intersect(&segment->commit_mask, &mask, &cmask); + _mi_stat_increase(&_mi_stats_main.committed, full_size - _mi_commit_mask_committed_size(&cmask, MI_SEGMENT_SIZE)); // adjust for double counting + mi_commit_mask_clear(&segment->commit_mask, &mask); + } + } + + // always clear any scheduled purges in our range + mi_commit_mask_clear(&segment->purge_mask, &mask); + return true; +} + +static void mi_segment_schedule_purge(mi_segment_t* segment, uint8_t* p, size_t size, mi_stats_t* stats) { + if (!segment->allow_purge) return; + + if (mi_option_get(mi_option_purge_delay) == 0) { + mi_segment_purge(segment, p, size, stats); + } + else { + // register for future purge in the purge mask + uint8_t* start = NULL; + size_t full_size = 0; + mi_commit_mask_t mask; + mi_segment_commit_mask(segment, true /*conservative*/, p, size, &start, &full_size, &mask); + if (mi_commit_mask_is_empty(&mask) || full_size==0) return; + + // update delayed commit + mi_assert_internal(segment->purge_expire > 0 || mi_commit_mask_is_empty(&segment->purge_mask)); + mi_commit_mask_t cmask; + mi_commit_mask_create_intersect(&segment->commit_mask, &mask, &cmask); // only purge what is committed; span_free may try to decommit more + mi_commit_mask_set(&segment->purge_mask, &cmask); + mi_msecs_t now = _mi_clock_now(); + if (segment->purge_expire == 0) { + // no previous purgess, initialize now + segment->purge_expire = now + mi_option_get(mi_option_purge_delay); + } + else if (segment->purge_expire <= now) { + // previous purge mask already expired + if 
(segment->purge_expire + mi_option_get(mi_option_purge_extend_delay) <= now) { + mi_segment_try_purge(segment, true, stats); + } + else { + segment->purge_expire = now + mi_option_get(mi_option_purge_extend_delay); // (mi_option_get(mi_option_purge_delay) / 8); // wait a tiny bit longer in case there is a series of free's + } + } + else { + // previous purge mask is not yet expired, increase the expiration by a bit. + segment->purge_expire += mi_option_get(mi_option_purge_extend_delay); + } + } +} + +static void mi_segment_try_purge(mi_segment_t* segment, bool force, mi_stats_t* stats) { + if (!segment->allow_purge || segment->purge_expire == 0 || mi_commit_mask_is_empty(&segment->purge_mask)) return; + mi_msecs_t now = _mi_clock_now(); + if (!force && now < segment->purge_expire) return; + + mi_commit_mask_t mask = segment->purge_mask; + segment->purge_expire = 0; + mi_commit_mask_create_empty(&segment->purge_mask); + + size_t idx; + size_t count; + mi_commit_mask_foreach(&mask, idx, count) { + // if found, decommit that sequence + if (count > 0) { + uint8_t* p = (uint8_t*)segment + (idx*MI_COMMIT_SIZE); + size_t size = count * MI_COMMIT_SIZE; + mi_segment_purge(segment, p, size, stats); + } + } + mi_commit_mask_foreach_end() + mi_assert_internal(mi_commit_mask_is_empty(&segment->purge_mask)); +} + +// called from `mi_heap_collect_ex` +// this can be called per-page so it is important that try_purge has fast exit path +void _mi_segment_collect(mi_segment_t* segment, bool force, mi_segments_tld_t* tld) { + mi_segment_try_purge(segment, force, tld->stats); +} + +/* ----------------------------------------------------------- + Span free +----------------------------------------------------------- */ + +static bool mi_segment_is_abandoned(mi_segment_t* segment) { + return (mi_atomic_load_relaxed(&segment->thread_id) == 0); +} + +// note: can be called on abandoned segments +static void mi_segment_span_free(mi_segment_t* segment, size_t slice_index, size_t slice_count, 
bool allow_purge, mi_segments_tld_t* tld) { + mi_assert_internal(slice_index < segment->slice_entries); + mi_span_queue_t* sq = (segment->kind == MI_SEGMENT_HUGE || mi_segment_is_abandoned(segment) + ? NULL : mi_span_queue_for(slice_count,tld)); + if (slice_count==0) slice_count = 1; + mi_assert_internal(slice_index + slice_count - 1 < segment->slice_entries); + + // set first and last slice (the intermediates can be undetermined) + mi_slice_t* slice = &segment->slices[slice_index]; + slice->slice_count = (uint32_t)slice_count; + mi_assert_internal(slice->slice_count == slice_count); // no overflow? + slice->slice_offset = 0; + if (slice_count > 1) { + mi_slice_t* last = slice + slice_count - 1; + mi_slice_t* end = (mi_slice_t*)mi_segment_slices_end(segment); + if (last > end) { last = end; } + last->slice_count = 0; + last->slice_offset = (uint32_t)(sizeof(mi_page_t)*(slice_count - 1)); + last->block_size = 0; + } + + // perhaps decommit + if (allow_purge) { + mi_segment_schedule_purge(segment, mi_slice_start(slice), slice_count * MI_SEGMENT_SLICE_SIZE, tld->stats); + } + + // and push it on the free page queue (if it was not a huge page) + if (sq != NULL) mi_span_queue_push( sq, slice ); + else slice->block_size = 0; // mark huge page as free anyways +} + +/* +// called from reclaim to add existing free spans +static void mi_segment_span_add_free(mi_slice_t* slice, mi_segments_tld_t* tld) { + mi_segment_t* segment = _mi_ptr_segment(slice); + mi_assert_internal(slice->xblock_size==0 && slice->slice_count>0 && slice->slice_offset==0); + size_t slice_index = mi_slice_index(slice); + mi_segment_span_free(segment,slice_index,slice->slice_count,tld); +} +*/ + +static void mi_segment_span_remove_from_queue(mi_slice_t* slice, mi_segments_tld_t* tld) { + mi_assert_internal(slice->slice_count > 0 && slice->slice_offset==0 && slice->block_size==0); + mi_assert_internal(_mi_ptr_segment(slice)->kind != MI_SEGMENT_HUGE); + mi_span_queue_t* sq = 
mi_span_queue_for(slice->slice_count, tld); + mi_span_queue_delete(sq, slice); +} + +// note: can be called on abandoned segments +static mi_slice_t* mi_segment_span_free_coalesce(mi_slice_t* slice, mi_segments_tld_t* tld) { + mi_assert_internal(slice != NULL && slice->slice_count > 0 && slice->slice_offset == 0); + mi_segment_t* const segment = _mi_ptr_segment(slice); + const bool is_abandoned = (segment->thread_id == 0); // mi_segment_is_abandoned(segment); + + // for huge pages, just mark as free but don't add to the queues + if (segment->kind == MI_SEGMENT_HUGE) { + // issue #691: segment->used can be 0 if the huge page block was freed while abandoned (reclaim will get here in that case) + mi_assert_internal((segment->used==0 && slice->block_size==0) || segment->used == 1); // decreased right after this call in `mi_segment_page_clear` + slice->block_size = 0; // mark as free anyways + // we should mark the last slice `xblock_size=0` now to maintain invariants but we skip it to + // avoid a possible cache miss (and the segment is about to be freed) + return slice; + } + + // otherwise coalesce the span and add to the free span queues + size_t slice_count = slice->slice_count; + mi_slice_t* next = slice + slice->slice_count; + mi_assert_internal(next <= mi_segment_slices_end(segment)); + if (next < mi_segment_slices_end(segment) && next->block_size==0) { + // free next block -- remove it from free and merge + mi_assert_internal(next->slice_count > 0 && next->slice_offset==0); + slice_count += next->slice_count; // extend + if (!is_abandoned) { mi_segment_span_remove_from_queue(next, tld); } + } + if (slice > segment->slices) { + mi_slice_t* prev = mi_slice_first(slice - 1); + mi_assert_internal(prev >= segment->slices); + if (prev->block_size==0) { + // free previous slice -- remove it from free and merge + mi_assert_internal(prev->slice_count > 0 && prev->slice_offset==0); + slice_count += prev->slice_count; + if (!is_abandoned) { 
mi_segment_span_remove_from_queue(prev, tld); } + slice = prev; + } + } + + // and add the new free page + mi_segment_span_free(segment, mi_slice_index(slice), slice_count, true, tld); + return slice; +} + + + +/* ----------------------------------------------------------- + Page allocation +----------------------------------------------------------- */ + +// Note: may still return NULL if committing the memory failed +static mi_page_t* mi_segment_span_allocate(mi_segment_t* segment, size_t slice_index, size_t slice_count, mi_segments_tld_t* tld) { + mi_assert_internal(slice_index < segment->slice_entries); + mi_slice_t* const slice = &segment->slices[slice_index]; + mi_assert_internal(slice->block_size==0 || slice->block_size==1); + + // commit before changing the slice data + if (!mi_segment_ensure_committed(segment, _mi_segment_page_start_from_slice(segment, slice, 0, NULL), slice_count * MI_SEGMENT_SLICE_SIZE, tld->stats)) { + return NULL; // commit failed! + } + + // convert the slices to a page + slice->slice_offset = 0; + slice->slice_count = (uint32_t)slice_count; + mi_assert_internal(slice->slice_count == slice_count); + const size_t bsize = slice_count * MI_SEGMENT_SLICE_SIZE; + slice->block_size = bsize; + mi_page_t* page = mi_slice_to_page(slice); + mi_assert_internal(mi_page_block_size(page) == bsize); + + // set slice back pointers for the first MI_MAX_SLICE_OFFSET_COUNT entries + size_t extra = slice_count-1; + if (extra > MI_MAX_SLICE_OFFSET_COUNT) extra = MI_MAX_SLICE_OFFSET_COUNT; + if (slice_index + extra >= segment->slice_entries) extra = segment->slice_entries - slice_index - 1; // huge objects may have more slices than avaiable entries in the segment->slices + + mi_slice_t* slice_next = slice + 1; + for (size_t i = 1; i <= extra; i++, slice_next++) { + slice_next->slice_offset = (uint32_t)(sizeof(mi_slice_t)*i); + slice_next->slice_count = 0; + slice_next->block_size = 1; + } + + // and also for the last one (if not set already) (the last one 
is needed for coalescing and for large alignments) + // note: the cast is needed for ubsan since the index can be larger than MI_SLICES_PER_SEGMENT for huge allocations (see #543) + mi_slice_t* last = slice + slice_count - 1; + mi_slice_t* end = (mi_slice_t*)mi_segment_slices_end(segment); + if (last > end) last = end; + if (last > slice) { + last->slice_offset = (uint32_t)(sizeof(mi_slice_t) * (last - slice)); + last->slice_count = 0; + last->block_size = 1; + } + + // and initialize the page + page->is_committed = true; + page->is_huge = (segment->kind == MI_SEGMENT_HUGE); + segment->used++; + return page; +} + +static void mi_segment_slice_split(mi_segment_t* segment, mi_slice_t* slice, size_t slice_count, mi_segments_tld_t* tld) { + mi_assert_internal(_mi_ptr_segment(slice) == segment); + mi_assert_internal(slice->slice_count >= slice_count); + mi_assert_internal(slice->block_size > 0); // no more in free queue + if (slice->slice_count <= slice_count) return; + mi_assert_internal(segment->kind != MI_SEGMENT_HUGE); + size_t next_index = mi_slice_index(slice) + slice_count; + size_t next_count = slice->slice_count - slice_count; + mi_segment_span_free(segment, next_index, next_count, false /* don't purge left-over part */, tld); + slice->slice_count = (uint32_t)slice_count; +} + +static mi_page_t* mi_segments_page_find_and_allocate(size_t slice_count, mi_arena_id_t req_arena_id, mi_segments_tld_t* tld) { + mi_assert_internal(slice_count*MI_SEGMENT_SLICE_SIZE <= MI_LARGE_OBJ_SIZE_MAX); + // search from best fit up + mi_span_queue_t* sq = mi_span_queue_for(slice_count, tld); + if (slice_count == 0) slice_count = 1; + while (sq <= &tld->spans[MI_SEGMENT_BIN_MAX]) { + for (mi_slice_t* slice = sq->first; slice != NULL; slice = slice->next) { + if (slice->slice_count >= slice_count) { + // found one + mi_segment_t* segment = _mi_ptr_segment(slice); + if (_mi_arena_memid_is_suitable(segment->memid, req_arena_id)) { + // found a suitable page span + 
mi_span_queue_delete(sq, slice); + + if (slice->slice_count > slice_count) { + mi_segment_slice_split(segment, slice, slice_count, tld); + } + mi_assert_internal(slice != NULL && slice->slice_count == slice_count && slice->block_size > 0); + mi_page_t* page = mi_segment_span_allocate(segment, mi_slice_index(slice), slice->slice_count, tld); + if (page == NULL) { + // commit failed; return NULL but first restore the slice + mi_segment_span_free_coalesce(slice, tld); + return NULL; + } + return page; + } + } + } + sq++; + } + // could not find a page.. + return NULL; +} + + +/* ----------------------------------------------------------- + Segment allocation +----------------------------------------------------------- */ + +static mi_segment_t* mi_segment_os_alloc( size_t required, size_t page_alignment, bool eager_delayed, mi_arena_id_t req_arena_id, + size_t* psegment_slices, size_t* pinfo_slices, + bool commit, mi_segments_tld_t* tld, mi_os_tld_t* os_tld) + +{ + mi_memid_t memid; + bool allow_large = (!eager_delayed && (MI_SECURE == 0)); // only allow large OS pages once we are no longer lazy + size_t align_offset = 0; + size_t alignment = MI_SEGMENT_ALIGN; + + if (page_alignment > 0) { + // mi_assert_internal(huge_page != NULL); + mi_assert_internal(page_alignment >= MI_SEGMENT_ALIGN); + alignment = page_alignment; + const size_t info_size = (*pinfo_slices) * MI_SEGMENT_SLICE_SIZE; + align_offset = _mi_align_up( info_size, MI_SEGMENT_ALIGN ); + const size_t extra = align_offset - info_size; + // recalculate due to potential guard pages + *psegment_slices = mi_segment_calculate_slices(required + extra, pinfo_slices); + mi_assert_internal(*psegment_slices > 0 && *psegment_slices <= UINT32_MAX); + } + + const size_t segment_size = (*psegment_slices) * MI_SEGMENT_SLICE_SIZE; + mi_segment_t* segment = (mi_segment_t*)_mi_arena_alloc_aligned(segment_size, alignment, align_offset, commit, allow_large, req_arena_id, &memid, os_tld); + if (segment == NULL) { + return NULL; 
// failed to allocate + } + + // ensure metadata part of the segment is committed + mi_commit_mask_t commit_mask; + if (memid.initially_committed) { + mi_commit_mask_create_full(&commit_mask); + } + else { + // at least commit the info slices + const size_t commit_needed = _mi_divide_up((*pinfo_slices)*MI_SEGMENT_SLICE_SIZE, MI_COMMIT_SIZE); + mi_assert_internal(commit_needed>0); + mi_commit_mask_create(0, commit_needed, &commit_mask); + mi_assert_internal(commit_needed*MI_COMMIT_SIZE >= (*pinfo_slices)*MI_SEGMENT_SLICE_SIZE); + if (!_mi_os_commit(segment, commit_needed*MI_COMMIT_SIZE, NULL, tld->stats)) { + _mi_arena_free(segment,segment_size,0,memid,tld->stats); + return NULL; + } + } + mi_assert_internal(segment != NULL && (uintptr_t)segment % MI_SEGMENT_SIZE == 0); + + segment->memid = memid; + segment->allow_decommit = !memid.is_pinned; + segment->allow_purge = segment->allow_decommit && (mi_option_get(mi_option_purge_delay) >= 0); + segment->segment_size = segment_size; + segment->commit_mask = commit_mask; + segment->purge_expire = 0; + mi_commit_mask_create_empty(&segment->purge_mask); + + mi_segments_track_size((long)(segment_size), tld); + _mi_segment_map_allocated_at(segment); + return segment; +} + + +// Allocate a segment from the OS aligned to `MI_SEGMENT_SIZE` . 
+static mi_segment_t* mi_segment_alloc(size_t required, size_t page_alignment, mi_arena_id_t req_arena_id, mi_segments_tld_t* tld, mi_os_tld_t* os_tld, mi_page_t** huge_page) +{ + mi_assert_internal((required==0 && huge_page==NULL) || (required>0 && huge_page != NULL)); + + // calculate needed sizes first + size_t info_slices; + size_t segment_slices = mi_segment_calculate_slices(required, &info_slices); + mi_assert_internal(segment_slices > 0 && segment_slices <= UINT32_MAX); + + // Commit eagerly only if not the first N lazy segments (to reduce impact of many threads that allocate just a little) + const bool eager_delay = (// !_mi_os_has_overcommit() && // never delay on overcommit systems + _mi_current_thread_count() > 1 && // do not delay for the first N threads + tld->count < (size_t)mi_option_get(mi_option_eager_commit_delay)); + const bool eager = !eager_delay && mi_option_is_enabled(mi_option_eager_commit); + bool commit = eager || (required > 0); + + // Allocate the segment from the OS + mi_segment_t* segment = mi_segment_os_alloc(required, page_alignment, eager_delay, req_arena_id, + &segment_slices, &info_slices, commit, tld, os_tld); + if (segment == NULL) return NULL; + + // zero the segment info? -- not always needed as it may be zero initialized from the OS + if (!segment->memid.initially_zero) { + ptrdiff_t ofs = offsetof(mi_segment_t, next); + size_t prefix = offsetof(mi_segment_t, slices) - ofs; + size_t zsize = prefix + (sizeof(mi_slice_t) * (segment_slices + 1)); // one more + _mi_memzero((uint8_t*)segment + ofs, zsize); + } + + // initialize the rest of the segment info + const size_t slice_entries = (segment_slices > MI_SLICES_PER_SEGMENT ? MI_SLICES_PER_SEGMENT : segment_slices); + segment->segment_slices = segment_slices; + segment->segment_info_slices = info_slices; + segment->thread_id = _mi_thread_id(); + segment->cookie = _mi_ptr_cookie(segment); + segment->slice_entries = slice_entries; + segment->kind = (required == 0 ? 
MI_SEGMENT_NORMAL : MI_SEGMENT_HUGE); + + // _mi_memzero(segment->slices, sizeof(mi_slice_t)*(info_slices+1)); + _mi_stat_increase(&tld->stats->page_committed, mi_segment_info_size(segment)); + + // set up guard pages + size_t guard_slices = 0; + if (MI_SECURE>0) { + // in secure mode, we set up a protected page in between the segment info + // and the page data, and at the end of the segment. + size_t os_pagesize = _mi_os_page_size(); + _mi_os_protect((uint8_t*)segment + mi_segment_info_size(segment) - os_pagesize, os_pagesize); + uint8_t* end = (uint8_t*)segment + mi_segment_size(segment) - os_pagesize; + mi_segment_ensure_committed(segment, end, os_pagesize, tld->stats); + _mi_os_protect(end, os_pagesize); + if (slice_entries == segment_slices) segment->slice_entries--; // don't use the last slice :-( + guard_slices = 1; + } + + // reserve first slices for segment info + mi_page_t* page0 = mi_segment_span_allocate(segment, 0, info_slices, tld); + mi_assert_internal(page0!=NULL); if (page0==NULL) return NULL; // cannot fail as we always commit in advance + mi_assert_internal(segment->used == 1); + segment->used = 0; // don't count our internal slices towards usage + + // initialize initial free pages + if (segment->kind == MI_SEGMENT_NORMAL) { // not a huge page + mi_assert_internal(huge_page==NULL); + mi_segment_span_free(segment, info_slices, segment->slice_entries - info_slices, false /* don't purge */, tld); + } + else { + mi_assert_internal(huge_page!=NULL); + mi_assert_internal(mi_commit_mask_is_empty(&segment->purge_mask)); + mi_assert_internal(mi_commit_mask_is_full(&segment->commit_mask)); + *huge_page = mi_segment_span_allocate(segment, info_slices, segment_slices - info_slices - guard_slices, tld); + mi_assert_internal(*huge_page != NULL); // cannot fail as we commit in advance + } + + mi_assert_expensive(mi_segment_is_valid(segment,tld)); + return segment; +} + + +static void mi_segment_free(mi_segment_t* segment, bool force, mi_segments_tld_t* tld) { 
+ MI_UNUSED(force); + mi_assert_internal(segment != NULL); + mi_assert_internal(segment->next == NULL); + mi_assert_internal(segment->used == 0); + + // Remove the free pages + mi_slice_t* slice = &segment->slices[0]; + const mi_slice_t* end = mi_segment_slices_end(segment); + #if MI_DEBUG>1 + size_t page_count = 0; + #endif + while (slice < end) { + mi_assert_internal(slice->slice_count > 0); + mi_assert_internal(slice->slice_offset == 0); + mi_assert_internal(mi_slice_index(slice)==0 || slice->block_size == 0); // no more used pages .. + if (slice->block_size == 0 && segment->kind != MI_SEGMENT_HUGE) { + mi_segment_span_remove_from_queue(slice, tld); + } + #if MI_DEBUG>1 + page_count++; + #endif + slice = slice + slice->slice_count; + } + mi_assert_internal(page_count == 2); // first page is allocated by the segment itself + + // stats + _mi_stat_decrease(&tld->stats->page_committed, mi_segment_info_size(segment)); + + // return it to the OS + mi_segment_os_free(segment, tld); +} + + +/* ----------------------------------------------------------- + Page Free +----------------------------------------------------------- */ + +static void mi_segment_abandon(mi_segment_t* segment, mi_segments_tld_t* tld); + +// note: can be called on abandoned pages +static mi_slice_t* mi_segment_page_clear(mi_page_t* page, mi_segments_tld_t* tld) { + mi_assert_internal(page->block_size > 0); + mi_assert_internal(mi_page_all_free(page)); + mi_segment_t* segment = _mi_ptr_segment(page); + mi_assert_internal(segment->used > 0); + + size_t inuse = page->capacity * mi_page_block_size(page); + _mi_stat_decrease(&tld->stats->page_committed, inuse); + _mi_stat_decrease(&tld->stats->pages, 1); + + // reset the page memory to reduce memory pressure? 
+ if (segment->allow_decommit && mi_option_is_enabled(mi_option_deprecated_page_reset)) { + size_t psize; + uint8_t* start = _mi_segment_page_start(segment, page, &psize); + _mi_os_reset(start, psize, tld->stats); + } + + // zero the page data, but not the segment fields and heap tag + page->is_zero_init = false; + uint8_t heap_tag = page->heap_tag; + ptrdiff_t ofs = offsetof(mi_page_t, capacity); + _mi_memzero((uint8_t*)page + ofs, sizeof(*page) - ofs); + page->block_size = 1; + page->heap_tag = heap_tag; + + // and free it + mi_slice_t* slice = mi_segment_span_free_coalesce(mi_page_to_slice(page), tld); + segment->used--; + // cannot assert segment valid as it is called during reclaim + // mi_assert_expensive(mi_segment_is_valid(segment, tld)); + return slice; +} + +void _mi_segment_page_free(mi_page_t* page, bool force, mi_segments_tld_t* tld) +{ + mi_assert(page != NULL); + + mi_segment_t* segment = _mi_page_segment(page); + mi_assert_expensive(mi_segment_is_valid(segment,tld)); + + // mark it as free now + mi_segment_page_clear(page, tld); + mi_assert_expensive(mi_segment_is_valid(segment, tld)); + + if (segment->used == 0) { + // no more used pages; remove from the free list and free the segment + mi_segment_free(segment, force, tld); + } + else if (segment->used == segment->abandoned) { + // only abandoned pages; remove from free list and abandon + mi_segment_abandon(segment,tld); + } + else { + // perform delayed purges + mi_segment_try_purge(segment, false /* force? */, tld->stats); + } +} + + +/* ----------------------------------------------------------- +Abandonment + +When threads terminate, they can leave segments with +live blocks (reachable through other threads). Such segments +are "abandoned" and will be reclaimed by other threads to +reuse their pages and/or free them eventually. The +`thread_id` of such segments is 0. + +When a block is freed in an abandoned segment, the segment +is reclaimed into that thread. 
+ +Moreover, if threads are looking for a fresh segment, they +will first consider abondoned segments -- these can be found +by scanning the arena memory +(segments outside arena memoryare only reclaimed by a free). +----------------------------------------------------------- */ + +// legacy: Wait until there are no more pending reads on segments that used to be in the abandoned list +void _mi_abandoned_await_readers(void) { + // nothing needed +} + +/* ----------------------------------------------------------- + Abandon segment/page +----------------------------------------------------------- */ + +static void mi_segment_abandon(mi_segment_t* segment, mi_segments_tld_t* tld) { + mi_assert_internal(segment->used == segment->abandoned); + mi_assert_internal(segment->used > 0); + mi_assert_internal(segment->abandoned_visits == 0); + mi_assert_expensive(mi_segment_is_valid(segment,tld)); + + // remove the free pages from the free page queues + mi_slice_t* slice = &segment->slices[0]; + const mi_slice_t* end = mi_segment_slices_end(segment); + while (slice < end) { + mi_assert_internal(slice->slice_count > 0); + mi_assert_internal(slice->slice_offset == 0); + if (slice->block_size == 0) { // a free page + mi_segment_span_remove_from_queue(slice,tld); + slice->block_size = 0; // but keep it free + } + slice = slice + slice->slice_count; + } + + // perform delayed decommits (forcing is much slower on mstress) + // Only abandoned segments in arena memory can be reclaimed without a free + // so if a segment is not from an arena we force purge here to be conservative. 
+ const bool force_purge = (segment->memid.memkind != MI_MEM_ARENA) || mi_option_is_enabled(mi_option_abandoned_page_purge); + mi_segment_try_purge(segment, force_purge, tld->stats); + + // all pages in the segment are abandoned; add it to the abandoned list + _mi_stat_increase(&tld->stats->segments_abandoned, 1); + mi_segments_track_size(-((long)mi_segment_size(segment)), tld); + segment->thread_id = 0; + segment->abandoned_visits = 1; // from 0 to 1 to signify it is abandoned + if (segment->was_reclaimed) { + tld->reclaim_count--; + segment->was_reclaimed = false; + } + _mi_arena_segment_mark_abandoned(segment); +} + +void _mi_segment_page_abandon(mi_page_t* page, mi_segments_tld_t* tld) { + mi_assert(page != NULL); + mi_assert_internal(mi_page_thread_free_flag(page)==MI_NEVER_DELAYED_FREE); + mi_assert_internal(mi_page_heap(page) == NULL); + mi_segment_t* segment = _mi_page_segment(page); + + mi_assert_expensive(mi_segment_is_valid(segment,tld)); + segment->abandoned++; + + _mi_stat_increase(&tld->stats->pages_abandoned, 1); + mi_assert_internal(segment->abandoned <= segment->used); + if (segment->used == segment->abandoned) { + // all pages are abandoned, abandon the entire segment + mi_segment_abandon(segment, tld); + } +} + +/* ----------------------------------------------------------- + Reclaim abandoned pages +----------------------------------------------------------- */ + +static mi_slice_t* mi_slices_start_iterate(mi_segment_t* segment, const mi_slice_t** end) { + mi_slice_t* slice = &segment->slices[0]; + *end = mi_segment_slices_end(segment); + mi_assert_internal(slice->slice_count>0 && slice->block_size>0); // segment allocated page + slice = slice + slice->slice_count; // skip the first segment allocated page + return slice; +} + +// Possibly free pages and check if free space is available +static bool mi_segment_check_free(mi_segment_t* segment, size_t slices_needed, size_t block_size, mi_segments_tld_t* tld) +{ + 
mi_assert_internal(mi_segment_is_abandoned(segment)); + bool has_page = false; + + // for all slices + const mi_slice_t* end; + mi_slice_t* slice = mi_slices_start_iterate(segment, &end); + while (slice < end) { + mi_assert_internal(slice->slice_count > 0); + mi_assert_internal(slice->slice_offset == 0); + if (mi_slice_is_used(slice)) { // used page + // ensure used count is up to date and collect potential concurrent frees + mi_page_t* const page = mi_slice_to_page(slice); + _mi_page_free_collect(page, false); + if (mi_page_all_free(page)) { + // if this page is all free now, free it without adding to any queues (yet) + mi_assert_internal(page->next == NULL && page->prev==NULL); + _mi_stat_decrease(&tld->stats->pages_abandoned, 1); + segment->abandoned--; + slice = mi_segment_page_clear(page, tld); // re-assign slice due to coalesce! + mi_assert_internal(!mi_slice_is_used(slice)); + if (slice->slice_count >= slices_needed) { + has_page = true; + } + } + else if (mi_page_block_size(page) == block_size && mi_page_has_any_available(page)) { + // a page has available free blocks of the right size + has_page = true; + } + } + else { + // empty span + if (slice->slice_count >= slices_needed) { + has_page = true; + } + } + slice = slice + slice->slice_count; + } + return has_page; +} + +// Reclaim an abandoned segment; returns NULL if the segment was freed +// set `right_page_reclaimed` to `true` if it reclaimed a page of the right `block_size` that was not full. +static mi_segment_t* mi_segment_reclaim(mi_segment_t* segment, mi_heap_t* heap, size_t requested_block_size, bool* right_page_reclaimed, mi_segments_tld_t* tld) { + if (right_page_reclaimed != NULL) { *right_page_reclaimed = false; } + // can be 0 still with abandoned_next, or already a thread id for segments outside an arena that are reclaimed on a free. 
+ mi_assert_internal(mi_atomic_load_relaxed(&segment->thread_id) == 0 || mi_atomic_load_relaxed(&segment->thread_id) == _mi_thread_id()); + mi_atomic_store_release(&segment->thread_id, _mi_thread_id()); + segment->abandoned_visits = 0; + segment->was_reclaimed = true; + tld->reclaim_count++; + mi_segments_track_size((long)mi_segment_size(segment), tld); + mi_assert_internal(segment->next == NULL); + _mi_stat_decrease(&tld->stats->segments_abandoned, 1); + + // for all slices + const mi_slice_t* end; + mi_slice_t* slice = mi_slices_start_iterate(segment, &end); + while (slice < end) { + mi_assert_internal(slice->slice_count > 0); + mi_assert_internal(slice->slice_offset == 0); + if (mi_slice_is_used(slice)) { + // in use: reclaim the page in our heap + mi_page_t* page = mi_slice_to_page(slice); + mi_assert_internal(page->is_committed); + mi_assert_internal(mi_page_thread_free_flag(page)==MI_NEVER_DELAYED_FREE); + mi_assert_internal(mi_page_heap(page) == NULL); + mi_assert_internal(page->next == NULL && page->prev==NULL); + _mi_stat_decrease(&tld->stats->pages_abandoned, 1); + segment->abandoned--; + // set the heap again and allow heap thread delayed free again. 
+ mi_heap_t* target_heap = _mi_heap_by_tag(heap, page->heap_tag); // allow custom heaps to separate objects + if (target_heap == NULL) { + target_heap = heap; + _mi_error_message(EINVAL, "page with tag %u cannot be reclaimed by a heap with the same tag (using %u instead)\n", page->heap_tag, heap->tag ); + } + mi_page_set_heap(page, target_heap); + _mi_page_use_delayed_free(page, MI_USE_DELAYED_FREE, true); // override never (after heap is set) + _mi_page_free_collect(page, false); // ensure used count is up to date + if (mi_page_all_free(page)) { + // if everything free by now, free the page + slice = mi_segment_page_clear(page, tld); // set slice again due to coalesceing + } + else { + // otherwise reclaim it into the heap + _mi_page_reclaim(target_heap, page); + if (requested_block_size == mi_page_block_size(page) && mi_page_has_any_available(page) && heap == target_heap) { + if (right_page_reclaimed != NULL) { *right_page_reclaimed = true; } + } + } + } + else { + // the span is free, add it to our page queues + slice = mi_segment_span_free_coalesce(slice, tld); // set slice again due to coalesceing + } + mi_assert_internal(slice->slice_count>0 && slice->slice_offset==0); + slice = slice + slice->slice_count; + } + + mi_assert(segment->abandoned == 0); + mi_assert_expensive(mi_segment_is_valid(segment, tld)); + if (segment->used == 0) { // due to page_clear + mi_assert_internal(right_page_reclaimed == NULL || !(*right_page_reclaimed)); + mi_segment_free(segment, false, tld); + return NULL; + } + else { + return segment; + } +} + +// attempt to reclaim a particular segment (called from multi threaded free `alloc.c:mi_free_block_mt`) +bool _mi_segment_attempt_reclaim(mi_heap_t* heap, mi_segment_t* segment) { + if (mi_atomic_load_relaxed(&segment->thread_id) != 0) return false; // it is not abandoned + // don't reclaim more from a free than half the current segments + // this is to prevent a pure free-ing thread to start owning too many segments + if 
(heap->tld->segments.reclaim_count * 2 > heap->tld->segments.count) return false; + if (_mi_arena_segment_clear_abandoned(segment)) { // atomically unabandon + mi_segment_t* res = mi_segment_reclaim(segment, heap, 0, NULL, &heap->tld->segments); + mi_assert_internal(res == segment); + return (res != NULL); + } + return false; +} + +void _mi_abandoned_reclaim_all(mi_heap_t* heap, mi_segments_tld_t* tld) { + mi_segment_t* segment; + mi_arena_field_cursor_t current; _mi_arena_field_cursor_init(heap, ¤t); + while ((segment = _mi_arena_segment_clear_abandoned_next(¤t)) != NULL) { + mi_segment_reclaim(segment, heap, 0, NULL, tld); + } +} + +static long mi_segment_get_reclaim_tries(void) { + // limit the tries to 10% (default) of the abandoned segments with at least 8 and at most 1024 tries. + const size_t perc = (size_t)mi_option_get_clamp(mi_option_max_segment_reclaim, 0, 100); + if (perc <= 0) return 0; + const size_t total_count = _mi_arena_segment_abandoned_count(); + if (total_count == 0) return 0; + const size_t relative_count = (total_count > 10000 ? (total_count / 100) * perc : (total_count * perc) / 100); // avoid overflow + long max_tries = (long)(relative_count <= 1 ? 1 : (relative_count > 1024 ? 1024 : relative_count)); + if (max_tries < 8 && total_count > 8) { max_tries = 8; } + return max_tries; +} + +static mi_segment_t* mi_segment_try_reclaim(mi_heap_t* heap, size_t needed_slices, size_t block_size, bool* reclaimed, mi_segments_tld_t* tld) +{ + *reclaimed = false; + long max_tries = mi_segment_get_reclaim_tries(); + if (max_tries <= 0) return NULL; + + mi_segment_t* segment; + mi_arena_field_cursor_t current; _mi_arena_field_cursor_init(heap, ¤t); + while ((max_tries-- > 0) && ((segment = _mi_arena_segment_clear_abandoned_next(¤t)) != NULL)) + { + segment->abandoned_visits++; + // todo: should we respect numa affinity for abondoned reclaim? perhaps only for the first visit? 
+ // todo: an arena exclusive heap will potentially visit many abandoned unsuitable segments and use many tries + // Perhaps we can skip non-suitable ones in a better way? + bool is_suitable = _mi_heap_memid_is_suitable(heap, segment->memid); + bool has_page = mi_segment_check_free(segment,needed_slices,block_size,tld); // try to free up pages (due to concurrent frees) + if (segment->used == 0) { + // free the segment (by forced reclaim) to make it available to other threads. + // note1: we prefer to free a segment as that might lead to reclaiming another + // segment that is still partially used. + // note2: we could in principle optimize this by skipping reclaim and directly + // freeing but that would violate some invariants temporarily) + mi_segment_reclaim(segment, heap, 0, NULL, tld); + } + else if (has_page && is_suitable) { + // found a large enough free span, or a page of the right block_size with free space + // we return the result of reclaim (which is usually `segment`) as it might free + // the segment due to concurrent frees (in which case `NULL` is returned). + return mi_segment_reclaim(segment, heap, block_size, reclaimed, tld); + } + else if (segment->abandoned_visits > 3 && is_suitable) { + // always reclaim on 3rd visit to limit the abandoned queue length. + mi_segment_reclaim(segment, heap, 0, NULL, tld); + } + else { + // otherwise, push on the visited list so it gets not looked at too quickly again + mi_segment_try_purge(segment, false /* true force? */, tld->stats); // force purge if needed as we may not visit soon again + _mi_arena_segment_mark_abandoned(segment); + } + } + return NULL; +} + + +void _mi_abandoned_collect(mi_heap_t* heap, bool force, mi_segments_tld_t* tld) +{ + mi_segment_t* segment; + mi_arena_field_cursor_t current; _mi_arena_field_cursor_init(heap, ¤t); + long max_tries = (force ? 
(long)_mi_arena_segment_abandoned_count() : 1024); // limit latency + while ((max_tries-- > 0) && ((segment = _mi_arena_segment_clear_abandoned_next(¤t)) != NULL)) { + mi_segment_check_free(segment,0,0,tld); // try to free up pages (due to concurrent frees) + if (segment->used == 0) { + // free the segment (by forced reclaim) to make it available to other threads. + // note: we could in principle optimize this by skipping reclaim and directly + // freeing but that would violate some invariants temporarily) + mi_segment_reclaim(segment, heap, 0, NULL, tld); + } + else { + // otherwise, purge if needed and push on the visited list + // note: forced purge can be expensive if many threads are destroyed/created as in mstress. + mi_segment_try_purge(segment, force, tld->stats); + _mi_arena_segment_mark_abandoned(segment); + } + } +} + +/* ----------------------------------------------------------- + Reclaim or allocate +----------------------------------------------------------- */ + +static mi_segment_t* mi_segment_reclaim_or_alloc(mi_heap_t* heap, size_t needed_slices, size_t block_size, mi_segments_tld_t* tld, mi_os_tld_t* os_tld) +{ + mi_assert_internal(block_size <= MI_LARGE_OBJ_SIZE_MAX); + + // 1. try to reclaim an abandoned segment + bool reclaimed; + mi_segment_t* segment = mi_segment_try_reclaim(heap, needed_slices, block_size, &reclaimed, tld); + if (reclaimed) { + // reclaimed the right page right into the heap + mi_assert_internal(segment != NULL); + return NULL; // pretend out-of-memory as the page will be in the page queue of the heap with available blocks + } + else if (segment != NULL) { + // reclaimed a segment with a large enough empty span in it + return segment; + } + // 2. 
otherwise allocate a fresh segment + return mi_segment_alloc(0, 0, heap->arena_id, tld, os_tld, NULL); +} + + +/* ----------------------------------------------------------- + Page allocation +----------------------------------------------------------- */ + +static mi_page_t* mi_segments_page_alloc(mi_heap_t* heap, mi_page_kind_t page_kind, size_t required, size_t block_size, mi_segments_tld_t* tld, mi_os_tld_t* os_tld) +{ + mi_assert_internal(required <= MI_LARGE_OBJ_SIZE_MAX && page_kind <= MI_PAGE_LARGE); + + // find a free page + size_t page_size = _mi_align_up(required, (required > MI_MEDIUM_PAGE_SIZE ? MI_MEDIUM_PAGE_SIZE : MI_SEGMENT_SLICE_SIZE)); + size_t slices_needed = page_size / MI_SEGMENT_SLICE_SIZE; + mi_assert_internal(slices_needed * MI_SEGMENT_SLICE_SIZE == page_size); + mi_page_t* page = mi_segments_page_find_and_allocate(slices_needed, heap->arena_id, tld); //(required <= MI_SMALL_SIZE_MAX ? 0 : slices_needed), tld); + if (page==NULL) { + // no free page, allocate a new segment and try again + if (mi_segment_reclaim_or_alloc(heap, slices_needed, block_size, tld, os_tld) == NULL) { + // OOM or reclaimed a good page in the heap + return NULL; + } + else { + // otherwise try again + return mi_segments_page_alloc(heap, page_kind, required, block_size, tld, os_tld); + } + } + mi_assert_internal(page != NULL && page->slice_count*MI_SEGMENT_SLICE_SIZE == page_size); + mi_assert_internal(_mi_ptr_segment(page)->thread_id == _mi_thread_id()); + mi_segment_try_purge(_mi_ptr_segment(page), false, tld->stats); + return page; +} + + + +/* ----------------------------------------------------------- + Huge page allocation +----------------------------------------------------------- */ + +static mi_page_t* mi_segment_huge_page_alloc(size_t size, size_t page_alignment, mi_arena_id_t req_arena_id, mi_segments_tld_t* tld, mi_os_tld_t* os_tld) +{ + mi_page_t* page = NULL; + mi_segment_t* segment = mi_segment_alloc(size,page_alignment,req_arena_id,tld,os_tld,&page); + 
if (segment == NULL || page==NULL) return NULL; + mi_assert_internal(segment->used==1); + mi_assert_internal(mi_page_block_size(page) >= size); + #if MI_HUGE_PAGE_ABANDON + segment->thread_id = 0; // huge segments are immediately abandoned + #endif + + // for huge pages we initialize the block_size as we may + // overallocate to accommodate large alignments. + size_t psize; + uint8_t* start = _mi_segment_page_start(segment, page, &psize); + page->block_size = psize; + mi_assert_internal(page->is_huge); + + // decommit the part of the prefix of a page that will not be used; this can be quite large (close to MI_SEGMENT_SIZE) + if (page_alignment > 0 && segment->allow_decommit) { + uint8_t* aligned_p = (uint8_t*)_mi_align_up((uintptr_t)start, page_alignment); + mi_assert_internal(_mi_is_aligned(aligned_p, page_alignment)); + mi_assert_internal(psize - (aligned_p - start) >= size); + uint8_t* decommit_start = start + sizeof(mi_block_t); // for the free list + ptrdiff_t decommit_size = aligned_p - decommit_start; + _mi_os_reset(decommit_start, decommit_size, &_mi_stats_main); // note: cannot use segment_decommit on huge segments + } + + return page; +} + +#if MI_HUGE_PAGE_ABANDON +// free huge block from another thread +void _mi_segment_huge_page_free(mi_segment_t* segment, mi_page_t* page, mi_block_t* block) { + // huge page segments are always abandoned and can be freed immediately by any thread + mi_assert_internal(segment->kind==MI_SEGMENT_HUGE); + mi_assert_internal(segment == _mi_page_segment(page)); + mi_assert_internal(mi_atomic_load_relaxed(&segment->thread_id)==0); + + // claim it and free + mi_heap_t* heap = mi_heap_get_default(); // issue #221; don't use the internal get_default_heap as we need to ensure the thread is initialized. 
+ // paranoia: if this it the last reference, the cas should always succeed + size_t expected_tid = 0; + if (mi_atomic_cas_strong_acq_rel(&segment->thread_id, &expected_tid, heap->thread_id)) { + mi_block_set_next(page, block, page->free); + page->free = block; + page->used--; + page->is_zero_init = false; + mi_assert(page->used == 0); + mi_tld_t* tld = heap->tld; + _mi_segment_page_free(page, true, &tld->segments); + } +#if (MI_DEBUG!=0) + else { + mi_assert_internal(false); + } +#endif +} + +#else +// reset memory of a huge block from another thread +void _mi_segment_huge_page_reset(mi_segment_t* segment, mi_page_t* page, mi_block_t* block) { + MI_UNUSED(page); + mi_assert_internal(segment->kind == MI_SEGMENT_HUGE); + mi_assert_internal(segment == _mi_page_segment(page)); + mi_assert_internal(page->used == 1); // this is called just before the free + mi_assert_internal(page->free == NULL); + if (segment->allow_decommit) { + size_t csize = mi_usable_size(block); + if (csize > sizeof(mi_block_t)) { + csize = csize - sizeof(mi_block_t); + uint8_t* p = (uint8_t*)block + sizeof(mi_block_t); + _mi_os_reset(p, csize, &_mi_stats_main); // note: cannot use segment_decommit on huge segments + } + } +} +#endif + +/* ----------------------------------------------------------- + Page allocation and free +----------------------------------------------------------- */ +mi_page_t* _mi_segment_page_alloc(mi_heap_t* heap, size_t block_size, size_t page_alignment, mi_segments_tld_t* tld, mi_os_tld_t* os_tld) { + mi_page_t* page; + if mi_unlikely(page_alignment > MI_BLOCK_ALIGNMENT_MAX) { + mi_assert_internal(_mi_is_power_of_two(page_alignment)); + mi_assert_internal(page_alignment >= MI_SEGMENT_SIZE); + if (page_alignment < MI_SEGMENT_SIZE) { page_alignment = MI_SEGMENT_SIZE; } + page = mi_segment_huge_page_alloc(block_size,page_alignment,heap->arena_id,tld,os_tld); + } + else if (block_size <= MI_SMALL_OBJ_SIZE_MAX) { + page = 
mi_segments_page_alloc(heap,MI_PAGE_SMALL,block_size,block_size,tld,os_tld); + } + else if (block_size <= MI_MEDIUM_OBJ_SIZE_MAX) { + page = mi_segments_page_alloc(heap,MI_PAGE_MEDIUM,MI_MEDIUM_PAGE_SIZE,block_size,tld, os_tld); + } + else if (block_size <= MI_LARGE_OBJ_SIZE_MAX) { + page = mi_segments_page_alloc(heap,MI_PAGE_LARGE,block_size,block_size,tld, os_tld); + } + else { + page = mi_segment_huge_page_alloc(block_size,page_alignment,heap->arena_id,tld,os_tld); + } + mi_assert_internal(page == NULL || _mi_heap_memid_is_suitable(heap, _mi_page_segment(page)->memid)); + mi_assert_expensive(page == NULL || mi_segment_is_valid(_mi_page_segment(page),tld)); + return page; +} + + diff --git a/yass/third_party/mimalloc/src/static.c b/yass/third_party/mimalloc/src/static.c new file mode 100644 index 0000000000..bf025eb794 --- /dev/null +++ b/yass/third_party/mimalloc/src/static.c @@ -0,0 +1,41 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2018-2020, Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. +-----------------------------------------------------------------------------*/ +#ifndef _DEFAULT_SOURCE +#define _DEFAULT_SOURCE +#endif +#if defined(__sun) +// same remarks as os.c for the static's context. +#undef _XOPEN_SOURCE +#undef _POSIX_C_SOURCE +#endif + +#include "mimalloc.h" +#include "mimalloc/internal.h" + +// For a static override we create a single object file +// containing the whole library. If it is linked first +// it will override all the standard library allocation +// functions (on Unix's). 
+#include "alloc.c" // includes alloc-override.c +#include "alloc-aligned.c" +#include "alloc-posix.c" +#include "arena.c" +#include "bitmap.c" +#include "heap.c" +#include "init.c" +#include "libc.c" +#include "options.c" +#include "os.c" +#include "page.c" // includes page-queue.c +#include "random.c" +#include "segment.c" +#include "segment-map.c" +#include "stats.c" +#include "prim/prim.c" +#if MI_OSX_ZONE +#include "prim/osx/alloc-override-zone.c" +#endif diff --git a/yass/third_party/mimalloc/src/stats.c b/yass/third_party/mimalloc/src/stats.c new file mode 100644 index 0000000000..a936402744 --- /dev/null +++ b/yass/third_party/mimalloc/src/stats.c @@ -0,0 +1,467 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2018-2021, Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. 
+-----------------------------------------------------------------------------*/ +#include "mimalloc.h" +#include "mimalloc/internal.h" +#include "mimalloc/atomic.h" +#include "mimalloc/prim.h" + +#include // memset + +#if defined(_MSC_VER) && (_MSC_VER < 1920) +#pragma warning(disable:4204) // non-constant aggregate initializer +#endif + +/* ----------------------------------------------------------- + Statistics operations +----------------------------------------------------------- */ + +static bool mi_is_in_main(void* stat) { + return ((uint8_t*)stat >= (uint8_t*)&_mi_stats_main + && (uint8_t*)stat < ((uint8_t*)&_mi_stats_main + sizeof(mi_stats_t))); +} + +static void mi_stat_update(mi_stat_count_t* stat, int64_t amount) { + if (amount == 0) return; + if (mi_is_in_main(stat)) + { + // add atomically (for abandoned pages) + int64_t current = mi_atomic_addi64_relaxed(&stat->current, amount); + mi_atomic_maxi64_relaxed(&stat->peak, current + amount); + if (amount > 0) { + mi_atomic_addi64_relaxed(&stat->allocated,amount); + } + else { + mi_atomic_addi64_relaxed(&stat->freed, -amount); + } + } + else { + // add thread local + stat->current += amount; + if (stat->current > stat->peak) stat->peak = stat->current; + if (amount > 0) { + stat->allocated += amount; + } + else { + stat->freed += -amount; + } + } +} + +void _mi_stat_counter_increase(mi_stat_counter_t* stat, size_t amount) { + if (mi_is_in_main(stat)) { + mi_atomic_addi64_relaxed( &stat->count, 1 ); + mi_atomic_addi64_relaxed( &stat->total, (int64_t)amount ); + } + else { + stat->count++; + stat->total += amount; + } +} + +void _mi_stat_increase(mi_stat_count_t* stat, size_t amount) { + mi_stat_update(stat, (int64_t)amount); +} + +void _mi_stat_decrease(mi_stat_count_t* stat, size_t amount) { + mi_stat_update(stat, -((int64_t)amount)); +} + +// must be thread safe as it is called from stats_merge +static void mi_stat_add(mi_stat_count_t* stat, const mi_stat_count_t* src, int64_t unit) { + if (stat==src) 
return; + if (src->allocated==0 && src->freed==0) return; + mi_atomic_addi64_relaxed( &stat->allocated, src->allocated * unit); + mi_atomic_addi64_relaxed( &stat->current, src->current * unit); + mi_atomic_addi64_relaxed( &stat->freed, src->freed * unit); + // peak scores do not work across threads.. + mi_atomic_addi64_relaxed( &stat->peak, src->peak * unit); +} + +static void mi_stat_counter_add(mi_stat_counter_t* stat, const mi_stat_counter_t* src, int64_t unit) { + if (stat==src) return; + mi_atomic_addi64_relaxed( &stat->total, src->total * unit); + mi_atomic_addi64_relaxed( &stat->count, src->count * unit); +} + +// must be thread safe as it is called from stats_merge +static void mi_stats_add(mi_stats_t* stats, const mi_stats_t* src) { + if (stats==src) return; + mi_stat_add(&stats->segments, &src->segments,1); + mi_stat_add(&stats->pages, &src->pages,1); + mi_stat_add(&stats->reserved, &src->reserved, 1); + mi_stat_add(&stats->committed, &src->committed, 1); + mi_stat_add(&stats->reset, &src->reset, 1); + mi_stat_add(&stats->purged, &src->purged, 1); + mi_stat_add(&stats->page_committed, &src->page_committed, 1); + + mi_stat_add(&stats->pages_abandoned, &src->pages_abandoned, 1); + mi_stat_add(&stats->segments_abandoned, &src->segments_abandoned, 1); + mi_stat_add(&stats->threads, &src->threads, 1); + + mi_stat_add(&stats->malloc, &src->malloc, 1); + mi_stat_add(&stats->segments_cache, &src->segments_cache, 1); + mi_stat_add(&stats->normal, &src->normal, 1); + mi_stat_add(&stats->huge, &src->huge, 1); + mi_stat_add(&stats->large, &src->large, 1); + + mi_stat_counter_add(&stats->pages_extended, &src->pages_extended, 1); + mi_stat_counter_add(&stats->mmap_calls, &src->mmap_calls, 1); + mi_stat_counter_add(&stats->commit_calls, &src->commit_calls, 1); + mi_stat_counter_add(&stats->reset_calls, &src->reset_calls, 1); + mi_stat_counter_add(&stats->purge_calls, &src->purge_calls, 1); + + mi_stat_counter_add(&stats->page_no_retire, &src->page_no_retire, 1); + 
mi_stat_counter_add(&stats->searches, &src->searches, 1); + mi_stat_counter_add(&stats->normal_count, &src->normal_count, 1); + mi_stat_counter_add(&stats->huge_count, &src->huge_count, 1); + mi_stat_counter_add(&stats->large_count, &src->large_count, 1); +#if MI_STAT>1 + for (size_t i = 0; i <= MI_BIN_HUGE; i++) { + if (src->normal_bins[i].allocated > 0 || src->normal_bins[i].freed > 0) { + mi_stat_add(&stats->normal_bins[i], &src->normal_bins[i], 1); + } + } +#endif +} + +/* ----------------------------------------------------------- + Display statistics +----------------------------------------------------------- */ + +// unit > 0 : size in binary bytes +// unit == 0: count as decimal +// unit < 0 : count in binary +static void mi_printf_amount(int64_t n, int64_t unit, mi_output_fun* out, void* arg, const char* fmt) { + char buf[32]; buf[0] = 0; + int len = 32; + const char* suffix = (unit <= 0 ? " " : "B"); + const int64_t base = (unit == 0 ? 1000 : 1024); + if (unit>0) n *= unit; + + const int64_t pos = (n < 0 ? -n : n); + if (pos < base) { + if (n!=1 || suffix[0] != 'B') { // skip printing 1 B for the unit column + _mi_snprintf(buf, len, "%lld %-3s", (long long)n, (n==0 ? "" : suffix)); + } + } + else { + int64_t divider = base; + const char* magnitude = "K"; + if (pos >= divider*base) { divider *= base; magnitude = "M"; } + if (pos >= divider*base) { divider *= base; magnitude = "G"; } + const int64_t tens = (n / (divider/10)); + const long whole = (long)(tens/10); + const long frac1 = (long)(tens%10); + char unitdesc[8]; + _mi_snprintf(unitdesc, 8, "%s%s%s", magnitude, (base==1024 ? "i" : ""), suffix); + _mi_snprintf(buf, len, "%ld.%ld %-3s", whole, (frac1 < 0 ? -frac1 : frac1), unitdesc); + } + _mi_fprintf(out, arg, (fmt==NULL ? 
"%12s" : fmt), buf); +} + + +static void mi_print_amount(int64_t n, int64_t unit, mi_output_fun* out, void* arg) { + mi_printf_amount(n,unit,out,arg,NULL); +} + +static void mi_print_count(int64_t n, int64_t unit, mi_output_fun* out, void* arg) { + if (unit==1) _mi_fprintf(out, arg, "%12s"," "); + else mi_print_amount(n,0,out,arg); +} + +static void mi_stat_print_ex(const mi_stat_count_t* stat, const char* msg, int64_t unit, mi_output_fun* out, void* arg, const char* notok ) { + _mi_fprintf(out, arg,"%10s:", msg); + if (unit != 0) { + if (unit > 0) { + mi_print_amount(stat->peak, unit, out, arg); + mi_print_amount(stat->allocated, unit, out, arg); + mi_print_amount(stat->freed, unit, out, arg); + mi_print_amount(stat->current, unit, out, arg); + mi_print_amount(unit, 1, out, arg); + mi_print_count(stat->allocated, unit, out, arg); + } + else { + mi_print_amount(stat->peak, -1, out, arg); + mi_print_amount(stat->allocated, -1, out, arg); + mi_print_amount(stat->freed, -1, out, arg); + mi_print_amount(stat->current, -1, out, arg); + if (unit == -1) { + _mi_fprintf(out, arg, "%24s", ""); + } + else { + mi_print_amount(-unit, 1, out, arg); + mi_print_count((stat->allocated / -unit), 0, out, arg); + } + } + if (stat->allocated > stat->freed) { + _mi_fprintf(out, arg, " "); + _mi_fprintf(out, arg, (notok == NULL ? 
"not all freed" : notok)); + _mi_fprintf(out, arg, "\n"); + } + else { + _mi_fprintf(out, arg, " ok\n"); + } + } + else { + mi_print_amount(stat->peak, 1, out, arg); + mi_print_amount(stat->allocated, 1, out, arg); + _mi_fprintf(out, arg, "%11s", " "); // no freed + mi_print_amount(stat->current, 1, out, arg); + _mi_fprintf(out, arg, "\n"); + } +} + +static void mi_stat_print(const mi_stat_count_t* stat, const char* msg, int64_t unit, mi_output_fun* out, void* arg) { + mi_stat_print_ex(stat, msg, unit, out, arg, NULL); +} + +static void mi_stat_peak_print(const mi_stat_count_t* stat, const char* msg, int64_t unit, mi_output_fun* out, void* arg) { + _mi_fprintf(out, arg, "%10s:", msg); + mi_print_amount(stat->peak, unit, out, arg); + _mi_fprintf(out, arg, "\n"); +} + +static void mi_stat_counter_print(const mi_stat_counter_t* stat, const char* msg, mi_output_fun* out, void* arg ) { + _mi_fprintf(out, arg, "%10s:", msg); + mi_print_amount(stat->total, -1, out, arg); + _mi_fprintf(out, arg, "\n"); +} + + +static void mi_stat_counter_print_avg(const mi_stat_counter_t* stat, const char* msg, mi_output_fun* out, void* arg) { + const int64_t avg_tens = (stat->count == 0 ? 
0 : (stat->total*10 / stat->count)); + const long avg_whole = (long)(avg_tens/10); + const long avg_frac1 = (long)(avg_tens%10); + _mi_fprintf(out, arg, "%10s: %5ld.%ld avg\n", msg, avg_whole, avg_frac1); +} + + +static void mi_print_header(mi_output_fun* out, void* arg ) { + _mi_fprintf(out, arg, "%10s: %11s %11s %11s %11s %11s %11s\n", "heap stats", "peak ", "total ", "freed ", "current ", "unit ", "count "); +} + +#if MI_STAT>1 +static void mi_stats_print_bins(const mi_stat_count_t* bins, size_t max, const char* fmt, mi_output_fun* out, void* arg) { + bool found = false; + char buf[64]; + for (size_t i = 0; i <= max; i++) { + if (bins[i].allocated > 0) { + found = true; + int64_t unit = _mi_bin_size((uint8_t)i); + _mi_snprintf(buf, 64, "%s %3lu", fmt, (long)i); + mi_stat_print(&bins[i], buf, unit, out, arg); + } + } + if (found) { + _mi_fprintf(out, arg, "\n"); + mi_print_header(out, arg); + } +} +#endif + + + +//------------------------------------------------------------ +// Use an output wrapper for line-buffered output +// (which is nice when using loggers etc.) 
+//------------------------------------------------------------ +typedef struct buffered_s { + mi_output_fun* out; // original output function + void* arg; // and state + char* buf; // local buffer of at least size `count+1` + size_t used; // currently used chars `used <= count` + size_t count; // total chars available for output +} buffered_t; + +static void mi_buffered_flush(buffered_t* buf) { + buf->buf[buf->used] = 0; + _mi_fputs(buf->out, buf->arg, NULL, buf->buf); + buf->used = 0; +} + +static void mi_cdecl mi_buffered_out(const char* msg, void* arg) { + buffered_t* buf = (buffered_t*)arg; + if (msg==NULL || buf==NULL) return; + for (const char* src = msg; *src != 0; src++) { + char c = *src; + if (buf->used >= buf->count) mi_buffered_flush(buf); + mi_assert_internal(buf->used < buf->count); + buf->buf[buf->used++] = c; + if (c == '\n') mi_buffered_flush(buf); + } +} + +//------------------------------------------------------------ +// Print statistics +//------------------------------------------------------------ + +static void _mi_stats_print(mi_stats_t* stats, mi_output_fun* out0, void* arg0) mi_attr_noexcept { + // wrap the output function to be line buffered + char buf[256]; + buffered_t buffer = { out0, arg0, NULL, 0, 255 }; + buffer.buf = buf; + mi_output_fun* out = &mi_buffered_out; + void* arg = &buffer; + + // and print using that + mi_print_header(out,arg); + #if MI_STAT>1 + mi_stats_print_bins(stats->normal_bins, MI_BIN_HUGE, "normal",out,arg); + #endif + #if MI_STAT + mi_stat_print(&stats->normal, "normal", (stats->normal_count.count == 0 ? 1 : -(stats->normal.allocated / stats->normal_count.count)), out, arg); + mi_stat_print(&stats->large, "large", (stats->large_count.count == 0 ? 1 : -(stats->large.allocated / stats->large_count.count)), out, arg); + mi_stat_print(&stats->huge, "huge", (stats->huge_count.count == 0 ? 
1 : -(stats->huge.allocated / stats->huge_count.count)), out, arg); + mi_stat_count_t total = { 0,0,0,0 }; + mi_stat_add(&total, &stats->normal, 1); + mi_stat_add(&total, &stats->large, 1); + mi_stat_add(&total, &stats->huge, 1); + mi_stat_print(&total, "total", 1, out, arg); + #endif + #if MI_STAT>1 + mi_stat_print(&stats->malloc, "malloc req", 1, out, arg); + _mi_fprintf(out, arg, "\n"); + #endif + mi_stat_print_ex(&stats->reserved, "reserved", 1, out, arg, ""); + mi_stat_print_ex(&stats->committed, "committed", 1, out, arg, ""); + mi_stat_peak_print(&stats->reset, "reset", 1, out, arg ); + mi_stat_peak_print(&stats->purged, "purged", 1, out, arg ); + mi_stat_print(&stats->page_committed, "touched", 1, out, arg); + mi_stat_print(&stats->segments, "segments", -1, out, arg); + mi_stat_print(&stats->segments_abandoned, "-abandoned", -1, out, arg); + mi_stat_print(&stats->segments_cache, "-cached", -1, out, arg); + mi_stat_print(&stats->pages, "pages", -1, out, arg); + mi_stat_print(&stats->pages_abandoned, "-abandoned", -1, out, arg); + mi_stat_counter_print(&stats->pages_extended, "-extended", out, arg); + mi_stat_counter_print(&stats->page_no_retire, "-noretire", out, arg); + mi_stat_counter_print(&stats->arena_count, "arenas", out, arg); + mi_stat_counter_print(&stats->arena_crossover_count, "-crossover", out, arg); + mi_stat_counter_print(&stats->arena_rollback_count, "-rollback", out, arg); + mi_stat_counter_print(&stats->mmap_calls, "mmaps", out, arg); + mi_stat_counter_print(&stats->commit_calls, "commits", out, arg); + mi_stat_counter_print(&stats->reset_calls, "resets", out, arg); + mi_stat_counter_print(&stats->purge_calls, "purges", out, arg); + mi_stat_print(&stats->threads, "threads", -1, out, arg); + mi_stat_counter_print_avg(&stats->searches, "searches", out, arg); + _mi_fprintf(out, arg, "%10s: %5zu\n", "numa nodes", _mi_os_numa_node_count()); + + size_t elapsed; + size_t user_time; + size_t sys_time; + size_t current_rss; + size_t peak_rss; + size_t 
current_commit; + size_t peak_commit; + size_t page_faults; + mi_process_info(&elapsed, &user_time, &sys_time, ¤t_rss, &peak_rss, ¤t_commit, &peak_commit, &page_faults); + _mi_fprintf(out, arg, "%10s: %5ld.%03ld s\n", "elapsed", elapsed/1000, elapsed%1000); + _mi_fprintf(out, arg, "%10s: user: %ld.%03ld s, system: %ld.%03ld s, faults: %lu, rss: ", "process", + user_time/1000, user_time%1000, sys_time/1000, sys_time%1000, (unsigned long)page_faults ); + mi_printf_amount((int64_t)peak_rss, 1, out, arg, "%s"); + if (peak_commit > 0) { + _mi_fprintf(out, arg, ", commit: "); + mi_printf_amount((int64_t)peak_commit, 1, out, arg, "%s"); + } + _mi_fprintf(out, arg, "\n"); +} + +static mi_msecs_t mi_process_start; // = 0 + +static mi_stats_t* mi_stats_get_default(void) { + mi_heap_t* heap = mi_heap_get_default(); + return &heap->tld->stats; +} + +static void mi_stats_merge_from(mi_stats_t* stats) { + if (stats != &_mi_stats_main) { + mi_stats_add(&_mi_stats_main, stats); + memset(stats, 0, sizeof(mi_stats_t)); + } +} + +void mi_stats_reset(void) mi_attr_noexcept { + mi_stats_t* stats = mi_stats_get_default(); + if (stats != &_mi_stats_main) { memset(stats, 0, sizeof(mi_stats_t)); } + memset(&_mi_stats_main, 0, sizeof(mi_stats_t)); + if (mi_process_start == 0) { mi_process_start = _mi_clock_start(); }; +} + +void mi_stats_merge(void) mi_attr_noexcept { + mi_stats_merge_from( mi_stats_get_default() ); +} + +void _mi_stats_done(mi_stats_t* stats) { // called from `mi_thread_done` + mi_stats_merge_from(stats); +} + +void mi_stats_print_out(mi_output_fun* out, void* arg) mi_attr_noexcept { + mi_stats_merge_from(mi_stats_get_default()); + _mi_stats_print(&_mi_stats_main, out, arg); +} + +void mi_stats_print(void* out) mi_attr_noexcept { + // for compatibility there is an `out` parameter (which can be `stdout` or `stderr`) + mi_stats_print_out((mi_output_fun*)out, NULL); +} + +void mi_thread_stats_print_out(mi_output_fun* out, void* arg) mi_attr_noexcept { + 
_mi_stats_print(mi_stats_get_default(), out, arg); +} + + +// ---------------------------------------------------------------- +// Basic timer for convenience; use milli-seconds to avoid doubles +// ---------------------------------------------------------------- + +static mi_msecs_t mi_clock_diff; + +mi_msecs_t _mi_clock_now(void) { + return _mi_prim_clock_now(); +} + +mi_msecs_t _mi_clock_start(void) { + if (mi_clock_diff == 0.0) { + mi_msecs_t t0 = _mi_clock_now(); + mi_clock_diff = _mi_clock_now() - t0; + } + return _mi_clock_now(); +} + +mi_msecs_t _mi_clock_end(mi_msecs_t start) { + mi_msecs_t end = _mi_clock_now(); + return (end - start - mi_clock_diff); +} + + +// -------------------------------------------------------- +// Basic process statistics +// -------------------------------------------------------- + +mi_decl_export void mi_process_info(size_t* elapsed_msecs, size_t* user_msecs, size_t* system_msecs, size_t* current_rss, size_t* peak_rss, size_t* current_commit, size_t* peak_commit, size_t* page_faults) mi_attr_noexcept +{ + mi_process_info_t pinfo; + _mi_memzero_var(pinfo); + pinfo.elapsed = _mi_clock_end(mi_process_start); + pinfo.current_commit = (size_t)(mi_atomic_loadi64_relaxed((_Atomic(int64_t)*)&_mi_stats_main.committed.current)); + pinfo.peak_commit = (size_t)(mi_atomic_loadi64_relaxed((_Atomic(int64_t)*)&_mi_stats_main.committed.peak)); + pinfo.current_rss = pinfo.current_commit; + pinfo.peak_rss = pinfo.peak_commit; + pinfo.utime = 0; + pinfo.stime = 0; + pinfo.page_faults = 0; + + _mi_prim_process_info(&pinfo); + + if (elapsed_msecs!=NULL) *elapsed_msecs = (pinfo.elapsed < 0 ? 0 : (pinfo.elapsed < (mi_msecs_t)PTRDIFF_MAX ? (size_t)pinfo.elapsed : PTRDIFF_MAX)); + if (user_msecs!=NULL) *user_msecs = (pinfo.utime < 0 ? 0 : (pinfo.utime < (mi_msecs_t)PTRDIFF_MAX ? (size_t)pinfo.utime : PTRDIFF_MAX)); + if (system_msecs!=NULL) *system_msecs = (pinfo.stime < 0 ? 0 : (pinfo.stime < (mi_msecs_t)PTRDIFF_MAX ? 
(size_t)pinfo.stime : PTRDIFF_MAX)); + if (current_rss!=NULL) *current_rss = pinfo.current_rss; + if (peak_rss!=NULL) *peak_rss = pinfo.peak_rss; + if (current_commit!=NULL) *current_commit = pinfo.current_commit; + if (peak_commit!=NULL) *peak_commit = pinfo.peak_commit; + if (page_faults!=NULL) *page_faults = pinfo.page_faults; +} diff --git a/yass/third_party/mimalloc/test/CMakeLists.txt b/yass/third_party/mimalloc/test/CMakeLists.txt new file mode 100644 index 0000000000..e76ffa64fc --- /dev/null +++ b/yass/third_party/mimalloc/test/CMakeLists.txt @@ -0,0 +1,54 @@ +cmake_minimum_required(VERSION 3.0) +project(mimalloc-test C CXX) + +set(CMAKE_C_STANDARD 11) +set(CMAKE_CXX_STANDARD 17) + +# Set default build type +if (NOT CMAKE_BUILD_TYPE) + if ("${CMAKE_BINARY_DIR}" MATCHES ".*(D|d)ebug$") + message(STATUS "No build type selected, default to *** Debug ***") + set(CMAKE_BUILD_TYPE "Debug") + else() + message(STATUS "No build type selected, default to *** Release ***") + set(CMAKE_BUILD_TYPE "Release") + endif() +endif() + +# Import mimalloc (if installed) +find_package(mimalloc 2.0 REQUIRED NO_SYSTEM_ENVIRONMENT_PATH) +message(STATUS "Found mimalloc installed at: ${MIMALLOC_LIBRARY_DIR} (${MIMALLOC_VERSION_DIR})") + +# overriding with a dynamic library +add_executable(dynamic-override main-override.c) +target_link_libraries(dynamic-override PUBLIC mimalloc) + +add_executable(dynamic-override-cxx main-override.cpp) +target_link_libraries(dynamic-override-cxx PUBLIC mimalloc) + + +# overriding with a static object file works reliable as the symbols in the +# object file have priority over those in library files +add_executable(static-override-obj main-override.c ${MIMALLOC_OBJECT_DIR}/mimalloc.o) +target_include_directories(static-override-obj PUBLIC ${MIMALLOC_INCLUDE_DIR}) +target_link_libraries(static-override-obj PUBLIC pthread) + + +# overriding with a static library works too if using the `mimalloc-override.h` +# header to redefine malloc/free. 
(the library already overrides new/delete) +add_executable(static-override-static main-override-static.c) +target_link_libraries(static-override-static PUBLIC mimalloc-static) + + +# overriding with a static library: this may not work if the library is linked too late +# on the command line after the C runtime library; but we cannot control that well in CMake +add_executable(static-override main-override.c) +target_link_libraries(static-override PUBLIC mimalloc-static) + +add_executable(static-override-cxx main-override.cpp) +target_link_libraries(static-override-cxx PUBLIC mimalloc-static) + + +## test memory errors +add_executable(test-wrong test-wrong.c) +target_link_libraries(test-wrong PUBLIC mimalloc) diff --git a/yass/third_party/mimalloc/test/main-override-static.c b/yass/third_party/mimalloc/test/main-override-static.c new file mode 100644 index 0000000000..e71be29e95 --- /dev/null +++ b/yass/third_party/mimalloc/test/main-override-static.c @@ -0,0 +1,415 @@ +#include +#include +#include +#include +#include + +#include +#include // redefines malloc etc. 
+ + +static void double_free1(); +static void double_free2(); +static void corrupt_free(); +static void block_overflow1(); +static void invalid_free(); +static void test_aslr(void); +static void test_process_info(void); +static void test_reserved(void); +static void negative_stat(void); +static void alloc_huge(void); +static void test_heap_walk(void); +static void test_heap_arena(void); +static void test_align(void); + +int main() { + mi_version(); + mi_stats_reset(); + // detect double frees and heap corruption + // double_free1(); + // double_free2(); + // corrupt_free(); + // block_overflow1(); + // test_aslr(); + // invalid_free(); + // test_reserved(); + // negative_stat(); + // test_heap_walk(); + // alloc_huge(); + // test_heap_walk(); + // test_heap_arena(); + // test_align(); + + void* p1 = malloc(78); + void* p2 = malloc(24); + free(p1); + p1 = mi_malloc(8); + char* s = strdup("hello\n"); + free(p2); + + mi_heap_t* h = mi_heap_new(); + mi_heap_set_default(h); + + p2 = malloc(16); + p1 = realloc(p1, 32); + free(p1); + free(p2); + free(s); + + /* now test if override worked by allocating/freeing across the api's*/ + //p1 = mi_malloc(32); + //free(p1); + //p2 = malloc(32); + //mi_free(p2); + + //mi_collect(true); + //mi_stats_print(NULL); + + // test_process_info(); + + return 0; +} + +static void test_align() { + void* p = mi_malloc_aligned(256, 256); + if (((uintptr_t)p % 256) != 0) { + fprintf(stderr, "%p is not 256 alignend!\n", p); + } +} + +static void invalid_free() { + free((void*)0xBADBEEF); + realloc((void*)0xBADBEEF,10); +} + +static void block_overflow1() { + uint8_t* p = (uint8_t*)mi_malloc(17); + p[18] = 0; + free(p); +} + +// The double free samples come ArcHeap [1] by Insu Yun (issue #161) +// [1]: https://arxiv.org/pdf/1903.00503.pdf + +static void double_free1() { + void* p[256]; + //uintptr_t buf[256]; + + p[0] = mi_malloc(622616); + p[1] = mi_malloc(655362); + p[2] = mi_malloc(786432); + mi_free(p[2]); + // [VULN] Double free + 
mi_free(p[2]); + p[3] = mi_malloc(786456); + // [BUG] Found overlap + // p[3]=0x429b2ea2000 (size=917504), p[1]=0x429b2e42000 (size=786432) + fprintf(stderr, "p3: %p-%p, p1: %p-%p, p2: %p\n", p[3], (uint8_t*)(p[3]) + 786456, p[1], (uint8_t*)(p[1]) + 655362, p[2]); +} + +static void double_free2() { + void* p[256]; + //uintptr_t buf[256]; + // [INFO] Command buffer: 0x327b2000 + // [INFO] Input size: 182 + p[0] = malloc(712352); + p[1] = malloc(786432); + free(p[0]); + // [VULN] Double free + free(p[0]); + p[2] = malloc(786440); + p[3] = malloc(917504); + p[4] = malloc(786440); + // [BUG] Found overlap + // p[4]=0x433f1402000 (size=917504), p[1]=0x433f14c2000 (size=786432) + fprintf(stderr, "p1: %p-%p, p2: %p-%p\n", p[4], (uint8_t*)(p[4]) + 917504, p[1], (uint8_t*)(p[1]) + 786432); +} + + +// Try to corrupt the heap through buffer overflow +#define N 256 +#define SZ 64 + +static void corrupt_free() { + void* p[N]; + // allocate + for (int i = 0; i < N; i++) { + p[i] = malloc(SZ); + } + // free some + for (int i = 0; i < N; i += (N/10)) { + free(p[i]); + p[i] = NULL; + } + // try to corrupt the free list + for (int i = 0; i < N; i++) { + if (p[i] != NULL) { + memset(p[i], 0, SZ+8); + } + } + // allocate more.. 
trying to trigger an allocation from a corrupted entry + // this may need many allocations to get there (if at all) + for (int i = 0; i < 4096; i++) { + malloc(SZ); + } +} + +static void test_aslr(void) { + void* p[256]; + p[0] = malloc(378200); + p[1] = malloc(1134626); + printf("p1: %p, p2: %p\n", p[0], p[1]); +} + +static void test_process_info(void) { + size_t elapsed = 0; + size_t user_msecs = 0; + size_t system_msecs = 0; + size_t current_rss = 0; + size_t peak_rss = 0; + size_t current_commit = 0; + size_t peak_commit = 0; + size_t page_faults = 0; + for (int i = 0; i < 100000; i++) { + void* p = calloc(100,10); + free(p); + } + mi_process_info(&elapsed, &user_msecs, &system_msecs, ¤t_rss, &peak_rss, ¤t_commit, &peak_commit, &page_faults); + printf("\n\n*** process info: elapsed %3zd.%03zd s, user: %3zd.%03zd s, rss: %zd b, commit: %zd b\n\n", elapsed/1000, elapsed%1000, user_msecs/1000, user_msecs%1000, peak_rss, peak_commit); +} + +static void test_reserved(void) { +#define KiB 1024ULL +#define MiB (KiB*KiB) +#define GiB (MiB*KiB) + mi_reserve_os_memory(4*GiB, false, true); + void* p1 = malloc(100); + void* p2 = malloc(100000); + void* p3 = malloc(2*GiB); + void* p4 = malloc(1*GiB + 100000); + free(p1); + free(p2); + free(p3); + p3 = malloc(1*GiB); + free(p4); +} + + + +static void negative_stat(void) { + int* p = mi_malloc(60000); + mi_stats_print_out(NULL, NULL); + *p = 100; + mi_free(p); + mi_stats_print_out(NULL, NULL); +} + +static void alloc_huge(void) { + void* p = mi_malloc(67108872); + mi_free(p); +} + +static bool test_visit(const mi_heap_t* heap, const mi_heap_area_t* area, void* block, size_t block_size, void* arg) { + if (block == NULL) { + printf("visiting an area with blocks of size %zu (including padding)\n", area->full_block_size); + } + else { + printf(" block of size %zu (allocated size is %zu)\n", block_size, mi_usable_size(block)); + } + return true; +} + +static void test_heap_walk(void) { + mi_heap_t* heap = mi_heap_new(); + 
mi_heap_malloc(heap, 16*2097152); + mi_heap_malloc(heap, 2067152); + mi_heap_malloc(heap, 2097160); + mi_heap_malloc(heap, 24576); + mi_heap_visit_blocks(heap, true, &test_visit, NULL); +} + +static void test_heap_arena(void) { + mi_arena_id_t arena_id; + int err = mi_reserve_os_memory_ex(100 * 1024 * 1024, false /* commit */, false /* allow large */, true /* exclusive */, &arena_id); + if (err) abort(); + mi_heap_t* heap = mi_heap_new_in_arena(arena_id); + for (int i = 0; i < 500000; i++) { + void* p = mi_heap_malloc(heap, 1024); + if (p == NULL) { + printf("out of memory after %d kb (expecting about 100_000kb)\n", i); + break; + } + } +} + +// ---------------------------- +// bin size experiments +// ------------------------------ + +#if 0 +#include +#include + +#define MI_INTPTR_SIZE 8 +#define MI_LARGE_WSIZE_MAX (4*1024*1024 / MI_INTPTR_SIZE) + +#define MI_BIN_HUGE 100 +//#define MI_ALIGN2W + +// Bit scan reverse: return the index of the highest bit. +static inline uint8_t mi_bsr32(uint32_t x); + +#if defined(_MSC_VER) +#include +#include +static inline uint8_t mi_bsr32(uint32_t x) { + uint32_t idx; + _BitScanReverse((DWORD*)&idx, x); + return idx; +} +#elif defined(__GNUC__) || defined(__clang__) +static inline uint8_t mi_bsr32(uint32_t x) { + return (31 - __builtin_clz(x)); +} +#else +static inline uint8_t mi_bsr32(uint32_t x) { + // de Bruijn multiplication, see + static const uint8_t debruijn[32] = { + 31, 0, 22, 1, 28, 23, 18, 2, 29, 26, 24, 10, 19, 7, 3, 12, + 30, 21, 27, 17, 25, 9, 6, 11, 20, 16, 8, 5, 15, 4, 14, 13, + }; + x |= x >> 1; + x |= x >> 2; + x |= x >> 4; + x |= x >> 8; + x |= x >> 16; + x++; + return debruijn[(x*0x076be629) >> 27]; +} +#endif + +/* +// Bit scan reverse: return the index of the highest bit. +uint8_t _mi_bsr(uintptr_t x) { + if (x == 0) return 0; + #if MI_INTPTR_SIZE==8 + uint32_t hi = (x >> 32); + return (hi == 0 ? 
mi_bsr32((uint32_t)x) : 32 + mi_bsr32(hi)); + #elif MI_INTPTR_SIZE==4 + return mi_bsr32(x); + #else + # error "define bsr for non-32 or 64-bit platforms" + #endif +} +*/ + + +static inline size_t _mi_wsize_from_size(size_t size) { + return (size + sizeof(uintptr_t) - 1) / sizeof(uintptr_t); +} + +// Return the bin for a given field size. +// Returns MI_BIN_HUGE if the size is too large. +// We use `wsize` for the size in "machine word sizes", +// i.e. byte size == `wsize*sizeof(void*)`. +extern inline uint8_t _mi_bin8(size_t size) { + size_t wsize = _mi_wsize_from_size(size); + uint8_t bin; + if (wsize <= 1) { + bin = 1; + } +#if defined(MI_ALIGN4W) + else if (wsize <= 4) { + bin = (uint8_t)((wsize+1)&~1); // round to double word sizes + } +#elif defined(MI_ALIGN2W) + else if (wsize <= 8) { + bin = (uint8_t)((wsize+1)&~1); // round to double word sizes + } +#else + else if (wsize <= 8) { + bin = (uint8_t)wsize; + } +#endif + else if (wsize > MI_LARGE_WSIZE_MAX) { + bin = MI_BIN_HUGE; + } + else { +#if defined(MI_ALIGN4W) + if (wsize <= 16) { wsize = (wsize+3)&~3; } // round to 4x word sizes +#endif + wsize--; + // find the highest bit + uint8_t b = mi_bsr32((uint32_t)wsize); + // and use the top 3 bits to determine the bin (~12.5% worst internal fragmentation). 
+ // - adjust with 3 because we use do not round the first 8 sizes + // which each get an exact bin + bin = ((b << 2) + (uint8_t)((wsize >> (b - 2)) & 0x03)) - 3; + } + return bin; +} + +static inline uint8_t _mi_bin4(size_t size) { + size_t wsize = _mi_wsize_from_size(size); + uint8_t bin; + if (wsize <= 1) { + bin = 1; + } +#if defined(MI_ALIGN4W) + else if (wsize <= 4) { + bin = (uint8_t)((wsize+1)&~1); // round to double word sizes + } +#elif defined(MI_ALIGN2W) + else if (wsize <= 8) { + bin = (uint8_t)((wsize+1)&~1); // round to double word sizes + } +#else + else if (wsize <= 8) { + bin = (uint8_t)wsize; + } +#endif + else if (wsize > MI_LARGE_WSIZE_MAX) { + bin = MI_BIN_HUGE; + } + else { + uint8_t b = mi_bsr32((uint32_t)wsize); + bin = ((b << 1) + (uint8_t)((wsize >> (b - 1)) & 0x01)) + 3; + } + return bin; +} + +static size_t _mi_binx4(size_t bsize) { + if (bsize==0) return 0; + uint8_t b = mi_bsr32((uint32_t)bsize); + if (b <= 1) return bsize; + size_t bin = ((b << 1) | (bsize >> (b - 1))&0x01); + return bin; +} + +static size_t _mi_binx8(size_t bsize) { + if (bsize<=1) return bsize; + uint8_t b = mi_bsr32((uint32_t)bsize); + if (b <= 2) return bsize; + size_t bin = ((b << 2) | (bsize >> (b - 2))&0x03) - 5; + return bin; +} + +static void mi_bins(void) { + //printf(" QNULL(1), /* 0 */ \\\n "); + size_t last_bin = 0; + size_t min_bsize = 0; + size_t last_bsize = 0; + for (size_t bsize = 1; bsize < 2*1024; bsize++) { + size_t size = bsize * 64 * 1024; + size_t bin = _mi_binx8(bsize); + if (bin != last_bin) { + printf("min bsize: %6zd, max bsize: %6zd, bin: %6zd\n", min_bsize, last_bsize, last_bin); + //printf("QNULL(%6zd), ", wsize); + //if (last_bin%8 == 0) printf("/* %i */ \\\n ", last_bin); + last_bin = bin; + min_bsize = bsize; + } + last_bsize = bsize; + } +} +#endif diff --git a/yass/third_party/mimalloc/test/main-override.c b/yass/third_party/mimalloc/test/main-override.c new file mode 100644 index 0000000000..284fdd2040 --- /dev/null +++ 
b/yass/third_party/mimalloc/test/main-override.c @@ -0,0 +1,36 @@ +#include +#include +#include +#include + +#include + +int main() { + mi_version(); // ensure mimalloc library is linked + void* p1 = malloc(78); + void* p2 = malloc(24); + free(p1); + p1 = malloc(8); + //char* s = strdup("hello\n"); + free(p2); + p2 = malloc(16); + p1 = realloc(p1, 32); + free(p1); + free(p2); + //free(s); + //mi_collect(true); + + /* now test if override worked by allocating/freeing across the api's*/ + //p1 = mi_malloc(32); + //free(p1); + //p2 = malloc(32); + //mi_free(p2); + p1 = malloc(24); + p2 = reallocarray(p1, 16, 16); + free(p2); + p1 = malloc(24); + assert(reallocarr(&p1, 16, 16) == 0); + free(p1); + mi_stats_print(NULL); + return 0; +} diff --git a/yass/third_party/mimalloc/test/main-override.cpp b/yass/third_party/mimalloc/test/main-override.cpp new file mode 100644 index 0000000000..582f24ee92 --- /dev/null +++ b/yass/third_party/mimalloc/test/main-override.cpp @@ -0,0 +1,400 @@ +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include + +#ifdef _WIN32 +#include +#endif + +#ifdef _WIN32 +#include +static void msleep(unsigned long msecs) { Sleep(msecs); } +#else +#include +static void msleep(unsigned long msecs) { usleep(msecs * 1000UL); } +#endif + +static void heap_thread_free_large(); // issue #221 +static void heap_no_delete(); // issue #202 +static void heap_late_free(); // issue #204 +static void padding_shrink(); // issue #209 +static void various_tests(); +static void test_mt_shutdown(); +static void large_alloc(void); // issue #363 +static void fail_aslr(); // issue #372 +static void tsan_numa_test(); // issue #414 +static void strdup_test(); // issue #445 +static void bench_alloc_large(void); // issue #xxx +//static void test_large_migrate(void); // issue #691 +static void heap_thread_free_huge(); +static void test_std_string(); // issue #697 + +static void test_stl_allocators(); + + 
+int main() { + // mi_stats_reset(); // ignore earlier allocations + + // test_std_string(); + // heap_thread_free_huge(); + /* + heap_thread_free_huge(); + heap_thread_free_large(); + heap_no_delete(); + heap_late_free(); + padding_shrink(); + various_tests(); + large_alloc(); + tsan_numa_test(); + strdup_test(); + */ + // test_stl_allocators(); + // test_mt_shutdown(); + // test_large_migrate(); + + //fail_aslr(); + // bench_alloc_large(); + // mi_stats_print(NULL); + return 0; +} + +static void* p = malloc(8); + +void free_p() { + free(p); + return; +} + +class Test { +private: + int i; +public: + Test(int x) { i = x; } + ~Test() { } +}; + + +static void various_tests() { + atexit(free_p); + void* p1 = malloc(78); + void* p2 = mi_malloc_aligned(24, 16); + free(p1); + p1 = malloc(8); + char* s = mi_strdup("hello\n"); + + mi_free(p2); + p2 = malloc(16); + p1 = realloc(p1, 32); + free(p1); + free(p2); + mi_free(s); + + Test* t = new Test(42); + delete t; + t = new (std::nothrow) Test(42); + delete t; + auto tbuf = new unsigned char[sizeof(Test)]; + t = new (tbuf) Test(42); + t->~Test(); + delete[] tbuf; +} + +class Static { +private: + void* p; +public: + Static() { + p = malloc(64); + return; + } + ~Static() { + free(p); + return; + } +}; + +static Static s = Static(); + + +static bool test_stl_allocator1() { + std::vector > vec; + vec.push_back(1); + vec.pop_back(); + return vec.size() == 0; +} + +struct some_struct { int i; int j; double z; }; + +static bool test_stl_allocator2() { + std::vector > vec; + vec.push_back(some_struct()); + vec.pop_back(); + return vec.size() == 0; +} + +#if MI_HAS_HEAP_STL_ALLOCATOR +static bool test_stl_allocator3() { + std::vector > vec; + vec.push_back(1); + vec.pop_back(); + return vec.size() == 0; +} + +static bool test_stl_allocator4() { + std::vector > vec; + vec.push_back(some_struct()); + vec.pop_back(); + return vec.size() == 0; +} + +static bool test_stl_allocator5() { + std::vector > vec; + vec.push_back(1); + 
vec.pop_back(); + return vec.size() == 0; +} + +static bool test_stl_allocator6() { + std::vector > vec; + vec.push_back(some_struct()); + vec.pop_back(); + return vec.size() == 0; +} +#endif + +static void test_stl_allocators() { + test_stl_allocator1(); + test_stl_allocator2(); +#if MI_HAS_HEAP_STL_ALLOCATOR + test_stl_allocator3(); + test_stl_allocator4(); + test_stl_allocator5(); + test_stl_allocator6(); +#endif +} + +#if 0 +// issue #691 +static char* cptr; + +static void* thread1_allocate() +{ + cptr = mi_calloc_tp(char,22085632); + return NULL; +} + +static void* thread2_free() +{ + assert(cptr); + mi_free(cptr); + cptr = NULL; + return NULL; +} + +static void test_large_migrate(void) { + auto t1 = std::thread(thread1_allocate); + t1.join(); + auto t2 = std::thread(thread2_free); + t2.join(); + /* + pthread_t thread1, thread2; + + pthread_create(&thread1, NULL, &thread1_allocate, NULL); + pthread_join(thread1, NULL); + + pthread_create(&thread2, NULL, &thread2_free, NULL); + pthread_join(thread2, NULL); + */ + return; +} +#endif + +// issue 445 +static void strdup_test() { +#ifdef _MSC_VER + char* s = _strdup("hello\n"); + char* buf = NULL; + size_t len; + _dupenv_s(&buf, &len, "MIMALLOC_VERBOSE"); + mi_free(buf); + mi_free(s); +#endif +} + +// Issue #202 +static void heap_no_delete_worker() { + mi_heap_t* heap = mi_heap_new(); + void* q = mi_heap_malloc(heap, 1024); (void)(q); + // mi_heap_delete(heap); // uncomment to prevent assertion +} + +static void heap_no_delete() { + auto t1 = std::thread(heap_no_delete_worker); + t1.join(); +} + + +// Issue #697 +static void test_std_string() { + std::string path = "/Users/xxxx/Library/Developer/Xcode/DerivedData/xxxxxxxxxx/Build/Intermediates.noindex/xxxxxxxxxxx/arm64/XX_lto.o/0.arm64.lto.o"; + std::string path1 = "/Users/xxxx/Library/Developer/Xcode/DerivedData/xxxxxxxxxx/Build/Intermediates.noindex/xxxxxxxxxxx/arm64/XX_lto.o/1.arm64.lto.o"; + std::cout << path + "\n>>> " + path1 + "\n>>> " << std::endl; +} + +// 
Issue #204 +static volatile void* global_p; + +static void t1main() { + mi_heap_t* heap = mi_heap_new(); + global_p = mi_heap_malloc(heap, 1024); + mi_heap_delete(heap); +} + +static void heap_late_free() { + auto t1 = std::thread(t1main); + + msleep(2000); + assert(global_p); + mi_free((void*)global_p); + + t1.join(); +} + +// issue #209 +static void* shared_p; +static void alloc0(/* void* arg */) +{ + shared_p = mi_malloc(8); +} + +static void padding_shrink(void) +{ + auto t1 = std::thread(alloc0); + t1.join(); + mi_free(shared_p); +} + + +// Issue #221 +static void heap_thread_free_large_worker() { + mi_free(shared_p); +} + +static void heap_thread_free_large() { + for (int i = 0; i < 100; i++) { + shared_p = mi_malloc_aligned(2 * 1024 * 1024 + 1, 8); + auto t1 = std::thread(heap_thread_free_large_worker); + t1.join(); + } +} + +static void heap_thread_free_huge_worker() { + mi_free(shared_p); +} + +static void heap_thread_free_huge() { + for (int i = 0; i < 100; i++) { + shared_p = mi_malloc(1024 * 1024 * 1024); + auto t1 = std::thread(heap_thread_free_huge_worker); + t1.join(); + } +} + +static void test_mt_shutdown() +{ + const int threads = 5; + std::vector< std::future< std::vector< char* > > > ts; + + auto fn = [&]() + { + std::vector< char* > ps; + ps.reserve(1000); + for (int i = 0; i < 1000; i++) + ps.emplace_back(new char[1]); + return ps; + }; + + for (int i = 0; i < threads; i++) + ts.emplace_back(std::async(std::launch::async, fn)); + + for (auto& f : ts) + for (auto& p : f.get()) + delete[] p; + + std::cout << "done" << std::endl; +} + +// issue #363 +using namespace std; + +void large_alloc(void) +{ + char* a = new char[1ull << 25]; + thread th([&] { + delete[] a; + }); + th.join(); +} + +// issue #372 +static void fail_aslr() { + size_t sz = (4ULL << 40); // 4TiB + void* p = malloc(sz); + printf("pointer p: %p: area up to %p\n", p, (uint8_t*)p + sz); + *(int*)0x5FFFFFFF000 = 0; // should segfault +} + +// issues #414 +static void dummy_worker() 
{ + void* p = mi_malloc(0); + mi_free(p); +} + +static void tsan_numa_test() { + auto t1 = std::thread(dummy_worker); + dummy_worker(); + t1.join(); +} + +// issue #? +#include +#include +#include + +static void bench_alloc_large(void) { + static constexpr int kNumBuffers = 20; + static constexpr size_t kMinBufferSize = 5 * 1024 * 1024; + static constexpr size_t kMaxBufferSize = 25 * 1024 * 1024; + std::unique_ptr buffers[kNumBuffers]; + + std::random_device rd; (void)rd; + std::mt19937 gen(42); //rd()); + std::uniform_int_distribution<> size_distribution(kMinBufferSize, kMaxBufferSize); + std::uniform_int_distribution<> buf_number_distribution(0, kNumBuffers - 1); + + static constexpr int kNumIterations = 2000; + const auto start = std::chrono::steady_clock::now(); + for (int i = 0; i < kNumIterations; ++i) { + int buffer_idx = buf_number_distribution(gen); + size_t new_size = size_distribution(gen); + buffers[buffer_idx] = std::make_unique(new_size); + } + const auto end = std::chrono::steady_clock::now(); + const auto num_ms = std::chrono::duration_cast(end - start).count(); + const auto us_per_allocation = std::chrono::duration_cast(end - start).count() / kNumIterations; + std::cout << kNumIterations << " allocations Done in " << num_ms << "ms." 
<< std::endl; + std::cout << "Avg " << us_per_allocation << " us per allocation" << std::endl; +} + diff --git a/yass/third_party/mimalloc/test/main.c b/yass/third_party/mimalloc/test/main.c new file mode 100644 index 0000000000..b148f71244 --- /dev/null +++ b/yass/third_party/mimalloc/test/main.c @@ -0,0 +1,46 @@ +#include +#include +#include + +void test_heap(void* p_out) { + mi_heap_t* heap = mi_heap_new(); + void* p1 = mi_heap_malloc(heap,32); + void* p2 = mi_heap_malloc(heap,48); + mi_free(p_out); + mi_heap_destroy(heap); + //mi_heap_delete(heap); mi_free(p1); mi_free(p2); +} + +void test_large() { + const size_t N = 1000; + + for (size_t i = 0; i < N; ++i) { + size_t sz = 1ull << 21; + char* a = mi_mallocn_tp(char,sz); + for (size_t k = 0; k < sz; k++) { a[k] = 'x'; } + mi_free(a); + } +} + +int main() { + void* p1 = mi_malloc(16); + void* p2 = mi_malloc(1000000); + mi_free(p1); + mi_free(p2); + p1 = mi_malloc(16); + p2 = mi_malloc(16); + mi_free(p1); + mi_free(p2); + + test_heap(mi_malloc(32)); + + p1 = mi_malloc_aligned(64, 16); + p2 = mi_malloc_aligned(160,24); + mi_free(p2); + mi_free(p1); + //test_large(); + + mi_collect(true); + mi_stats_print(NULL); + return 0; +} diff --git a/yass/third_party/mimalloc/test/readme.md b/yass/third_party/mimalloc/test/readme.md new file mode 100644 index 0000000000..db3524cd4f --- /dev/null +++ b/yass/third_party/mimalloc/test/readme.md @@ -0,0 +1,16 @@ +Testing allocators is difficult as bugs may only surface after particular +allocation patterns. The main approach to testing _mimalloc_ is therefore +to have extensive internal invariant checking (see `page_is_valid` in `page.c` +for example), which is enabled in debug mode with `-DMI_DEBUG_FULL=ON`. +The main testing strategy is then to run [`mimalloc-bench`][bench] using full +invariant checking to catch any potential problems over a wide range of intensive +allocation benchmarks and programs. 
+ +However, this does not test well for the entire API surface and this is tested +with `test-api.c` when using `make test` (from `out/debug` etc). (This is +not complete yet, please add to it.) + +The `main.c` and `main-override.c` are there to test if building and overriding +from a local install works and therefore these build a separate `test/CMakeLists.txt`. + +[bench]: https://github.com/daanx/mimalloc-bench diff --git a/yass/third_party/mimalloc/test/test-api-fill.c b/yass/third_party/mimalloc/test/test-api-fill.c new file mode 100644 index 0000000000..3fca3b9d43 --- /dev/null +++ b/yass/third_party/mimalloc/test/test-api-fill.c @@ -0,0 +1,343 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2018-2020, Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. +-----------------------------------------------------------------------------*/ +#include "mimalloc.h" +#include "mimalloc/types.h" + +#include "testhelper.h" + +// --------------------------------------------------------------------------- +// Helper functions +// --------------------------------------------------------------------------- +bool check_zero_init(uint8_t* p, size_t size); +#if MI_DEBUG >= 2 +bool check_debug_fill_uninit(uint8_t* p, size_t size); +bool check_debug_fill_freed(uint8_t* p, size_t size); +#endif + +// --------------------------------------------------------------------------- +// Main testing +// --------------------------------------------------------------------------- +int main(void) { + mi_option_disable(mi_option_verbose); + + // --------------------------------------------------- + // Zeroing allocation + // --------------------------------------------------- + CHECK_BODY("zeroinit-zalloc-small") { + size_t zalloc_size = MI_SMALL_SIZE_MAX / 2; + uint8_t* p 
= (uint8_t*)mi_zalloc(zalloc_size); + result = check_zero_init(p, zalloc_size); + mi_free(p); + }; + CHECK_BODY("zeroinit-zalloc-large") { + size_t zalloc_size = MI_SMALL_SIZE_MAX * 2; + uint8_t* p = (uint8_t*)mi_zalloc(zalloc_size); + result = check_zero_init(p, zalloc_size); + mi_free(p); + }; + CHECK_BODY("zeroinit-zalloc_small") { + size_t zalloc_size = MI_SMALL_SIZE_MAX / 2; + uint8_t* p = (uint8_t*)mi_zalloc_small(zalloc_size); + result = check_zero_init(p, zalloc_size); + mi_free(p); + }; + + CHECK_BODY("zeroinit-calloc-small") { + size_t calloc_size = MI_SMALL_SIZE_MAX / 2; + uint8_t* p = (uint8_t*)mi_calloc(calloc_size, 1); + result = check_zero_init(p, calloc_size); + mi_free(p); + }; + CHECK_BODY("zeroinit-calloc-large") { + size_t calloc_size = MI_SMALL_SIZE_MAX * 2; + uint8_t* p = (uint8_t*)mi_calloc(calloc_size, 1); + result = check_zero_init(p, calloc_size); + mi_free(p); + }; + + CHECK_BODY("zeroinit-rezalloc-small") { + size_t zalloc_size = MI_SMALL_SIZE_MAX / 2; + uint8_t* p = (uint8_t*)mi_zalloc(zalloc_size); + result = check_zero_init(p, zalloc_size); + zalloc_size *= 3; + p = (uint8_t*)mi_rezalloc(p, zalloc_size); + result &= check_zero_init(p, zalloc_size); + mi_free(p); + }; + CHECK_BODY("zeroinit-rezalloc-large") { + size_t zalloc_size = MI_SMALL_SIZE_MAX * 2; + uint8_t* p = (uint8_t*)mi_zalloc(zalloc_size); + result = check_zero_init(p, zalloc_size); + zalloc_size *= 3; + p = (uint8_t*)mi_rezalloc(p, zalloc_size); + result &= check_zero_init(p, zalloc_size); + mi_free(p); + }; + + CHECK_BODY("zeroinit-recalloc-small") { + size_t calloc_size = MI_SMALL_SIZE_MAX / 2; + uint8_t* p = (uint8_t*)mi_calloc(calloc_size, 1); + result = check_zero_init(p, calloc_size); + calloc_size *= 3; + p = (uint8_t*)mi_recalloc(p, calloc_size, 1); + result &= check_zero_init(p, calloc_size); + mi_free(p); + }; + CHECK_BODY("zeroinit-recalloc-large") { + size_t calloc_size = MI_SMALL_SIZE_MAX * 2; + uint8_t* p = (uint8_t*)mi_calloc(calloc_size, 1); + result = 
check_zero_init(p, calloc_size); + calloc_size *= 3; + p = (uint8_t*)mi_recalloc(p, calloc_size, 1); + result &= check_zero_init(p, calloc_size); + mi_free(p); + }; + + // --------------------------------------------------- + // Zeroing in aligned API + // --------------------------------------------------- + CHECK_BODY("zeroinit-zalloc_aligned-small") { + size_t zalloc_size = MI_SMALL_SIZE_MAX / 2; + uint8_t* p = (uint8_t*)mi_zalloc_aligned(zalloc_size, MI_MAX_ALIGN_SIZE * 2); + result = check_zero_init(p, zalloc_size); + mi_free(p); + }; + CHECK_BODY("zeroinit-zalloc_aligned-large") { + size_t zalloc_size = MI_SMALL_SIZE_MAX * 2; + uint8_t* p = (uint8_t*)mi_zalloc_aligned(zalloc_size, MI_MAX_ALIGN_SIZE * 2); + result = check_zero_init(p, zalloc_size); + mi_free(p); + }; + + CHECK_BODY("zeroinit-calloc_aligned-small") { + size_t calloc_size = MI_SMALL_SIZE_MAX / 2; + uint8_t* p = (uint8_t*)mi_calloc_aligned(calloc_size, 1, MI_MAX_ALIGN_SIZE * 2); + result = check_zero_init(p, calloc_size); + mi_free(p); + }; + CHECK_BODY("zeroinit-calloc_aligned-large") { + size_t calloc_size = MI_SMALL_SIZE_MAX * 2; + uint8_t* p = (uint8_t*)mi_calloc_aligned(calloc_size, 1, MI_MAX_ALIGN_SIZE * 2); + result = check_zero_init(p, calloc_size); + mi_free(p); + }; + + CHECK_BODY("zeroinit-rezalloc_aligned-small") { + size_t zalloc_size = MI_SMALL_SIZE_MAX / 2; + uint8_t* p = (uint8_t*)mi_zalloc_aligned(zalloc_size, MI_MAX_ALIGN_SIZE * 2); + result = check_zero_init(p, zalloc_size); + zalloc_size *= 3; + p = (uint8_t*)mi_rezalloc_aligned(p, zalloc_size, MI_MAX_ALIGN_SIZE * 2); + result &= check_zero_init(p, zalloc_size); + mi_free(p); + }; + CHECK_BODY("zeroinit-rezalloc_aligned-large") { + size_t zalloc_size = MI_SMALL_SIZE_MAX * 2; + uint8_t* p = (uint8_t*)mi_zalloc_aligned(zalloc_size, MI_MAX_ALIGN_SIZE * 2); + result = check_zero_init(p, zalloc_size); + zalloc_size *= 3; + p = (uint8_t*)mi_rezalloc_aligned(p, zalloc_size, MI_MAX_ALIGN_SIZE * 2); + result &= check_zero_init(p, 
zalloc_size); + mi_free(p); + }; + + CHECK_BODY("zeroinit-recalloc_aligned-small") { + size_t calloc_size = MI_SMALL_SIZE_MAX / 2; + uint8_t* p = (uint8_t*)mi_calloc_aligned(calloc_size, 1, MI_MAX_ALIGN_SIZE * 2); + result = check_zero_init(p, calloc_size); + calloc_size *= 3; + p = (uint8_t*)mi_recalloc_aligned(p, calloc_size, 1, MI_MAX_ALIGN_SIZE * 2); + result &= check_zero_init(p, calloc_size); + mi_free(p); + }; + CHECK_BODY("zeroinit-recalloc_aligned-large") { + size_t calloc_size = MI_SMALL_SIZE_MAX * 2; + uint8_t* p = (uint8_t*)mi_calloc_aligned(calloc_size, 1, MI_MAX_ALIGN_SIZE * 2); + result = check_zero_init(p, calloc_size); + calloc_size *= 3; + p = (uint8_t*)mi_recalloc_aligned(p, calloc_size, 1, MI_MAX_ALIGN_SIZE * 2); + result &= check_zero_init(p, calloc_size); + mi_free(p); + }; + +#if (MI_DEBUG >= 2) && !MI_TSAN + // --------------------------------------------------- + // Debug filling + // --------------------------------------------------- + CHECK_BODY("uninit-malloc-small") { + size_t malloc_size = MI_SMALL_SIZE_MAX / 2; + uint8_t* p = (uint8_t*)mi_malloc(malloc_size); + result = check_debug_fill_uninit(p, malloc_size); + mi_free(p); + }; + CHECK_BODY("uninit-malloc-large") { + size_t malloc_size = MI_SMALL_SIZE_MAX * 2; + uint8_t* p = (uint8_t*)mi_malloc(malloc_size); + result = check_debug_fill_uninit(p, malloc_size); + mi_free(p); + }; + + CHECK_BODY("uninit-malloc_small") { + size_t malloc_size = MI_SMALL_SIZE_MAX / 2; + uint8_t* p = (uint8_t*)mi_malloc_small(malloc_size); + result = check_debug_fill_uninit(p, malloc_size); + mi_free(p); + }; + + CHECK_BODY("uninit-realloc-small") { + size_t malloc_size = MI_SMALL_SIZE_MAX / 2; + uint8_t* p = (uint8_t*)mi_malloc(malloc_size); + result = check_debug_fill_uninit(p, malloc_size); + malloc_size *= 3; + p = (uint8_t*)mi_realloc(p, malloc_size); + result &= check_debug_fill_uninit(p, malloc_size); + mi_free(p); + }; + CHECK_BODY("uninit-realloc-large") { + size_t malloc_size = MI_SMALL_SIZE_MAX 
* 2; + uint8_t* p = (uint8_t*)mi_malloc(malloc_size); + result = check_debug_fill_uninit(p, malloc_size); + malloc_size *= 3; + p = (uint8_t*)mi_realloc(p, malloc_size); + result &= check_debug_fill_uninit(p, malloc_size); + mi_free(p); + }; + + CHECK_BODY("uninit-mallocn-small") { + size_t malloc_size = MI_SMALL_SIZE_MAX / 2; + uint8_t* p = (uint8_t*)mi_mallocn(malloc_size, 1); + result = check_debug_fill_uninit(p, malloc_size); + mi_free(p); + }; + CHECK_BODY("uninit-mallocn-large") { + size_t malloc_size = MI_SMALL_SIZE_MAX * 2; + uint8_t* p = (uint8_t*)mi_mallocn(malloc_size, 1); + result = check_debug_fill_uninit(p, malloc_size); + mi_free(p); + }; + + CHECK_BODY("uninit-reallocn-small") { + size_t malloc_size = MI_SMALL_SIZE_MAX / 2; + uint8_t* p = (uint8_t*)mi_mallocn(malloc_size, 1); + result = check_debug_fill_uninit(p, malloc_size); + malloc_size *= 3; + p = (uint8_t*)mi_reallocn(p, malloc_size, 1); + result &= check_debug_fill_uninit(p, malloc_size); + mi_free(p); + }; + CHECK_BODY("uninit-reallocn-large") { + size_t malloc_size = MI_SMALL_SIZE_MAX * 2; + uint8_t* p = (uint8_t*)mi_mallocn(malloc_size, 1); + result = check_debug_fill_uninit(p, malloc_size); + malloc_size *= 3; + p = (uint8_t*)mi_reallocn(p, malloc_size, 1); + result &= check_debug_fill_uninit(p, malloc_size); + mi_free(p); + }; + + CHECK_BODY("uninit-malloc_aligned-small") { + size_t malloc_size = MI_SMALL_SIZE_MAX / 2; + uint8_t* p = (uint8_t*)mi_malloc_aligned(malloc_size, MI_MAX_ALIGN_SIZE * 2); + result = check_debug_fill_uninit(p, malloc_size); + mi_free(p); + }; + CHECK_BODY("uninit-malloc_aligned-large") { + size_t malloc_size = MI_SMALL_SIZE_MAX * 2; + uint8_t* p = (uint8_t*)mi_malloc_aligned(malloc_size, MI_MAX_ALIGN_SIZE * 2); + result = check_debug_fill_uninit(p, malloc_size); + mi_free(p); + }; + + CHECK_BODY("uninit-realloc_aligned-small") { + size_t malloc_size = MI_SMALL_SIZE_MAX / 2; + uint8_t* p = (uint8_t*)mi_malloc_aligned(malloc_size, MI_MAX_ALIGN_SIZE * 2); + result = 
check_debug_fill_uninit(p, malloc_size); + malloc_size *= 3; + p = (uint8_t*)mi_realloc_aligned(p, malloc_size, MI_MAX_ALIGN_SIZE * 2); + result &= check_debug_fill_uninit(p, malloc_size); + mi_free(p); + }; + CHECK_BODY("uninit-realloc_aligned-large") { + size_t malloc_size = MI_SMALL_SIZE_MAX * 2; + uint8_t* p = (uint8_t*)mi_malloc_aligned(malloc_size, MI_MAX_ALIGN_SIZE * 2); + result = check_debug_fill_uninit(p, malloc_size); + malloc_size *= 3; + p = (uint8_t*)mi_realloc_aligned(p, malloc_size, MI_MAX_ALIGN_SIZE * 2); + result &= check_debug_fill_uninit(p, malloc_size); + mi_free(p); + }; + + #if !(MI_TRACK_VALGRIND || MI_TRACK_ASAN) + CHECK_BODY("fill-freed-small") { + size_t malloc_size = MI_SMALL_SIZE_MAX / 2; + uint8_t* p = (uint8_t*)mi_malloc(malloc_size); + mi_free(p); + // First sizeof(void*) bytes will contain housekeeping data, skip these + result = check_debug_fill_freed(p + sizeof(void*), malloc_size - sizeof(void*)); + }; + CHECK_BODY("fill-freed-large") { + size_t malloc_size = MI_SMALL_SIZE_MAX * 2; + uint8_t* p = (uint8_t*)mi_malloc(malloc_size); + mi_free(p); + // First sizeof(void*) bytes will contain housekeeping data, skip these + result = check_debug_fill_freed(p + sizeof(void*), malloc_size - sizeof(void*)); + }; + #endif +#endif + + // --------------------------------------------------- + // Done + // ---------------------------------------------------[] + return print_test_summary(); +} + +// --------------------------------------------------------------------------- +// Helper functions +// --------------------------------------------------------------------------- +bool check_zero_init(uint8_t* p, size_t size) { + if(!p) + return false; + bool result = true; + for (size_t i = 0; i < size; ++i) { + result &= p[i] == 0; + } + return result; +} + +#if MI_DEBUG >= 2 +bool check_debug_fill_uninit(uint8_t* p, size_t size) { +#if MI_TRACK_VALGRIND || MI_TRACK_ASAN + (void)p; (void)size; + return true; // when compiled with valgrind we don't 
init on purpose +#else + if(!p) + return false; + + bool result = true; + for (size_t i = 0; i < size; ++i) { + result &= p[i] == MI_DEBUG_UNINIT; + } + return result; +#endif +} + +bool check_debug_fill_freed(uint8_t* p, size_t size) { +#if MI_TRACK_VALGRIND + (void)p; (void)size; + return true; // when compiled with valgrind we don't fill on purpose +#else + if(!p) + return false; + + bool result = true; + for (size_t i = 0; i < size; ++i) { + result &= p[i] == MI_DEBUG_FREED; + } + return result; +#endif +} +#endif diff --git a/yass/third_party/mimalloc/test/test-api.c b/yass/third_party/mimalloc/test/test-api.c new file mode 100644 index 0000000000..76101980be --- /dev/null +++ b/yass/third_party/mimalloc/test/test-api.c @@ -0,0 +1,451 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2018-2020, Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. +-----------------------------------------------------------------------------*/ +#if defined(__GNUC__) && !defined(__clang__) +#pragma GCC diagnostic ignored "-Walloc-size-larger-than=" +#endif + +/* +Testing allocators is difficult as bugs may only surface after particular +allocation patterns. The main approach to testing _mimalloc_ is therefore +to have extensive internal invariant checking (see `page_is_valid` in `page.c` +for example), which is enabled in debug mode with `-DMI_DEBUG_FULL=ON`. +The main testing is then to run `mimalloc-bench` [1] using full invariant checking +to catch any potential problems over a wide range of intensive allocation bench +marks. + +However, this does not test well for the entire API surface. In this test file +we therefore test the API over various inputs. 
Please add more tests :-) + +[1] https://github.com/daanx/mimalloc-bench +*/ + +#include +#include +#include +#include + +#ifdef __cplusplus +#include +#endif + +#include "mimalloc.h" +// #include "mimalloc/internal.h" +#include "mimalloc/types.h" // for MI_DEBUG and MI_BLOCK_ALIGNMENT_MAX + +#include "testhelper.h" + +// --------------------------------------------------------------------------- +// Test functions +// --------------------------------------------------------------------------- +bool test_heap1(void); +bool test_heap2(void); +bool test_stl_allocator1(void); +bool test_stl_allocator2(void); + +bool test_stl_heap_allocator1(void); +bool test_stl_heap_allocator2(void); +bool test_stl_heap_allocator3(void); +bool test_stl_heap_allocator4(void); + +bool mem_is_zero(uint8_t* p, size_t size) { + if (p==NULL) return false; + for (size_t i = 0; i < size; ++i) { + if (p[i] != 0) return false; + } + return true; +} + +// --------------------------------------------------------------------------- +// Main testing +// --------------------------------------------------------------------------- +int main(void) { + mi_option_disable(mi_option_verbose); + + // --------------------------------------------------- + // Malloc + // --------------------------------------------------- + + CHECK_BODY("malloc-zero") { + void* p = mi_malloc(0); + result = (p != NULL); + mi_free(p); + }; + CHECK_BODY("malloc-nomem1") { + result = (mi_malloc((size_t)PTRDIFF_MAX + (size_t)1) == NULL); + }; + CHECK_BODY("malloc-null") { + mi_free(NULL); + }; + CHECK_BODY("calloc-overflow") { + // use (size_t)&mi_calloc to get some number without triggering compiler warnings + result = (mi_calloc((size_t)&mi_calloc,SIZE_MAX/1000) == NULL); + }; + CHECK_BODY("calloc0") { + void* p = mi_calloc(0,1000); + result = (mi_usable_size(p) <= 16); + mi_free(p); + }; + CHECK_BODY("malloc-large") { // see PR #544. 
+ void* p = mi_malloc(67108872); + mi_free(p); + }; + + // --------------------------------------------------- + // Extended + // --------------------------------------------------- + CHECK_BODY("posix_memalign1") { + void* p = &p; + int err = mi_posix_memalign(&p, sizeof(void*), 32); + result = ((err==0 && (uintptr_t)p % sizeof(void*) == 0) || p==&p); + mi_free(p); + }; + CHECK_BODY("posix_memalign_no_align") { + void* p = &p; + int err = mi_posix_memalign(&p, 3, 32); + result = (err==EINVAL && p==&p); + }; + CHECK_BODY("posix_memalign_zero") { + void* p = &p; + int err = mi_posix_memalign(&p, sizeof(void*), 0); + mi_free(p); + result = (err==0); + }; + CHECK_BODY("posix_memalign_nopow2") { + void* p = &p; + int err = mi_posix_memalign(&p, 3*sizeof(void*), 32); + result = (err==EINVAL && p==&p); + }; + CHECK_BODY("posix_memalign_nomem") { + void* p = &p; + int err = mi_posix_memalign(&p, sizeof(void*), SIZE_MAX); + result = (err==ENOMEM && p==&p); + }; + + // --------------------------------------------------- + // Aligned API + // --------------------------------------------------- + CHECK_BODY("malloc-aligned1") { + void* p = mi_malloc_aligned(32,32); result = (p != NULL && (uintptr_t)(p) % 32 == 0); mi_free(p); + }; + CHECK_BODY("malloc-aligned2") { + void* p = mi_malloc_aligned(48,32); result = (p != NULL && (uintptr_t)(p) % 32 == 0); mi_free(p); + }; + CHECK_BODY("malloc-aligned3") { + void* p1 = mi_malloc_aligned(48,32); bool result1 = (p1 != NULL && (uintptr_t)(p1) % 32 == 0); + void* p2 = mi_malloc_aligned(48,32); bool result2 = (p2 != NULL && (uintptr_t)(p2) % 32 == 0); + mi_free(p2); + mi_free(p1); + result = (result1&&result2); + }; + CHECK_BODY("malloc-aligned4") { + void* p; + bool ok = true; + for (int i = 0; i < 8 && ok; i++) { + p = mi_malloc_aligned(8, 16); + ok = (p != NULL && (uintptr_t)(p) % 16 == 0); mi_free(p); + } + result = ok; + }; + CHECK_BODY("malloc-aligned5") { + void* p = mi_malloc_aligned(4097,4096); + size_t usable = 
mi_usable_size(p); + result = (usable >= 4097 && usable < 16000); + printf("malloc_aligned5: usable size: %zi\n", usable); + mi_free(p); + }; + CHECK_BODY("malloc-aligned6") { + bool ok = true; + for (size_t align = 1; align <= MI_BLOCK_ALIGNMENT_MAX && ok; align *= 2) { + void* ps[8]; + for (int i = 0; i < 8 && ok; i++) { + ps[i] = mi_malloc_aligned(align*13 // size + , align); + if (ps[i] == NULL || (uintptr_t)(ps[i]) % align != 0) { + ok = false; + } + } + for (int i = 0; i < 8 && ok; i++) { + mi_free(ps[i]); + } + } + result = ok; + }; + CHECK_BODY("malloc-aligned7") { + void* p = mi_malloc_aligned(1024,MI_BLOCK_ALIGNMENT_MAX); + mi_free(p); + result = ((uintptr_t)p % MI_BLOCK_ALIGNMENT_MAX) == 0; + }; + CHECK_BODY("malloc-aligned8") { + bool ok = true; + for (int i = 0; i < 5 && ok; i++) { + int n = (1 << i); + void* p = mi_malloc_aligned(1024, n * MI_BLOCK_ALIGNMENT_MAX); + ok = ((uintptr_t)p % (n*MI_BLOCK_ALIGNMENT_MAX)) == 0; + mi_free(p); + } + result = ok; + }; + CHECK_BODY("malloc-aligned9") { + bool ok = true; + void* p[8]; + size_t sizes[8] = { 8, 512, 1024 * 1024, MI_BLOCK_ALIGNMENT_MAX, MI_BLOCK_ALIGNMENT_MAX + 1, 2 * MI_BLOCK_ALIGNMENT_MAX, 8 * MI_BLOCK_ALIGNMENT_MAX, 0 }; + for (int i = 0; i < 28 && ok; i++) { + int align = (1 << i); + for (int j = 0; j < 8 && ok; j++) { + p[j] = mi_zalloc_aligned(sizes[j], align); + ok = ((uintptr_t)p[j] % align) == 0; + } + for (int j = 0; j < 8; j++) { + mi_free(p[j]); + } + } + result = ok; + }; + CHECK_BODY("malloc-aligned10") { + bool ok = true; + void* p[10+1]; + int align; + int j; + for(j = 0, align = 1; j <= 10 && ok; align *= 2, j++ ) { + p[j] = mi_malloc_aligned(43 + align, align); + ok = ((uintptr_t)p[j] % align) == 0; + } + for ( ; j > 0; j--) { + mi_free(p[j-1]); + } + result = ok; + } + CHECK_BODY("malloc_aligned11") { + mi_heap_t* heap = mi_heap_new(); + void* p = mi_heap_malloc_aligned(heap, 33554426, 8); + result = mi_heap_contains_block(heap, p); + mi_heap_destroy(heap); + } + 
CHECK_BODY("mimalloc-aligned12") { + void* p = mi_malloc_aligned(0x100, 0x100); + result = (((uintptr_t)p % 0x100) == 0); // #602 + mi_free(p); + } + CHECK_BODY("mimalloc-aligned13") { + bool ok = true; + for( size_t size = 1; size <= (MI_SMALL_SIZE_MAX * 2) && ok; size++ ) { + for(size_t align = 1; align <= size && ok; align *= 2 ) { + void* p[10]; + for(int i = 0; i < 10 && ok; i++) { + p[i] = mi_malloc_aligned(size,align);; + ok = (p[i] != NULL && ((uintptr_t)(p[i]) % align) == 0); + } + for(int i = 0; i < 10 && ok; i++) { + mi_free(p[i]); + } + /* + if (ok && align <= size && ((size + MI_PADDING_SIZE) & (align-1)) == 0) { + size_t bsize = mi_good_size(size); + ok = (align <= bsize && (bsize & (align-1)) == 0); + } + */ + } + } + result = ok; + } + CHECK_BODY("malloc-aligned-at1") { + void* p = mi_malloc_aligned_at(48,32,0); result = (p != NULL && ((uintptr_t)(p) + 0) % 32 == 0); mi_free(p); + }; + CHECK_BODY("malloc-aligned-at2") { + void* p = mi_malloc_aligned_at(50,32,8); result = (p != NULL && ((uintptr_t)(p) + 8) % 32 == 0); mi_free(p); + }; + CHECK_BODY("memalign1") { + void* p; + bool ok = true; + for (int i = 0; i < 8 && ok; i++) { + p = mi_memalign(16,8); + ok = (p != NULL && (uintptr_t)(p) % 16 == 0); mi_free(p); + } + result = ok; + }; + CHECK_BODY("zalloc-aligned-small1") { + size_t zalloc_size = MI_SMALL_SIZE_MAX / 2; + uint8_t* p = (uint8_t*)mi_zalloc_aligned(zalloc_size, MI_MAX_ALIGN_SIZE * 2); + result = mem_is_zero(p, zalloc_size); + mi_free(p); + }; + CHECK_BODY("rezalloc_aligned-small1") { + size_t zalloc_size = MI_SMALL_SIZE_MAX / 2; + uint8_t* p = (uint8_t*)mi_zalloc_aligned(zalloc_size, MI_MAX_ALIGN_SIZE * 2); + result = mem_is_zero(p, zalloc_size); + zalloc_size *= 3; + p = (uint8_t*)mi_rezalloc_aligned(p, zalloc_size, MI_MAX_ALIGN_SIZE * 2); + result = result && mem_is_zero(p, zalloc_size); + mi_free(p); + }; + + // --------------------------------------------------- + // Reallocation + // 
--------------------------------------------------- + CHECK_BODY("realloc-null") { + void* p = mi_realloc(NULL,4); + result = (p != NULL); + mi_free(p); + }; + + CHECK_BODY("realloc-null-sizezero") { + void* p = mi_realloc(NULL,0); // "If ptr is NULL, the behavior is the same as calling malloc(new_size)." + result = (p != NULL); + mi_free(p); + }; + + CHECK_BODY("realloc-sizezero") { + void* p = mi_malloc(4); + void* q = mi_realloc(p, 0); + result = (q != NULL); + mi_free(q); + }; + + CHECK_BODY("reallocarray-null-sizezero") { + void* p = mi_reallocarray(NULL,0,16); // issue #574 + result = (p != NULL && errno == 0); + mi_free(p); + }; + + // --------------------------------------------------- + // Heaps + // --------------------------------------------------- + CHECK("heap_destroy", test_heap1()); + CHECK("heap_delete", test_heap2()); + + //mi_stats_print(NULL); + + // --------------------------------------------------- + // various + // --------------------------------------------------- + #if !defined(MI_TRACK_ASAN) // realpath may leak with ASAN enabled (as the ASAN allocator intercepts it) + CHECK_BODY("realpath") { + char* s = mi_realpath( ".", NULL ); + // printf("realpath: %s\n",s); + mi_free(s); + }; + #endif + + CHECK("stl_allocator1", test_stl_allocator1()); + CHECK("stl_allocator2", test_stl_allocator2()); + + CHECK("stl_heap_allocator1", test_stl_heap_allocator1()); + CHECK("stl_heap_allocator2", test_stl_heap_allocator2()); + CHECK("stl_heap_allocator3", test_stl_heap_allocator3()); + CHECK("stl_heap_allocator4", test_stl_heap_allocator4()); + + // --------------------------------------------------- + // Done + // ---------------------------------------------------[] + return print_test_summary(); +} + +// --------------------------------------------------- +// Larger test functions +// --------------------------------------------------- + +bool test_heap1(void) { + mi_heap_t* heap = mi_heap_new(); + int* p1 = mi_heap_malloc_tp(heap,int); + int* p2 = 
mi_heap_malloc_tp(heap,int); + *p1 = *p2 = 43; + mi_heap_destroy(heap); + return true; +} + +bool test_heap2(void) { + mi_heap_t* heap = mi_heap_new(); + int* p1 = mi_heap_malloc_tp(heap,int); + int* p2 = mi_heap_malloc_tp(heap,int); + mi_heap_delete(heap); + *p1 = 42; + mi_free(p1); + mi_free(p2); + return true; +} + +bool test_stl_allocator1(void) { +#ifdef __cplusplus + std::vector > vec; + vec.push_back(1); + vec.pop_back(); + return vec.size() == 0; +#else + return true; +#endif +} + +struct some_struct { int i; int j; double z; }; + +bool test_stl_allocator2(void) { +#ifdef __cplusplus + std::vector > vec; + vec.push_back(some_struct()); + vec.pop_back(); + return vec.size() == 0; +#else + return true; +#endif +} + +bool test_stl_heap_allocator1(void) { +#ifdef __cplusplus + std::vector > vec; + vec.push_back(some_struct()); + vec.pop_back(); + return vec.size() == 0; +#else + return true; +#endif +} + +bool test_stl_heap_allocator2(void) { +#ifdef __cplusplus + std::vector > vec; + vec.push_back(some_struct()); + vec.pop_back(); + return vec.size() == 0; +#else + return true; +#endif +} + +bool test_stl_heap_allocator3(void) { +#ifdef __cplusplus + mi_heap_t* heap = mi_heap_new(); + bool good = false; + { + mi_heap_stl_allocator myAlloc(heap); + std::vector > vec(myAlloc); + vec.push_back(some_struct()); + vec.pop_back(); + good = vec.size() == 0; + } + mi_heap_delete(heap); + return good; +#else + return true; +#endif +} + +bool test_stl_heap_allocator4(void) { +#ifdef __cplusplus + mi_heap_t* heap = mi_heap_new(); + bool good = false; + { + mi_heap_destroy_stl_allocator myAlloc(heap); + std::vector > vec(myAlloc); + vec.push_back(some_struct()); + vec.pop_back(); + good = vec.size() == 0; + } + mi_heap_destroy(heap); + return good; +#else + return true; +#endif +} diff --git a/yass/third_party/mimalloc/test/test-stress.c b/yass/third_party/mimalloc/test/test-stress.c new file mode 100644 index 0000000000..15d0e25bf0 --- /dev/null +++ 
b/yass/third_party/mimalloc/test/test-stress.c @@ -0,0 +1,364 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2018-2020 Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. +-----------------------------------------------------------------------------*/ + +/* This is a stress test for the allocator, using multiple threads and + transferring objects between threads. It tries to reflect real-world workloads: + - allocation size is distributed linearly in powers of two + - with some fraction extra large (and some very large) + - the allocations are initialized and read again at free + - pointers transfer between threads + - threads are terminated and recreated with some objects surviving in between + - uses deterministic "randomness", but execution can still depend on + (random) thread scheduling. Do not use this test as a benchmark! +*/ + +#include +#include +#include +#include +#include +#include + +// > mimalloc-test-stress [THREADS] [SCALE] [ITER] +// +// argument defaults +static int THREADS = 32; // more repeatable if THREADS <= #processors +static int SCALE = 25; // scaling factor + +#if defined(MI_TSAN) +static int ITER = 10; // N full iterations destructing and re-creating all threads (on tsan reduce for azure pipeline limits) +#else +static int ITER = 50; // N full iterations destructing and re-creating all threads +#endif + +// static int THREADS = 8; // more repeatable if THREADS <= #processors +// static int SCALE = 100; // scaling factor + +#define STRESS // undefine for leak test + +static bool allow_large_objects = true; // allow very large objects? (set to `true` if SCALE>100) +static size_t use_one_size = 0; // use single object size of `N * sizeof(uintptr_t)`? 
+ +static bool main_participates = false; // main thread participates as a worker too + +// #define USE_STD_MALLOC +#ifdef USE_STD_MALLOC +#define custom_calloc(n,s) calloc(n,s) +#define custom_realloc(p,s) realloc(p,s) +#define custom_free(p) free(p) +#else +#include +#define custom_calloc(n,s) mi_calloc(n,s) +#define custom_realloc(p,s) mi_realloc(p,s) +#define custom_free(p) mi_free(p) +#endif + +// transfer pointer between threads +#define TRANSFERS (1000) +static volatile void* transfer[TRANSFERS]; + + +#if (UINTPTR_MAX != UINT32_MAX) +const uintptr_t cookie = 0xbf58476d1ce4e5b9UL; +#else +const uintptr_t cookie = 0x1ce4e5b9UL; +#endif + +static void* atomic_exchange_ptr(volatile void** p, void* newval); + +typedef uintptr_t* random_t; + +static uintptr_t pick(random_t r) { + uintptr_t x = *r; +#if (UINTPTR_MAX > UINT32_MAX) + // by Sebastiano Vigna, see: + x ^= x >> 30; + x *= 0xbf58476d1ce4e5b9UL; + x ^= x >> 27; + x *= 0x94d049bb133111ebUL; + x ^= x >> 31; +#else + // by Chris Wellons, see: + x ^= x >> 16; + x *= 0x7feb352dUL; + x ^= x >> 15; + x *= 0x846ca68bUL; + x ^= x >> 16; +#endif + *r = x; + return x; +} + +static bool chance(size_t perc, random_t r) { + return (pick(r) % 100 <= perc); +} + +static void* alloc_items(size_t items, random_t r) { + if (chance(1, r)) { + if (chance(1, r) && allow_large_objects) items *= 10000; // 0.01% giant + else if (chance(10, r) && allow_large_objects) items *= 1000; // 0.1% huge + else items *= 100; // 1% large objects; + } + if (items == 40) items++; // pthreads uses that size for stack increases + if (use_one_size > 0) items = (use_one_size / sizeof(uintptr_t)); + if (items==0) items = 1; + uintptr_t* p = (uintptr_t*)custom_calloc(items,sizeof(uintptr_t)); + if (p != NULL) { + for (uintptr_t i = 0; i < items; i++) { + assert(p[i] == 0); + p[i] = (items - i) ^ cookie; + } + } + return p; +} + +static void free_items(void* p) { + if (p != NULL) { + uintptr_t* q = (uintptr_t*)p; + uintptr_t items = (q[0] ^ cookie); + 
for (uintptr_t i = 0; i < items; i++) { + if ((q[i] ^ cookie) != items - i) { + fprintf(stderr, "memory corruption at block %p at %zu\n", p, i); + abort(); + } + } + } + custom_free(p); +} + + +static void stress(intptr_t tid) { + //bench_start_thread(); + uintptr_t r = ((tid + 1) * 43); // rand(); + const size_t max_item_shift = 5; // 128 + const size_t max_item_retained_shift = max_item_shift + 2; + size_t allocs = 100 * ((size_t)SCALE) * (tid % 8 + 1); // some threads do more + size_t retain = allocs / 2; + void** data = NULL; + size_t data_size = 0; + size_t data_top = 0; + void** retained = (void**)custom_calloc(retain,sizeof(void*)); + size_t retain_top = 0; + + while (allocs > 0 || retain > 0) { + if (retain == 0 || (chance(50, &r) && allocs > 0)) { + // 50%+ alloc + allocs--; + if (data_top >= data_size) { + data_size += 100000; + data = (void**)custom_realloc(data, data_size * sizeof(void*)); + } + data[data_top++] = alloc_items(1ULL << (pick(&r) % max_item_shift), &r); + } + else { + // 25% retain + retained[retain_top++] = alloc_items( 1ULL << (pick(&r) % max_item_retained_shift), &r); + retain--; + } + if (chance(66, &r) && data_top > 0) { + // 66% free previous alloc + size_t idx = pick(&r) % data_top; + free_items(data[idx]); + data[idx] = NULL; + } + if (chance(25, &r) && data_top > 0) { + // 25% exchange a local pointer with the (shared) transfer buffer. 
+ size_t data_idx = pick(&r) % data_top; + size_t transfer_idx = pick(&r) % TRANSFERS; + void* p = data[data_idx]; + void* q = atomic_exchange_ptr(&transfer[transfer_idx], p); + data[data_idx] = q; + } + } + // free everything that is left + for (size_t i = 0; i < retain_top; i++) { + free_items(retained[i]); + } + for (size_t i = 0; i < data_top; i++) { + free_items(data[i]); + } + custom_free(retained); + custom_free(data); + //bench_end_thread(); +} + +static void run_os_threads(size_t nthreads, void (*entry)(intptr_t tid)); + +static void test_stress(void) { + uintptr_t r = rand(); + for (int n = 0; n < ITER; n++) { + run_os_threads(THREADS, &stress); + for (int i = 0; i < TRANSFERS; i++) { + if (chance(50, &r) || n + 1 == ITER) { // free all on last run, otherwise free half of the transfers + void* p = atomic_exchange_ptr(&transfer[i], NULL); + free_items(p); + } + } + #ifndef NDEBUG + //mi_collect(false); + //mi_debug_show_arenas(); + #endif + #if !defined(NDEBUG) || defined(MI_TSAN) + if ((n + 1) % 10 == 0) { printf("- iterations left: %3d\n", ITER - (n + 1)); } + #endif + } +} + +#ifndef STRESS +static void leak(intptr_t tid) { + uintptr_t r = rand(); + void* p = alloc_items(1 /*pick(&r)%128*/, &r); + if (chance(50, &r)) { + intptr_t i = (pick(&r) % TRANSFERS); + void* q = atomic_exchange_ptr(&transfer[i], p); + free_items(q); + } +} + +static void test_leak(void) { + for (int n = 0; n < ITER; n++) { + run_os_threads(THREADS, &leak); + mi_collect(false); +#ifndef NDEBUG + if ((n + 1) % 10 == 0) { printf("- iterations left: %3d\n", ITER - (n + 1)); } +#endif + } +} +#endif + +int main(int argc, char** argv) { + #ifndef USE_STD_MALLOC + mi_stats_reset(); + #endif + + // > mimalloc-test-stress [THREADS] [SCALE] [ITER] + if (argc >= 2) { + char* end; + long n = strtol(argv[1], &end, 10); + if (n > 0) THREADS = n; + } + if (argc >= 3) { + char* end; + long n = (strtol(argv[2], &end, 10)); + if (n > 0) SCALE = n; + } + if (argc >= 4) { + char* end; + long n = 
(strtol(argv[3], &end, 10)); + if (n > 0) ITER = n; + } + if (SCALE > 100) { + allow_large_objects = true; + } + printf("Using %d threads with a %d%% load-per-thread and %d iterations %s\n", THREADS, SCALE, ITER, (allow_large_objects ? "(allow large objects)" : "")); + //mi_reserve_os_memory(1024*1024*1024ULL, false, true); + //int res = mi_reserve_huge_os_pages(4,1); + //printf("(reserve huge: %i\n)", res); + + //bench_start_program(); + + // Run ITER full iterations where half the objects in the transfer buffer survive to the next round. + srand(0x7feb352d); + + //mi_reserve_os_memory(512ULL << 20, true, true); + +#if !defined(NDEBUG) && !defined(USE_STD_MALLOC) + mi_stats_reset(); +#endif + +#ifdef STRESS + test_stress(); +#else + test_leak(); +#endif + +#ifndef USE_STD_MALLOC + #ifndef NDEBUG + // mi_collect(true); + mi_debug_show_arenas(true,true,true); + #endif + mi_stats_print(NULL); +#endif + //bench_end_program(); + return 0; +} + + +static void (*thread_entry_fun)(intptr_t) = &stress; + +#ifdef _WIN32 + +#include + +static DWORD WINAPI thread_entry(LPVOID param) { + thread_entry_fun((intptr_t)param); + return 0; +} + +static void run_os_threads(size_t nthreads, void (*fun)(intptr_t)) { + thread_entry_fun = fun; + DWORD* tids = (DWORD*)custom_calloc(nthreads,sizeof(DWORD)); + HANDLE* thandles = (HANDLE*)custom_calloc(nthreads,sizeof(HANDLE)); + const size_t start = (main_participates ? 
1 : 0); + for (size_t i = start; i < nthreads; i++) { + thandles[i] = CreateThread(0, 8*1024, &thread_entry, (void*)(i), 0, &tids[i]); + } + if (main_participates) fun(0); // run the main thread as well + for (size_t i = start; i < nthreads; i++) { + WaitForSingleObject(thandles[i], INFINITE); + } + for (size_t i = start; i < nthreads; i++) { + CloseHandle(thandles[i]); + } + custom_free(tids); + custom_free(thandles); +} + +static void* atomic_exchange_ptr(volatile void** p, void* newval) { +#if (INTPTR_MAX == INT32_MAX) + return (void*)InterlockedExchange((volatile LONG*)p, (LONG)newval); +#else + return (void*)InterlockedExchange64((volatile LONG64*)p, (LONG64)newval); +#endif +} +#else + +#include + +static void* thread_entry(void* param) { + thread_entry_fun((uintptr_t)param); + return NULL; +} + +static void run_os_threads(size_t nthreads, void (*fun)(intptr_t)) { + thread_entry_fun = fun; + pthread_t* threads = (pthread_t*)custom_calloc(nthreads,sizeof(pthread_t)); + memset(threads, 0, sizeof(pthread_t) * nthreads); + const size_t start = (main_participates ? 
1 : 0); + //pthread_setconcurrency(nthreads); + for (size_t i = start; i < nthreads; i++) { + pthread_create(&threads[i], NULL, &thread_entry, (void*)i); + } + if (main_participates) fun(0); // run the main thread as well + for (size_t i = start; i < nthreads; i++) { + pthread_join(threads[i], NULL); + } + custom_free(threads); +} + +#ifdef __cplusplus +#include +static void* atomic_exchange_ptr(volatile void** p, void* newval) { + return std::atomic_exchange((volatile std::atomic*)p, newval); +} +#else +#include +static void* atomic_exchange_ptr(volatile void** p, void* newval) { + return atomic_exchange((volatile _Atomic(void*)*)p, newval); +} +#endif + +#endif diff --git a/yass/third_party/mimalloc/test/test-wrong.c b/yass/third_party/mimalloc/test/test-wrong.c new file mode 100644 index 0000000000..56a2339a75 --- /dev/null +++ b/yass/third_party/mimalloc/test/test-wrong.c @@ -0,0 +1,92 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2018-2020, Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. +-----------------------------------------------------------------------------*/ + +/* test file for valgrind/asan support. + + VALGRIND: + ---------- + Compile in an "out/debug" folder: + + > cd out/debug + > cmake ../.. -DMI_TRACK_VALGRIND=1 + > make -j8 + + and then compile this file as: + + > gcc -g -o test-wrong -I../../include ../../test/test-wrong.c libmimalloc-valgrind-debug.a -lpthread + + and test as: + + > valgrind ./test-wrong + + + ASAN + ---------- + Compile in an "out/debug" folder: + + > cd out/debug + > cmake ../.. 
-DMI_TRACK_ASAN=1 + > make -j8 + + and then compile this file as: + + > clang -g -o test-wrong -I../../include ../../test/test-wrong.c libmimalloc-asan-debug.a -lpthread -fsanitize=address -fsanitize-recover=address + + and test as: + + > ASAN_OPTIONS=verbosity=1:halt_on_error=0 ./test-wrong + + +*/ +#include +#include +#include "mimalloc.h" + +#ifdef USE_STD_MALLOC +# define mi(x) x +#else +# define mi(x) mi_##x +#endif + +int main(int argc, char** argv) { + int* p = (int*)mi(malloc)(3*sizeof(int)); + + int* r = (int*)mi_malloc_aligned(8,16); + mi_free(r); + + // illegal byte wise read + char* c = (char*)mi(malloc)(3); + printf("invalid byte: over: %d, under: %d\n", c[4], c[-1]); + mi(free)(c); + + // undefined access + int* q = (int*)mi(malloc)(sizeof(int)); + printf("undefined: %d\n", *q); + + // illegal int read + printf("invalid: over: %d, under: %d\n", q[1], q[-1]); + + *q = 42; + + // buffer overflow + q[1] = 43; + + // buffer underflow + q[-1] = 44; + + mi(free)(q); + + // double free + mi(free)(q); + + // use after free + printf("use-after-free: %d\n", *q); + + // leak p + // mi_free(p) + return 0; +} \ No newline at end of file diff --git a/yass/third_party/mimalloc/test/testhelper.h b/yass/third_party/mimalloc/test/testhelper.h new file mode 100644 index 0000000000..a972758411 --- /dev/null +++ b/yass/third_party/mimalloc/test/testhelper.h @@ -0,0 +1,49 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2018-2020, Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. 
+-----------------------------------------------------------------------------*/ +#ifndef TESTHELPER_H_ +#define TESTHELPER_H_ + +#include +#include +#include + +// --------------------------------------------------------------------------- +// Test macros: CHECK(name,predicate) and CHECK_BODY(name,body) +// --------------------------------------------------------------------------- +static int ok = 0; +static int failed = 0; + +static bool check_result(bool result, const char* testname, const char* fname, long lineno) { + if (!(result)) { + failed++; + fprintf(stderr,"\n FAILED: %s: %s:%ld\n", testname, fname, lineno); + /* exit(1); */ + } + else { + ok++; + fprintf(stderr, "ok.\n"); + } + return true; +} + +#define CHECK_BODY(name) \ + fprintf(stderr,"test: %s... ", name ); \ + errno = 0; \ + for(bool done = false, result = true; !done; done = check_result(result,name,__FILE__,__LINE__)) + +#define CHECK(name,expr) CHECK_BODY(name){ result = (expr); } + +// Print summary of test. Return value can be directly use as a return value for main(). 
+static inline int print_test_summary(void) +{ + fprintf(stderr,"\n\n---------------------------------------------\n" + "succeeded: %i\n" + "failed : %i\n\n", ok, failed); + return failed; +} + +#endif // TESTHELPER_H_ diff --git a/yass/tools/build.go b/yass/tools/build.go index cb3b77abd7..a9c514a28c 100644 --- a/yass/tools/build.go +++ b/yass/tools/build.go @@ -56,7 +56,6 @@ var clangPath string var useLibCxxFlag bool var enableLtoFlag bool var useMoldFlag bool -var useTcmallocFlag bool var clangTidyModeFlag bool var clangTidyExecutablePathFlag string @@ -170,7 +169,6 @@ func InitFlag() { flag.BoolVar(&useLibCxxFlag, "use-libcxx", true, "Use Custom libc++") flag.BoolVar(&enableLtoFlag, "enable-lto", true, "Enable lto") flag.BoolVar(&useMoldFlag, "use-mold", false, "Use Mold Linker") - flag.BoolVar(&useTcmallocFlag, "use-tcmalloc", true, "Use tcmalloc if possible") flag.BoolVar(&clangTidyModeFlag, "clang-tidy-mode", getEnvBool("ENABLE_CLANG_TIDY", false), "Enable Clang Tidy Build") flag.StringVar(&clangTidyExecutablePathFlag, "clang-tidy-executable-path", getEnv("CLANG_TIDY_EXECUTABLE", ""), "Path to clang-tidy, only used by Clang Tidy Build") @@ -878,11 +876,6 @@ func buildStageGenerateBuildScript() { if msvcTargetArchFlag == "arm" || msvcTargetArchFlag == "arm64" { cmakeArgs = append(cmakeArgs, fmt.Sprintf("-DCMAKE_ASM_FLAGS=--target=%s", targetTriple)) } - if useTcmallocFlag { - cmakeArgs = append(cmakeArgs, "-DUSE_TCMALLOC=on") - } else { - cmakeArgs = append(cmakeArgs, "-DUSE_TCMALLOC=off") - } } if systemNameFlag == "darwin" { @@ -952,11 +945,6 @@ func buildStageGenerateBuildScript() { if mingwDir != clangPath { getAndFixMinGWLibunwind(mingwDir) } - if useTcmallocFlag { - cmakeArgs = append(cmakeArgs, "-DUSE_TCMALLOC=on") - } else { - cmakeArgs = append(cmakeArgs, "-DUSE_TCMALLOC=off") - } } if systemNameFlag == "ios" { @@ -1061,14 +1049,16 @@ func buildStageGenerateBuildScript() { cmakeArgs = append(cmakeArgs, fmt.Sprintf("-DARM_CPU=%s", armCpuFlag)) } if 
subsystem == "" { - if useTcmallocFlag { - cmakeArgs = append(cmakeArgs, "-DUSE_TCMALLOC=on") - } else { - cmakeArgs = append(cmakeArgs, "-DUSE_TCMALLOC=off") - } - // for compatibility, we build only gtk3 package for now - cmakeArgs = append(cmakeArgs, fmt.Sprintf("-DUSE_GTK4=off")) + cmakeArgs = append(cmakeArgs, "-DUSE_TCMALLOC=on") + } else if subsystem == "musl" { + cmakeArgs = append(cmakeArgs, "-DUSE_TCMALLOC=off") + cmakeArgs = append(cmakeArgs, "-DUSE_MIMALLOC=on") + } else { + cmakeArgs = append(cmakeArgs, "-DUSE_TCMALLOC=off") + cmakeArgs = append(cmakeArgs, "-DUSE_MIMALLOC=off") } + // for compatibility, we build only gtk3 package for now + cmakeArgs = append(cmakeArgs, fmt.Sprintf("-DUSE_GTK4=off")) cmakeArgs = append(cmakeArgs, fmt.Sprintf("-DENABLE_FORTIFY=on")) } @@ -1104,11 +1094,9 @@ func buildStageGenerateBuildScript() { cmakeArgs = append(cmakeArgs, fmt.Sprintf("-DGCC_SYSTEM_PROCESSOR=%s", llvmArch)) cmakeArgs = append(cmakeArgs, fmt.Sprintf("-DGCC_TARGET=%s", llvmTarget)) cmakeArgs = append(cmakeArgs, fmt.Sprintf("-DENABLE_FORTIFY=on")) - if useTcmallocFlag { - cmakeArgs = append(cmakeArgs, "-DUSE_TCMALLOC=on") - } else { - cmakeArgs = append(cmakeArgs, "-DUSE_TCMALLOC=off") - } + // FIXME not enabled due to linkage issue + cmakeArgs = append(cmakeArgs, "-DUSE_TCMALLOC=off") + cmakeArgs = append(cmakeArgs, "-DUSE_MIMALLOC=off") // for compatibility, we build only gtk3 package for now cmakeArgs = append(cmakeArgs, fmt.Sprintf("-DUSE_GTK4=off")) } diff --git a/youtube-dl/test/test_InfoExtractor.py b/youtube-dl/test/test_InfoExtractor.py index d55d6ad542..09100a1d66 100644 --- a/youtube-dl/test/test_InfoExtractor.py +++ b/youtube-dl/test/test_InfoExtractor.py @@ -153,6 +153,9 @@ class TestInfoExtractor(unittest.TestCase): ''' search = self.ie._search_nextjs_data(html, 'testID') self.assertEqual(search['props']['pageProps']['video']['id'], 'testid') + search = self.ie._search_nextjs_data( + 'no next.js data here, move along', 'testID', 
default={'status': 0}) + self.assertEqual(search['status'], 0) def test_search_nuxt_data(self): html = ''' diff --git a/youtube-dl/test/test_traversal.py b/youtube-dl/test/test_traversal.py new file mode 100644 index 0000000000..00a428edb7 --- /dev/null +++ b/youtube-dl/test/test_traversal.py @@ -0,0 +1,509 @@ +#!/usr/bin/env python +# coding: utf-8 +from __future__ import unicode_literals + +# Allow direct execution +import os +import sys +import unittest +sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + + +import re + +from youtube_dl.traversal import ( + dict_get, + get_first, + T, + traverse_obj, +) +from youtube_dl.compat import ( + compat_etree_fromstring, + compat_http_cookies, + compat_str, +) +from youtube_dl.utils import ( + int_or_none, + str_or_none, +) + +_TEST_DATA = { + 100: 100, + 1.2: 1.2, + 'str': 'str', + 'None': None, + '...': Ellipsis, + 'urls': [ + {'index': 0, 'url': 'https://www.example.com/0'}, + {'index': 1, 'url': 'https://www.example.com/1'}, + ], + 'data': ( + {'index': 2}, + {'index': 3}, + ), + 'dict': {}, +} + + +if sys.version_info < (3, 0): + class _TestCase(unittest.TestCase): + + def assertCountEqual(self, *args, **kwargs): + return self.assertItemsEqual(*args, **kwargs) +else: + _TestCase = unittest.TestCase + + +class TestTraversal(_TestCase): + def assertMaybeCountEqual(self, *args, **kwargs): + if sys.version_info < (3, 7): + # random dict order + return self.assertCountEqual(*args, **kwargs) + else: + return self.assertEqual(*args, **kwargs) + + def test_traverse_obj(self): + # instant compat + str = compat_str + + # define a pukka Iterable + def iter_range(stop): + for from_ in range(stop): + yield from_ + + # Test base functionality + self.assertEqual(traverse_obj(_TEST_DATA, ('str',)), 'str', + msg='allow tuple path') + self.assertEqual(traverse_obj(_TEST_DATA, ['str']), 'str', + msg='allow list path') + self.assertEqual(traverse_obj(_TEST_DATA, (value for value in ("str",))), 'str', + 
msg='allow iterable path') + self.assertEqual(traverse_obj(_TEST_DATA, 'str'), 'str', + msg='single items should be treated as a path') + self.assertEqual(traverse_obj(_TEST_DATA, None), _TEST_DATA) + self.assertEqual(traverse_obj(_TEST_DATA, 100), 100) + self.assertEqual(traverse_obj(_TEST_DATA, 1.2), 1.2) + + # Test Ellipsis behavior + self.assertCountEqual(traverse_obj(_TEST_DATA, Ellipsis), + (item for item in _TEST_DATA.values() if item not in (None, {})), + msg='`...` should give all non-discarded values') + self.assertCountEqual(traverse_obj(_TEST_DATA, ('urls', 0, Ellipsis)), _TEST_DATA['urls'][0].values(), + msg='`...` selection for dicts should select all values') + self.assertEqual(traverse_obj(_TEST_DATA, (Ellipsis, Ellipsis, 'url')), + ['https://www.example.com/0', 'https://www.example.com/1'], + msg='nested `...` queries should work') + self.assertCountEqual(traverse_obj(_TEST_DATA, (Ellipsis, Ellipsis, 'index')), iter_range(4), + msg='`...` query result should be flattened') + self.assertEqual(traverse_obj(iter(range(4)), Ellipsis), list(range(4)), + msg='`...` should accept iterables') + + # Test function as key + self.assertEqual(traverse_obj(_TEST_DATA, lambda x, y: x == 'urls' and isinstance(y, list)), + [_TEST_DATA['urls']], + msg='function as query key should perform a filter based on (key, value)') + self.assertCountEqual(traverse_obj(_TEST_DATA, lambda _, x: isinstance(x[0], str)), set(('str',)), + msg='exceptions in the query function should be caught') + self.assertEqual(traverse_obj(iter(range(4)), lambda _, x: x % 2 == 0), [0, 2], + msg='function key should accept iterables') + if __debug__: + with self.assertRaises(Exception, msg='Wrong function signature should raise in debug'): + traverse_obj(_TEST_DATA, lambda a: Ellipsis) + with self.assertRaises(Exception, msg='Wrong function signature should raise in debug'): + traverse_obj(_TEST_DATA, lambda a, b, c: Ellipsis) + + # Test set as key (transformation/type, like `expected_type`) + 
self.assertEqual(traverse_obj(_TEST_DATA, (Ellipsis, T(str.upper), )), ['STR'], + msg='Function in set should be a transformation') + self.assertEqual(traverse_obj(_TEST_DATA, ('fail', T(lambda _: 'const'))), 'const', + msg='Function in set should always be called') + self.assertEqual(traverse_obj(_TEST_DATA, (Ellipsis, T(str))), ['str'], + msg='Type in set should be a type filter') + self.assertMaybeCountEqual(traverse_obj(_TEST_DATA, (Ellipsis, T(str, int))), [100, 'str'], + msg='Multiple types in set should be a type filter') + self.assertEqual(traverse_obj(_TEST_DATA, T(dict)), _TEST_DATA, + msg='A single set should be wrapped into a path') + self.assertEqual(traverse_obj(_TEST_DATA, (Ellipsis, T(str.upper))), ['STR'], + msg='Transformation function should not raise') + self.assertMaybeCountEqual(traverse_obj(_TEST_DATA, (Ellipsis, T(str_or_none))), + [item for item in map(str_or_none, _TEST_DATA.values()) if item is not None], + msg='Function in set should be a transformation') + if __debug__: + with self.assertRaises(Exception, msg='Sets with length != 1 should raise in debug'): + traverse_obj(_TEST_DATA, set()) + with self.assertRaises(Exception, msg='Sets with length != 1 should raise in debug'): + traverse_obj(_TEST_DATA, set((str.upper, str))) + + # Test `slice` as a key + _SLICE_DATA = [0, 1, 2, 3, 4] + self.assertEqual(traverse_obj(_TEST_DATA, ('dict', slice(1))), None, + msg='slice on a dictionary should not throw') + self.assertEqual(traverse_obj(_SLICE_DATA, slice(1)), _SLICE_DATA[:1], + msg='slice key should apply slice to sequence') + self.assertEqual(traverse_obj(_SLICE_DATA, slice(1, 2)), _SLICE_DATA[1:2], + msg='slice key should apply slice to sequence') + self.assertEqual(traverse_obj(_SLICE_DATA, slice(1, 4, 2)), _SLICE_DATA[1:4:2], + msg='slice key should apply slice to sequence') + + # Test alternative paths + self.assertEqual(traverse_obj(_TEST_DATA, 'fail', 'str'), 'str', + msg='multiple `paths` should be treated as alternative paths') + 
self.assertEqual(traverse_obj(_TEST_DATA, 'str', 100), 'str', + msg='alternatives should exit early') + self.assertEqual(traverse_obj(_TEST_DATA, 'fail', 'fail'), None, + msg='alternatives should return `default` if exhausted') + self.assertEqual(traverse_obj(_TEST_DATA, (Ellipsis, 'fail'), 100), 100, + msg='alternatives should track their own branching return') + self.assertEqual(traverse_obj(_TEST_DATA, ('dict', Ellipsis), ('data', Ellipsis)), list(_TEST_DATA['data']), + msg='alternatives on empty objects should search further') + + # Test branch and path nesting + self.assertEqual(traverse_obj(_TEST_DATA, ('urls', (3, 0), 'url')), ['https://www.example.com/0'], + msg='tuple as key should be treated as branches') + self.assertEqual(traverse_obj(_TEST_DATA, ('urls', [3, 0], 'url')), ['https://www.example.com/0'], + msg='list as key should be treated as branches') + self.assertEqual(traverse_obj(_TEST_DATA, ('urls', ((1, 'fail'), (0, 'url')))), ['https://www.example.com/0'], + msg='double nesting in path should be treated as paths') + self.assertEqual(traverse_obj(['0', [1, 2]], [(0, 1), 0]), [1], + msg='do not fail early on branching') + self.assertCountEqual(traverse_obj(_TEST_DATA, ('urls', ((1, ('fail', 'url')), (0, 'url')))), + ['https://www.example.com/0', 'https://www.example.com/1'], + msg='triple nesting in path should be treated as branches') + self.assertEqual(traverse_obj(_TEST_DATA, ('urls', ('fail', (Ellipsis, 'url')))), + ['https://www.example.com/0', 'https://www.example.com/1'], + msg='ellipsis as branch path start gets flattened') + + # Test dictionary as key + self.assertEqual(traverse_obj(_TEST_DATA, {0: 100, 1: 1.2}), {0: 100, 1: 1.2}, + msg='dict key should result in a dict with the same keys') + self.assertEqual(traverse_obj(_TEST_DATA, {0: ('urls', 0, 'url')}), + {0: 'https://www.example.com/0'}, + msg='dict key should allow paths') + self.assertEqual(traverse_obj(_TEST_DATA, {0: ('urls', (3, 0), 'url')}), + {0: 
['https://www.example.com/0']}, + msg='tuple in dict path should be treated as branches') + self.assertEqual(traverse_obj(_TEST_DATA, {0: ('urls', ((1, 'fail'), (0, 'url')))}), + {0: ['https://www.example.com/0']}, + msg='double nesting in dict path should be treated as paths') + self.assertEqual(traverse_obj(_TEST_DATA, {0: ('urls', ((1, ('fail', 'url')), (0, 'url')))}), + {0: ['https://www.example.com/1', 'https://www.example.com/0']}, + msg='triple nesting in dict path should be treated as branches') + self.assertEqual(traverse_obj(_TEST_DATA, {0: 'fail'}), {}, + msg='remove `None` values when top level dict key fails') + self.assertEqual(traverse_obj(_TEST_DATA, {0: 'fail'}, default=Ellipsis), {0: Ellipsis}, + msg='use `default` if key fails and `default`') + self.assertEqual(traverse_obj(_TEST_DATA, {0: 'dict'}), {}, + msg='remove empty values when dict key') + self.assertEqual(traverse_obj(_TEST_DATA, {0: 'dict'}, default=Ellipsis), {0: Ellipsis}, + msg='use `default` when dict key and a default') + self.assertEqual(traverse_obj(_TEST_DATA, {0: {0: 'fail'}}), {}, + msg='remove empty values when nested dict key fails') + self.assertEqual(traverse_obj(None, {0: 'fail'}), {}, + msg='default to dict if pruned') + self.assertEqual(traverse_obj(None, {0: 'fail'}, default=Ellipsis), {0: Ellipsis}, + msg='default to dict if pruned and default is given') + self.assertEqual(traverse_obj(_TEST_DATA, {0: {0: 'fail'}}, default=Ellipsis), {0: {0: Ellipsis}}, + msg='use nested `default` when nested dict key fails and `default`') + self.assertEqual(traverse_obj(_TEST_DATA, {0: ('dict', Ellipsis)}), {}, + msg='remove key if branch in dict key not successful') + + # Testing default parameter behavior + _DEFAULT_DATA = {'None': None, 'int': 0, 'list': []} + self.assertEqual(traverse_obj(_DEFAULT_DATA, 'fail'), None, + msg='default value should be `None`') + self.assertEqual(traverse_obj(_DEFAULT_DATA, 'fail', 'fail', default=Ellipsis), Ellipsis, + msg='chained fails should 
result in default') + self.assertEqual(traverse_obj(_DEFAULT_DATA, 'None', 'int'), 0, + msg='should not short cirquit on `None`') + self.assertEqual(traverse_obj(_DEFAULT_DATA, 'fail', default=1), 1, + msg='invalid dict key should result in `default`') + self.assertEqual(traverse_obj(_DEFAULT_DATA, 'None', default=1), 1, + msg='`None` is a deliberate sentinel and should become `default`') + self.assertEqual(traverse_obj(_DEFAULT_DATA, ('list', 10)), None, + msg='`IndexError` should result in `default`') + self.assertEqual(traverse_obj(_DEFAULT_DATA, (Ellipsis, 'fail'), default=1), 1, + msg='if branched but not successful return `default` if defined, not `[]`') + self.assertEqual(traverse_obj(_DEFAULT_DATA, (Ellipsis, 'fail'), default=None), None, + msg='if branched but not successful return `default` even if `default` is `None`') + self.assertEqual(traverse_obj(_DEFAULT_DATA, (Ellipsis, 'fail')), [], + msg='if branched but not successful return `[]`, not `default`') + self.assertEqual(traverse_obj(_DEFAULT_DATA, ('list', Ellipsis)), [], + msg='if branched but object is empty return `[]`, not `default`') + self.assertEqual(traverse_obj(None, Ellipsis), [], + msg='if branched but object is `None` return `[]`, not `default`') + self.assertEqual(traverse_obj({0: None}, (0, Ellipsis)), [], + msg='if branched but state is `None` return `[]`, not `default`') + + branching_paths = [ + ('fail', Ellipsis), + (Ellipsis, 'fail'), + 100 * ('fail',) + (Ellipsis,), + (Ellipsis,) + 100 * ('fail',), + ] + for branching_path in branching_paths: + self.assertEqual(traverse_obj({}, branching_path), [], + msg='if branched but state is `None`, return `[]` (not `default`)') + self.assertEqual(traverse_obj({}, 'fail', branching_path), [], + msg='if branching in last alternative and previous did not match, return `[]` (not `default`)') + self.assertEqual(traverse_obj({0: 'x'}, 0, branching_path), 'x', + msg='if branching in last alternative and previous did match, return single value') + 
self.assertEqual(traverse_obj({0: 'x'}, branching_path, 0), 'x', + msg='if branching in first alternative and non-branching path does match, return single value') + self.assertEqual(traverse_obj({}, branching_path, 'fail'), None, + msg='if branching in first alternative and non-branching path does not match, return `default`') + + # Testing expected_type behavior + _EXPECTED_TYPE_DATA = {'str': 'str', 'int': 0} + self.assertEqual(traverse_obj(_EXPECTED_TYPE_DATA, 'str', expected_type=str), + 'str', msg='accept matching `expected_type` type') + self.assertEqual(traverse_obj(_EXPECTED_TYPE_DATA, 'str', expected_type=int), + None, msg='reject non-matching `expected_type` type') + self.assertEqual(traverse_obj(_EXPECTED_TYPE_DATA, 'int', expected_type=lambda x: str(x)), + '0', msg='transform type using type function') + self.assertEqual(traverse_obj(_EXPECTED_TYPE_DATA, 'str', expected_type=lambda _: 1 / 0), + None, msg='wrap expected_type function in try_call') + self.assertEqual(traverse_obj(_EXPECTED_TYPE_DATA, Ellipsis, expected_type=str), + ['str'], msg='eliminate items that expected_type fails on') + self.assertEqual(traverse_obj(_TEST_DATA, {0: 100, 1: 1.2}, expected_type=int), + {0: 100}, msg='type as expected_type should filter dict values') + self.assertEqual(traverse_obj(_TEST_DATA, {0: 100, 1: 1.2, 2: 'None'}, expected_type=str_or_none), + {0: '100', 1: '1.2'}, msg='function as expected_type should transform dict values') + self.assertEqual(traverse_obj(_TEST_DATA, ({0: 1.2}, 0, set((int_or_none,))), expected_type=int), + 1, msg='expected_type should not filter non-final dict values') + self.assertEqual(traverse_obj(_TEST_DATA, {0: {0: 100, 1: 'str'}}, expected_type=int), + {0: {0: 100}}, msg='expected_type should transform deep dict values') + self.assertEqual(traverse_obj(_TEST_DATA, [({0: '...'}, {0: '...'})], expected_type=type(Ellipsis)), + [{0: Ellipsis}, {0: Ellipsis}], msg='expected_type should transform branched dict values') + 
self.assertEqual(traverse_obj({1: {3: 4}}, [(1, 2), 3], expected_type=int), + [4], msg='expected_type regression for type matching in tuple branching') + self.assertEqual(traverse_obj(_TEST_DATA, ['data', Ellipsis], expected_type=int), + [], msg='expected_type regression for type matching in dict result') + + # Test get_all behavior + _GET_ALL_DATA = {'key': [0, 1, 2]} + self.assertEqual(traverse_obj(_GET_ALL_DATA, ('key', Ellipsis), get_all=False), 0, + msg='if not `get_all`, return only first matching value') + self.assertEqual(traverse_obj(_GET_ALL_DATA, Ellipsis, get_all=False), [0, 1, 2], + msg='do not overflatten if not `get_all`') + + # Test casesense behavior + _CASESENSE_DATA = { + 'KeY': 'value0', + 0: { + 'KeY': 'value1', + 0: {'KeY': 'value2'}, + }, + # FULLWIDTH LATIN CAPITAL LETTER K + '\uff2bey': 'value3', + } + self.assertEqual(traverse_obj(_CASESENSE_DATA, 'key'), None, + msg='dict keys should be case sensitive unless `casesense`') + self.assertEqual(traverse_obj(_CASESENSE_DATA, 'keY', + casesense=False), 'value0', + msg='allow non matching key case if `casesense`') + self.assertEqual(traverse_obj(_CASESENSE_DATA, '\uff4bey', # FULLWIDTH LATIN SMALL LETTER K + casesense=False), 'value3', + msg='allow non matching Unicode key case if `casesense`') + self.assertEqual(traverse_obj(_CASESENSE_DATA, (0, ('keY',)), + casesense=False), ['value1'], + msg='allow non matching key case in branch if `casesense`') + self.assertEqual(traverse_obj(_CASESENSE_DATA, (0, ((0, 'keY'),)), + casesense=False), ['value2'], + msg='allow non matching key case in branch path if `casesense`') + + # Test traverse_string behavior + _TRAVERSE_STRING_DATA = {'str': 'str', 1.2: 1.2} + self.assertEqual(traverse_obj(_TRAVERSE_STRING_DATA, ('str', 0)), None, + msg='do not traverse into string if not `traverse_string`') + self.assertEqual(traverse_obj(_TRAVERSE_STRING_DATA, ('str', 0), + _traverse_string=True), 's', + msg='traverse into string if `traverse_string`') + 
self.assertEqual(traverse_obj(_TRAVERSE_STRING_DATA, (1.2, 1), + _traverse_string=True), '.', + msg='traverse into converted data if `traverse_string`') + self.assertEqual(traverse_obj(_TRAVERSE_STRING_DATA, ('str', Ellipsis), + _traverse_string=True), 'str', + msg='`...` should result in string (same value) if `traverse_string`') + self.assertEqual(traverse_obj(_TRAVERSE_STRING_DATA, ('str', slice(0, None, 2)), + _traverse_string=True), 'sr', + msg='`slice` should result in string if `traverse_string`') + self.assertEqual(traverse_obj(_TRAVERSE_STRING_DATA, ('str', lambda i, v: i or v == 's'), + _traverse_string=True), 'str', + msg='function should result in string if `traverse_string`') + self.assertEqual(traverse_obj(_TRAVERSE_STRING_DATA, ('str', (0, 2)), + _traverse_string=True), ['s', 'r'], + msg='branching should result in list if `traverse_string`') + self.assertEqual(traverse_obj({}, (0, Ellipsis), _traverse_string=True), [], + msg='branching should result in list if `traverse_string`') + self.assertEqual(traverse_obj({}, (0, lambda x, y: True), _traverse_string=True), [], + msg='branching should result in list if `traverse_string`') + self.assertEqual(traverse_obj({}, (0, slice(1)), _traverse_string=True), [], + msg='branching should result in list if `traverse_string`') + + # Test re.Match as input obj + mobj = re.match(r'^0(12)(?P3)(4)?$', '0123') + self.assertEqual(traverse_obj(mobj, Ellipsis), [x for x in mobj.groups() if x is not None], + msg='`...` on a `re.Match` should give its `groups()`') + self.assertEqual(traverse_obj(mobj, lambda k, _: k in (0, 2)), ['0123', '3'], + msg='function on a `re.Match` should give groupno, value starting at 0') + self.assertEqual(traverse_obj(mobj, 'group'), '3', + msg='str key on a `re.Match` should give group with that name') + self.assertEqual(traverse_obj(mobj, 2), '3', + msg='int key on a `re.Match` should give group with that name') + self.assertEqual(traverse_obj(mobj, 'gRoUp', casesense=False), '3', + 
msg='str key on a `re.Match` should respect casesense') + self.assertEqual(traverse_obj(mobj, 'fail'), None, + msg='failing str key on a `re.Match` should return `default`') + self.assertEqual(traverse_obj(mobj, 'gRoUpS', casesense=False), None, + msg='failing str key on a `re.Match` should return `default`') + self.assertEqual(traverse_obj(mobj, 8), None, + msg='failing int key on a `re.Match` should return `default`') + self.assertEqual(traverse_obj(mobj, lambda k, _: k in (0, 'group')), ['0123', '3'], + msg='function on a `re.Match` should give group name as well') + + # Test xml.etree.ElementTree.Element as input obj + etree = compat_etree_fromstring(''' + + + 1 + 2008 + 141100 + + + + + 4 + 2011 + 59900 + + + + 68 + 2011 + 13600 + + + + ''') + self.assertEqual(traverse_obj(etree, ''), etree, + msg='empty str key should return the element itself') + self.assertEqual(traverse_obj(etree, 'country'), list(etree), + msg='str key should return all children with that tag name') + self.assertEqual(traverse_obj(etree, Ellipsis), list(etree), + msg='`...` as key should return all children') + self.assertEqual(traverse_obj(etree, lambda _, x: x[0].text == '4'), [etree[1]], + msg='function as key should get element as value') + self.assertEqual(traverse_obj(etree, lambda i, _: i == 1), [etree[1]], + msg='function as key should get index as key') + self.assertEqual(traverse_obj(etree, 0), etree[0], + msg='int key should return the nth child') + self.assertEqual(traverse_obj(etree, './/neighbor/@name'), + ['Austria', 'Switzerland', 'Malaysia', 'Costa Rica', 'Colombia'], + msg='`@` at end of path should give that attribute') + self.assertEqual(traverse_obj(etree, '//neighbor/@fail'), [None, None, None, None, None], + msg='`@` at end of path should give `None`') + self.assertEqual(traverse_obj(etree, ('//neighbor/@', 2)), {'name': 'Malaysia', 'direction': 'N'}, + msg='`@` should give the full attribute dict') + self.assertEqual(traverse_obj(etree, '//year/text()'), ['2008', 
'2011', '2011'], + msg='`text()` at end of path should give the inner text') + self.assertEqual(traverse_obj(etree, '//*[@direction]/@direction'), ['E', 'W', 'N', 'W', 'E'], + msg='full python xpath features should be supported') + self.assertEqual(traverse_obj(etree, (0, '@name')), 'Liechtenstein', + msg='special transformations should act on current element') + self.assertEqual(traverse_obj(etree, ('country', 0, Ellipsis, 'text()', T(int_or_none))), [1, 2008, 141100], + msg='special transformations should act on current element') + + def test_traversal_unbranching(self): + self.assertEqual(traverse_obj(_TEST_DATA, [(100, 1.2), all]), [100, 1.2], + msg='`all` should give all results as list') + self.assertEqual(traverse_obj(_TEST_DATA, [(100, 1.2), any]), 100, + msg='`any` should give the first result') + self.assertEqual(traverse_obj(_TEST_DATA, [100, all]), [100], + msg='`all` should give list if non branching') + self.assertEqual(traverse_obj(_TEST_DATA, [100, any]), 100, + msg='`any` should give single item if non branching') + self.assertEqual(traverse_obj(_TEST_DATA, [('dict', 'None', 100), all]), [100], + msg='`all` should filter `None` and empty dict') + self.assertEqual(traverse_obj(_TEST_DATA, [('dict', 'None', 100), any]), 100, + msg='`any` should filter `None` and empty dict') + self.assertEqual(traverse_obj(_TEST_DATA, [{ + 'all': [('dict', 'None', 100, 1.2), all], + 'any': [('dict', 'None', 100, 1.2), any], + }]), {'all': [100, 1.2], 'any': 100}, + msg='`all`/`any` should apply to each dict path separately') + self.assertEqual(traverse_obj(_TEST_DATA, [{ + 'all': [('dict', 'None', 100, 1.2), all], + 'any': [('dict', 'None', 100, 1.2), any], + }], get_all=False), {'all': [100, 1.2], 'any': 100}, + msg='`all`/`any` should apply to dict regardless of `get_all`') + self.assertIs(traverse_obj(_TEST_DATA, [('dict', 'None', 100, 1.2), all, T(float)]), None, + msg='`all` should reset branching status') + self.assertIs(traverse_obj(_TEST_DATA, [('dict', 
'None', 100, 1.2), any, T(float)]), None, + msg='`any` should reset branching status') + self.assertEqual(traverse_obj(_TEST_DATA, [('dict', 'None', 100, 1.2), all, Ellipsis, T(float)]), [1.2], + msg='`all` should allow further branching') + self.assertEqual(traverse_obj(_TEST_DATA, [('dict', 'None', 'urls', 'data'), any, Ellipsis, 'index']), [0, 1], + msg='`any` should allow further branching') + + def test_traversal_morsel(self): + values = { + 'expires': 'a', + 'path': 'b', + 'comment': 'c', + 'domain': 'd', + 'max-age': 'e', + 'secure': 'f', + 'httponly': 'g', + 'version': 'h', + 'samesite': 'i', + } + # SameSite added in Py3.8, breaks .update for 3.5-3.7 + if sys.version_info < (3, 8): + del values['samesite'] + morsel = compat_http_cookies.Morsel() + morsel.set(str('item_key'), 'item_value', 'coded_value') + morsel.update(values) + values['key'] = str('item_key') + values['value'] = 'item_value' + values = dict((str(k), v) for k, v in values.items()) + # make test pass even without ordered dict + value_set = set(values.values()) + + for key, value in values.items(): + self.assertEqual(traverse_obj(morsel, key), value, + msg='Morsel should provide access to all values') + self.assertEqual(set(traverse_obj(morsel, Ellipsis)), value_set, + msg='`...` should yield all values') + self.assertEqual(set(traverse_obj(morsel, lambda k, v: True)), value_set, + msg='function key should yield all values') + self.assertIs(traverse_obj(morsel, [(None,), any]), morsel, + msg='Morsel should not be implicitly changed to dict on usage') + + def test_get_first(self): + self.assertEqual(get_first([{'a': None}, {'a': 'spam'}], 'a'), 'spam') + + def test_dict_get(self): + FALSE_VALUES = { + 'none': None, + 'false': False, + 'zero': 0, + 'empty_string': '', + 'empty_list': [], + } + d = FALSE_VALUES.copy() + d['a'] = 42 + self.assertEqual(dict_get(d, 'a'), 42) + self.assertEqual(dict_get(d, 'b'), None) + self.assertEqual(dict_get(d, 'b', 42), 42) + self.assertEqual(dict_get(d, ('a', 
)), 42) + self.assertEqual(dict_get(d, ('b', 'a', )), 42) + self.assertEqual(dict_get(d, ('b', 'c', 'a', 'd', )), 42) + self.assertEqual(dict_get(d, ('b', 'c', )), None) + self.assertEqual(dict_get(d, ('b', 'c', ), 42), 42) + for key, false_value in FALSE_VALUES.items(): + self.assertEqual(dict_get(d, ('b', 'c', key, )), None) + self.assertEqual(dict_get(d, ('b', 'c', key, ), skip_false_values=False), false_value) + + +if __name__ == '__main__': + unittest.main() diff --git a/youtube-dl/test/test_utils.py b/youtube-dl/test/test_utils.py index ca36909a8c..de7fe80b8b 100644 --- a/youtube-dl/test/test_utils.py +++ b/youtube-dl/test/test_utils.py @@ -14,7 +14,6 @@ sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) import io import itertools import json -import re import xml.etree.ElementTree from youtube_dl.utils import ( @@ -28,7 +27,6 @@ from youtube_dl.utils import ( DateRange, detect_exe_version, determine_ext, - dict_get, encode_base_n, encode_compat_str, encodeFilename, @@ -44,7 +42,6 @@ from youtube_dl.utils import ( get_element_by_attribute, get_elements_by_class, get_elements_by_attribute, - get_first, InAdvancePagedList, int_or_none, intlist_to_bytes, @@ -84,14 +81,11 @@ from youtube_dl.utils import ( sanitized_Request, shell_quote, smuggle_url, - str_or_none, str_to_int, strip_jsonp, strip_or_none, subtitles_filename, - T, timeconvert, - traverse_obj, try_call, unescapeHTML, unified_strdate, @@ -132,10 +126,6 @@ from youtube_dl.compat import ( class TestUtil(unittest.TestCase): - # yt-dlp shim - def assertCountEqual(self, expected, got, msg='count should be the same'): - return self.assertEqual(len(tuple(expected)), len(tuple(got)), msg=msg) - def test_timeconvert(self): self.assertTrue(timeconvert('') is None) self.assertTrue(timeconvert('bougrg') is None) @@ -740,28 +730,6 @@ class TestUtil(unittest.TestCase): self.assertRaises( ValueError, multipart_encode, {b'field': b'value'}, boundary='value') - def test_dict_get(self): - 
FALSE_VALUES = { - 'none': None, - 'false': False, - 'zero': 0, - 'empty_string': '', - 'empty_list': [], - } - d = FALSE_VALUES.copy() - d['a'] = 42 - self.assertEqual(dict_get(d, 'a'), 42) - self.assertEqual(dict_get(d, 'b'), None) - self.assertEqual(dict_get(d, 'b', 42), 42) - self.assertEqual(dict_get(d, ('a', )), 42) - self.assertEqual(dict_get(d, ('b', 'a', )), 42) - self.assertEqual(dict_get(d, ('b', 'c', 'a', 'd', )), 42) - self.assertEqual(dict_get(d, ('b', 'c', )), None) - self.assertEqual(dict_get(d, ('b', 'c', ), 42), 42) - for key, false_value in FALSE_VALUES.items(): - self.assertEqual(dict_get(d, ('b', 'c', key, )), None) - self.assertEqual(dict_get(d, ('b', 'c', key, ), skip_false_values=False), false_value) - def test_merge_dicts(self): self.assertEqual(merge_dicts({'a': 1}, {'b': 2}), {'a': 1, 'b': 2}) self.assertEqual(merge_dicts({'a': 1}, {'a': 2}), {'a': 1}) @@ -1703,336 +1671,6 @@ Line 1 self.assertEqual(variadic('spam', allowed_types=dict), 'spam') self.assertEqual(variadic('spam', allowed_types=[dict]), 'spam') - def test_traverse_obj(self): - str = compat_str - _TEST_DATA = { - 100: 100, - 1.2: 1.2, - 'str': 'str', - 'None': None, - '...': Ellipsis, - 'urls': [ - {'index': 0, 'url': 'https://www.example.com/0'}, - {'index': 1, 'url': 'https://www.example.com/1'}, - ], - 'data': ( - {'index': 2}, - {'index': 3}, - ), - 'dict': {}, - } - - # define a pukka Iterable - def iter_range(stop): - for from_ in range(stop): - yield from_ - - # Test base functionality - self.assertEqual(traverse_obj(_TEST_DATA, ('str',)), 'str', - msg='allow tuple path') - self.assertEqual(traverse_obj(_TEST_DATA, ['str']), 'str', - msg='allow list path') - self.assertEqual(traverse_obj(_TEST_DATA, (value for value in ("str",))), 'str', - msg='allow iterable path') - self.assertEqual(traverse_obj(_TEST_DATA, 'str'), 'str', - msg='single items should be treated as a path') - self.assertEqual(traverse_obj(_TEST_DATA, None), _TEST_DATA) - 
self.assertEqual(traverse_obj(_TEST_DATA, 100), 100) - self.assertEqual(traverse_obj(_TEST_DATA, 1.2), 1.2) - - # Test Ellipsis behavior - self.assertCountEqual(traverse_obj(_TEST_DATA, Ellipsis), - (item for item in _TEST_DATA.values() if item not in (None, {})), - msg='`...` should give all non-discarded values') - self.assertCountEqual(traverse_obj(_TEST_DATA, ('urls', 0, Ellipsis)), _TEST_DATA['urls'][0].values(), - msg='`...` selection for dicts should select all values') - self.assertEqual(traverse_obj(_TEST_DATA, (Ellipsis, Ellipsis, 'url')), - ['https://www.example.com/0', 'https://www.example.com/1'], - msg='nested `...` queries should work') - self.assertCountEqual(traverse_obj(_TEST_DATA, (Ellipsis, Ellipsis, 'index')), iter_range(4), - msg='`...` query result should be flattened') - self.assertEqual(traverse_obj(iter(range(4)), Ellipsis), list(range(4)), - msg='`...` should accept iterables') - - # Test function as key - self.assertEqual(traverse_obj(_TEST_DATA, lambda x, y: x == 'urls' and isinstance(y, list)), - [_TEST_DATA['urls']], - msg='function as query key should perform a filter based on (key, value)') - self.assertCountEqual(traverse_obj(_TEST_DATA, lambda _, x: isinstance(x[0], str)), set(('str',)), - msg='exceptions in the query function should be caught') - self.assertEqual(traverse_obj(iter(range(4)), lambda _, x: x % 2 == 0), [0, 2], - msg='function key should accept iterables') - if __debug__: - with self.assertRaises(Exception, msg='Wrong function signature should raise in debug'): - traverse_obj(_TEST_DATA, lambda a: Ellipsis) - with self.assertRaises(Exception, msg='Wrong function signature should raise in debug'): - traverse_obj(_TEST_DATA, lambda a, b, c: Ellipsis) - - # Test set as key (transformation/type, like `expected_type`) - self.assertEqual(traverse_obj(_TEST_DATA, (Ellipsis, T(str.upper), )), ['STR'], - msg='Function in set should be a transformation') - self.assertEqual(traverse_obj(_TEST_DATA, (Ellipsis, T(str))), 
['str'], - msg='Type in set should be a type filter') - self.assertEqual(traverse_obj(_TEST_DATA, T(dict)), _TEST_DATA, - msg='A single set should be wrapped into a path') - self.assertEqual(traverse_obj(_TEST_DATA, (Ellipsis, T(str.upper))), ['STR'], - msg='Transformation function should not raise') - self.assertEqual(traverse_obj(_TEST_DATA, (Ellipsis, T(str_or_none))), - [item for item in map(str_or_none, _TEST_DATA.values()) if item is not None], - msg='Function in set should be a transformation') - if __debug__: - with self.assertRaises(Exception, msg='Sets with length != 1 should raise in debug'): - traverse_obj(_TEST_DATA, set()) - with self.assertRaises(Exception, msg='Sets with length != 1 should raise in debug'): - traverse_obj(_TEST_DATA, set((str.upper, str))) - - # Test `slice` as a key - _SLICE_DATA = [0, 1, 2, 3, 4] - self.assertEqual(traverse_obj(_TEST_DATA, ('dict', slice(1))), None, - msg='slice on a dictionary should not throw') - self.assertEqual(traverse_obj(_SLICE_DATA, slice(1)), _SLICE_DATA[:1], - msg='slice key should apply slice to sequence') - self.assertEqual(traverse_obj(_SLICE_DATA, slice(1, 2)), _SLICE_DATA[1:2], - msg='slice key should apply slice to sequence') - self.assertEqual(traverse_obj(_SLICE_DATA, slice(1, 4, 2)), _SLICE_DATA[1:4:2], - msg='slice key should apply slice to sequence') - - # Test alternative paths - self.assertEqual(traverse_obj(_TEST_DATA, 'fail', 'str'), 'str', - msg='multiple `paths` should be treated as alternative paths') - self.assertEqual(traverse_obj(_TEST_DATA, 'str', 100), 'str', - msg='alternatives should exit early') - self.assertEqual(traverse_obj(_TEST_DATA, 'fail', 'fail'), None, - msg='alternatives should return `default` if exhausted') - self.assertEqual(traverse_obj(_TEST_DATA, (Ellipsis, 'fail'), 100), 100, - msg='alternatives should track their own branching return') - self.assertEqual(traverse_obj(_TEST_DATA, ('dict', Ellipsis), ('data', Ellipsis)), list(_TEST_DATA['data']), - 
msg='alternatives on empty objects should search further') - - # Test branch and path nesting - self.assertEqual(traverse_obj(_TEST_DATA, ('urls', (3, 0), 'url')), ['https://www.example.com/0'], - msg='tuple as key should be treated as branches') - self.assertEqual(traverse_obj(_TEST_DATA, ('urls', [3, 0], 'url')), ['https://www.example.com/0'], - msg='list as key should be treated as branches') - self.assertEqual(traverse_obj(_TEST_DATA, ('urls', ((1, 'fail'), (0, 'url')))), ['https://www.example.com/0'], - msg='double nesting in path should be treated as paths') - self.assertEqual(traverse_obj(['0', [1, 2]], [(0, 1), 0]), [1], - msg='do not fail early on branching') - self.assertCountEqual(traverse_obj(_TEST_DATA, ('urls', ((1, ('fail', 'url')), (0, 'url')))), - ['https://www.example.com/0', 'https://www.example.com/1'], - msg='triple nesting in path should be treated as branches') - self.assertEqual(traverse_obj(_TEST_DATA, ('urls', ('fail', (Ellipsis, 'url')))), - ['https://www.example.com/0', 'https://www.example.com/1'], - msg='ellipsis as branch path start gets flattened') - - # Test dictionary as key - self.assertEqual(traverse_obj(_TEST_DATA, {0: 100, 1: 1.2}), {0: 100, 1: 1.2}, - msg='dict key should result in a dict with the same keys') - self.assertEqual(traverse_obj(_TEST_DATA, {0: ('urls', 0, 'url')}), - {0: 'https://www.example.com/0'}, - msg='dict key should allow paths') - self.assertEqual(traverse_obj(_TEST_DATA, {0: ('urls', (3, 0), 'url')}), - {0: ['https://www.example.com/0']}, - msg='tuple in dict path should be treated as branches') - self.assertEqual(traverse_obj(_TEST_DATA, {0: ('urls', ((1, 'fail'), (0, 'url')))}), - {0: ['https://www.example.com/0']}, - msg='double nesting in dict path should be treated as paths') - self.assertEqual(traverse_obj(_TEST_DATA, {0: ('urls', ((1, ('fail', 'url')), (0, 'url')))}), - {0: ['https://www.example.com/1', 'https://www.example.com/0']}, - msg='triple nesting in dict path should be treated as 
branches') - self.assertEqual(traverse_obj(_TEST_DATA, {0: 'fail'}), {}, - msg='remove `None` values when top level dict key fails') - self.assertEqual(traverse_obj(_TEST_DATA, {0: 'fail'}, default=Ellipsis), {0: Ellipsis}, - msg='use `default` if key fails and `default`') - self.assertEqual(traverse_obj(_TEST_DATA, {0: 'dict'}), {}, - msg='remove empty values when dict key') - self.assertEqual(traverse_obj(_TEST_DATA, {0: 'dict'}, default=Ellipsis), {0: Ellipsis}, - msg='use `default` when dict key and a default') - self.assertEqual(traverse_obj(_TEST_DATA, {0: {0: 'fail'}}), {}, - msg='remove empty values when nested dict key fails') - self.assertEqual(traverse_obj(None, {0: 'fail'}), {}, - msg='default to dict if pruned') - self.assertEqual(traverse_obj(None, {0: 'fail'}, default=Ellipsis), {0: Ellipsis}, - msg='default to dict if pruned and default is given') - self.assertEqual(traverse_obj(_TEST_DATA, {0: {0: 'fail'}}, default=Ellipsis), {0: {0: Ellipsis}}, - msg='use nested `default` when nested dict key fails and `default`') - self.assertEqual(traverse_obj(_TEST_DATA, {0: ('dict', Ellipsis)}), {}, - msg='remove key if branch in dict key not successful') - - # Testing default parameter behavior - _DEFAULT_DATA = {'None': None, 'int': 0, 'list': []} - self.assertEqual(traverse_obj(_DEFAULT_DATA, 'fail'), None, - msg='default value should be `None`') - self.assertEqual(traverse_obj(_DEFAULT_DATA, 'fail', 'fail', default=Ellipsis), Ellipsis, - msg='chained fails should result in default') - self.assertEqual(traverse_obj(_DEFAULT_DATA, 'None', 'int'), 0, - msg='should not short cirquit on `None`') - self.assertEqual(traverse_obj(_DEFAULT_DATA, 'fail', default=1), 1, - msg='invalid dict key should result in `default`') - self.assertEqual(traverse_obj(_DEFAULT_DATA, 'None', default=1), 1, - msg='`None` is a deliberate sentinel and should become `default`') - self.assertEqual(traverse_obj(_DEFAULT_DATA, ('list', 10)), None, - msg='`IndexError` should result in 
`default`') - self.assertEqual(traverse_obj(_DEFAULT_DATA, (Ellipsis, 'fail'), default=1), 1, - msg='if branched but not successful return `default` if defined, not `[]`') - self.assertEqual(traverse_obj(_DEFAULT_DATA, (Ellipsis, 'fail'), default=None), None, - msg='if branched but not successful return `default` even if `default` is `None`') - self.assertEqual(traverse_obj(_DEFAULT_DATA, (Ellipsis, 'fail')), [], - msg='if branched but not successful return `[]`, not `default`') - self.assertEqual(traverse_obj(_DEFAULT_DATA, ('list', Ellipsis)), [], - msg='if branched but object is empty return `[]`, not `default`') - self.assertEqual(traverse_obj(None, Ellipsis), [], - msg='if branched but object is `None` return `[]`, not `default`') - self.assertEqual(traverse_obj({0: None}, (0, Ellipsis)), [], - msg='if branched but state is `None` return `[]`, not `default`') - - branching_paths = [ - ('fail', Ellipsis), - (Ellipsis, 'fail'), - 100 * ('fail',) + (Ellipsis,), - (Ellipsis,) + 100 * ('fail',), - ] - for branching_path in branching_paths: - self.assertEqual(traverse_obj({}, branching_path), [], - msg='if branched but state is `None`, return `[]` (not `default`)') - self.assertEqual(traverse_obj({}, 'fail', branching_path), [], - msg='if branching in last alternative and previous did not match, return `[]` (not `default`)') - self.assertEqual(traverse_obj({0: 'x'}, 0, branching_path), 'x', - msg='if branching in last alternative and previous did match, return single value') - self.assertEqual(traverse_obj({0: 'x'}, branching_path, 0), 'x', - msg='if branching in first alternative and non-branching path does match, return single value') - self.assertEqual(traverse_obj({}, branching_path, 'fail'), None, - msg='if branching in first alternative and non-branching path does not match, return `default`') - - # Testing expected_type behavior - _EXPECTED_TYPE_DATA = {'str': 'str', 'int': 0} - self.assertEqual(traverse_obj(_EXPECTED_TYPE_DATA, 'str', expected_type=str), - 
'str', msg='accept matching `expected_type` type') - self.assertEqual(traverse_obj(_EXPECTED_TYPE_DATA, 'str', expected_type=int), - None, msg='reject non-matching `expected_type` type') - self.assertEqual(traverse_obj(_EXPECTED_TYPE_DATA, 'int', expected_type=lambda x: str(x)), - '0', msg='transform type using type function') - self.assertEqual(traverse_obj(_EXPECTED_TYPE_DATA, 'str', expected_type=lambda _: 1 / 0), - None, msg='wrap expected_type function in try_call') - self.assertEqual(traverse_obj(_EXPECTED_TYPE_DATA, Ellipsis, expected_type=str), - ['str'], msg='eliminate items that expected_type fails on') - self.assertEqual(traverse_obj(_TEST_DATA, {0: 100, 1: 1.2}, expected_type=int), - {0: 100}, msg='type as expected_type should filter dict values') - self.assertEqual(traverse_obj(_TEST_DATA, {0: 100, 1: 1.2, 2: 'None'}, expected_type=str_or_none), - {0: '100', 1: '1.2'}, msg='function as expected_type should transform dict values') - self.assertEqual(traverse_obj(_TEST_DATA, ({0: 1.2}, 0, set((int_or_none,))), expected_type=int), - 1, msg='expected_type should not filter non-final dict values') - self.assertEqual(traverse_obj(_TEST_DATA, {0: {0: 100, 1: 'str'}}, expected_type=int), - {0: {0: 100}}, msg='expected_type should transform deep dict values') - self.assertEqual(traverse_obj(_TEST_DATA, [({0: '...'}, {0: '...'})], expected_type=type(Ellipsis)), - [{0: Ellipsis}, {0: Ellipsis}], msg='expected_type should transform branched dict values') - self.assertEqual(traverse_obj({1: {3: 4}}, [(1, 2), 3], expected_type=int), - [4], msg='expected_type regression for type matching in tuple branching') - self.assertEqual(traverse_obj(_TEST_DATA, ['data', Ellipsis], expected_type=int), - [], msg='expected_type regression for type matching in dict result') - - # Test get_all behavior - _GET_ALL_DATA = {'key': [0, 1, 2]} - self.assertEqual(traverse_obj(_GET_ALL_DATA, ('key', Ellipsis), get_all=False), 0, - msg='if not `get_all`, return only first matching value') 
- self.assertEqual(traverse_obj(_GET_ALL_DATA, Ellipsis, get_all=False), [0, 1, 2], - msg='do not overflatten if not `get_all`') - - # Test casesense behavior - _CASESENSE_DATA = { - 'KeY': 'value0', - 0: { - 'KeY': 'value1', - 0: {'KeY': 'value2'}, - }, - # FULLWIDTH LATIN CAPITAL LETTER K - '\uff2bey': 'value3', - } - self.assertEqual(traverse_obj(_CASESENSE_DATA, 'key'), None, - msg='dict keys should be case sensitive unless `casesense`') - self.assertEqual(traverse_obj(_CASESENSE_DATA, 'keY', - casesense=False), 'value0', - msg='allow non matching key case if `casesense`') - self.assertEqual(traverse_obj(_CASESENSE_DATA, '\uff4bey', # FULLWIDTH LATIN SMALL LETTER K - casesense=False), 'value3', - msg='allow non matching Unicode key case if `casesense`') - self.assertEqual(traverse_obj(_CASESENSE_DATA, (0, ('keY',)), - casesense=False), ['value1'], - msg='allow non matching key case in branch if `casesense`') - self.assertEqual(traverse_obj(_CASESENSE_DATA, (0, ((0, 'keY'),)), - casesense=False), ['value2'], - msg='allow non matching key case in branch path if `casesense`') - - # Test traverse_string behavior - _TRAVERSE_STRING_DATA = {'str': 'str', 1.2: 1.2} - self.assertEqual(traverse_obj(_TRAVERSE_STRING_DATA, ('str', 0)), None, - msg='do not traverse into string if not `traverse_string`') - self.assertEqual(traverse_obj(_TRAVERSE_STRING_DATA, ('str', 0), - _traverse_string=True), 's', - msg='traverse into string if `traverse_string`') - self.assertEqual(traverse_obj(_TRAVERSE_STRING_DATA, (1.2, 1), - _traverse_string=True), '.', - msg='traverse into converted data if `traverse_string`') - self.assertEqual(traverse_obj(_TRAVERSE_STRING_DATA, ('str', Ellipsis), - _traverse_string=True), 'str', - msg='`...` should result in string (same value) if `traverse_string`') - self.assertEqual(traverse_obj(_TRAVERSE_STRING_DATA, ('str', slice(0, None, 2)), - _traverse_string=True), 'sr', - msg='`slice` should result in string if `traverse_string`') - 
self.assertEqual(traverse_obj(_TRAVERSE_STRING_DATA, ('str', lambda i, v: i or v == 's'), - _traverse_string=True), 'str', - msg='function should result in string if `traverse_string`') - self.assertEqual(traverse_obj(_TRAVERSE_STRING_DATA, ('str', (0, 2)), - _traverse_string=True), ['s', 'r'], - msg='branching should result in list if `traverse_string`') - self.assertEqual(traverse_obj({}, (0, Ellipsis), _traverse_string=True), [], - msg='branching should result in list if `traverse_string`') - self.assertEqual(traverse_obj({}, (0, lambda x, y: True), _traverse_string=True), [], - msg='branching should result in list if `traverse_string`') - self.assertEqual(traverse_obj({}, (0, slice(1)), _traverse_string=True), [], - msg='branching should result in list if `traverse_string`') - - # Test is_user_input behavior - _IS_USER_INPUT_DATA = {'range8': list(range(8))} - self.assertEqual(traverse_obj(_IS_USER_INPUT_DATA, ('range8', '3'), - _is_user_input=True), 3, - msg='allow for string indexing if `is_user_input`') - self.assertCountEqual(traverse_obj(_IS_USER_INPUT_DATA, ('range8', '3:'), - _is_user_input=True), tuple(range(8))[3:], - msg='allow for string slice if `is_user_input`') - self.assertCountEqual(traverse_obj(_IS_USER_INPUT_DATA, ('range8', ':4:2'), - _is_user_input=True), tuple(range(8))[:4:2], - msg='allow step in string slice if `is_user_input`') - self.assertCountEqual(traverse_obj(_IS_USER_INPUT_DATA, ('range8', ':'), - _is_user_input=True), range(8), - msg='`:` should be treated as `...` if `is_user_input`') - with self.assertRaises(TypeError, msg='too many params should result in error'): - traverse_obj(_IS_USER_INPUT_DATA, ('range8', ':::'), _is_user_input=True) - - # Test re.Match as input obj - mobj = re.match(r'^0(12)(?P3)(4)?$', '0123') - self.assertEqual(traverse_obj(mobj, Ellipsis), [x for x in mobj.groups() if x is not None], - msg='`...` on a `re.Match` should give its `groups()`') - self.assertEqual(traverse_obj(mobj, lambda k, _: k in (0, 
2)), ['0123', '3'], - msg='function on a `re.Match` should give groupno, value starting at 0') - self.assertEqual(traverse_obj(mobj, 'group'), '3', - msg='str key on a `re.Match` should give group with that name') - self.assertEqual(traverse_obj(mobj, 2), '3', - msg='int key on a `re.Match` should give group with that name') - self.assertEqual(traverse_obj(mobj, 'gRoUp', casesense=False), '3', - msg='str key on a `re.Match` should respect casesense') - self.assertEqual(traverse_obj(mobj, 'fail'), None, - msg='failing str key on a `re.Match` should return `default`') - self.assertEqual(traverse_obj(mobj, 'gRoUpS', casesense=False), None, - msg='failing str key on a `re.Match` should return `default`') - self.assertEqual(traverse_obj(mobj, 8), None, - msg='failing int key on a `re.Match` should return `default`') - self.assertEqual(traverse_obj(mobj, lambda k, _: k in (0, 'group')), ['0123', '3'], - msg='function on a `re.Match` should give group name as well') - - def test_get_first(self): - self.assertEqual(get_first([{'a': None}, {'a': 'spam'}], 'a'), 'spam') - def test_join_nonempty(self): self.assertEqual(join_nonempty('a', 'b'), 'a-b') self.assertEqual(join_nonempty( diff --git a/youtube-dl/youtube_dl/compat.py b/youtube-dl/youtube_dl/compat.py index 53ff2a892a..ed1a33cf2f 100644 --- a/youtube-dl/youtube_dl/compat.py +++ b/youtube-dl/youtube_dl/compat.py @@ -2719,8 +2719,222 @@ if sys.version_info < (2, 7): if isinstance(xpath, compat_str): xpath = xpath.encode('ascii') return xpath + + # further code below based on CPython 2.7 source + import functools + + _xpath_tokenizer_re = re.compile(r'''(?x) + ( # (1) + '[^']*'|"[^"]*"| # quoted strings, or + ::|//?|\.\.|\(\)|[/.*:[\]()@=] # navigation specials + )| # or (2) + ((?:\{[^}]+\})?[^/[\]()@=\s]+)| # token: optional {ns}, no specials + \s+ # or white space + ''') + + def _xpath_tokenizer(pattern, namespaces=None): + for token in _xpath_tokenizer_re.findall(pattern): + tag = token[1] + if tag and tag[0] != "{" 
and ":" in tag: + try: + if not namespaces: + raise KeyError + prefix, uri = tag.split(":", 1) + yield token[0], "{%s}%s" % (namespaces[prefix], uri) + except KeyError: + raise SyntaxError("prefix %r not found in prefix map" % prefix) + else: + yield token + + def _get_parent_map(context): + parent_map = context.parent_map + if parent_map is None: + context.parent_map = parent_map = {} + for p in context.root.getiterator(): + for e in p: + parent_map[e] = p + return parent_map + + def _select(context, result, filter_fn=lambda *_: True): + for elem in result: + for e in elem: + if filter_fn(e, elem): + yield e + + def _prepare_child(next_, token): + tag = token[1] + return functools.partial(_select, filter_fn=lambda e, _: e.tag == tag) + + def _prepare_star(next_, token): + return _select + + def _prepare_self(next_, token): + return lambda _, result: (e for e in result) + + def _prepare_descendant(next_, token): + token = next(next_) + if token[0] == "*": + tag = "*" + elif not token[0]: + tag = token[1] + else: + raise SyntaxError("invalid descendant") + + def select(context, result): + for elem in result: + for e in elem.getiterator(tag): + if e is not elem: + yield e + return select + + def _prepare_parent(next_, token): + def select(context, result): + # FIXME: raise error if .. is applied at toplevel? 
+ parent_map = _get_parent_map(context) + result_map = {} + for elem in result: + if elem in parent_map: + parent = parent_map[elem] + if parent not in result_map: + result_map[parent] = None + yield parent + return select + + def _prepare_predicate(next_, token): + signature = [] + predicate = [] + for token in next_: + if token[0] == "]": + break + if token[0] and token[0][:1] in "'\"": + token = "'", token[0][1:-1] + signature.append(token[0] or "-") + predicate.append(token[1]) + + def select(context, result, filter_fn=lambda _: True): + for elem in result: + if filter_fn(elem): + yield elem + + signature = "".join(signature) + # use signature to determine predicate type + if signature == "@-": + # [@attribute] predicate + key = predicate[1] + return functools.partial( + select, filter_fn=lambda el: el.get(key) is not None) + if signature == "@-='": + # [@attribute='value'] + key = predicate[1] + value = predicate[-1] + return functools.partial( + select, filter_fn=lambda el: el.get(key) == value) + if signature == "-" and not re.match(r"\d+$", predicate[0]): + # [tag] + tag = predicate[0] + return functools.partial( + select, filter_fn=lambda el: el.find(tag) is not None) + if signature == "-='" and not re.match(r"\d+$", predicate[0]): + # [tag='value'] + tag = predicate[0] + value = predicate[-1] + + def itertext(el): + for e in el.getiterator(): + e = e.text + if e: + yield e + + def select(context, result): + for elem in result: + for e in elem.findall(tag): + if "".join(itertext(e)) == value: + yield elem + break + return select + if signature == "-" or signature == "-()" or signature == "-()-": + # [index] or [last()] or [last()-index] + if signature == "-": + index = int(predicate[0]) - 1 + else: + if predicate[0] != "last": + raise SyntaxError("unsupported function") + if signature == "-()-": + try: + index = int(predicate[2]) - 1 + except ValueError: + raise SyntaxError("unsupported expression") + else: + index = -1 + + def select(context, result): + 
parent_map = _get_parent_map(context) + for elem in result: + try: + parent = parent_map[elem] + # FIXME: what if the selector is "*" ? + elems = list(parent.findall(elem.tag)) + if elems[index] is elem: + yield elem + except (IndexError, KeyError): + pass + return select + raise SyntaxError("invalid predicate") + + ops = { + "": _prepare_child, + "*": _prepare_star, + ".": _prepare_self, + "..": _prepare_parent, + "//": _prepare_descendant, + "[": _prepare_predicate, + } + + _cache = {} + + class _SelectorContext: + parent_map = None + + def __init__(self, root): + self.root = root + + ## + # Generate all matching objects. + + def compat_etree_iterfind(elem, path, namespaces=None): + # compile selector pattern + if path[-1:] == "/": + path = path + "*" # implicit all (FIXME: keep this?) + try: + selector = _cache[path] + except KeyError: + if len(_cache) > 100: + _cache.clear() + if path[:1] == "/": + raise SyntaxError("cannot use absolute path on element") + tokens = _xpath_tokenizer(path, namespaces) + selector = [] + for token in tokens: + if token[0] == "/": + continue + try: + selector.append(ops[token[0]](tokens, token)) + except StopIteration: + raise SyntaxError("invalid path") + _cache[path] = selector + # execute selector pattern + result = [elem] + context = _SelectorContext(elem) + for select in selector: + result = select(context, result) + return result + + # end of code based on CPython 2.7 source + + else: compat_xpath = lambda xpath: xpath + compat_etree_iterfind = lambda element, match: element.iterfind(match) compat_os_name = os._name if os.name == 'java' else os.name @@ -2756,7 +2970,7 @@ except (AssertionError, UnicodeEncodeError): def compat_ord(c): - if type(c) is int: + if isinstance(c, int): return c else: return ord(c) @@ -2955,7 +3169,7 @@ except ImportError: return self def __exit__(self, exc_type, exc_val, exc_tb): - return exc_val is not None and isinstance(exc_val, self._exceptions or tuple()) + return exc_type is not None and 
issubclass(exc_type, self._exceptions or tuple()) # subprocess.Popen context manager @@ -3308,6 +3522,7 @@ __all__ = [ 'compat_contextlib_suppress', 'compat_ctypes_WINFUNCTYPE', 'compat_etree_fromstring', + 'compat_etree_iterfind', 'compat_filter', 'compat_get_terminal_size', 'compat_getenv', diff --git a/youtube-dl/youtube_dl/extractor/common.py b/youtube-dl/youtube_dl/extractor/common.py index 7fae9e57ba..b10e844168 100644 --- a/youtube-dl/youtube_dl/extractor/common.py +++ b/youtube-dl/youtube_dl/extractor/common.py @@ -1169,10 +1169,10 @@ class InfoExtractor(object): def _get_netrc_login_info(self, netrc_machine=None): username = None password = None - netrc_machine = netrc_machine or self._NETRC_MACHINE if self._downloader.params.get('usenetrc', False): try: + netrc_machine = netrc_machine or self._NETRC_MACHINE info = netrc.netrc().authenticators(netrc_machine) if info is not None: username = info[0] @@ -1180,7 +1180,7 @@ class InfoExtractor(object): else: raise netrc.NetrcParseError( 'No authenticators for %s' % netrc_machine) - except (IOError, netrc.NetrcParseError) as err: + except (AttributeError, IOError, netrc.NetrcParseError) as err: self._downloader.report_warning( 'parsing .netrc: %s' % error_to_compat_str(err)) @@ -1490,14 +1490,18 @@ class InfoExtractor(object): return dict((k, v) for k, v in info.items() if v is not None) def _search_nextjs_data(self, webpage, video_id, **kw): - nkw = dict((k, v) for k, v in kw.items() if k in ('transform_source', 'fatal')) - kw.pop('transform_source', None) - next_data = self._search_regex( - r''']+\bid\s*=\s*('|")__NEXT_DATA__\1[^>]*>(?P[^<]+)''', - webpage, 'next.js data', group='nd', **kw) - if not next_data: - return {} - return self._parse_json(next_data, video_id, **nkw) + # ..., *, transform_source=None, fatal=True, default=NO_DEFAULT + + # TODO: remove this backward compat + default = kw.get('default', NO_DEFAULT) + if default == '{}': + kw['default'] = {} + kw = compat_kwargs(kw) + + return 
self._search_json( + r''']*?\bid\s*=\s*('|")__NEXT_DATA__\1[^>]*>''', + webpage, 'next.js data', video_id, end_pattern='', + **kw) def _search_nuxt_data(self, webpage, video_id, *args, **kwargs): """Parses Nuxt.js metadata. This works as long as the function __NUXT__ invokes is a pure function""" @@ -3296,12 +3300,16 @@ class InfoExtractor(object): return ret @classmethod - def _merge_subtitles(cls, subtitle_dict1, subtitle_dict2): - """ Merge two subtitle dictionaries, language by language. """ - ret = dict(subtitle_dict1) - for lang in subtitle_dict2: - ret[lang] = cls._merge_subtitle_items(subtitle_dict1.get(lang, []), subtitle_dict2[lang]) - return ret + def _merge_subtitles(cls, subtitle_dict1, *subtitle_dicts, **kwargs): + """ Merge subtitle dictionaries, language by language. """ + + # ..., * , target=None + target = kwargs.get('target') or dict(subtitle_dict1) + + for subtitle_dict in subtitle_dicts: + for lang in subtitle_dict: + target[lang] = cls._merge_subtitle_items(target.get(lang, []), subtitle_dict[lang]) + return target def extract_automatic_captions(self, *args, **kwargs): if (self._downloader.params.get('writeautomaticsub', False) @@ -3334,6 +3342,29 @@ class InfoExtractor(object): def _generic_title(self, url): return compat_urllib_parse_unquote(os.path.splitext(url_basename(url))[0]) + def _yes_playlist(self, playlist_id, video_id, *args, **kwargs): + # smuggled_data=None, *, playlist_label='playlist', video_label='video' + smuggled_data = args[0] if len(args) == 1 else kwargs.get('smuggled_data') + playlist_label = kwargs.get('playlist_label', 'playlist') + video_label = kwargs.get('video_label', 'video') + + if not playlist_id or not video_id: + return not video_id + + no_playlist = (smuggled_data or {}).get('force_noplaylist') + if no_playlist is not None: + return not no_playlist + + video_id = '' if video_id is True else ' ' + video_id + noplaylist = self.get_param('noplaylist') + self.to_screen( + 'Downloading just the {0}{1} because of 
--no-playlist'.format(video_label, video_id) + if noplaylist else + 'Downloading {0}{1} - add --no-playlist to download just the {2}{3}'.format( + playlist_label, '' if playlist_id is True else ' ' + playlist_id, + video_label, video_id)) + return not noplaylist + class SearchInfoExtractor(InfoExtractor): """ diff --git a/youtube-dl/youtube_dl/traversal.py b/youtube-dl/youtube_dl/traversal.py new file mode 100644 index 0000000000..834cfef7fa --- /dev/null +++ b/youtube-dl/youtube_dl/traversal.py @@ -0,0 +1,10 @@ +# coding: utf-8 + +# TODO: move these utils.fns here and move import to utils +# flake8: noqa +from .utils import ( + dict_get, + get_first, + T, + traverse_obj, +) diff --git a/youtube-dl/youtube_dl/utils.py b/youtube-dl/youtube_dl/utils.py index e1b05b3072..cd43035667 100644 --- a/youtube-dl/youtube_dl/utils.py +++ b/youtube-dl/youtube_dl/utils.py @@ -49,11 +49,14 @@ from .compat import ( compat_cookiejar, compat_ctypes_WINFUNCTYPE, compat_datetime_timedelta_total_seconds, + compat_etree_Element, compat_etree_fromstring, + compat_etree_iterfind, compat_expanduser, compat_html_entities, compat_html_entities_html5, compat_http_client, + compat_http_cookies, compat_integer_types, compat_kwargs, compat_ncompress as ncompress, @@ -6253,15 +6256,16 @@ if __debug__: def traverse_obj(obj, *paths, **kwargs): """ - Safely traverse nested `dict`s and `Iterable`s + Safely traverse nested `dict`s and `Iterable`s, etc >>> obj = [{}, {"key": "value"}] >>> traverse_obj(obj, (1, "key")) - "value" + 'value' Each of the provided `paths` is tested and the first producing a valid result will be returned. The next path will also be tested if the path branched but no results could be found. - Supported values for traversal are `Mapping`, `Iterable` and `re.Match`. + Supported values for traversal are `Mapping`, `Iterable`, `re.Match`, `xml.etree.ElementTree` + (xpath) and `http.cookies.Morsel`. 
Unhelpful values (`{}`, `None`) are treated as the absence of a value and discarded. The paths will be wrapped in `variadic`, so that `'key'` is conveniently the same as `('key', )`. @@ -6269,8 +6273,9 @@ def traverse_obj(obj, *paths, **kwargs): The keys in the path can be one of: - `None`: Return the current object. - `set`: Requires the only item in the set to be a type or function, - like `{type}`/`{func}`. If a `type`, returns only values - of this type. If a function, returns `func(obj)`. + like `{type}`/`{type, type, ...}`/`{func}`. If one or more `type`s, + return only values that have one of the types. If a function, + return `func(obj)`. - `str`/`int`: Return `obj[key]`. For `re.Match`, return `obj.group(key)`. - `slice`: Branch out and return all values in `obj[key]`. - `Ellipsis`: Branch out and return a list of all values. @@ -6282,8 +6287,10 @@ def traverse_obj(obj, *paths, **kwargs): For `Iterable`s, `key` is the enumeration count of the value. For `re.Match`es, `key` is the group number (0 = full match) as well as additionally any group names, if given. - - `dict` Transform the current object and return a matching dict. + - `dict`: Transform the current object and return a matching dict. Read as: `{key: traverse_obj(obj, path) for key, path in dct.items()}`. + - `any`-builtin: Take the first matching object and return it, resetting branching. + - `all`-builtin: Take all matching objects and return them as a list, resetting branching. `tuple`, `list`, and `dict` all support nested paths and branches. @@ -6299,10 +6306,8 @@ def traverse_obj(obj, *paths, **kwargs): @param get_all If `False`, return the first matching result, otherwise all matching ones. @param casesense If `False`, consider string dictionary keys as case insensitive. 
- The following are only meant to be used by YoutubeDL.prepare_outtmpl and are not part of the API + The following is only meant to be used by YoutubeDL.prepare_outtmpl and is not part of the API - @param _is_user_input Whether the keys are generated from user input. - If `True` strings get converted to `int`/`slice` if needed. @param _traverse_string Whether to traverse into objects as strings. If `True`, any non-compatible object will first be converted into a string and then traversed into. @@ -6322,7 +6327,6 @@ def traverse_obj(obj, *paths, **kwargs): expected_type = kwargs.get('expected_type') get_all = kwargs.get('get_all', True) casesense = kwargs.get('casesense', True) - _is_user_input = kwargs.get('_is_user_input', False) _traverse_string = kwargs.get('_traverse_string', False) # instant compat @@ -6336,10 +6340,8 @@ def traverse_obj(obj, *paths, **kwargs): type_test = lambda val: try_call(expected_type or IDENTITY, args=(val,)) def lookup_or_none(v, k, getter=None): - try: + with compat_contextlib_suppress(LookupError): return getter(v, k) if getter else v[k] - except IndexError: - return None def from_iterable(iterables): # chain.from_iterable(['ABC', 'DEF']) --> A B C D E F @@ -6361,12 +6363,13 @@ def traverse_obj(obj, *paths, **kwargs): result = obj elif isinstance(key, set): - assert len(key) == 1, 'Set should only be used to wrap a single item' - item = next(iter(key)) - if isinstance(item, type): - result = obj if isinstance(obj, item) else None + assert len(key) >= 1, 'At least one item is required in a `set` key' + if all(isinstance(item, type) for item in key): + result = obj if isinstance(obj, tuple(key)) else None else: - result = try_call(item, args=(obj,)) + item = next(iter(key)) + assert len(key) == 1, 'Multiple items in a `set` key must all be types' + result = try_call(item, args=(obj,)) if not isinstance(item, type) else None elif isinstance(key, (list, tuple)): branching = True @@ -6375,9 +6378,11 @@ def traverse_obj(obj, *paths, 
**kwargs): elif key is Ellipsis: branching = True + if isinstance(obj, compat_http_cookies.Morsel): + obj = dict(obj, key=obj.key, value=obj.value) if isinstance(obj, compat_collections_abc.Mapping): result = obj.values() - elif is_iterable_like(obj): + elif is_iterable_like(obj, (compat_collections_abc.Iterable, compat_etree_Element)): result = obj elif isinstance(obj, compat_re_Match): result = obj.groups() @@ -6389,9 +6394,11 @@ def traverse_obj(obj, *paths, **kwargs): elif callable(key): branching = True + if isinstance(obj, compat_http_cookies.Morsel): + obj = dict(obj, key=obj.key, value=obj.value) if isinstance(obj, compat_collections_abc.Mapping): iter_obj = obj.items() - elif is_iterable_like(obj): + elif is_iterable_like(obj, (compat_collections_abc.Iterable, compat_etree_Element)): iter_obj = enumerate(obj) elif isinstance(obj, compat_re_Match): iter_obj = itertools.chain( @@ -6413,6 +6420,8 @@ def traverse_obj(obj, *paths, **kwargs): if v is not None or default is not NO_DEFAULT) or None elif isinstance(obj, compat_collections_abc.Mapping): + if isinstance(obj, compat_http_cookies.Morsel): + obj = dict(obj, key=obj.key, value=obj.value) result = (try_call(obj.get, args=(key,)) if casesense or try_call(obj.__contains__, args=(key,)) else next((v for k, v in obj.items() if casefold(k) == key), None)) @@ -6430,12 +6439,40 @@ def traverse_obj(obj, *paths, **kwargs): else: result = None if isinstance(key, (int, slice)): - if is_iterable_like(obj, compat_collections_abc.Sequence): + if is_iterable_like(obj, (compat_collections_abc.Sequence, compat_etree_Element)): branching = isinstance(key, slice) result = lookup_or_none(obj, key) elif _traverse_string: result = lookup_or_none(str(obj), key) + elif isinstance(obj, compat_etree_Element) and isinstance(key, str): + xpath, _, special = key.rpartition('/') + if not special.startswith('@') and not special.endswith('()'): + xpath = key + special = None + + # Allow abbreviations of relative paths, absolute paths 
error + if xpath.startswith('/'): + xpath = '.' + xpath + elif xpath and not xpath.startswith('./'): + xpath = './' + xpath + + def apply_specials(element): + if special is None: + return element + if special == '@': + return element.attrib + if special.startswith('@'): + return try_call(element.attrib.get, args=(special[1:],)) + if special == 'text()': + return element.text + raise SyntaxError('apply_specials is missing case for {0!r}'.format(special)) + + if xpath: + result = list(map(apply_specials, compat_etree_iterfind(obj, xpath))) + else: + result = apply_specials(obj) + return branching, result if branching else (result,) def lazy_last(iterable): @@ -6456,17 +6493,18 @@ def traverse_obj(obj, *paths, **kwargs): key = None for last, key in lazy_last(variadic(path, (str, bytes, dict, set))): - if _is_user_input and isinstance(key, str): - if key == ':': - key = Ellipsis - elif ':' in key: - key = slice(*map(int_or_none, key.split(':'))) - elif int_or_none(key) is not None: - key = int(key) - if not casesense and isinstance(key, str): key = compat_casefold(key) + if key in (any, all): + has_branched = False + filtered_objs = (obj for obj in objs if obj not in (None, {})) + if key is any: + objs = (next(filtered_objs, None),) + else: + objs = (list(filtered_objs),) + continue + if __debug__ and callable(key): # Verify function signature _try_bind_args(key, None, None) @@ -6505,9 +6543,9 @@ def traverse_obj(obj, *paths, **kwargs): return None if default is NO_DEFAULT else default -def T(x): - """ For use in yt-dl instead of {type} or set((type,)) """ - return set((x,)) +def T(*x): + """ For use in yt-dl instead of {type, ...} or set((type, ...)) """ + return set(x) def get_first(obj, keys, **kwargs): diff --git a/yt-dlp/yt_dlp/extractor/orf.py b/yt-dlp/yt_dlp/extractor/orf.py index 3c837becdb..039f33bd66 100644 --- a/yt-dlp/yt_dlp/extractor/orf.py +++ b/yt-dlp/yt_dlp/extractor/orf.py @@ -12,7 +12,9 @@ from ..utils import ( mimetype2ext, orderedSet, 
parse_age_limit, + parse_iso8601, remove_end, + str_or_none, strip_jsonp, try_call, unified_strdate, @@ -390,7 +392,7 @@ class ORFFM4StoryIE(InfoExtractor): class ORFONIE(InfoExtractor): IE_NAME = 'orf:on' - _VALID_URL = r'https?://on\.orf\.at/video/(?P\d+)' + _VALID_URL = r'https?://on\.orf\.at/video/(?P\d+)(?:/(?P\d+))?' _TESTS = [{ 'url': 'https://on.orf.at/video/14210000/school-of-champions-48', 'info_dict': { @@ -401,10 +403,14 @@ class ORFONIE(InfoExtractor): 'title': 'School of Champions (4/8)', 'description': 'md5:d09ad279fc2e8502611e7648484b6afd', 'media_type': 'episode', - 'timestamp': 1706472362, - 'upload_date': '20240128', + 'timestamp': 1706558922, + 'upload_date': '20240129', + 'release_timestamp': 1706472362, + 'release_date': '20240128', + 'modified_timestamp': 1712756663, + 'modified_date': '20240410', '_old_archive_ids': ['orftvthek 14210000'], - } + }, }, { 'url': 'https://on.orf.at/video/3220355', 'md5': 'f94d98e667cf9a3851317efb4e136662', @@ -418,18 +424,87 @@ class ORFONIE(InfoExtractor): 'media_type': 'episode', 'timestamp': 52916400, 'upload_date': '19710905', + 'release_timestamp': 52916400, + 'release_date': '19710905', + 'modified_timestamp': 1498536049, + 'modified_date': '20170627', '_old_archive_ids': ['orftvthek 3220355'], - } + }, + }, { + # Video with multiple segments selecting the second segment + 'url': 'https://on.orf.at/video/14226549/15639808/jugendbande-einbrueche-aus-langeweile', + 'md5': '90f4ebff86b4580837b8a361d0232a9e', + 'info_dict': { + 'id': '15639808', + 'ext': 'mp4', + 'duration': 97.707, + 'thumbnail': 'https://api-tvthek.orf.at/assets/segments/0175/43/thumb_17442704_segments_highlight_teaser.jpg', + 'title': 'Jugendbande: Einbrüche aus Langeweile', + 'description': 'md5:193df0bf0d91cf16830c211078097120', + 'media_type': 'segment', + 'timestamp': 1715792400, + 'upload_date': '20240515', + 'modified_timestamp': 1715794394, + 'modified_date': '20240515', + '_old_archive_ids': ['orftvthek 15639808'], + }, + 'params': 
{'noplaylist': True}, + }, { + # Video with multiple segments and no combined version + 'url': 'https://on.orf.at/video/14227864/formel-1-grosser-preis-von-monaco-2024', + 'info_dict': { + '_type': 'multi_video', + 'id': '14227864', + 'duration': 18410.52, + 'thumbnail': 'https://api-tvthek.orf.at/assets/segments/0176/04/thumb_17503881_segments_highlight_teaser.jpg', + 'title': 'Formel 1: Großer Preis von Monaco 2024', + 'description': 'md5:aeeb010710ccf70ce28ccb4482243d4f', + 'media_type': 'episode', + 'timestamp': 1716721200, + 'upload_date': '20240526', + 'release_timestamp': 1716721802, + 'release_date': '20240526', + 'modified_timestamp': 1716967501, + 'modified_date': '20240529', + }, + 'playlist_count': 42, + }, { + # Video with multiple segments, but with combined version + 'url': 'https://on.orf.at/video/14228172', + 'info_dict': { + 'id': '14228172', + 'ext': 'mp4', + 'duration': 3294.878, + 'thumbnail': 'https://api-tvthek.orf.at/assets/segments/0176/17/thumb_17516455_segments_highlight_teaser.jpg', + 'title': 'Willkommen Österreich mit Stermann & Grissemann', + 'description': 'md5:5de034d033a9c27f989343be3bbd4839', + 'media_type': 'episode', + 'timestamp': 1716926584, + 'upload_date': '20240528', + 'release_timestamp': 1716919202, + 'release_date': '20240528', + 'modified_timestamp': 1716968045, + 'modified_date': '20240529', + '_old_archive_ids': ['orftvthek 14228172'], + }, }] - def _extract_video(self, video_id): - encrypted_id = base64.b64encode(f'3dSlfek03nsLKdj4Jsd{video_id}'.encode()).decode() - api_json = self._download_json( - f'https://api-tvthek.orf.at/api/v4.3/public/episode/encrypted/{encrypted_id}', video_id) - - if traverse_obj(api_json, 'is_drm_protected'): - self.report_drm(video_id) + @staticmethod + def _parse_metadata(api_json): + return traverse_obj(api_json, { + 'id': ('id', {int}, {str_or_none}), + 'age_limit': ('age_classification', {parse_age_limit}), + 'duration': ('exact_duration', {functools.partial(float_or_none, 
scale=1000)}), + 'title': (('title', 'headline'), {str}), + 'description': (('description', 'teaser_text'), {str}), + 'media_type': ('video_type', {str}), + 'thumbnail': ('_embedded', 'image', 'public_urls', 'highlight_teaser', 'url', {url_or_none}), + 'timestamp': (('date', 'episode_date'), {parse_iso8601}), + 'release_timestamp': ('release_date', {parse_iso8601}), + 'modified_timestamp': ('updated_at', {parse_iso8601}), + }, get_all=False) + def _extract_video_info(self, video_id, api_json): formats, subtitles = [], {} for manifest_type in traverse_obj(api_json, ('sources', {dict.keys}, ...)): for manifest_url in traverse_obj(api_json, ('sources', manifest_type, ..., 'src', {url_or_none})): @@ -454,24 +529,30 @@ class ORFONIE(InfoExtractor): 'formats': formats, 'subtitles': subtitles, '_old_archive_ids': [make_archive_id('ORFTVthek', video_id)], - **traverse_obj(api_json, { - 'age_limit': ('age_classification', {parse_age_limit}), - 'duration': ('duration_second', {float_or_none}), - 'title': (('title', 'headline'), {str}), - 'description': (('description', 'teaser_text'), {str}), - 'media_type': ('video_type', {str}), - }, get_all=False), + **self._parse_metadata(api_json), } def _real_extract(self, url): - video_id = self._match_id(url) - webpage = self._download_webpage(url, video_id) + video_id, segment_id = self._match_valid_url(url).group('id', 'segment') - return { - 'id': video_id, - 'title': self._html_search_meta(['og:title', 'twitter:title'], webpage, default=None), - 'description': self._html_search_meta( - ['description', 'og:description', 'twitter:description'], webpage, default=None), - **self._search_json_ld(webpage, video_id, fatal=False), - **self._extract_video(video_id), - } + encrypted_id = base64.b64encode(f'3dSlfek03nsLKdj4Jsd{video_id}'.encode()).decode() + api_json = self._download_json( + f'https://api-tvthek.orf.at/api/v4.3/public/episode/encrypted/{encrypted_id}', video_id) + + if traverse_obj(api_json, 'is_drm_protected'): + 
self.report_drm(video_id) + + segments = traverse_obj(api_json, ('_embedded', 'segments', lambda _, v: v['id'])) + selected_segment = traverse_obj(segments, (lambda _, v: str(v['id']) == segment_id, any)) + + # selected_segment will be falsy if input URL did not include a valid segment_id + if selected_segment and not self._yes_playlist(video_id, segment_id, playlist_label='episode', video_label='segment'): + return self._extract_video_info(segment_id, selected_segment) + + # Even some segmented videos have an unsegmented version available in API response root + if not traverse_obj(api_json, ('sources', ..., ..., 'src', {url_or_none})): + return self.playlist_result( + (self._extract_video_info(str(segment['id']), segment) for segment in segments), + video_id, **self._parse_metadata(api_json), multi_video=True) + + return self._extract_video_info(video_id, api_json)

as a postfix to the mimalloc dll (default is 'override') + e.g. use --postfix=override-debug to link with mimalloc-override-debug.dll + +notes: + Without '--inplace' an injected is generated with the same name ending in '-mi'. + Ensure 'mimalloc-redirect.dll' is in the same folder as the mimalloc dll. + +examples: + > minject --list myprogram.exe + > minject --force --inplace myprogram.exe +``` diff --git a/yass/third_party/mimalloc/cmake/JoinPaths.cmake b/yass/third_party/mimalloc/cmake/JoinPaths.cmake new file mode 100644 index 0000000000..c68d91b84d --- /dev/null +++ b/yass/third_party/mimalloc/cmake/JoinPaths.cmake @@ -0,0 +1,23 @@ +# This module provides function for joining paths +# known from most languages +# +# SPDX-License-Identifier: (MIT OR CC0-1.0) +# Copyright 2020 Jan Tojnar +# https://github.com/jtojnar/cmake-snips +# +# Modelled after Python’s os.path.join +# https://docs.python.org/3.7/library/os.path.html#os.path.join +# Windows not supported +function(join_paths joined_path first_path_segment) + set(temp_path "${first_path_segment}") + foreach(current_segment IN LISTS ARGN) + if(NOT ("${current_segment}" STREQUAL "")) + if(IS_ABSOLUTE "${current_segment}") + set(temp_path "${current_segment}") + else() + set(temp_path "${temp_path}/${current_segment}") + endif() + endif() + endforeach() + set(${joined_path} "${temp_path}" PARENT_SCOPE) +endfunction() diff --git a/yass/third_party/mimalloc/cmake/mimalloc-config-version.cmake b/yass/third_party/mimalloc/cmake/mimalloc-config-version.cmake new file mode 100644 index 0000000000..81fd3c9da7 --- /dev/null +++ b/yass/third_party/mimalloc/cmake/mimalloc-config-version.cmake @@ -0,0 +1,19 @@ +set(mi_version_major 2) +set(mi_version_minor 1) +set(mi_version_patch 7) +set(mi_version ${mi_version_major}.${mi_version_minor}) + +set(PACKAGE_VERSION ${mi_version}) +if(PACKAGE_FIND_VERSION_MAJOR) + if("${PACKAGE_FIND_VERSION_MAJOR}" EQUAL "${mi_version_major}") + if ("${PACKAGE_FIND_VERSION_MINOR}" EQUAL 
"${mi_version_minor}") + set(PACKAGE_VERSION_EXACT TRUE) + elseif("${PACKAGE_FIND_VERSION_MINOR}" LESS "${mi_version_minor}") + set(PACKAGE_VERSION_COMPATIBLE TRUE) + else() + set(PACKAGE_VERSION_UNSUITABLE TRUE) + endif() + else() + set(PACKAGE_VERSION_UNSUITABLE TRUE) + endif() +endif() diff --git a/yass/third_party/mimalloc/cmake/mimalloc-config.cmake b/yass/third_party/mimalloc/cmake/mimalloc-config.cmake new file mode 100644 index 0000000000..a49b02a25a --- /dev/null +++ b/yass/third_party/mimalloc/cmake/mimalloc-config.cmake @@ -0,0 +1,14 @@ +include(${CMAKE_CURRENT_LIST_DIR}/mimalloc.cmake) +get_filename_component(MIMALLOC_CMAKE_DIR "${CMAKE_CURRENT_LIST_DIR}" PATH) # one up from the cmake dir, e.g. /usr/local/lib/cmake/mimalloc-2.0 +get_filename_component(MIMALLOC_VERSION_DIR "${CMAKE_CURRENT_LIST_DIR}" NAME) +string(REPLACE "/lib/cmake" "/lib" MIMALLOC_LIBRARY_DIR "${MIMALLOC_CMAKE_DIR}") +if("${MIMALLOC_VERSION_DIR}" EQUAL "mimalloc") + # top level install + string(REPLACE "/lib/cmake" "/include" MIMALLOC_INCLUDE_DIR "${MIMALLOC_CMAKE_DIR}") + set(MIMALLOC_OBJECT_DIR "${MIMALLOC_LIBRARY_DIR}") +else() + # versioned + string(REPLACE "/lib/cmake/" "/include/" MIMALLOC_INCLUDE_DIR "${CMAKE_CURRENT_LIST_DIR}") + string(REPLACE "/lib/cmake/" "/lib/" MIMALLOC_OBJECT_DIR "${CMAKE_CURRENT_LIST_DIR}") +endif() +set(MIMALLOC_TARGET_DIR "${MIMALLOC_LIBRARY_DIR}") # legacy diff --git a/yass/third_party/mimalloc/doc/bench-2020/bench-c5-18xlarge-2020-01-20-a.svg b/yass/third_party/mimalloc/doc/bench-2020/bench-c5-18xlarge-2020-01-20-a.svg new file mode 100644 index 0000000000..9005097423 --- /dev/null +++ b/yass/third_party/mimalloc/doc/bench-2020/bench-c5-18xlarge-2020-01-20-a.svg @@ -0,0 +1,887 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/yass/third_party/mimalloc/doc/bench-2020/bench-c5-18xlarge-2020-01-20-b.svg b/yass/third_party/mimalloc/doc/bench-2020/bench-c5-18xlarge-2020-01-20-b.svg new file mode 100644 index 0000000000..2d853edcb5 --- /dev/null +++ b/yass/third_party/mimalloc/doc/bench-2020/bench-c5-18xlarge-2020-01-20-b.svg @@ -0,0 +1,1185 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/yass/third_party/mimalloc/doc/bench-2020/bench-c5-18xlarge-2020-01-20-rss-a.svg b/yass/third_party/mimalloc/doc/bench-2020/bench-c5-18xlarge-2020-01-20-rss-a.svg new file mode 100644 index 0000000000..393bfad973 --- /dev/null +++ b/yass/third_party/mimalloc/doc/bench-2020/bench-c5-18xlarge-2020-01-20-rss-a.svg @@ -0,0 +1,757 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/yass/third_party/mimalloc/doc/bench-2020/bench-c5-18xlarge-2020-01-20-rss-b.svg b/yass/third_party/mimalloc/doc/bench-2020/bench-c5-18xlarge-2020-01-20-rss-b.svg new file mode 100644 index 0000000000..419dc250fd --- /dev/null +++ b/yass/third_party/mimalloc/doc/bench-2020/bench-c5-18xlarge-2020-01-20-rss-b.svg @@ -0,0 +1,1028 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/yass/third_party/mimalloc/doc/bench-2020/bench-r5a-1.svg b/yass/third_party/mimalloc/doc/bench-2020/bench-r5a-1.svg new file mode 100644 index 0000000000..c296a04891 --- /dev/null +++ b/yass/third_party/mimalloc/doc/bench-2020/bench-r5a-1.svg @@ -0,0 +1,769 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/yass/third_party/mimalloc/doc/bench-2020/bench-r5a-12xlarge-2020-01-16-a.svg b/yass/third_party/mimalloc/doc/bench-2020/bench-r5a-12xlarge-2020-01-16-a.svg new file mode 100644 index 0000000000..b8a2f20e57 --- /dev/null +++ b/yass/third_party/mimalloc/doc/bench-2020/bench-r5a-12xlarge-2020-01-16-a.svg @@ -0,0 +1,868 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/yass/third_party/mimalloc/doc/bench-2020/bench-r5a-12xlarge-2020-01-16-b.svg b/yass/third_party/mimalloc/doc/bench-2020/bench-r5a-12xlarge-2020-01-16-b.svg new file mode 100644 index 0000000000..4a7e21e716 --- /dev/null +++ b/yass/third_party/mimalloc/doc/bench-2020/bench-r5a-12xlarge-2020-01-16-b.svg @@ -0,0 +1,1157 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/yass/third_party/mimalloc/doc/bench-2020/bench-r5a-2.svg b/yass/third_party/mimalloc/doc/bench-2020/bench-r5a-2.svg new file mode 100644 index 0000000000..917ea57301 --- /dev/null +++ b/yass/third_party/mimalloc/doc/bench-2020/bench-r5a-2.svg @@ -0,0 +1,983 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/yass/third_party/mimalloc/doc/bench-2020/bench-r5a-rss-1.svg b/yass/third_party/mimalloc/doc/bench-2020/bench-r5a-rss-1.svg new file mode 100644 index 0000000000..375ebd2043 --- /dev/null +++ b/yass/third_party/mimalloc/doc/bench-2020/bench-r5a-rss-1.svg @@ -0,0 +1,683 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/yass/third_party/mimalloc/doc/bench-2020/bench-r5a-rss-2.svg b/yass/third_party/mimalloc/doc/bench-2020/bench-r5a-rss-2.svg new file mode 100644 index 0000000000..cb2bbc89e3 --- /dev/null +++ b/yass/third_party/mimalloc/doc/bench-2020/bench-r5a-rss-2.svg @@ -0,0 +1,854 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/yass/third_party/mimalloc/doc/bench-2020/bench-spec-rss.svg b/yass/third_party/mimalloc/doc/bench-2020/bench-spec-rss.svg new file mode 100644 index 0000000000..2c936166c5 --- /dev/null +++ b/yass/third_party/mimalloc/doc/bench-2020/bench-spec-rss.svg @@ -0,0 +1,713 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/yass/third_party/mimalloc/doc/bench-2020/bench-spec.svg b/yass/third_party/mimalloc/doc/bench-2020/bench-spec.svg new file mode 100644 index 0000000000..af2b41ba91 --- /dev/null +++ b/yass/third_party/mimalloc/doc/bench-2020/bench-spec.svg @@ -0,0 +1,713 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/yass/third_party/mimalloc/doc/bench-2020/bench-z4-1.svg b/yass/third_party/mimalloc/doc/bench-2020/bench-z4-1.svg new file mode 100644 index 0000000000..dacd8ab943 --- /dev/null +++ b/yass/third_party/mimalloc/doc/bench-2020/bench-z4-1.svg @@ -0,0 +1,890 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/yass/third_party/mimalloc/doc/bench-2020/bench-z4-2.svg b/yass/third_party/mimalloc/doc/bench-2020/bench-z4-2.svg new file mode 100644 index 0000000000..9990cdcc3f --- /dev/null +++ b/yass/third_party/mimalloc/doc/bench-2020/bench-z4-2.svg @@ -0,0 +1,1146 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/yass/third_party/mimalloc/doc/bench-2020/bench-z4-rss-1.svg b/yass/third_party/mimalloc/doc/bench-2020/bench-z4-rss-1.svg new file mode 100644 index 0000000000..891f7d68fa --- /dev/null +++ b/yass/third_party/mimalloc/doc/bench-2020/bench-z4-rss-1.svg @@ -0,0 +1,796 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/yass/third_party/mimalloc/doc/bench-2020/bench-z4-rss-2.svg b/yass/third_party/mimalloc/doc/bench-2020/bench-z4-rss-2.svg new file mode 100644 index 0000000000..f4265378a8 --- /dev/null +++ b/yass/third_party/mimalloc/doc/bench-2020/bench-z4-rss-2.svg @@ -0,0 +1,974 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/yass/third_party/mimalloc/doc/bench-2021/bench-amd5950x-2021-01-30-a.svg b/yass/third_party/mimalloc/doc/bench-2021/bench-amd5950x-2021-01-30-a.svg new file mode 100644 index 0000000000..86a97bfd2c --- /dev/null +++ b/yass/third_party/mimalloc/doc/bench-2021/bench-amd5950x-2021-01-30-a.svg @@ -0,0 +1,952 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/yass/third_party/mimalloc/doc/bench-2021/bench-amd5950x-2021-01-30-b.svg b/yass/third_party/mimalloc/doc/bench-2021/bench-amd5950x-2021-01-30-b.svg new file mode 100644 index 0000000000..c748877027 --- /dev/null +++ 
b/yass/third_party/mimalloc/doc/bench-2021/bench-amd5950x-2021-01-30-b.svg @@ -0,0 +1,1255 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/yass/third_party/mimalloc/doc/bench-2021/bench-c5-18xlarge-2021-01-30-a.svg b/yass/third_party/mimalloc/doc/bench-2021/bench-c5-18xlarge-2021-01-30-a.svg new file mode 100644 index 0000000000..bc91c218cb --- /dev/null +++ b/yass/third_party/mimalloc/doc/bench-2021/bench-c5-18xlarge-2021-01-30-a.svg @@ -0,0 +1,955 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/yass/third_party/mimalloc/doc/bench-2021/bench-c5-18xlarge-2021-01-30-b.svg b/yass/third_party/mimalloc/doc/bench-2021/bench-c5-18xlarge-2021-01-30-b.svg new file mode 100644 index 0000000000..e8b04a0d98 --- /dev/null +++ b/yass/third_party/mimalloc/doc/bench-2021/bench-c5-18xlarge-2021-01-30-b.svg @@ -0,0 +1,1269 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/yass/third_party/mimalloc/doc/bench-2021/bench-c5-18xlarge-2021-01-30-rss-a.svg b/yass/third_party/mimalloc/doc/bench-2021/bench-c5-18xlarge-2021-01-30-rss-a.svg new file mode 100644 
index 0000000000..6cd36aaabf --- /dev/null +++ b/yass/third_party/mimalloc/doc/bench-2021/bench-c5-18xlarge-2021-01-30-rss-a.svg @@ -0,0 +1,836 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/yass/third_party/mimalloc/doc/bench-2021/bench-c5-18xlarge-2021-01-30-rss-b.svg 
b/yass/third_party/mimalloc/doc/bench-2021/bench-c5-18xlarge-2021-01-30-rss-b.svg new file mode 100644 index 0000000000..c81072e9b8 --- /dev/null +++ b/yass/third_party/mimalloc/doc/bench-2021/bench-c5-18xlarge-2021-01-30-rss-b.svg @@ -0,0 +1,1131 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/yass/third_party/mimalloc/doc/bench-2021/bench-macmini-2021-01-30.svg b/yass/third_party/mimalloc/doc/bench-2021/bench-macmini-2021-01-30.svg new file mode 100644 index 0000000000..ece64185f1 --- /dev/null +++ b/yass/third_party/mimalloc/doc/bench-2021/bench-macmini-2021-01-30.svg @@ -0,0 +1,766 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/yass/third_party/mimalloc/doc/doxyfile b/yass/third_party/mimalloc/doc/doxyfile new file mode 100644 index 0000000000..d03a70f57c --- /dev/null +++ b/yass/third_party/mimalloc/doc/doxyfile @@ -0,0 +1,2659 @@ +# Doxyfile 1.9.1 + +# This file describes the settings to be used by the documentation system +# doxygen (www.doxygen.org) for a project. +# +# All text after a double hash (##) is considered a comment and is placed in +# front of the TAG it is preceding. +# +# All text after a single hash (#) is considered a comment and will be ignored. +# The format is: +# TAG = value [value, ...] +# For lists, items can also be appended using: +# TAG += value [value, ...] +# Values that contain spaces should be placed between quotes (\" \"). + +#--------------------------------------------------------------------------- +# Project related configuration options +#--------------------------------------------------------------------------- + +# This tag specifies the encoding used for all characters in the configuration +# file that follow. The default is UTF-8 which is also the encoding used for all +# text before the first occurrence of this tag. Doxygen uses libiconv (or the +# iconv built into libc) for the transcoding. See +# https://www.gnu.org/software/libiconv/ for the list of possible encodings. +# The default value is: UTF-8. 
+ +DOXYFILE_ENCODING = UTF-8 + +# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by +# double-quotes, unless you are using Doxywizard) that should identify the +# project for which the documentation is generated. This name is used in the +# title of most generated pages and in a few other places. +# The default value is: My Project. + +PROJECT_NAME = mi-malloc + +# The PROJECT_NUMBER tag can be used to enter a project or revision number. This +# could be handy for archiving the generated documentation or if some version +# control system is used. + +PROJECT_NUMBER = 1.8/2.1 + +# Using the PROJECT_BRIEF tag one can provide an optional one line description +# for a project that appears at the top of each page and should give viewer a +# quick idea about the purpose of the project. Keep the description short. + +PROJECT_BRIEF = + +# With the PROJECT_LOGO tag one can specify a logo or an icon that is included +# in the documentation. The maximum height of the logo should not exceed 55 +# pixels and the maximum width should not exceed 200 pixels. Doxygen will copy +# the logo to the output directory. + +PROJECT_LOGO = mimalloc-logo.svg + +# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path +# into which the generated documentation will be written. If a relative path is +# entered, it will be relative to the location where doxygen was started. If +# left blank the current directory will be used. + +OUTPUT_DIRECTORY = .. + +# If the CREATE_SUBDIRS tag is set to YES then doxygen will create 4096 sub- +# directories (in 2 levels) under the output directory of each output format and +# will distribute the generated files over these directories. Enabling this +# option can be useful when feeding doxygen a huge amount of source files, where +# putting all generated files in the same directory would otherwise causes +# performance problems for the file system. +# The default value is: NO. 
+ +CREATE_SUBDIRS = NO + +# If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII +# characters to appear in the names of generated files. If set to NO, non-ASCII +# characters will be escaped, for example _xE3_x81_x84 will be used for Unicode +# U+3044. +# The default value is: NO. + +ALLOW_UNICODE_NAMES = NO + +# The OUTPUT_LANGUAGE tag is used to specify the language in which all +# documentation generated by doxygen is written. Doxygen will use this +# information to generate all constant output in the proper language. +# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese, +# Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States), +# Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian, +# Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages), +# Korean, Korean-en (Korean with English messages), Latvian, Lithuanian, +# Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian, +# Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish, +# Ukrainian and Vietnamese. +# The default value is: English. + +OUTPUT_LANGUAGE = English + +# The OUTPUT_TEXT_DIRECTION tag is used to specify the direction in which all +# documentation generated by doxygen is written. Doxygen will use this +# information to generate all generated output in the proper direction. +# Possible values are: None, LTR, RTL and Context. +# The default value is: None. + +OUTPUT_TEXT_DIRECTION = None + +# If the BRIEF_MEMBER_DESC tag is set to YES, doxygen will include brief member +# descriptions after the members that are listed in the file and class +# documentation (similar to Javadoc). Set to NO to disable this. +# The default value is: YES. 
+ +BRIEF_MEMBER_DESC = YES + +# If the REPEAT_BRIEF tag is set to YES, doxygen will prepend the brief +# description of a member or function before the detailed description +# +# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the +# brief descriptions will be completely suppressed. +# The default value is: YES. + +REPEAT_BRIEF = YES + +# This tag implements a quasi-intelligent brief description abbreviator that is +# used to form the text in various listings. Each string in this list, if found +# as the leading text of the brief description, will be stripped from the text +# and the result, after processing the whole list, is used as the annotated +# text. Otherwise, the brief description is used as-is. If left blank, the +# following values are used ($name is automatically replaced with the name of +# the entity):The $name class, The $name widget, The $name file, is, provides, +# specifies, contains, represents, a, an and the. + +ABBREVIATE_BRIEF = "The $name class" \ + "The $name widget" \ + "The $name file" \ + is \ + provides \ + specifies \ + contains \ + represents \ + a \ + an \ + the + +# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then +# doxygen will generate a detailed section even if there is only a brief +# description. +# The default value is: NO. + +ALWAYS_DETAILED_SEC = NO + +# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all +# inherited members of a class in the documentation of that class as if those +# members were ordinary class members. Constructors, destructors and assignment +# operators of the base classes will not be shown. +# The default value is: NO. + +INLINE_INHERITED_MEMB = NO + +# If the FULL_PATH_NAMES tag is set to YES, doxygen will prepend the full path +# before files name in the file list and in the header files. If set to NO the +# shortest path that makes the file name unique will be used +# The default value is: YES. 
+ +FULL_PATH_NAMES = YES + +# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path. +# Stripping is only done if one of the specified strings matches the left-hand +# part of the path. The tag can be used to show relative paths in the file list. +# If left blank the directory from which doxygen is run is used as the path to +# strip. +# +# Note that you can specify absolute paths here, but also relative paths, which +# will be relative from the directory where doxygen is started. +# This tag requires that the tag FULL_PATH_NAMES is set to YES. + +STRIP_FROM_PATH = + +# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the +# path mentioned in the documentation of a class, which tells the reader which +# header file to include in order to use a class. If left blank only the name of +# the header file containing the class definition is used. Otherwise one should +# specify the list of include paths that are normally passed to the compiler +# using the -I flag. + +STRIP_FROM_INC_PATH = + +# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but +# less readable) file names. This can be useful is your file systems doesn't +# support long names like on DOS, Mac, or CD-ROM. +# The default value is: NO. + +SHORT_NAMES = NO + +# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the +# first line (until the first dot) of a Javadoc-style comment as the brief +# description. If set to NO, the Javadoc-style will behave just like regular Qt- +# style comments (thus requiring an explicit @brief command for a brief +# description.) +# The default value is: NO. + +JAVADOC_AUTOBRIEF = YES + +# If the JAVADOC_BANNER tag is set to YES then doxygen will interpret a line +# such as +# /*************** +# as being the beginning of a Javadoc-style comment "banner". If set to NO, the +# Javadoc-style will behave just like regular comments and it will not be +# interpreted by doxygen. 
+# The default value is: NO. + +JAVADOC_BANNER = NO + +# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first +# line (until the first dot) of a Qt-style comment as the brief description. If +# set to NO, the Qt-style will behave just like regular Qt-style comments (thus +# requiring an explicit \brief command for a brief description.) +# The default value is: NO. + +QT_AUTOBRIEF = NO + +# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a +# multi-line C++ special comment block (i.e. a block of //! or /// comments) as +# a brief description. This used to be the default behavior. The new default is +# to treat a multi-line C++ comment block as a detailed description. Set this +# tag to YES if you prefer the old behavior instead. +# +# Note that setting this tag to YES also means that rational rose comments are +# not recognized any more. +# The default value is: NO. + +MULTILINE_CPP_IS_BRIEF = NO + +# By default Python docstrings are displayed as preformatted text and doxygen's +# special commands cannot be used. By setting PYTHON_DOCSTRING to NO the +# doxygen's special commands can be used and the contents of the docstring +# documentation blocks is shown as doxygen documentation. +# The default value is: YES. + +PYTHON_DOCSTRING = YES + +# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the +# documentation from any documented member that it re-implements. +# The default value is: YES. + +INHERIT_DOCS = YES + +# If the SEPARATE_MEMBER_PAGES tag is set to YES then doxygen will produce a new +# page for each member. If set to NO, the documentation of a member will be part +# of the file/class/namespace that contains it. +# The default value is: NO. + +SEPARATE_MEMBER_PAGES = NO + +# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen +# uses this value to replace tabs by spaces in code fragments. +# Minimum value: 1, maximum value: 16, default value: 4. 
+ +TAB_SIZE = 2 + +# This tag can be used to specify a number of aliases that act as commands in +# the documentation. An alias has the form: +# name=value +# For example adding +# "sideeffect=@par Side Effects:\n" +# will allow you to put the command \sideeffect (or @sideeffect) in the +# documentation, which will result in a user-defined paragraph with heading +# "Side Effects:". You can put \n's in the value part of an alias to insert +# newlines (in the resulting output). You can put ^^ in the value part of an +# alias to insert a newline as if a physical newline was in the original file. +# When you need a literal { or } or , in the value part of an alias you have to +# escape them by means of a backslash (\), this can lead to conflicts with the +# commands \{ and \} for these it is advised to use the version @{ and @} or use +# a double escape (\\{ and \\}) + +ALIASES = + +# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources +# only. Doxygen will then generate output that is more tailored for C. For +# instance, some of the names that are used will be different. The list of all +# members will be omitted, etc. +# The default value is: NO. + +OPTIMIZE_OUTPUT_FOR_C = YES + +# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or +# Python sources only. Doxygen will then generate output that is more tailored +# for that language. For instance, namespaces will be presented as packages, +# qualified scopes will look different, etc. +# The default value is: NO. + +OPTIMIZE_OUTPUT_JAVA = NO + +# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran +# sources. Doxygen will then generate output that is tailored for Fortran. +# The default value is: NO. + +OPTIMIZE_FOR_FORTRAN = NO + +# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL +# sources. Doxygen will then generate output that is tailored for VHDL. +# The default value is: NO. 
+ +OPTIMIZE_OUTPUT_VHDL = NO + +# Set the OPTIMIZE_OUTPUT_SLICE tag to YES if your project consists of Slice +# sources only. Doxygen will then generate output that is more tailored for that +# language. For instance, namespaces will be presented as modules, types will be +# separated into more groups, etc. +# The default value is: NO. + +OPTIMIZE_OUTPUT_SLICE = NO + +# Doxygen selects the parser to use depending on the extension of the files it +# parses. With this tag you can assign which parser to use for a given +# extension. Doxygen has a built-in mapping, but you can override or extend it +# using this tag. The format is ext=language, where ext is a file extension, and +# language is one of the parsers supported by doxygen: IDL, Java, JavaScript, +# Csharp (C#), C, C++, D, PHP, md (Markdown), Objective-C, Python, Slice, VHDL, +# Fortran (fixed format Fortran: FortranFixed, free formatted Fortran: +# FortranFree, unknown formatted Fortran: Fortran. In the later case the parser +# tries to guess whether the code is fixed or free formatted code, this is the +# default for Fortran type files). For instance to make doxygen treat .inc files +# as Fortran files (default is PHP), and .f files as C (default is Fortran), +# use: inc=Fortran f=C. +# +# Note: For files without extension you can use no_extension as a placeholder. +# +# Note that for custom extensions you also need to set FILE_PATTERNS otherwise +# the files are not read by doxygen. When specifying no_extension you should add +# * to the FILE_PATTERNS. +# +# Note see also the list of default file extension mappings. + +EXTENSION_MAPPING = + +# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments +# according to the Markdown format, which allows for more readable +# documentation. See https://daringfireball.net/projects/markdown/ for details. 
+# The output of markdown processing is further processed by doxygen, so you can
+# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in
+# case of backward compatibility issues.
+# The default value is: YES.
+
+MARKDOWN_SUPPORT = YES
+
+# When the TOC_INCLUDE_HEADINGS tag is set to a non-zero value, all headings up
+# to that level are automatically included in the table of contents, even if
+# they do not have an id attribute.
+# Note: This feature currently applies only to Markdown headings.
+# Minimum value: 0, maximum value: 99, default value: 5.
+# This tag requires that the tag MARKDOWN_SUPPORT is set to YES.
+
+TOC_INCLUDE_HEADINGS = 0
+
+# When enabled doxygen tries to link words that correspond to documented
+# classes, or namespaces to their corresponding documentation. Such a link can
+# be prevented in individual cases by putting a % sign in front of the word or
+# globally by setting AUTOLINK_SUPPORT to NO.
+# The default value is: YES.
+
+AUTOLINK_SUPPORT = YES
+
+# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
+# to include (a tag file for) the STL sources as input, then you should set this
+# tag to YES in order to let doxygen match function declarations and
+# definitions whose arguments contain STL classes (e.g. func(std::string);
+# versus func(std::string) {}). This also makes the inheritance and collaboration
+# diagrams that involve STL classes more complete and accurate.
+# The default value is: NO.
+
+BUILTIN_STL_SUPPORT = NO
+
+# If you use Microsoft's C++/CLI language, you should set this option to YES to
+# enable parsing support.
+# The default value is: NO.
+
+CPP_CLI_SUPPORT = NO
+
+# Set the SIP_SUPPORT tag to YES if your project consists of sip (see:
+# https://www.riverbankcomputing.com/software/sip/intro) sources only. 
Doxygen +# will parse them like normal C++ but will assume all classes use public instead +# of private inheritance when no explicit protection keyword is present. +# The default value is: NO. + +SIP_SUPPORT = NO + +# For Microsoft's IDL there are propget and propput attributes to indicate +# getter and setter methods for a property. Setting this option to YES will make +# doxygen to replace the get and set methods by a property in the documentation. +# This will only work if the methods are indeed getting or setting a simple +# type. If this is not the case, or you want to show the methods anyway, you +# should set this option to NO. +# The default value is: YES. + +IDL_PROPERTY_SUPPORT = YES + +# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC +# tag is set to YES then doxygen will reuse the documentation of the first +# member in the group (if any) for the other members of the group. By default +# all members of a group must be documented explicitly. +# The default value is: NO. + +DISTRIBUTE_GROUP_DOC = NO + +# If one adds a struct or class to a group and this option is enabled, then also +# any nested class or struct is added to the same group. By default this option +# is disabled and one has to add nested compounds explicitly via \ingroup. +# The default value is: NO. + +GROUP_NESTED_COMPOUNDS = NO + +# Set the SUBGROUPING tag to YES to allow class member groups of the same type +# (for instance a group of public functions) to be put as a subgroup of that +# type (e.g. under the Public Functions section). Set it to NO to prevent +# subgrouping. Alternatively, this can be done per class using the +# \nosubgrouping command. +# The default value is: YES. + +SUBGROUPING = YES + +# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions +# are shown inside the group in which they are included (e.g. using \ingroup) +# instead of on a separate page (for HTML and Man pages) or section (for LaTeX +# and RTF). 
+# +# Note that this feature does not work in combination with +# SEPARATE_MEMBER_PAGES. +# The default value is: NO. + +INLINE_GROUPED_CLASSES = NO + +# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions +# with only public data fields or simple typedef fields will be shown inline in +# the documentation of the scope in which they are defined (i.e. file, +# namespace, or group documentation), provided this scope is documented. If set +# to NO, structs, classes, and unions are shown on a separate page (for HTML and +# Man pages) or section (for LaTeX and RTF). +# The default value is: NO. + +INLINE_SIMPLE_STRUCTS = YES + +# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or +# enum is documented as struct, union, or enum with the name of the typedef. So +# typedef struct TypeS {} TypeT, will appear in the documentation as a struct +# with name TypeT. When disabled the typedef will appear as a member of a file, +# namespace, or class. And the struct will be named TypeS. This can typically be +# useful for C code in case the coding convention dictates that all compound +# types are typedef'ed and only the typedef is referenced, never the tag name. +# The default value is: NO. + +TYPEDEF_HIDES_STRUCT = YES + +# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This +# cache is used to resolve symbols given their name and scope. Since this can be +# an expensive process and often the same symbol appears multiple times in the +# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small +# doxygen will become slower. If the cache is too large, memory is wasted. The +# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range +# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536 +# symbols. At the end of a run doxygen will report the cache usage and suggest +# the optimal cache size from a speed point of view. 
+# Minimum value: 0, maximum value: 9, default value: 0.
+
+LOOKUP_CACHE_SIZE = 0
+
+# The NUM_PROC_THREADS specifies the number of threads doxygen is allowed to use
+# during processing. When set to 0 doxygen will base this on the number of
+# cores available in the system. You can set it explicitly to a value larger
+# than 0 to get more control over the balance between CPU load and processing
+# speed. At this moment only the input processing can be done using multiple
+# threads. Since this is still an experimental feature the default is set to 1,
+# which effectively disables parallel processing. Please report any issues you
+# encounter. Generating dot graphs in parallel is controlled by the
+# DOT_NUM_THREADS setting.
+# Minimum value: 0, maximum value: 32, default value: 1.
+
+NUM_PROC_THREADS = 1
+
+#---------------------------------------------------------------------------
+# Build related configuration options
+#---------------------------------------------------------------------------
+
+# If the EXTRACT_ALL tag is set to YES, doxygen will assume all entities in
+# documentation are documented, even if no documentation was available. Private
+# class members and static file members will be hidden unless the
+# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES.
+# Note: This will also disable the warnings about undocumented members that are
+# normally produced when WARNINGS is set to YES.
+# The default value is: NO.
+
+EXTRACT_ALL = YES
+
+# If the EXTRACT_PRIVATE tag is set to YES, all private members of a class will
+# be included in the documentation.
+# The default value is: NO.
+
+EXTRACT_PRIVATE = NO
+
+# If the EXTRACT_PRIV_VIRTUAL tag is set to YES, documented private virtual
+# methods of a class will be included in the documentation.
+# The default value is: NO.
+
+EXTRACT_PRIV_VIRTUAL = NO
+
+# If the EXTRACT_PACKAGE tag is set to YES, all members with package or internal
+# scope will be included in the documentation. 
+# The default value is: NO. + +EXTRACT_PACKAGE = NO + +# If the EXTRACT_STATIC tag is set to YES, all static members of a file will be +# included in the documentation. +# The default value is: NO. + +EXTRACT_STATIC = NO + +# If the EXTRACT_LOCAL_CLASSES tag is set to YES, classes (and structs) defined +# locally in source files will be included in the documentation. If set to NO, +# only classes defined in header files are included. Does not have any effect +# for Java sources. +# The default value is: YES. + +EXTRACT_LOCAL_CLASSES = YES + +# This flag is only useful for Objective-C code. If set to YES, local methods, +# which are defined in the implementation section but not in the interface are +# included in the documentation. If set to NO, only methods in the interface are +# included. +# The default value is: NO. + +EXTRACT_LOCAL_METHODS = NO + +# If this flag is set to YES, the members of anonymous namespaces will be +# extracted and appear in the documentation as a namespace called +# 'anonymous_namespace{file}', where file will be replaced with the base name of +# the file that contains the anonymous namespace. By default anonymous namespace +# are hidden. +# The default value is: NO. + +EXTRACT_ANON_NSPACES = NO + +# If this flag is set to YES, the name of an unnamed parameter in a declaration +# will be determined by the corresponding definition. By default unnamed +# parameters remain unnamed in the output. +# The default value is: YES. + +RESOLVE_UNNAMED_PARAMS = YES + +# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all +# undocumented members inside documented classes or files. If set to NO these +# members will be included in the various overviews, but no documentation +# section is generated. This option has no effect if EXTRACT_ALL is enabled. +# The default value is: NO. 
+
+HIDE_UNDOC_MEMBERS = NO
+
+# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all
+# undocumented classes that are normally visible in the class hierarchy. If set
+# to NO, these classes will be included in the various overviews. This option
+# has no effect if EXTRACT_ALL is enabled.
+# The default value is: NO.
+
+HIDE_UNDOC_CLASSES = NO
+
+# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend
+# declarations. If set to NO, these declarations will be included in the
+# documentation.
+# The default value is: NO.
+
+HIDE_FRIEND_COMPOUNDS = NO
+
+# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any
+# documentation blocks found inside the body of a function. If set to NO, these
+# blocks will be appended to the function's detailed documentation block.
+# The default value is: NO.
+
+HIDE_IN_BODY_DOCS = NO
+
+# The INTERNAL_DOCS tag determines if documentation that is typed after a
+# \internal command is included. If the tag is set to NO then the documentation
+# will be excluded. Set it to YES to include the internal documentation.
+# The default value is: NO.
+
+INTERNAL_DOCS = NO
+
+# With the correct setting of option CASE_SENSE_NAMES doxygen will better be
+# able to match the capabilities of the underlying filesystem. In case the
+# filesystem is case sensitive (i.e. it supports files in the same directory
+# whose names only differ in casing), the option must be set to YES to properly
+# deal with such files in case they appear in the input. For filesystems that
+# are not case sensitive the option should be set to NO to properly deal with
+# output files written for symbols that only differ in casing, such as for two
+# classes, one named CLASS and the other named Class, and to also support
+# references to files without having to specify the exact matching casing. 
On +# Windows (including Cygwin) and MacOS, users should typically set this option +# to NO, whereas on Linux or other Unix flavors it should typically be set to +# YES. +# The default value is: system dependent. + +CASE_SENSE_NAMES = NO + +# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with +# their full class and namespace scopes in the documentation. If set to YES, the +# scope will be hidden. +# The default value is: NO. + +HIDE_SCOPE_NAMES = NO + +# If the HIDE_COMPOUND_REFERENCE tag is set to NO (default) then doxygen will +# append additional text to a page's title, such as Class Reference. If set to +# YES the compound reference will be hidden. +# The default value is: NO. + +HIDE_COMPOUND_REFERENCE= NO + +# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of +# the files that are included by a file in the documentation of that file. +# The default value is: YES. + +SHOW_INCLUDE_FILES = YES + +# If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each +# grouped member an include statement to the documentation, telling the reader +# which file to include in order to use the member. +# The default value is: NO. + +SHOW_GROUPED_MEMB_INC = NO + +# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include +# files with double quotes in the documentation rather than with sharp brackets. +# The default value is: NO. + +FORCE_LOCAL_INCLUDES = NO + +# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the +# documentation for inline members. +# The default value is: YES. + +INLINE_INFO = YES + +# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the +# (detailed) documentation of file and class members alphabetically by member +# name. If set to NO, the members will appear in declaration order. +# The default value is: YES. 
+ +SORT_MEMBER_DOCS = YES + +# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief +# descriptions of file, namespace and class members alphabetically by member +# name. If set to NO, the members will appear in declaration order. Note that +# this will also influence the order of the classes in the class list. +# The default value is: NO. + +SORT_BRIEF_DOCS = NO + +# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the +# (brief and detailed) documentation of class members so that constructors and +# destructors are listed first. If set to NO the constructors will appear in the +# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS. +# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief +# member documentation. +# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting +# detailed member documentation. +# The default value is: NO. + +SORT_MEMBERS_CTORS_1ST = NO + +# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy +# of group names into alphabetical order. If set to NO the group names will +# appear in their defined order. +# The default value is: NO. + +SORT_GROUP_NAMES = NO + +# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by +# fully-qualified names, including namespaces. If set to NO, the class list will +# be sorted only by class name, not including the namespace part. +# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. +# Note: This option applies only to the class list, not to the alphabetical +# list. +# The default value is: NO. 
+ +SORT_BY_SCOPE_NAME = NO + +# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper +# type resolution of all parameters of a function it will reject a match between +# the prototype and the implementation of a member function even if there is +# only one candidate or it is obvious which candidate to choose by doing a +# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still +# accept a match between prototype and implementation in such cases. +# The default value is: NO. + +STRICT_PROTO_MATCHING = NO + +# The GENERATE_TODOLIST tag can be used to enable (YES) or disable (NO) the todo +# list. This list is created by putting \todo commands in the documentation. +# The default value is: YES. + +GENERATE_TODOLIST = YES + +# The GENERATE_TESTLIST tag can be used to enable (YES) or disable (NO) the test +# list. This list is created by putting \test commands in the documentation. +# The default value is: YES. + +GENERATE_TESTLIST = YES + +# The GENERATE_BUGLIST tag can be used to enable (YES) or disable (NO) the bug +# list. This list is created by putting \bug commands in the documentation. +# The default value is: YES. + +GENERATE_BUGLIST = YES + +# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or disable (NO) +# the deprecated list. This list is created by putting \deprecated commands in +# the documentation. +# The default value is: YES. + +GENERATE_DEPRECATEDLIST= YES + +# The ENABLED_SECTIONS tag can be used to enable conditional documentation +# sections, marked by \if ... \endif and \cond +# ... \endcond blocks. + +ENABLED_SECTIONS = + +# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the +# initial value of a variable or macro / define can have for it to appear in the +# documentation. If the initializer consists of more lines than specified here +# it will be hidden. Use a value of 0 to hide initializers completely. 
The +# appearance of the value of individual variables and macros / defines can be +# controlled using \showinitializer or \hideinitializer command in the +# documentation regardless of this setting. +# Minimum value: 0, maximum value: 10000, default value: 30. + +MAX_INITIALIZER_LINES = 0 + +# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at +# the bottom of the documentation of classes and structs. If set to YES, the +# list will mention the files that were used to generate the documentation. +# The default value is: YES. + +SHOW_USED_FILES = NO + +# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This +# will remove the Files entry from the Quick Index and from the Folder Tree View +# (if specified). +# The default value is: YES. + +SHOW_FILES = NO + +# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces +# page. This will remove the Namespaces entry from the Quick Index and from the +# Folder Tree View (if specified). +# The default value is: YES. + +SHOW_NAMESPACES = YES + +# The FILE_VERSION_FILTER tag can be used to specify a program or script that +# doxygen should invoke to get the current version for each file (typically from +# the version control system). Doxygen will invoke the program by executing (via +# popen()) the command command input-file, where command is the value of the +# FILE_VERSION_FILTER tag, and input-file is the name of an input file provided +# by doxygen. Whatever the program writes to standard output is used as the file +# version. For an example see the documentation. + +FILE_VERSION_FILTER = + +# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed +# by doxygen. The layout file controls the global structure of the generated +# output files in an output format independent way. To create the layout file +# that represents doxygen's defaults, run doxygen with the -l option. 
You can +# optionally specify a file name after the option, if omitted DoxygenLayout.xml +# will be used as the name of the layout file. +# +# Note that if you run doxygen from a directory containing a file called +# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE +# tag is left empty. + +LAYOUT_FILE = + +# The CITE_BIB_FILES tag can be used to specify one or more bib files containing +# the reference definitions. This must be a list of .bib files. The .bib +# extension is automatically appended if omitted. This requires the bibtex tool +# to be installed. See also https://en.wikipedia.org/wiki/BibTeX for more info. +# For LaTeX the style of the bibliography can be controlled using +# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the +# search path. See also \cite for info how to create references. + +CITE_BIB_FILES = + +#--------------------------------------------------------------------------- +# Configuration options related to warning and progress messages +#--------------------------------------------------------------------------- + +# The QUIET tag can be used to turn on/off the messages that are generated to +# standard output by doxygen. If QUIET is set to YES this implies that the +# messages are off. +# The default value is: NO. + +QUIET = NO + +# The WARNINGS tag can be used to turn on/off the warning messages that are +# generated to standard error (stderr) by doxygen. If WARNINGS is set to YES +# this implies that the warnings are on. +# +# Tip: Turn warnings on while writing the documentation. +# The default value is: YES. + +WARNINGS = YES + +# If the WARN_IF_UNDOCUMENTED tag is set to YES then doxygen will generate +# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag +# will automatically be disabled. +# The default value is: YES. 
+ +WARN_IF_UNDOCUMENTED = YES + +# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for +# potential errors in the documentation, such as not documenting some parameters +# in a documented function, or documenting parameters that don't exist or using +# markup commands wrongly. +# The default value is: YES. + +WARN_IF_DOC_ERROR = YES + +# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that +# are documented, but have no documentation for their parameters or return +# value. If set to NO, doxygen will only warn about wrong or incomplete +# parameter documentation, but not about the absence of documentation. If +# EXTRACT_ALL is set to YES then this flag will automatically be disabled. +# The default value is: NO. + +WARN_NO_PARAMDOC = NO + +# If the WARN_AS_ERROR tag is set to YES then doxygen will immediately stop when +# a warning is encountered. If the WARN_AS_ERROR tag is set to FAIL_ON_WARNINGS +# then doxygen will continue running as if WARN_AS_ERROR tag is set to NO, but +# at the end of the doxygen process doxygen will return with a non-zero status. +# Possible values are: NO, YES and FAIL_ON_WARNINGS. +# The default value is: NO. + +WARN_AS_ERROR = NO + +# The WARN_FORMAT tag determines the format of the warning messages that doxygen +# can produce. The string should contain the $file, $line, and $text tags, which +# will be replaced by the file and line number from which the warning originated +# and the warning text. Optionally the format may contain $version, which will +# be replaced by the version of the file (if it could be obtained via +# FILE_VERSION_FILTER) +# The default value is: $file:$line: $text. + +WARN_FORMAT = "$file:$line: $text" + +# The WARN_LOGFILE tag can be used to specify a file to which warning and error +# messages should be written. If left blank the output is written to standard +# error (stderr). 
+ +WARN_LOGFILE = + +#--------------------------------------------------------------------------- +# Configuration options related to the input files +#--------------------------------------------------------------------------- + +# The INPUT tag is used to specify the files and/or directories that contain +# documented source files. You may enter file names like myfile.cpp or +# directories like /usr/src/myproject. Separate the files or directories with +# spaces. See also FILE_PATTERNS and EXTENSION_MAPPING +# Note: If this tag is empty the current directory is searched. + +INPUT = mimalloc-doc.h + +# This tag can be used to specify the character encoding of the source files +# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses +# libiconv (or the iconv built into libc) for the transcoding. See the libiconv +# documentation (see: +# https://www.gnu.org/software/libiconv/) for the list of possible encodings. +# The default value is: UTF-8. + +INPUT_ENCODING = UTF-8 + +# If the value of the INPUT tag contains directories, you can use the +# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and +# *.h) to filter out the source-files in the directories. +# +# Note that for custom extensions or not directly supported extensions you also +# need to set EXTENSION_MAPPING for the extension otherwise the files are not +# read by doxygen. +# +# Note the list of default checked file patterns might differ from the list of +# default file extension mappings. +# +# If left blank the following patterns are tested:*.c, *.cc, *.cxx, *.cpp, +# *.c++, *.java, *.ii, *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, +# *.hh, *.hxx, *.hpp, *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, +# *.m, *.markdown, *.md, *.mm, *.dox (to be provided as doxygen C comment), +# *.py, *.pyw, *.f90, *.f95, *.f03, *.f08, *.f18, *.f, *.for, *.vhd, *.vhdl, +# *.ucf, *.qsf and *.ice. 
+ +FILE_PATTERNS = *.c \ + *.cc \ + *.cxx \ + *.cpp \ + *.c++ \ + *.java \ + *.ii \ + *.ixx \ + *.ipp \ + *.i++ \ + *.inl \ + *.idl \ + *.ddl \ + *.odl \ + *.h \ + *.hh \ + *.hxx \ + *.hpp \ + *.h++ \ + *.cs \ + *.d \ + *.php \ + *.php4 \ + *.php5 \ + *.phtml \ + *.inc \ + *.m \ + *.markdown \ + *.md \ + *.mm \ + *.dox \ + *.py \ + *.pyw \ + *.f90 \ + *.f95 \ + *.f03 \ + *.f08 \ + *.f \ + *.for \ + *.tcl \ + *.vhd \ + *.vhdl \ + *.ucf \ + *.qsf + +# The RECURSIVE tag can be used to specify whether or not subdirectories should +# be searched for input files as well. +# The default value is: NO. + +RECURSIVE = NO + +# The EXCLUDE tag can be used to specify files and/or directories that should be +# excluded from the INPUT source files. This way you can easily exclude a +# subdirectory from a directory tree whose root is specified with the INPUT tag. +# +# Note that relative paths are relative to the directory from which doxygen is +# run. + +EXCLUDE = + +# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or +# directories that are symbolic links (a Unix file system feature) are excluded +# from the input. +# The default value is: NO. + +EXCLUDE_SYMLINKS = NO + +# If the value of the INPUT tag contains directories, you can use the +# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude +# certain files from those directories. +# +# Note that the wildcards are matched against the file with absolute path, so to +# exclude all test directories for example use the pattern */test/* + +EXCLUDE_PATTERNS = + +# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names +# (namespaces, classes, functions, etc.) that should be excluded from the +# output. The symbol name can be a fully qualified name, a word, or if the +# wildcard * is used, a substring. 
Examples: ANamespace, AClass, +# AClass::ANamespace, ANamespace::*Test +# +# Note that the wildcards are matched against the file with absolute path, so to +# exclude all test directories use the pattern */test/* + +EXCLUDE_SYMBOLS = + +# The EXAMPLE_PATH tag can be used to specify one or more files or directories +# that contain example code fragments that are included (see the \include +# command). + +EXAMPLE_PATH = + +# If the value of the EXAMPLE_PATH tag contains directories, you can use the +# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and +# *.h) to filter out the source-files in the directories. If left blank all +# files are included. + +EXAMPLE_PATTERNS = * + +# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be +# searched for input files to be used with the \include or \dontinclude commands +# irrespective of the value of the RECURSIVE tag. +# The default value is: NO. + +EXAMPLE_RECURSIVE = NO + +# The IMAGE_PATH tag can be used to specify one or more files or directories +# that contain images that are to be included in the documentation (see the +# \image command). + +IMAGE_PATH = + +# The INPUT_FILTER tag can be used to specify a program that doxygen should +# invoke to filter for each input file. Doxygen will invoke the filter program +# by executing (via popen()) the command: +# +# +# +# where is the value of the INPUT_FILTER tag, and is the +# name of an input file. Doxygen will then use the output that the filter +# program writes to standard output. If FILTER_PATTERNS is specified, this tag +# will be ignored. +# +# Note that the filter must not add or remove lines; it is applied before the +# code is scanned, but not when the output code is generated. If lines are added +# or removed, the anchors will not be placed correctly. 
+# +# Note that for custom extensions or not directly supported extensions you also +# need to set EXTENSION_MAPPING for the extension otherwise the files are not +# properly processed by doxygen. + +INPUT_FILTER = + +# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern +# basis. Doxygen will compare the file name with each pattern and apply the +# filter if there is a match. The filters are a list of the form: pattern=filter +# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how +# filters are used. If the FILTER_PATTERNS tag is empty or if none of the +# patterns match the file name, INPUT_FILTER is applied. +# +# Note that for custom extensions or not directly supported extensions you also +# need to set EXTENSION_MAPPING for the extension otherwise the files are not +# properly processed by doxygen. + +FILTER_PATTERNS = + +# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using +# INPUT_FILTER) will also be used to filter the input files that are used for +# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES). +# The default value is: NO. + +FILTER_SOURCE_FILES = NO + +# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file +# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and +# it is also possible to disable source filtering for a specific pattern using +# *.ext= (so without naming a filter). +# This tag requires that the tag FILTER_SOURCE_FILES is set to YES. + +FILTER_SOURCE_PATTERNS = + +# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that +# is part of the input, its contents will be placed on the main page +# (index.html). This can be useful if you have a project on for instance GitHub +# and want to reuse the introduction page also for the doxygen output. 
+ +USE_MDFILE_AS_MAINPAGE = + +#--------------------------------------------------------------------------- +# Configuration options related to source browsing +#--------------------------------------------------------------------------- + +# If the SOURCE_BROWSER tag is set to YES then a list of source files will be +# generated. Documented entities will be cross-referenced with these sources. +# +# Note: To get rid of all source code in the generated output, make sure that +# also VERBATIM_HEADERS is set to NO. +# The default value is: NO. + +SOURCE_BROWSER = NO + +# Setting the INLINE_SOURCES tag to YES will include the body of functions, +# classes and enums directly into the documentation. +# The default value is: NO. + +INLINE_SOURCES = NO + +# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any +# special comment blocks from generated source code fragments. Normal C, C++ and +# Fortran comments will always remain visible. +# The default value is: YES. + +STRIP_CODE_COMMENTS = YES + +# If the REFERENCED_BY_RELATION tag is set to YES then for each documented +# entity all documented functions referencing it will be listed. +# The default value is: NO. + +REFERENCED_BY_RELATION = NO + +# If the REFERENCES_RELATION tag is set to YES then for each documented function +# all documented entities called/used by that function will be listed. +# The default value is: NO. + +REFERENCES_RELATION = NO + +# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set +# to YES then the hyperlinks from functions in REFERENCES_RELATION and +# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will +# link to the documentation. +# The default value is: YES. 
+ +REFERENCES_LINK_SOURCE = YES + +# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the +# source code will show a tooltip with additional information such as prototype, +# brief description and links to the definition and documentation. Since this +# will make the HTML file larger and loading of large files a bit slower, you +# can opt to disable this feature. +# The default value is: YES. +# This tag requires that the tag SOURCE_BROWSER is set to YES. + +SOURCE_TOOLTIPS = YES + +# If the USE_HTAGS tag is set to YES then the references to source code will +# point to the HTML generated by the htags(1) tool instead of doxygen built-in +# source browser. The htags tool is part of GNU's global source tagging system +# (see https://www.gnu.org/software/global/global.html). You will need version +# 4.8.6 or higher. +# +# To use it do the following: +# - Install the latest version of global +# - Enable SOURCE_BROWSER and USE_HTAGS in the configuration file +# - Make sure the INPUT points to the root of the source tree +# - Run doxygen as normal +# +# Doxygen will invoke htags (and that will in turn invoke gtags), so these +# tools must be available from the command line (i.e. in the search path). +# +# The result: instead of the source browser generated by doxygen, the links to +# source code will now point to the output of htags. +# The default value is: NO. +# This tag requires that the tag SOURCE_BROWSER is set to YES. + +USE_HTAGS = NO + +# If the VERBATIM_HEADERS tag is set the YES then doxygen will generate a +# verbatim copy of the header file for each class for which an include is +# specified. Set to NO to disable this. +# See also: Section \class. +# The default value is: YES. + +VERBATIM_HEADERS = YES + +# If the CLANG_ASSISTED_PARSING tag is set to YES then doxygen will use the +# clang parser (see: +# http://clang.llvm.org/) for more accurate parsing at the cost of reduced +# performance. 
This can be particularly helpful with template rich C++ code for +# which doxygen's built-in parser lacks the necessary type information. +# Note: The availability of this option depends on whether or not doxygen was +# generated with the -Duse_libclang=ON option for CMake. +# The default value is: NO. + +CLANG_ASSISTED_PARSING = NO + +# If clang assisted parsing is enabled and the CLANG_ADD_INC_PATHS tag is set to +# YES then doxygen will add the directory of each input to the include path. +# The default value is: YES. + +CLANG_ADD_INC_PATHS = YES + +# If clang assisted parsing is enabled you can provide the compiler with command +# line options that you would normally use when invoking the compiler. Note that +# the include paths will already be set by doxygen for the files and directories +# specified with INPUT and INCLUDE_PATH. +# This tag requires that the tag CLANG_ASSISTED_PARSING is set to YES. + +CLANG_OPTIONS = + +# If clang assisted parsing is enabled you can provide the clang parser with the +# path to the directory containing a file called compile_commands.json. This +# file is the compilation database (see: +# http://clang.llvm.org/docs/HowToSetupToolingForLLVM.html) containing the +# options used when the source files were built. This is equivalent to +# specifying the -p option to a clang tool, such as clang-check. These options +# will then be passed to the parser. Any options specified with CLANG_OPTIONS +# will be added as well. +# Note: The availability of this option depends on whether or not doxygen was +# generated with the -Duse_libclang=ON option for CMake. + +CLANG_DATABASE_PATH = + +#--------------------------------------------------------------------------- +# Configuration options related to the alphabetical class index +#--------------------------------------------------------------------------- + +# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all +# compounds will be generated. 
Enable this if the project contains a lot of +# classes, structs, unions or interfaces. +# The default value is: YES. + +ALPHABETICAL_INDEX = YES + +# In case all classes in a project start with a common prefix, all classes will +# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag +# can be used to specify a prefix (or a list of prefixes) that should be ignored +# while generating the index headers. +# This tag requires that the tag ALPHABETICAL_INDEX is set to YES. + +IGNORE_PREFIX = + +#--------------------------------------------------------------------------- +# Configuration options related to the HTML output +#--------------------------------------------------------------------------- + +# If the GENERATE_HTML tag is set to YES, doxygen will generate HTML output +# The default value is: YES. + +GENERATE_HTML = YES + +# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a +# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of +# it. +# The default directory is: html. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_OUTPUT = docs + +# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each +# generated HTML page (for example: .htm, .php, .asp). +# The default value is: .html. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_FILE_EXTENSION = .html + +# The HTML_HEADER tag can be used to specify a user-defined HTML header file for +# each generated HTML page. If the tag is left blank doxygen will generate a +# standard header. +# +# To get valid HTML the header file that includes any scripts and style sheets +# that doxygen needs, which is dependent on the configuration options used (e.g. +# the setting GENERATE_TREEVIEW). It is highly recommended to start with a +# default header using +# doxygen -w html new_header.html new_footer.html new_stylesheet.css +# YourConfigFile +# and then modify the file new_header.html. 
See also section "Doxygen usage" +# for information on how to generate the default header that doxygen normally +# uses. +# Note: The header is subject to change so you typically have to regenerate the +# default header when upgrading to a newer version of doxygen. For a description +# of the possible markers and block names see the documentation. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_HEADER = + +# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each +# generated HTML page. If the tag is left blank doxygen will generate a standard +# footer. See HTML_HEADER for more information on how to generate a default +# footer and what special commands can be used inside the footer. See also +# section "Doxygen usage" for information on how to generate the default footer +# that doxygen normally uses. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_FOOTER = + +# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style +# sheet that is used by each HTML page. It can be used to fine-tune the look of +# the HTML output. If left blank doxygen will generate a default style sheet. +# See also section "Doxygen usage" for information on how to generate the style +# sheet that doxygen normally uses. +# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as +# it is more robust and this tag (HTML_STYLESHEET) will in the future become +# obsolete. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_STYLESHEET = + +# The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined +# cascading style sheets that are included after the standard style sheets +# created by doxygen. Using this option one can overrule certain style aspects. +# This is preferred over using HTML_STYLESHEET since it does not replace the +# standard style sheet and is therefore more robust against future updates. 
+# Doxygen will copy the style sheet files to the output directory. +# Note: The order of the extra style sheet files is of importance (e.g. the last +# style sheet in the list overrules the setting of the previous ones in the +# list). For an example see the documentation. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_EXTRA_STYLESHEET = mimalloc-doxygen.css + +# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or +# other source files which should be copied to the HTML output directory. Note +# that these files will be copied to the base HTML output directory. Use the +# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these +# files. In the HTML_STYLESHEET file, use the file name only. Also note that the +# files will be copied as-is; there are no commands or markers available. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_EXTRA_FILES = + +# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen +# will adjust the colors in the style sheet and background images according to +# this color. Hue is specified as an angle on a colorwheel, see +# https://en.wikipedia.org/wiki/Hue for more information. For instance the value +# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300 +# purple, and 360 is red again. +# Minimum value: 0, maximum value: 359, default value: 220. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE_HUE = 189 + +# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors +# in the HTML output. For a value of 0 the output will use grayscales only. A +# value of 255 will produce the most vivid colors. +# Minimum value: 0, maximum value: 255, default value: 100. +# This tag requires that the tag GENERATE_HTML is set to YES. 
+ +HTML_COLORSTYLE_SAT = 12 + +# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the +# luminance component of the colors in the HTML output. Values below 100 +# gradually make the output lighter, whereas values above 100 make the output +# darker. The value divided by 100 is the actual gamma applied, so 80 represents +# a gamma of 0.8, The value 220 represents a gamma of 2.2, and 100 does not +# change the gamma. +# Minimum value: 40, maximum value: 240, default value: 80. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE_GAMMA = 240 + +# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML +# page will contain the date and time when the page was generated. Setting this +# to YES can help to show when doxygen was last run and thus if the +# documentation is up to date. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_TIMESTAMP = NO + +# If the HTML_DYNAMIC_MENUS tag is set to YES then the generated HTML +# documentation will contain a main index with vertical navigation menus that +# are dynamically created via JavaScript. If disabled, the navigation index will +# consists of multiple levels of tabs that are statically embedded in every HTML +# page. Disable this option to support browsers that do not have JavaScript, +# like the Qt help browser. +# The default value is: YES. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_DYNAMIC_MENUS = NO + +# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML +# documentation will contain sections that can be hidden and shown after the +# page has loaded. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. 
+ +HTML_DYNAMIC_SECTIONS = NO + +# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries +# shown in the various tree structured indices initially; the user can expand +# and collapse entries dynamically later on. Doxygen will expand the tree to +# such a level that at most the specified number of entries are visible (unless +# a fully collapsed tree already exceeds this amount). So setting the number of +# entries 1 will produce a full collapsed tree by default. 0 is a special value +# representing an infinite number of entries and will result in a full expanded +# tree by default. +# Minimum value: 0, maximum value: 9999, default value: 100. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_INDEX_NUM_ENTRIES = 100 + +# If the GENERATE_DOCSET tag is set to YES, additional index files will be +# generated that can be used as input for Apple's Xcode 3 integrated development +# environment (see: +# https://developer.apple.com/xcode/), introduced with OSX 10.5 (Leopard). To +# create a documentation set, doxygen will generate a Makefile in the HTML +# output directory. Running make will produce the docset in that directory and +# running make install will install the docset in +# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at +# startup. See https://developer.apple.com/library/archive/featuredarticles/Doxy +# genXcode/_index.html for more information. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_DOCSET = NO + +# This tag determines the name of the docset feed. A documentation feed provides +# an umbrella under which multiple documentation sets from a single provider +# (such as a company or product suite) can be grouped. +# The default value is: Doxygen generated docs. +# This tag requires that the tag GENERATE_DOCSET is set to YES. 
+ +DOCSET_FEEDNAME = "Doxygen generated docs" + +# This tag specifies a string that should uniquely identify the documentation +# set bundle. This should be a reverse domain-name style string, e.g. +# com.mycompany.MyDocSet. Doxygen will append .docset to the name. +# The default value is: org.doxygen.Project. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_BUNDLE_ID = org.doxygen.Project + +# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify +# the documentation publisher. This should be a reverse domain-name style +# string, e.g. com.mycompany.MyDocSet.documentation. +# The default value is: org.doxygen.Publisher. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_PUBLISHER_ID = org.doxygen.Publisher + +# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher. +# The default value is: Publisher. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_PUBLISHER_NAME = Publisher + +# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three +# additional HTML index files: index.hhp, index.hhc, and index.hhk. The +# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop +# (see: +# https://www.microsoft.com/en-us/download/details.aspx?id=21138) on Windows. +# +# The HTML Help Workshop contains a compiler that can convert all HTML output +# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML +# files are now used as the Windows 98 help format, and will replace the old +# Windows help format (.hlp) on all Windows platforms in the future. Compressed +# HTML files also contain an index, a table of contents, and you can search for +# words in the documentation. The HTML workshop also contains a viewer for +# compressed HTML files. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. 
+ +GENERATE_HTMLHELP = NO + +# The CHM_FILE tag can be used to specify the file name of the resulting .chm +# file. You can add a path in front of the file if the result should not be +# written to the html output directory. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +CHM_FILE = + +# The HHC_LOCATION tag can be used to specify the location (absolute path +# including file name) of the HTML help compiler (hhc.exe). If non-empty, +# doxygen will try to run the HTML help compiler on the generated index.hhp. +# The file has to be specified with full path. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +HHC_LOCATION = + +# The GENERATE_CHI flag controls if a separate .chi index file is generated +# (YES) or that it should be included in the main .chm file (NO). +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +GENERATE_CHI = NO + +# The CHM_INDEX_ENCODING is used to encode HtmlHelp index (hhk), content (hhc) +# and project file content. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +CHM_INDEX_ENCODING = + +# The BINARY_TOC flag controls whether a binary table of contents is generated +# (YES) or a normal table of contents (NO) in the .chm file. Furthermore it +# enables the Previous and Next buttons. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +BINARY_TOC = NO + +# The TOC_EXPAND flag can be set to YES to add extra items for group members to +# the table of contents of the HTML help documentation and to the tree view. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. 
+ +TOC_EXPAND = NO + +# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and +# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that +# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help +# (.qch) of the generated HTML documentation. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_QHP = NO + +# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify +# the file name of the resulting .qch file. The path specified is relative to +# the HTML output folder. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QCH_FILE = + +# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help +# Project output. For more information please see Qt Help Project / Namespace +# (see: +# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#namespace). +# The default value is: org.doxygen.Project. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_NAMESPACE = org.doxygen.Project + +# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt +# Help Project output. For more information please see Qt Help Project / Virtual +# Folders (see: +# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#virtual-folders). +# The default value is: doc. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_VIRTUAL_FOLDER = doc + +# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom +# filter to add. For more information please see Qt Help Project / Custom +# Filters (see: +# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom-filters). +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_CUST_FILTER_NAME = + +# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the +# custom filter to add. 
For more information please see Qt Help Project / Custom +# Filters (see: +# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom-filters). +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_CUST_FILTER_ATTRS = + +# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this +# project's filter section matches. Qt Help Project / Filter Attributes (see: +# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#filter-attributes). +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_SECT_FILTER_ATTRS = + +# The QHG_LOCATION tag can be used to specify the location (absolute path +# including file name) of Qt's qhelpgenerator. If non-empty doxygen will try to +# run qhelpgenerator on the generated .qhp file. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHG_LOCATION = + +# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be +# generated, together with the HTML files, they form an Eclipse help plugin. To +# install this plugin and make it available under the help contents menu in +# Eclipse, the contents of the directory containing the HTML and XML files needs +# to be copied into the plugins directory of eclipse. The name of the directory +# within the plugins directory should be the same as the ECLIPSE_DOC_ID value. +# After copying Eclipse needs to be restarted before the help appears. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_ECLIPSEHELP = NO + +# A unique identifier for the Eclipse help plugin. When installing the plugin +# the directory name containing the HTML and XML files should also have this +# name. Each documentation set should have its own identifier. +# The default value is: org.doxygen.Project. +# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES. 
+ +ECLIPSE_DOC_ID = org.doxygen.Project + +# If you want full control over the layout of the generated HTML pages it might +# be necessary to disable the index and replace it with your own. The +# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top +# of each HTML page. A value of NO enables the index and the value YES disables +# it. Since the tabs in the index contain the same information as the navigation +# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +DISABLE_INDEX = YES + +# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index +# structure should be generated to display hierarchical information. If the tag +# value is set to YES, a side panel will be generated containing a tree-like +# index structure (just like the one that is generated for HTML Help). For this +# to work a browser that supports JavaScript, DHTML, CSS and frames is required +# (i.e. any modern browser). Windows users are probably better off using the +# HTML help feature. Via custom style sheets (see HTML_EXTRA_STYLESHEET) one can +# further fine-tune the look of the index. As an example, the default style +# sheet generated by doxygen has an example that shows how to put an image at +# the root of the tree instead of the PROJECT_NAME. Since the tree basically has +# the same information as the tab index, you could consider setting +# DISABLE_INDEX to YES when enabling this option. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_TREEVIEW = YES + +# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that +# doxygen will group on one line in the generated HTML documentation. +# +# Note that a value of 0 will completely suppress the enum values from appearing +# in the overview section. +# Minimum value: 0, maximum value: 20, default value: 4. 
+# This tag requires that the tag GENERATE_HTML is set to YES. + +ENUM_VALUES_PER_LINE = 4 + +# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used +# to set the initial width (in pixels) of the frame in which the tree is shown. +# Minimum value: 0, maximum value: 1500, default value: 250. +# This tag requires that the tag GENERATE_HTML is set to YES. + +TREEVIEW_WIDTH = 180 + +# If the EXT_LINKS_IN_WINDOW option is set to YES, doxygen will open links to +# external symbols imported via tag files in a separate window. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +EXT_LINKS_IN_WINDOW = NO + +# If the HTML_FORMULA_FORMAT option is set to svg, doxygen will use the pdf2svg +# tool (see https://github.com/dawbarton/pdf2svg) or inkscape (see +# https://inkscape.org) to generate formulas as SVG images instead of PNGs for +# the HTML output. These images will generally look nicer at scaled resolutions. +# Possible values are: png (the default) and svg (looks nicer but requires the +# pdf2svg or inkscape tool). +# The default value is: png. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_FORMULA_FORMAT = png + +# Use this tag to change the font size of LaTeX formulas included as images in +# the HTML documentation. When you change the font size after a successful +# doxygen run you need to manually remove any form_*.png images from the HTML +# output directory to force them to be regenerated. +# Minimum value: 8, maximum value: 50, default value: 10. +# This tag requires that the tag GENERATE_HTML is set to YES. + +FORMULA_FONTSIZE = 10 + +# Use the FORMULA_TRANSPARENT tag to determine whether or not the images +# generated for formulas are transparent PNGs. Transparent PNGs are not +# supported properly for IE 6.0, but are supported on all modern browsers. 
+# +# Note that when changing this option you need to delete any form_*.png files in +# the HTML output directory before the changes have effect. +# The default value is: YES. +# This tag requires that the tag GENERATE_HTML is set to YES. + +FORMULA_TRANSPARENT = YES + +# The FORMULA_MACROFILE can contain LaTeX \newcommand and \renewcommand commands +# to create new LaTeX commands to be used in formulas as building blocks. See +# the section "Including formulas" for details. + +FORMULA_MACROFILE = + +# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see +# https://www.mathjax.org) which uses client side JavaScript for the rendering +# instead of using pre-rendered bitmaps. Use this if you do not have LaTeX +# installed or if you want to formulas look prettier in the HTML output. When +# enabled you may also need to install MathJax separately and configure the path +# to it using the MATHJAX_RELPATH option. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +USE_MATHJAX = NO + +# When MathJax is enabled you can set the default output format to be used for +# the MathJax output. See the MathJax site (see: +# http://docs.mathjax.org/en/v2.7-latest/output.html) for more details. +# Possible values are: HTML-CSS (which is slower, but has the best +# compatibility), NativeMML (i.e. MathML) and SVG. +# The default value is: HTML-CSS. +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_FORMAT = HTML-CSS + +# When MathJax is enabled you need to specify the location relative to the HTML +# output directory using the MATHJAX_RELPATH option. The destination directory +# should contain the MathJax.js script. For instance, if the mathjax directory +# is located at the same level as the HTML output directory, then +# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax +# Content Delivery Network so you can quickly see the result without installing +# MathJax. 
However, it is strongly recommended to install a local copy of +# MathJax from https://www.mathjax.org before deployment. +# The default value is: https://cdn.jsdelivr.net/npm/mathjax@2. +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest + +# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax +# extension names that should be enabled during MathJax rendering. For example +# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_EXTENSIONS = + +# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces +# of code that will be used on startup of the MathJax code. See the MathJax site +# (see: +# http://docs.mathjax.org/en/v2.7-latest/output.html) for more details. For an +# example see the documentation. +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_CODEFILE = + +# When the SEARCHENGINE tag is enabled doxygen will generate a search box for +# the HTML output. The underlying search engine uses javascript and DHTML and +# should work on any modern browser. Note that when using HTML help +# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET) +# there is already a search function so this one should typically be disabled. +# For large projects the javascript based search engine can be slow, then +# enabling SERVER_BASED_SEARCH may provide a better solution. 
It is possible to +# search using the keyboard; to jump to the search box use + S +# (what the is depends on the OS and browser, but it is typically +# , / zjOurt;XArz2&-{}yIiKegudj}#qx^D%bxpIo~+EL6mM(Y?Jox~bX-lMA3Ys;_R{*H z@vGci!^4ZK%BnA441YCw6thx)O0P`5Tg~UEtA6)3SB$ideMH0$|6!Rw?yD!vXt% zM$zASIY_o5pq5fsO}$hynFH3&l$<3~{FFd3777uVE0?N^(3o(^)!kAfcAo(?l3O23 zEQt^DaAHn#tmNmyS(@8RSbwg@wMkf!-Z6067gPct08l~2r;@|EDG)c(eVG9P5*~g` z`ut<#Z~*{dZEe^~ImT7fTYW@;OB&JA%b7&9eaC1)R<(il3Mvf_*qh>qHYhDgYO-F~ z2hO!UIktgs^QEG4f0ipd~1q{2i=!4xSAvxA-^aY3lyjHfXl_2mKVx zlH<`&$%K6oSThDBG&(e(=`oOKaF}2T4|$9?$FoAL4<^ZU(8uyQEEXw?s{I`8iIwFZ zMKik%G;Ypfwp`QuVZNeAEvEF0_0SDu7kp?Zhn}e(NbOO5IvTrdzW;zxUob1ap!kz# zK>^x5IeaVlftZzL!Dv%Hxl|&>n*lvtW+fa3-2~2rt)@X-!@lrN{-Nrfoo(@Nut0?B zk2-T-mP#DEZSOS28e%qk;(P}LQzruVYmCPrfCuX5D8$^6F*v)>HG{2>s;3g%=RAmW zvPvA`j#IHI&P&;8iUpi$Ff0t9pazqw{&T)t3^s(ZKZ0G*00qoT_#o(ir`!LtmEi^H zO_mRqanxK}v{teMZ+=5^L5fv1pjNoABYpyO`6{dm@!?ACERmn7ue1w_mOLhBnne|D z9VUBzrVvlrx6^-=5O9WO{n}eZ`oB;|0M@9;J)N5N2j$>X6mT>UTfp4l(OZ%m2z~0MJ#k>u(685kygfW2A<~yb%t^e&D);OMouPs>_wgVbGw>m}Uate0UM< z_q=U{(Zz`Qn<#jXHRLuh4ga~I_>21Qi(r?i%>2;50XaIC@d2R`3spktNouly>{gdF6vW! 
z4Ej=@`BjHgN^w8z?x2b!3@oA;&Z-N(Uu~r6Ys0X+`P`W_DhCuoYKBYwsgy;&e15rm ziOzZ>`iBEW5UEa_r8~u27NlITZZS9719qe6$rq*j;pN);A1&wJhB#Uid+lj|_Bt?V zmbv$RNjy4+-AOr?nF2}0*5ogf)#|;f!2E98dhlYz6L@-UjaPb3Wz=QY;fRQvK@&5e z2ps_1QroKA4zVhV*iD{}kyj6@wj0mGnI3^B+`4%cv{nZOueZtQGH|a&+|s5hGh?Fb z%Ju>e7p;oLh2G7R;)vI_8PNJ&P3Xjy!jaCSwhGq1v*C!q_@tbEhAe;o=I*nFmbY-3 z_a?o;uJnu5@dq-5M+!snu6fg?v$E>zpixk!DZ6HT{T!!C6SYrEjzmW1ud3pwUjz(m z1}93rdq$b#4Zb8M3cr+p_FO?GE4_^Gt~7_n-*<;Zze*}W7=|P!kkYHhAd;Q!xd!No zp<$Zf^CUmSPL6~}UW0vTw_TD)hFx|x!&Egc8iH}R))C6qa=PXbyQO5>O`!=%v*J-m zPfhDSvecd&i+PG;_Y{!19dW;sqV){3ssORkC<0Web}CV(wKIP%z|&dC+pH>DBqR*# zQ>Ylb=tpgx!|_jVy5x`q6n(R6ooLv!Bw*Lh$$jmlaEH%tjz=Yr;U_tVLhfiPm$bCP z3d;H>|3Ur&0e{}7Us_>7r9(2J2@z7JN!8hv3iOEwsio5FArTT%y_4a8QKnWjXc5Tk znz~#nh>lli(V;i)h6_8#9^(c@a#a<=I4oG1BbfArs1oi+y$P{vmM>D6D;SGZy_vi6=v8n&N_ewjjZ!DiSJgi`3Z zRm&OYCCo39=XRU-=3B;2F?!l6w&>C;y%#sozg&s+f0Gb867X^-q)SvbZ_*0Z7V@5D z5Xq$dj4>dV-YvMgSGS>Sum)4UmJHJv*y68$Oi^*MTH;F7PbUMjl$_l2V9w2w&+rWQ zPw@661t&Fhf|hX7NQ!n#cd+CA;2!b{>w}TN8FA=fIePehf@1s+?@%GP02Nq_;|~?k zRVn0N;`vp3+oI)^howUY$^uivvd>&eOGDQ0gTi+k0+^Zp%F z92F`omy_@u?w<1N!{uwexWWs_upQf?{a&H$gj3O9(X{Q};xXu#KSEZAom`|6Kp+{@ z_sLf`!XI*O795sC2wYb}fe_zQC~|pRY<|Z0xQN%n=H$JzUJUhI4(% zh2L$hb|2u`tNnDzJMYK5M8Ea=tSExN>$JQs?or!S?|6y)x8v1~r~BH_nol@uP6{+6N z4Q~d0+$1b|m-LLkKWzb_btb+f6tl>Gx)cyECSdR>O#cn1)P$pwKm+Xr40dTn=~6G2 zpXPiu5;2pza$&;F(n7Q9D1hfAO2f-ZXvdHHi1U-CC>QzoCiywvJJ@4u?vP#g6p?y0 zr-f9>b%&?4Iv)KAcej2+zQ{klh1UeJo1KN?&ZSLpwKzhJ5=k6r=UfBk(!kt&{gUHj z52Wv%KRpKjiOE#RP6Ty~^?!RZ;3phqiReT;=q)DL0xcr3Hz`z0?Sp{9UNaXDk;eJx-p}*b)F1^`l%789Jenu zAAQFq5T5FQ3L~YJTjAyomJqdKBa2~n@!a20xmwc==;;(7g zUZ1(In0{MZ$1%*5Jo77p9pDt$N0!cGha3svlezG(x?%r7mA0uI{SJwxU2dc$3>fpf~S1q7IQ25 zjcB6wJGXD=^&ANjugdV8>KK2cbgdn7;gY`q6CHwuR;IXyPSKs5J+Pvt55y*|{OhgqKEZbwHyVcJ!%}2pYVTy!(7;O9iq8sHfr#57O@AJ9fF#8ek zP&q8cxAauGb!*$KCSmYR908)e-*wCK;QO(!sbNE}YB_9=UkqQHs>-xZd#`ELt(;1F zH~i#cmW57oFV+u)A^j=40>%dNkL=4pqof|f%-SLzMFAO}fzcfF-l9VhbDC^`)qmew z2d%?9qsrNLs1G_-`>N+;*`PXAmBX4QsWF$37M7XyErY^WK}oYz26ULpqFAH_qB3e} 
z7`TzBL;5RFNFJq2fR&kz^rdNv*QlJ#-2q8UT%btVf4*~2(2GmA&r-+x1dZ$uZX#-* zQt$r(rn?g&9AbIUW(wm9F z>VEMa4ixs0g{E78N#qaT)Ul*9BR(cp*G`Lsz1>!G##G>c6h7Rd)T|1{`w7h zo=Bcn{I2NNhdUvu{2I^K7pnJ)iz(aJNpJf5@w>_4soe{ipPVJS4A|ei*?!C+GL`c0LEq~n<%=lGX ziIYP(c-9%kq@LO~vy%2?QC8Dw*3*9Lij8B?I(yJTCFK>8loI11QqTHwVK?OcvAQb1 z3}eRk`!;!YT#5Z;7qA|KIU}6?1Xg1P0;#TVZwju-3bKh7%S28GNFFL)@yUFlEn9yN zg%(vuNeNlER@K!dp#&Co&cD8aam6TCx6tl#@wqF;#f3l2k_zkC|Kwj>m zc~U0I(iF?iF8H948E6eik8&679iH2eIY}Tj70Q`z%!X+E8pu6JjT9i22!|*8ZRC#I zs$KhsdPP@dP(b{$aWtV~%ciP{5>E%MXksHIxSTYcDJEt^F(-v`RT&PM-Ap{Y$X+G; zT1grkdN!wP1_K)Ea-c&=LOFW|S-PK0Zdw4_x^6j7D1_O{Nl6?Nb5A}&hR@83CwA>A z38p~uUF%&wydYA4d`olNN_YOGwAUJ9y@9ptfVQ+{agfi7YEt#7nMZ`OhwSG(c-^+y zN2ta-gcgUt*lf9S#kl6N)jF;BLBYa3nfnbdzh;ejYOqcZWbRJqW<(n_v5jKPGi4`^ z+0#)pRcm)?N-20mer*pKlrd|{g`5QMf=lDMY_g&v!xbVE|;Etc`y@b%{k+q zEfwsP5>vt)%oqE{fUjez^Ec!nB8X&g_cfo5F;Cc8#8(9e|BhrWw6wn4y}_c&j?yKu zl#EIa2cfF^>F^T^dhv5rqNv_081B1EEJqHyn!nDe!Ac2j_iGP|doDBgPQQuFlh)aW zUClO{Z$rQ{IchQ|!tU;TSdDyl_j!%g*sF&@!tH+LC44p=wC+K@qQ4 z&0hJ9u}Xnf%cdpuE;5n9Gqb!znWsrd0cbw30R=if8$v zsIe`M3MvKSelb+)evPVYdC7FtdG zg5#|x|6g6-9oNLxMH@gtsTPnfA{_)o=?I}nm)B6zN?- zdWQhgmEL=Y#CL-Ce&7Ax@4Y{S0h!5}nKQHZS$plZ7Ew3!6V?x0onzRgE%B@}T#wDfpZ5-&}Dz01p^0cq=##$49VOt{j~s#6x>x27I;S zXGtdf55Qz&TM+k8mWL=Siaiib?ogtYJrU-{e;jcI)LqmZ@SUPk$)9$iILBx#1Sx9w=L=jfDs@5Dq z$V-ds#5)Hu{_QV14?al~PY3qLY|OOYClV5Vkj^N72b24tk0{ms27jq-K~m5`0}$xq zN31V+$qgmEzUdBP;zifr5VFrL6?AyROFnWm1q3Y>P`hoDH0Q+4#9e9Hdrw_(jdg10 zd7KWv0%%_R4A3W2U{u}TFr9LV4?g>Ag*$u|D|It9<>GlDMyLQmPEHu;@x#@9aLs?X z7RBU2tkgg_^ypOMWC^;*4YWV!IW_+Llyp~^7dj^ZJ&iK>&a`?f&&_m)7S!2tW|;Pi zv8n!ziJKirf?mww?II%%wBAt-#k0-k{I6M~8Qi%E=0sYT&@J{Oy zQNJ4a0jD*#*f;!BT#YZ_ASdB&+U9JYvl?Q>J}fc?baxvdRY+d=M! 
z&5}~F{x&r9UHetM{Og&NhN+HWBM&78&7h3*z8HZ!;fYPFtW+RwTf<#`d5K!{;QK)OW> zp+6HLh?nsSI-?(zfIwG*f}7{CQy+nV0@t10K^IM!FB2yEZh<9gCi5=c@6R@j&ZtN9#`n1n_vTDCCnT z2W0aXq@6}U7gY{Nv|Yh_M?AM`mccwHC-uXE_oqCu1IpuA6y#L5tF{f@tQfXUCnogf zRHTi5(^AQxk0GA!gtK9`XWHS^3dQRgn6j#N=gV=0HmcSKV{3`jC=o?aR{QkKB6^db za7bjjh1D&8R81&y5~&eyT zL~iM+;n^3{1+}z+eyy;-Up9_VmnH~w*TUbWX5%MQ3vzp+;~<(!Lo^vy=4$53@t=t8 zKx$1QrwJtFwcF`bxTp+=aP(&LzDy${fMLEfoQV8Arb0{s{K9daEc_3o*mN=9;nHX+2A#S zj46p~QrBg@)QR~8SH>{Dy!^6s_2B2lT|_S%tCF-=MpFWKD?CE(3wztr3K{0CXH`q2 z<1?z+XYIW_SC@!RUHkmw9ioGf)>S?%?+aC`3T(q*W-Z@ts8&&Qhd@sH(`^ofMQ&7x zXQsKjW={ss!7_hC_!G^=W;0xOxnV5dqhL`n4@TOSN+sw6cSn`pC!NEu!rw{r!MsRl6B+jZhaBz z`2fl_;bWZ3=u2L8uh>{j*Iw>Og>Vdot2Zs;do8l2{NeIm&R!CqO~I|Guy*G}tA?Bc z=tsFFv>3ne;dSMr-9naVJ!d+`Stj>42F8>+D(um+_VgXnWOmmgpP2<0A+z9Z_b$7 zhj+9P(}shaBC44w+D>=`RQkUm+QY0h@e6hjWktU-n4wnmV@4iMDWWo3h&(_SW0y^t zVWm{Zp=lM-Cyfp}=wB-V897vQp*gki=S4s$Op5Dvv~}bdiM5Q+WluKMk2c@AUeCQn zf6g>vD}BgqDmVZ&*=L`~{<{4_gFJ&y=?W7oQ{*HK2E zK?4|d>rIrJRQ|{H0o1*a%8Gs#Xz&p{>j_TAVgqF^=A6Qd1u~E;qeXIdjK5MB%BOuDLzl7psTlR0bN{n5C}bJx_{jKCQaMserD6-o=H&yR2}61 zYvopzkvSOcOXV5rc|g+}^Gk!o2;R%12u&$u#4_&}KC8n_H`$9j9M5NpF?~v*U`|{x zkXne7V{8eU@g>z#uudTmj63()f#E#y0KTM`j)zbIYL^fzs;XaYXRT!#o0E!XtCt%P z)%=%8Cx1|!ya_!XB=~Yv!*Ql%!gTYC@>zg~rKd*oyWT)SR_v7>x?NjH{8WPD;qvg_ zcxZ$4)&6*{QpiRr|5Q2*2HB(V3&e7hpd~?JWEFi=F!V*V9cI4@sxd_u`g3hc1JTw) z(4shksM#ij@+}O(D_Jo{@iKk8!(xzAoxzPk!=S;4jX}IfTnuRj0^ZI4=?@^Bjd%U~ zI;$YPtcJtmK^ifOg1q5q+@15{tNgT|yp(d#!_y&DKAG3!Z7Q^SlMd6{czS`sqZ&a` z;AkS3!?5D~8&Yp;c0mM8K#4(R!wP5&~bG2bJKo?DXf) zg9fyuo6T`VCo-P2U`ZYcQ4~qT(UwXNz>vk`8$6Q$*7v7wb{XtxwXG8Xp-H1G1sG_Z z;&=YvkQ=%E@a0(O@wM8LI?urOy=O3xoBu?f-1~IX68M_~ya1Qvl$r$q6@P?)D!{C` zN>}WFAJ1gw;4)CS4Z5!?f0rJi;+RPXd8FeA1n_`udJL7D&S~I@9xKrEHOOFE+VSQj z5JwpOT?hgb7yo&5`2RyC4p5$?FXCqUMH4PK0)WAIRQ-w{pOGaF(K0_(vsS;CfHfi0 zuO!w~$tr|79R^{qOKCP8*<_a-hCs=exD29LqE5O!^>3&sSYzb z_~BJCLC^R4zC~Yd+9|qJH~un055~1BeHr0Ix~;4s-G;Ho%MS_!uIg3o3vtqy)ofjb z0Xqx0yF3*G(yBR~pv)?RFT{o`W~uksl;%kjIB~`1fcZFCYEHRArp5X^(kU*Z>3LM* 
z`(gydo{chC8!!G*zZ&qIik|>$Cva9-0RJJJ6|8aWvB8MzJl4C$(G4g$w$k+RM%r33 zvVapKh8ip2TG~F*OpFy5k^tkHv$eq)S34 zx(CA8Eq>6l(E-1`LiFwhzP^nA{eh(G_mmkva!g2kA)2_2n(BR>J4xTU`Yv9_*Fy5^ zvi!$|VkD*7aN&4MvdOQ*rJZQz$~-zH{F@5D3~1t1UqK~SMSv30T zLix$4^TI_+E6E&fSz~7=j(YB(vifu_g}?p^j`bXByK_16x=6HSv+8^kgQ1jBvU2z1 zl6ODa66P$lMLYGJ;(8vgT6!=x3gxkWr~vKAFKn8$KhQl{ANsNN_H;kW(t70HRkEE- zite8K2K!cMmrnQ54Xxepj&IvhEE_9My#{6tA6#S2{03B?@?ZK&UdMx|t_TxWW8v78 z@~Pu-@-`c=XJ6k4MCKY}*ESg*O6Da-ic7U?jaZovS1t0C^f&b%D81NRs5GM{>eK6H z(MUe{$x3ud>NMFuwWRz_+{0HJw5vM@sQsue<;=$yzMjr)$7_ZP=et^U-v{O{BsPOd|$U=(ETncms$<-3Cy>*NTMO?b~Ct0b})7b*a)#|Ooz!!VH?7-Kv zQW-E%0Uw5`%%w3R;X%GWALTQQt@V+dbjd&(-CxBYgY{)~vFn%50-mjog}&S0iw3A}__V6hpk)Ti-vU`^vfnLM zAD9u?XrhcB=|jzsK#Q7xLoTyN*^m_^ZoW6KPVfrxc5HSQiSJyw%sST8``WeSit}GG zd%5A06HSiv&>o^I=~<#TD^b58Bu8$Wdd@k<7dbT^m{WDq?ar>zmG-b5%D*_hi8 zR({mGnbkVCvS9pD|N4A9&mTrz?`>_Tx*EA#P{z-WcG|%-Iqsnp;#)=zM>y7sTe10% z8frhCM6M!QYU@fG>h3FAdc*H;k-ObGA=N&Wxh%21%VQ^S5)=Mp(ugW#=1tDT*85ndB+1+6 zs4?q>Ge^c9+kX0^*$_qFd%2`!H3?^AR7@Y&U)l@NgfE?SLoDf=?CBFi>RS+vhRMND zKk|Xr6H**ON>U~Rxkm$dKMS@!+{0g6z$^dj-(1u`RieMUn|K4I2wc*}J=a zBOY7Nwr3=Q;JpNA!53nE#1{3KwxAf&5veq$BnS7lMNL^x5UKX=KL(Y45NQANPa8N7 z%CmvEg4@s@!yKIG>hWe7q!9Lw#lPh{&#i>-#Dysu^$``U-bohcJ~(rjZq_E zyq;Ojm+QNKs?y7S3s+BWqQSb(^CbRJKUI5%EzWgaK0@DMTBXY{2iS^V-vt9GNT{fk z0mj8dp@sW9br?hi^bY@xRQ;!Xgl}kibfQ9ymD0gh^WK==-ewy11N$h0lHv_tArx@L zUa&xB^GL^Sy7-FdTKld;9R@e=@3b&`+$PW))PM2&8rP-K$F- zHjF-F?BY#l3O+WY!aOukfg zDuQ@nNyH?@Q+*U2cUk0c5lCu2&&s|Do7AYFoI5RT+1!lK(5TS`l3*ZbBsLlVm<;X< z&`+(?$VC0i6qzgEhNc|!+L;aXNHJQIta_6ovkTd3K@qqej%yiM8P0NOTEA3i(}$&{ znmfE0+%>(4T$CpLoDKBz4K`8Nyicgh?lgGy5lGrlOr&>999iR)f~oYHiBSE4B1oBx zpxFiR7P;3S92!;uqg#CP=z-&mSOduSUHxX|La$AmV?D0}%>&Aycur71{3tJ+Bu)1k zzC9-u6)8@P#+9R)_wt8MQ+=sy&_kd$zH*-Ur*~@N_n~>or11w3@}L{>?wmoUyDuQk zmjWVQ!DSrvsq1tk<9~m}Ma+>ffKa(ea1M4x2=WTqpdmm`EVEx(nnVeWyTfCy=S_4M z1S2!UV=oFU8=_d8rOCj0qRb6`bQrO$r|C6=(g0vG$eHjF$rnj&O8=YkD}($Ce_()@ z7XmzrH1T=CasXfZb=FJ*4q?JUO+cOwN0jsQ>;0Uo0^@oFv&9#=Flpw$k>LDGOOy(yYHhYuW|RGBZVM<;0L6> 
z-ks~^YQ?9Jm)+(S(^7F7ewf*5v>)FcDE|>Sl^s=#eu{qNtfI~FDqO8wEn|5A zd8M7Wi8h=ZFA%X7~yZZf5mGNI<#|3RW zb1pxzk!|H`K}v!xz`PJ2CGK8y`7_*73|4@Gy5t^TX@b*B=kX2nXul4CTu=jF!3eYJQqllyM0m zR#0$#!75(PFW!`^lbd~+H6oFC>2}#E=|d>>Lkpyg%9RE5l=EC)Qd-Ht-hw~-^eY7m z_{JMz!Pq`l=NIk0?a2i;)#M476qA$Vvl@EmMC7rxH&4M<2;!u<^&mm+;Ej9eiJ*l` ztLM^RMpJ=Fuu{%%!X*)b+W2G?pOp({$}?rJ)i!U&@YFhWDsV37r$oKj0prR@ zkPvzh)y2<|#Jm>VI%C%^WUoD|UFX5>wzJ#lMpXvO4WCnt`>PS%+&hN;v8TqbH|X-? ztXp=wYv+?2`6VXU1FhU2CWaroc{&EOLc@5v>*mhd3;C+!jei~|9DVH-Qc<=LHD0s3 zY*rPO?^)FOthd)&Ltf*iQST}8TFHvAV-Q=>FA~p0vnqR-^+v%$?K@GFc~}X908o9Z z5JIp;SYJ3Rdp^8-U3MVb`wPDN`ePOQC09A}oYf1jnJCT#gwxz(PLL;Mv`TYJpxtCo z*}M#)=~v(RlK#?LEDR|vK9VqYG-GJa5?qiVaA}ONqYF;IqFuVly%~p{W%_w@Dr$&! zFoT4pJMXXWGqz&A58v=&ldS0DK1WX&QMU6r&u68`Ocl>>Q&xL_Di?r8FABW>^k9d4 zbmgK48OVJ(QB!P5Shsf8IjpN>A!47$f`ar%0x~g$_etlKYI0hOfzrvI^?l&dp;5A2 zRM)pEghq;D9G}nyPZbjvj=4TI-wV0*cE3RJRBhgVurxg)Tyw`3O;@WW>cGa@dbYpt zB}$JiXNZ9(P)@NscyzvUH!F5;A%AS;zTC@a<#(Hs)phuvTwf`gP_7$)#`wuBbiLCu z6hqg|@@dP`jYH5mO<8-{T%OleHR@E_-zJTSt^I4>Yx3z)6PK9iqAPAB?t^@GjAbKt zJ+4i3E&pKtis4o&Ta%e@vUi}PnB}~Tv90RAm!W-1cfj;KfIu#Tr|pdPrRnAU7MqxZ z=o&Fk zA3@{ze0Zg`?$-=CsTl>s#QsOyuFXj;nV}XzVeh3S5{o%Fs?Ip`EvqI~e;QCLi+wRQ zvt6)*%D)(|J`skiH0YNTvU72vh0fn?|HfNz+xXB4sLHy}6~^RSpp@hWO%ZBwz2%u` zptbeIIQf?Aep$kAh_oP{T=SwxKNboqZLpDTt?oIv+*!8;oa!#%@_hOy&R;@nqW}AH zkB=VNgbt+UpnOifIq^yo%>XBJw=Cf?yx|rVX@i%3g@5X);>SCVW6z`+O{T_4H8G5Z96&KZ{CqZ?q5lz7_K}yPu$|0Fy^}X_mErS2;A0h2p|pNrx1_G zVVGAQ=#Id*clN0uY$Wm|(j>|V*nbkCfA}=`hQL!ebX5U5&z2qhBjvr=ogu8JkHt`YF?GYmK;*$~#8a67vIzLYL2j!z)J2;+406sCLfvBQLsB*FVuY`nsJ;XS1{6yGT!QU4 z&@f3fX{!B(Jdy!4drpG3@d8P%?cb2ROpz&v#&nH+m#^;-04aI?r@jk@{CdrDvFd(ds| zX?VSz)i18r_n<;uz%*Q3YufvM6Fv>7Q^2xhj5C);CxN-s3$t}P7v9QQ=9ONcU3530~SVm#(=@zT#NaKEuAHNUh&1XirUcdyp?Yj zpSp-HpbuqaC@@VC`e=FKs;{o z>CxWO#Lr#Gd^kEBvEe9-Z{n}Kbt&sCEW`#;>#U`Ei^^hW;)#WoD3BYIuL`cVqt=ryT)x!`8;jIml(eZt5!p6r+@9_X_^K{! 
zzMsF^7ST&${@q%PqfWyyC*gx!H!%w`B2=inI*$hrUK5pe(hAbBX4F&7(`>qt^abpou61Lf8pKaJSX3iD|8_2%BMb%rg}OnW?K|_Y@pp7{b8}4HO%zst#+p) z!Y})3wsvpcv&52oe9StfiQGS0^UhuG4_Va>RNOlD2zu*aGN}QEe^6RpQ)yVLA1^T9 z%Ji8^JM z8Y#5K_w2m{(iyg!EtBk0H2B@XXq<)T``BOF65}w5TCuKB4g;a?+Q5p*EWwT|znqk_ z8Ag)CoF$NQD&hWL7;7YL?0uVmHJ(}A!uBDSu@aeR4sJ+;k_&PU$ zLsDM9l#Ai*($0nHMUFus1$VDNu1_u17%PS9q6nAbB611+dobVA(fj*ZoK8!e`qk9} z29e(c=TB=Z2C7fJp^w6Qy*|yiuWN#a%pb@&X8Y1N9M4**$g>v)M9ib~f$MO85q95Y zx2_y{)&G(%64scM;9)~ZX2toVQqP%7c*bK=@pwOnjfIL)rs%cUvQl$#@Pyk;bXYdz zSmQ9TQjk*%vxcz&FV{@syCi3N#AZBqKifL(_Ja?7ciWg2S7Bv- zHXKoo3`u_9Q0qgEKcdN$<*8(ex$di;zZx#9!(MV({}bWxyfI=0X}`>@Oh28-aA1Ay zVqPL)tJoVswiPeM-)rookir3Rq0Q_I{icxJQ3F;koI>vLC|G6Tyj_TIfU1)XXZL?g z|C*)ReR%)i8HLjh84zKk%}1Uis#~>I%?Ayzg)A(o=g70I9NpEL(-zSbQU|&=_U)G$ z{D%wd#kRvaENVHu_c4cIyL!~;q=9Z6=cWj~dI()l$$kNBH^Q{7z}nNE6Q8H|=m@$A zP?M1}FCgbg1+W#c#yt1J;Qbr$|0m73CWoz)D&0Kx24D!{1$1?ARprbC5Z0O@b}(>m zwi+jkn(l8g7F3eJ)6N0}4_8ybzi>Ar874mvYI6MY-nt4!oF-%~_~oc9EmG+IKAka+Y_^m%Spr@^r5p=zRP5-v0YY zcz2D;#jhd`=+hS6uRx6q@s5eN*Sr$!&@N)h4)4C-R8j=W>K@L{p)Kj58)z?=Lm^hU zTO12`KNEvQ3`b1Lz)_Uafu8#xcj9yN8imu+Qjy6N3O%{dzW`$ed2A46kafT4NQ0@C z)gT6lV%ltWGHvT3n?NTMZc<1v1(@#KjKNj{aqPxoFH0bWPPK_~ zL4^h=VmWr;BTOqiOfATrdsBrNU_hI04zM;-uGL3bD2O%a*8_Nyi@rHrWw08$M6|>s z(2xNkQB^7}g^Zn}5L69rgGaGFBdVU07xHux$65M6d_&$34CYKGE# zn>rPst(SF?sLsU)n)nrypKab~XQy0DJIaPR0nJ10nJ7)|1v~*c zQk`wJXNq@^6)Gk(bz{>lGj9`R>gJbDmR@=R_tMMx4MDf><*rCM?&mp&PAc!Ekalo& zaW;cISppb5|4G__1&|P!fMox^*Z*v1bhEMlNdw*P322VhPi{9%_EHH6%#u67RW9Bd zn$;+2*-_Le4YS2a3Cn9X}7RoumSw4<0>=>;={w#ZLH6g$D3loAc3T~vUC8j z{G9~l8*L%l^psB-Fah;u%}X|Pkpn^(KFCwX^U8~VQFQ8l%HW}0s&XPTuDDJ4(^;h`#jICL<;W>#pwLW;x#F5& zAp7SGP@(hpM_#$}P2y(c(0F)jmhHeGD(re!qOgP(%_Ks5UcScA`auwF6vg&Tx`Wiv zV4GVQnLv8URb*Ig*E1=wI30U-<6+fk$i){IXxG`?pf@}zxZZOy7eM>Om2iQ z&CjrN`t^-2mptKF4zK&%H%Cz{^bGq}nQ!+#O01IjIr3KTyu#a5kDT;wIE-E^&Z?jc zPtvV6Gu}+T_@oY9X3DZ=Nl$?u4l925<@Jq;C$R^ZfJaS1p|F=ZY(*8ShKBMXR!A(=V0G3%}ljMZM@V*JdE){&92lt?j=pyrrBp#npiR^2i0Q6elpN)G&NC#Q|^P0+g9 
z$$z8Rr%dR3%{(`uPU;)t7_Az)YldS1N}A>bC|FUsp&`XOPhq8d0L57;D(!l_&IWvvEV!DtbjI zvB9$&ZJ$3WlKzI_U@u=4N!2pkNMlbc6{;6w8ibj1JQ@zqyG$#08Ag5}PR|40+|h}6 z18J4vq9127%;AmlFNh}LJ=v41K2M(!yY6T75-9yz5r#F?N-t7{KIr-WX^5bs=a^Ty zy2_F#X)BQH!FG1=C9!PIiIyNVUBDl;6k1%f5JH7RL8*ypCgsxQc6NjccRQu0{&12= z!i#%8mOqEt9)Cf$#fog*q|s4tv>s0lVD`N=ck>;hFHArcl#aEO(0`zLUdqTn8>H?9 zgTJ0%oZp5Y&%mP`-P@olYd&-gs&6(gV|DR@XYm`xcMB`Avc)B5s|L+yNAG4qK_tME z|3V)=t`@EJfBoa-Wut_Y@#Y+N%V~pn#t6BVthi{5Y%Gk?uQUe*gLbOs%G2aUsvlk= z-qmlBo8LJ0Qpi=D6RH4%Rgz3G{z>oVf`frz2ZRp9wafXvn=1;Z$b*5@FQ5o!YYG;C zpn~vP5yOlv`)>(hKufj{PhJcVdvRFwf65wQTZ&>oXFm&i2B$b|7RGIV2F=xnSVB$$ z+YzkXHLyemz0>NeDLVNL z34pvJby^&B%;KL+bsgmiJ?I8@8F-QZT%-=8o#G{ZyY&FxObVf07e$>31 zRp{VpDdqz@i=#%)+>wnX_hGM1w->-USn^K^LckIa3{zS5&!l^u(Jhn>gZNnE>K%lq zWn;EG{AWBUesmR6PQm@0Gcs3L+Tt&46etxTPX-8LKC4k8o29R5YZJpDG8&X)z^BD6 zi!&YO*>_hHol6&xg$RF37)hArmI0H$40Je}X;KKyVjDA_DI=a5JbUy^pOz~qO2L8{ z>JC=Kj;C3Nun#(iZ&&sCh&Gn4MgtyYdNV1PAER(av&cWa(<(3(2v=!lxGyzjBu*v+ zm}mSQlK*b?`n!ndc76^FiK3Eq+&n|q6pSz@;bHZ^zIHQRE4~)0BhK3_70EC3;GmcE zb1&Wsy8`Tkd<-pf9T&~EyionRg-sWqb!NX(A00v=9W;~BIIGIvv$(yjAkUTEjn6t! z`aBJool&GF0LwsGAXkqls_Hnzmyl(qyBZc zi#kK^sUoa~H6?^QA#rXv$}H?|goVfF98TMLL1KGK-AKT}kSCL221$=3MC{m_Q$-v+ zB}ES9*U0OmjF}yihP;BW_QcU~pj5&l)OB;TUNicKXnuN#5J!hHFMZVUkOe($zaG86 zgK&h=?s8Evxv4K?Lr19`+K+o|*`-75#2!|Dx|^D~_b}NaMVqtOk|>EA{z`v3LC{@} zP|?=A;M4jwd4>-Q+DeDf_1+}~JDNcuL$&4>z@P2qw+CTH&BZK_r!$qBO-jn2y|=Fr zgD7dBwES*INWFP_-OupTWqY%0u#H=;YOt-=MFXrFKO@UCPlQc~JEo zk!!u#(NBWm-0|h#5QnNtrA_E-N($yr@o!i}JL4ugqE$aIzxJL|cO%+oL0Q_0NO^5% zAcVcWZRfGsCjr6bAMI{ZJ%`pZp6HLfeX+h5dW9K%)#&?QFC=$$XsT*lBrJwFg|V}? 
zyxNvKY=N4e*o7g&9l1nU)|6nvyHbl2di1}3Zr(^J?pFM`inI&u)5on-{t1s@%1Bw zn7BKb)5dNb6Kzg-qQ`Uvq!Z@6!%}9g3=VRFJgK>yeRel?enSHJWDOowTz-BOA?o3| zc48n_U`$&;`eqfbX}dl z-^J8uGd_q&iR&cZb4j_-O~safZ9=)6t=kCO87*{YRU_<+rC`vBdX(T>n{O)Ki8ASn z84PI)19QapwYt71rEiJrjjzcn5hM>K$;xS0f|u4;E*XFi@!O0fYh6|Gk?T9r&%PBD zBJUh!JFxxpSfiKe^9pywb%ky=#f#r{NYhA9ZkDkokB)9=)q0*$h82gldr{EQu%g?mROci>TwS9@9r0RR`6{qL*ue0&gxUne8x zTdi2xa09y5Fk^U=BDmCvNEl>+-=hWz+I-PMy#2o+$_cn=qosMFNXP1QZ;2DRIrz8s zELeE1*WXaCH$3&8e70<)=87b>*x7HV#(F9uAPf64C0dml%0O$_5kOLTbU#{(y`amu z(sn-533Q^K2lW5U01({&*}nPCS0WI=u|5qO#J^UnO{cu1cn9$-|3m@P4W0DUbu}!r z3I^s&585`;v#fwXDsJfuT5HI9ga)yT`}v?xb_}zLaf2_ypcE8m8t|V~F*EmpfW-Kw z4f42;(ED6~DY3zut2EcgaUB%WnYNdosKQEiEo;WU;G81PZ=*t*;GaRs_Jiz&d(c@` zn%uO`SSlSp<3HH2T2){i>=OS0fR;DEd9w@pE^_H7#On#--O5O(EQO>dmiDWg)=9VO z3tB$qZ*-A2e0mvJM^5T1xIe?TS7&YJOS3l5~Z$?e9VBOt^2f-8jkg^rQl zHR&`UmR2I}vl07YHxva+^w~&M78v!&{4#_pZDr8AR9K$Y3~N>__XW@PGd~>>s_RZ% zz8xxVU@bVNBa=Dv=cxdzZQy!w5u{$)|2sLY(?NI8#Vp?Yg7boqjXgLSWmO8{#7k@Z zWJg`2axoOc(3`e~_gc4Ac;vL?^x4w@RBWm2c;mbSJF?Wwk2E#v9TyJDmXn5Lq-XUJ zTofH}Nppp;xMU8V3_9v6@j?aP$K}w84~Vdu-{gLP4l1K}RMjN5ln@+~29J%h4A9YN zhVXH_vrKZzJiK~82wlFx0`$fEK4r|k(BP(Fd4I8uCO2m50cY%(+;ZcH&QtFd)_Si>$(y}t3-4ier0g*D>Bw9ct>on{O+47tnKJywtmFjg6^U~7-w>7Qfl;kK+Q4%gnaZNEfw z3`7DB63aK|)Y2axeb%U??Z21l6_a?+G$vBcHaG92DN-yR}^6(oXyJ{)} z)!jlyo?m$zym$J3%KrUp=_?McsOKz_O|1^(6IKMtQIV&N=x3eT(jO_c>MF>gDn%6r z*Gx!bJdgwt-Re(OmCjO~W^=OFBEN5X^f#XUg)K8m4Cqi%nX6}xyD_r%>kCV3BBuQy zPh1=zr%S<3MeiD_PD9{++!?5klPd!0iL%r^lHkdo3b{~O`A)$>T;zq~mG%9DRH@xX zG23=oh5$(p^~YhtvCXU)n9ZnD1~Mfg^slK9olL@HO+`&}x+nt%|E>#{wmPZ zQAe5#MpA2(zG#nuSgt|2VGI7#_|r`b zo#@Y6OS){inc9;#7g(oE0%)z$FF&xESi-NySQlR#Fh13L*iM)9~O{1i4(Uc+i-lU4SEb3`R3e0T8kpHDl6^_ z?$Xi+?GY4ZXQ!43or8^kBY2by4E{3vRJQcXgOIRWhpT#=sXuL7v%mYCd_i{Uw!6NZ zWYgN8DR#2a#RqMvRk8UFmsunkHTnkT3?$_l8jJh2jMgcgFNPoxi`HqecU-7O-AR3Z z3M|(xMWn4>C36X>p&H5eB!vKVoqS zw?Cx;6t`1KYSjtn0tw?qU~K8y)vtK>@iLN$-7aG*tTnL?+s%yCTZqIRC0G#)oyr?I z-zPP-Y7DoJeBrd<>>>20UdQ=j?X 
z1+2qYQ%Cf+mC}ZT0-K`;F%Ref|Fa?0?RMit2v`E^CK6vWCytu-kQ6XZ73= zw<|n4IS=fQ3Y-u|-^{c9KVbKr&abaHPvXCiA5w9K#~(BE81IF{1%u>%jl?@{Q=A(7 zrge-^FOAhF%ZEI1sUfe5`*Fjm@^4j?cRs(qxT7FNA1^I|e7}s! za3`wI4VHdwT3~XlEz3K)Xuq6itjxJlslr9R2v?_CL~BYUzb`FY)CEfEsQ`wasogpw9(?deEf^|4+;#Fn ze8pY1K5gRj@BO^I;O^jVgEMJxS+|y=s8jrimY~zC2Ci)S)4G)asswpnl2WrAEtRG! z$EXqo?j*`JYTkbEY=3haO{3CA+@ZdsrMCor@^p$v$=@J}h&T6M2?4)m(^=J6)mzHE z-62J$G=S1hE!Cr=Dyyv*?$LY7ds6meRH<(?@)yHk;%j zoyqCfdu6@)KqS4lxR7pi&ZLPdM{>P3v}kmVB_wKbPp`!*cUUarom0$xtJI8-&8J6h z=CtOUdNXye<_pY^-!Jj_r{;|vldN=avJ98flSw&p^mJUhAi8F!x0V&s#*13mGQDse zZSY=8Z~ZL9!*WA?!_;q$jIYr8lqKpfQI1vJ{H~G9j0c~0#%o8N-uR8xenG$=@9>F| z7*4G^*Nv1Z4!PRQ2Rx`G!LAcM)9SUfH@7G4Z?BhYZ?*lEovrs`(>(2mjYSWN?M+^! zOl6Lc(}BC@rTR009mk0igRAm4N8f&a?bfS0xSka<%2==bGWI%h!!56Z`pWpjTW_sj zi&jO)tV&zbrLHSAaBLBOF!6@ z%|qvKcl1kc@l2l9lgE=h*YSxn_D?Yc-Qr`$#+@1S*0;3gU<5i9A6%cJ>eB??{Ie5P<(=NAJ{l_Y7C&NVhd%j>dYHY l2>QT!tifjBrlVcQSUfH`}hwHa2tnXKg2Lw(ZS!C)cKvZJz8pxx4q_zTbHmyw1#L z=BwXt)$g+CD8wi*Ffi!ya#HFrFrSM4%`cG<{`DqaojU&pI5!D-O{9O#56LnD28I$w zUP@fkEBmbbDekMh2L$SEbwQ5pZM`_7wXrZlO8QgoLZMR;1xUd33H4KF$1nI|(TL|( zI91?X65MZ?w0+igNw5-B(|sugiXGmWCH!V;b#X|Jt*6&me5WX!&&c^@{k+A~Z>!Pz z>bJ-Y-Lq8Q5w3rs%H98S@xeLzId3kUz~)y@w|{98(V~8xw+$SOOAyZ9N zp@{!fh(6(q-_W8)AmL;|gaH;;n?=NS>?I}4v`vo6b3FAiR@EDeNLt)|zL>oY)oQf` z5R_pQz~{p40c+yS>E^#fCefwCp%f>=#Sak&Go#3e;o;dPMlK#6GHQ5$&ox?Iyaksl zUep7~SJ!Mo7g0_^3%4BmUFOl0{rNcXS7Vj}eGvu53tla0)yJa9ER8a%c(vqyq)186 zxNDipC3aAx3V(|`NhTFBKR|s&omVX%r1w|xPdvZu8`U0pvafG>+Jg=)<9&T1&YNB5 z#5_(wc{dV+-wYuDzl;>(!9JA=Ouz9Pk zUbMMtmi|KU*s1yMo7^QEb?XdSMOv&e6FEO-`fb#e`Gf=x_a7O2eP5DGN}AtD$e%Y# zb}%`Bawe8L=)heq?0Q|MBOA(U-9Z#;oCl%79!L5ud!0Yf|CHM46={&F)r#}s5XRjRs~<@QZOiW-e@fJM zxRO`&>U7IbRxcG#T6}{`9N^7sV*q~aNXZjXL>=R_^noJ9N>vDdY2jhN`HD1mcXn}R zrZ|3n&I0b70m@qd2@1zSnbQ^~z*?Ro9I%Ar910{;EkdG2 z`eI*RN>e_<)Yh-AO;d<|+iBpW!j_(A)X*kmUEelT&l;hW9Fmo)hLdP}379*T$9no3 zG5e6FMzM&WDrQTUb>>|L<=iZe^rizC)Hk+{^v&Q}jl`vsg455z<&lvs*Sct6Zr4}g z$)BGd%co4l$Y;9T%KJWommY)7_SDvph_yvY6`l^*N*sb7xy@nQSSrtnfW3u0Q14H( 
ziP?^b2A7twNv`?3o88t_s1Rk-JrMIR9l~};eZum)>ExS-K>9Ut%F{BqnAUyZ63ZE@ zcBiwCb)spDK1S2erZVQBgPk`K;GA9gRqD17Z)jCFJWc6GPq^kB7#?p9`Gzly)}1B= z7oU!;=jIUpINj2u*=1u*OiTf2 z;HX*qb~>TS9?j?X9(}wNFMn8$XhHhj=ygnHJfW?{=bWRu)|e`q$uVi{av=YXNn{)E zRD;mNzjfV$O1`SJRJXPqX;{DF7;do0uAgh3`*O=~WO8kZ*~#fo+PKp>W~!Cdp>GT< zLtf(9Qoc}V9*@YdZWeyje*}(L#V?#a#ahGp_JFp;#-1t z{U={fqMz>hZ#wd;N;Mmi1;Y_Z$wYr#USXRvN|8mXe`o3vSg(-bqIV}_qBbdX^@|2} zd&D=rO>nYcah%O79-PHSF9yyaCR&*7o`u`qrARDp0UQSJ?HBj^c`Z z>?)2m99apZdw{HqXl)4O&%P?JA><^JcF9E>#Yh?dfR#c3Ag3l*gvF!)B*Tqp0Q3pN z=wxHZ`BlX+a=ml{@LYD12;yC;3csYjUN<03d0!6v)HvqxG_H*LQSuJi`pvN|wCP=y zQWqf{RN$q$)(Q^^D-^g!jWE|mkWg*Z;a!n{{`qPQbVvU0YM(YmWx;)``g3+c;-HcM z0^!;`hH3UPM5$3*E1F{irazC`t5CZHa}r*QJH{?6ouO4I{Y;apU8~=3k7N6s3uULYb!3d)>w7T2`$hE}9NX>_GvB8?} z2@;RcbvZaVbTSSjLJ!BKVNCNLRT_iE9`q|z=PPZjSCtpYd`|!vTs2Cr^Ry* ztC_dMJ*fJ3{^v8)*zd94FDJfEwQM$72hUP|zeMx_!U6&PI-<|mj66PGN7j5CM@}2^ zT%ANN4so{grHj)CS6yGvzlmf7z;=utPX02oKwDgOYLpM5`#H`8m4t8P60!e^ zhf62Zm2WOE$;L88cWoRDb5RwcmL}n>8*C#`z)Av_7~NE`Q)$t+ z`}TPLb>wAriO+P4Dmp=&$O)~~`VuqOt%k(yQg}F>G9RHDpk9`M;AgfRw|vY#?0q@c zuk-`sB{BK)C4N_&8wIzN*qc{K$!Q|=iSFC9j|TUjDH$|yXOJi@l2u-eIO{skodNHF zTyOz#m1~^NN)z?-NRl~!;=9G0<(#k4)?en>`ug;D0QY)jVId!~OZ`l0<&v|Wp^2ND z+wks5Vi4Or+{y*FJ~zo)+KOGmOc|sK{XAt`X~5%UgK~6>8VCsH?w%^}rF+oR`r;hf zzY0f@>2rLj9*=aLb*&!k3KY9OTzzW_{O@{XkA339- zaJxGql2)6si_#cEs0W;w{^djmj;mI2eb{yhY?K)IMp{K}4!)U{b_%_W#y#%nxcEMh z*GewmN#3dT+X!S`J*Ba!)|Yz%EDABL+uG(wVgU_QlvTNwH$Ey|hD2Ar6whuguEz4y`ctw{+cRefGPnG(_qQ%5${U-D-pYuLLzKbU z8JfNha$Sl!luLbz)AgJ%DE@_@we>XnN^3^WfSys|@q?~pJOVia7U9e28HU|l8~)R)qg~PH;&O{fbl&H< z?y~cp-yjO(feTxLrFFw>j-GZ)j-C=d7P%l%tg^AHg23(oJSvX03>a43wv}&+uatTM z$DA?1zp_7?Ou|8@StzUM-=(X~oZq8Np{aoiCZS3pB3%<7~-Q9d5!xge7{;#r=}`K%P+ESI7lgx{SIqB^ zTh<6wG9l2m!}hOf)K~hitI@f2;GuNxA<>1LeQTU-@3jGyIB07A9~x8+XA{O1G?BT4 z!{cmJ{hsG-w%$2)z4TpPcp|k%1@`^J&fZ>@I^pwmbTvCz1zVi&Ab9~OX~x`%rR_0X@7VV@iuk zKF+v~KA4NVua7y`o|2O_Wq7(~)#y_WYa_%&RhNWyDuQ;!V>F7hNiCIdy0n;I!0Fry zgFoGS&!{w(mjsAJFD0Tb>Fz&uaQ&$}3Y@W-B<|=IK520xo$9+R_SX3ikG}lc*E|A* 
z^yv_b!ZdC_DncR3>g^#%3@y5UuhX5nn1cl6h~s-tfoz|Ed<^b$gb<96@(P6N$Td5u zAIAu#;5-a`GU^%di`&<$_#=bYf$yl>e#-NH-$enXW(Mx10R%^ZQ?R_$3Thqptq49+ z!k8@yA8|+Y+S;={M6H?H{OSIXyYQ3zv)9pU8yU?T%QNqQJ`#IcL|^9qDZ6R&9tl|P z3jWh()>S8Vv`?{|L>|E1y0C{cIu#K7XEp&B-9`2;!w0=C{F_amZ=fD#NjGT>&F&#J%5dnI{HI zV_6ePaySBS`QjC9RfhZc5fnX|>sG4`9>eCBS5N=Ru1#Vo*(y zyRC=>bP)P(A>Ovqp_E+ebrx~?aZw#2qA$X}sNCHX_h;WxR}!UxyXiLqsD@jNJ=YN@ ze$mz6b!_3Kb)vTRy5{0kZu}|NgTz7{U7xX#=g~lj!-fV9Ih0e8fSaIJL@XUJNRL1Q zmyUwBF-BDm9{tb|%M@J&|Lo_FJX*ptpfvxYa4PtK+Ow+h;%YcCzFBrs9Gk!X zv@;A5J4?d43VVytqNGOG78eJwLVRJcIgfcfjy+Ueo@5(ZU)uHFerH{#j+YK>`&Bw$ zz31c#ez^s<3exl?$Wq{vLo(Is)5teRLJaTAf~^fB=+ixalL!i~SlRzE9VE+X`*w(yjlY>g1;Qo(TYbp68O9+E) zAtO#%1P~)#lNsv%WV39|Ycj)Bzof_rR1lj9yjdv*2Qd}M6x*k#<_PWYU^`4xb$h4R z!0vPO6zDNCctnN10+|~AJ4qMv)$7}$O5H`noQD0>W5jO)=|1p`#$z~^%!}_!ACL#_ z?3Kkp$Qy<|r$=BQ=!<*rh@6~|l-oFibyGXLH_~VDX{X8_iI@*ZA#3_o{U(wt{9ISK ztokAXW`qwjk5dafU=*=LB>&72hx$ukDt4olx7{7?fm{4*bGwBofbQm#vNbz(@s|WD zfun-U`(!_v`sZ@9;U0@SwZ|pF)1*3abxn=4JUEK)zvQ@3mbozAbpJzqpq7^Mp1-7W zd*JvCKgdahJQnq+i~?20vTNmOtx72v`qrN*)A{4?^b-8{T2n`oz$9Vc9*Q#y=sc}i z)7uU};i9>5UT`>iR$tcNcXHtvp0MoF_H+%9k+EG`-gY3Y=&R1WxLlsBz0v1s56_^G zjrUkCR{XaWD}#XguOMF-h2jDNey!?Ayf5QM2D*z2{e%XKIyN7eRf?ST_8QL<#W&9mA6Gps+~ zo_{8D#H#G5`aP*oAhW1^-)HISVSz1U%gFU+pD1bNQ!-7(o1gO`E~bfSkcrO&#skGG zhL9?N2;0F{eunR`4?Rg$9D~gvLg?hAeI)HxEi1U#5o4$8g*n%iUGn`+zb=?cX7ejO zU9;XU>{u0Gw?E<%@suXlidI)kxaeG;BHfdGFQX~D+5H>dCHZd9qys5;i`xLe5$%Gq zQDOJ;jXI4x$^qB?v9)H2lzRBaql_OZdYyRboVFGPWBwVe5}(G&%8RMH0{-O&!wHXi zm#L1#{as=MGj|b>d8RL+J|_UM>o4j^-sAW_HQjoyJ?1~<=*xy&UO(hdAvJeS^?OXM z;jBS6_>x)XPeEJD!O3oP6t8Gq=2k&Xj1o5!?>j08Tg=@2#Ilz|5r%3fpjeYR(pebf(CTRMdlOVIl^8Kk2ww{COi;fr(Or zRro-gssIFL zhr)}ql*1n9rAW7Qo2&MRk#$3?PexMv;c#lkdHvc;dcP7RdJqyUa4;V99oSTpvgT~Y1n2QYg4ji}8a$-WQP_{Ph46BB;ADy2~M2TQ{Y1)U%+ z3Plw;3`a6lLv~M_k2@m6Os%S_oUpY{e>an6Tub@%BwqM4)>fgTCJ*XVp|R^J-d9bm z5VGG_+KuDB`A-#-G(`3XJP7 zuc)|EM47Q7l@Ypdw49|tM*nJ-!1<05@LS2ng}_P@Ye>?TB}{|~0*YCCSq&873x|0NGAoJlAUU~{>PwZKi1{)sBE-4|D-nSD_9Yp6%JvDg 
zXMopoPwX(pO+TyWwP67Y$E4ENwTu$g)5ySIhfZ4VH5j3I$uDZ%qY2|fF=nAZ1$8~A^#EBCN+z0vi0RXtbzB-J>}Jin@0!Tc>6pYv2_}| zy0#a_FLlArg)~E^9o@&`!>rqM0&%cFEYsHb`@yj6z(tQQR#CtPW~?zRK!HLLIfOWy z@q6+p7|%Z6okbl_sSFnnw~D!l($~iFU5kn`^`~drC^))v0cD9x?)17QXwi&VQSHKP zLga@AXHVAN+BRnY!t^|ngc)iyi!O6&U%y(3qd|Tj38KS?KaIwQ#XGDJxElS9hwcT`gCW*O; z&Rgfi?Dh)IKQC(ho`XeQOmi~1@@~jkXZ1#7mWUv)HK2#2rKHe|{YEKQniGd;D4!Uf zj?B^|DsFfuj)t8G3>LD+oIrz8hZs=sQoC5&{`X+FgcI$UukWoNG1OTYNkG z4BZJC38H-GL}7R7XQKXYW8%a-jxOI&Gs2!ZDwsZ?MhPJyimD>JW!>0x5SaoZKl3P_$$!=Z}E**ta)HhvvB_#TZ0xfo104_F^62PjxTV zE4{*Sc=q&w-~d3JNf{=`Nc|pj_}uW8fBEJoziH{2j5cqeH-8J9W(xIgTf0Z*#~GzS z5CS3X)Lfs7QT69>xo>W~Uv=GQe*XbW_o%t7D=-VX-J+Fm4IGOdv8yu?s!}N0CjbqY z`yB(CuLW~jidTXTC)O|LLXdWUQ;`KD4E16jxrj(Q+uAa-*-hiT#6vXJ=4+*B$xYYq znk)LGyWEq+>y^EU4A^!W{bK*yLm*Gd70%nFxM)r07ZbE?7oix9OQFtFx6I>6buE8- z{%^H9)!l0mYgtBaxWeYIJK>Ah`yn^v(pZg`C2JiZQgRc!XE`TS#D`YcN5}QM{Yk$E z&cekG+q(aW0)Nn;E5jUvA}D!z0k%5;6V(NQb_i{Y@1zu-4sW=yv_Bc2MZ$phan+)X zW|R1Ei4bsq^m7!d15t{7sGBpgLX@39&(?>eYlV(T{8(WrJJT8AQXZdN;7+R8zqlZ7 zG@NB7n0K5^#CP22Wy~f+p*jG@byZ_2GBi#@7I&(*D`4#vQOwi3l9%O!?jz@8e}4O6 z9hfKGSXfTx+exRYq+7*`ozKju58>dcp-6$D!g;Gho*X^=YLZlWz|sn-3?^CEU+BYaP+Fx!hUCK{T z1Xo|s5^2Duz5a^GrpPbTUsrQ10uApgI06S99*Ycwn{KLuLb6H6{90y}!lDz5;K=Pz zllUf^ClNOam?;$|_g5IDC|+SA4y~6`M6| zZ{UqgKI#9h4kOCrfG8;kW>u5k)$a&?w4CKL)5tG{!SMo$E3`x00zsR#mU9_+*%a1O zaSHe}ycA>P#QLVMXKbLaYD0APA-xU5~epDd*-T;{%tyG^k zOT9V6WQca@A4>fHJwdUFy6e%lrR$Ny@JV4u)m+9sLlIdnj;%>g<=JmbJ zC`c(bzox!ME3dWHS+PejV5=Q2rBz)xQGbIeB+v@3=bQjzXsPX9!1SO5&>L*~uOa{L zCFI(lTBBs@>VG=+%?{qPo=TteC_<&86rPjh_#F6vkdp)$o2N^k$J&A4`DrT&ZXF3(+6Q5wL!?7jjjksB^DKoi`f_&itACC}WMwOK~)4^&e_^jpU!T;g155;FtcG zk=uk;7L2Jdkq}?suQ_^AE{2{qI@|_-->7ueOWn3UNk!bf$BUzrz^e6=LXqvIpJ}@A z117jZj-Q*1eH{qPA5tIR7GDJR3H-`GnXjz_U-KPA{?=gX^n{b#`M1=A+>xv`jckHk z0hq(6w?ky2eo&UE$|!L964C*Hp`SVphb|JoPamr@JQI1d6<|b7P!(25fQC>>N%Qy< zm~JG+OkPl4iuYsGuGw^jd8Tm*DRs1uYOY&yqFAEB$OH69_8s)Z@Abp|Iq_h@cXc6q z;T+$`{r7Wb&c|DOTi92`zlhgXqv%F=c20GaXmu_G4|u=c=3_F*o0#-IL_SCCxdbvBF|oGV&;G_@oWt(nhi-dKW_0 
zutMK?C5wEysBKsuvzI6EirBVdY*rL6*FE}_$!q$`1&1*XIH5L&lDS*uljL#dpfkmH zOhTC=;XIdTE#Ey}&RcxF5F6b(=OyTE&`jr3ukkU*8z z8ZXcF=eXtwO#TKLk|(#DG~*v;)NbXnrIU^g-`uwBeP~rxkM$2*JzuZRJ=Zr3L!<<3 z8DskJex&1ae)YwgScJay68p5o@{f$9QKlSzpUvC9_Pp3t*VMC{Hufw-J zr)Dv0T#vZCIy-IVG|wt0=a~(Q)rH6d^Vp1JJVDO8hcGLV5d(H?C~n z{rm%UtYGGa`+i3-s|pQ(QsR^=a|xxo5+}`i?3D7V5p3RKVK{y>LwQP%9a0VGmb6Ji zTa>>Mk6Ft^94{vBFk--AUaiW5s@3`p8GAS`Iw|*KWsdtNdGlQWK;zAeq{2#5&Q8Vx zukFgePe1PW1C7l+@H8N{jB7av4vCW>xNQG>bR(@`J> z*^3cwk3RCJscp2D_*h@&HFw5>J^Ct)^IHbB5&r2I;B;loD-R$Ob4~jYo0Cfv|Kx$e z`UUx<`Pcz;$*VMZj_Bbjj=hn{IuL5zw!l5CWJ8FtwQfF+qx0R?AZ{tO|JiG&Sh~x_ z*X=uZ*6w}8z%j;n|1L>*0%Et*EbN<=dG&gmJ$CJdIlNa$kn^cz9r+ zL-zRAHwM>QbzLfThUctM@w5M;kAu4ZUK&l5u%BgMrOP>_@#T+9P^t0t!(=Pz(iFef z4ZvbNAJ0>WnNV)$;KFZqi&u#**Tu*+avw*4B_PY?39AG$%)V9%<&|D>6olz)#$3sF zwy4VKkxm)zjK5#OuS7b=DyKE}HgV(H&rAASrR-5EF&K>OJrgon5Klqy zHUO`cda!+~+((6(Q&vAcR{L{VvnTG2%eY}X*3xzmkDmCV3VmfP$z3*(!`Q}DrzOI2 zQKEyIpMCy6-ef}MIULQ9zpl4$?Br3TaEenUcyf$CFVi6kGcL@N2tD9uw8PZy(n>_G z>V@^f4br!zaGf#^1=xxnTw6nv58sRE=(S7~#eT@gx^Dq))RS_r>BQgbuF{(6BX zWu*`7Apy8+>Sepo%{AF3NNXA~p-e)yf?$W6yhjg|sj%<1udbBwzi{VzJs?Lt(&#UF zQN&}Igbg$ddsjKnJfXKbn>4hEls(Q|s^$op{cIfHy+N~QS$L-!x=8*P3cONq6NJ3J z864i)Gn(8!4!LQwhVa!MGS$?u^7VL4>d1ktJh3N!{o+s;JmLyJG612NzH@nTBNyzscDOI^Dz3!Cbj`B|A}`0~wO9Wu*~5KbiJ_b%V4o~IAZI1`b3 zZIpZt*Lih)OxAQQ9gzJ-KVo~(UZfGI0>rTT{O$eUem1%Kwxl(;FFLM}Md)ovP$d0o z4-|(NslQlKcE>!-nlN`vT{kwmd4ADp8zQy;)oQXG-98o7?cy?}1Ls{(FxT2v!V~0V*$0HGN=Xxo!i@B{6w7>hB8Jr~h?<>G# zGHY&=DU2JM@Zf*Cu{T_Ec|H+b9U|Kl6nVMfj3xIE!nA< z2D#i)|87N^BP6aINCg2T0P*7QT@D}E&A!fH5;b?fPFvgDjU2=TyCbBfHOCCcuhYu< z8*kEnOyz&iqq&_JpM#;I5l{L-b-Wd~X_&G}6+q$s_y_JEoD4@k$5cKDS6xvCiuj9& z{3ldu*-jum(=B@EW(lOLWm?D~D1&xO_KjT5*)F2L#n>!~_5KrXpwMQ?6)r`)%t?RD z40MyDT#D+nK)L6-*%r`lSd){tZ6v9wf0a+UU|Qc(0HAd2L=-bsu`>5`PNT*+Fs@by zAg@7u;z*2c{lmP~Pg^>3#Ic{QbIEnt;=Fq6?pNGY7?UjC5_2y@4EETuh&saoIC_kH z+h5VmcXGVv9o4FHL>HwiqyA;zQ;c-n>XUS6bt#p+Yh&Rb9lVD8JNOFezr+`xY;6g6 zyFMn_ydQ2w_mAUxhQHJO%Se2QBCsW$%VC)E5Vdw)Vi#m>_qiq8dcQZhN)m0t6MRwn 
z_s_T5_47+I*w&S2x%meu_77(6|pG&&pKj<)%6tSDKjHSHdr!j&#;;;B2#V5dLUY1=~P6B3S1@@rfk}V7SrP(1z zFWiyvaA#1>0qz`84Kw5B!t=pg9*g>!r7MqBM9sX-dP;v=s?&%y%9CdW-s?k2#vs)FJ zVS2r~xTLqp0VyueCcAE>SdrxC$XBL9Hf(UTKhS3|WyT2LaU_y+fS%h~_#lv^KiOgq zV`KLhE4nFNX2SVf&h-s`o5_nET&p+}c44o<_iN6`0c$Sv87t|XHJ3Jx0W)uGRoojc z9>>X(=hT&Jju8`o>2CdLV-t-|{J30E$nhMcOVnRQgH6xa#O3*hciqsW&htN@Wcman zUU~*{t2M;X1Z39Ug+5_odU=gnsx`FY85jy>dWa?evJkQ|WAT7N-e^a>ULifNnv6@E zMAnq9PB^Bl;pXNFzCSkj#|xrau8rH;x(XH$>ceEjCqudiBnRR67GBcxF-el0pV4R# z=8Sv*SrPUmT?Tc&c!kuc>bonOqHNU5{Xd@%eOEi)^C%ib$(f@c;wZ%X6P9?u&zEaQ zprf{~>}$@wb=kGTfklbNnBxYf;+U0g+!VK%coj3$v5Mk9B!+Wge(2O*djt2d7S<+Z zzZDT#-~-w=d%FB(!(}H*IS(u0%OHdR4uHkzZQ8+Q1d`oRIKWcWTF06w0_dPQLJNxjnyC_FnJJc{gchLd6+S5X_+a3~T`(6pfr4(kEJ2Ry!Di zSR}aVVSXVS@XiP*B(F|8V9Fp{Y3>YQz1L^;)aBVQfLIv-ojtA%3mtO8e*&3s-kf1| zVuX2!->GE0xxJ*5GmuXgK%6Qc`>NxW3uHdWFPUaI`l@AwC&juz;#FlswC65z~JT|JO0l^X&Y=jn3Xf^E@gQ2) zWu&SET@e;3tzayjW4Ql0TA9|fbH3)O`-PO0TqA`BENHm4orz!==$+RL|CZ7RJNt$5 zIAftD@)}crcSBt|1lf`@HA%@Y`qE@b0kv-vwM0br*kscHi>%oH5v+@Kj3u6C}6L{&b5OoZ6KacRqA> z-Wnu{`c{8r!Chw#1oXm84o{8D_qlb;6Kp2TD~R%8Zs-M}DD{uIU~_8dnH)o@(h!)1{&sN()txT>4Y`+!-DU9>O;}( zM<3iJn_%O6xiA1d4Tze8Ok7>PhbI^CdmiuEysszY(rfh(b##=*($+V+C9}g|uuXm%*UvnCn)I|MgB>G zynN@KoK{0u_0Dc;MK=4|+RjyFEtY}$mNDX%j+!az=YQau*ScPp3RL1~H?OB4SmI8F zY1$yPkRC5N_Xw6q^-5Fk0!BU!(X=9_ospAK<5B&tKwL=fnPE4LrpS*IyknHKv6;A| zptj`k|G>(d^AFb_aa_KG%9x0aVC}ghA1AU);HA@FFV17NEKB=%A8GRK4vz($qbIJ( z$^A+PxzF6`NHFHlFXleO{exTP8SMxS@83mfZ6`#&S`KsNmhE#=yYnYZHvE)~%PHS2 zj0u)&L#k4Q5L1xOSDLDOfVf)L)}RG8lbV+;Pz#7p40%WAA2s3zpOTf5bzD05;aB|W1PhpFv zhw`3f`vu{SpX_qqpEFRCUn6Ktn%Z$p!YT$k>+tOXdn<&?w%&okBjEtj$^~vt;lsNV zAYoG8)k;*Ep;7nt?wkE#58r^GZVKYjSHmuAZqhd1X_?*JTU$Xpy z{=o$O;LT6fVRGGY=hQ5WYtWN)5ea}}EJ%UN%A9{r&Q|}T$Wh`&|V%aB(H={>e{W`Lw8ul)+V5twj*)M1p7<`2VJ04={^Dn>mGi|KJ6Ay-WZ=b-3O>KR^4bge)j6lqlQ8= z`;b$5?v2M|dITjdZWR8Jqv;|JMH}M(1pR5xtdvq@G}-R;5taqLl)RaKtYYXT_@TPk zWo;c87hEA!;Zh@k> z)?za^%Y(t)KTn`3`@-zR)(-Ls2NNn)$~UE>IqtP}w_63c;le4OV($eBJOd?n!3~+_ 
zs6)-@E$E;s0(gs}md)+2q>LU>@b@P=h9+G@U=MmhKusT!iezy0L!B=1QlU9E1emf% zdK;wydbtf8r+A$c-Q^um?RgXZz{KeB2djt%f{WNdJECtzAKBx)kcofd%EL`&lu1vq zNVlc7_X7omt6RV%xM+$Y7GxUWj!Zl;)*1DWa-@A=#^nWqjqF$apV9N)?mXK-UL#LX z(W^7-kBE&ui&&E`G5wvJ!{`=U@3(WP=1aIz90o&W<-?+g z{qP&a3CN$afvP-Usx-iNJ&75SbH#seVh(Ed!?~VRbkv>GB4?Lt+&lY znncPyt3aSBEn!R7#=mDA+#~tAzw&K#H23681{M$|itz`kTzn*Kfi@y5t5m_@ISx+< zAxxIOHVfJ)se5wiAJ1lpDcZWRZc(ez{rVv}$A!JB1g#tdx$n*L2gxPqSq7oE^OkcK zcAyN1&K%)wv8O+ImpR%~heGJ0T+TSZl{))+;{TQVo%y{CPkQDldiWF9Kh?rYyyO`% zpyF(UBc=%dlF8RlEL#ly%uI;xC(4ffCHX1=pAk zi!Z5_dUk$p;Sj3Ox3MC)Za4JrB)3goF+alCK#!54FLRJ#NwzFrzTs{~DbPsFX+Un=p3c_aAzI;H$cme4tv zyn_BP(hlMMp-o1VJrnU!;9i5jE=SjcQ=sThu9npCd|V*4wUoGKt@CYso98>A1SsHCWbHDrWZF6iV#v%8meT`cjVpERk8B+b_9JP177N_Q5~eod*6O@T1v}&v^c>kO%f{~8d6q+}%BIhVP453r)X+1Y)&un|M}j~uZ8vDb z<0y(+m)19pIoCFZzJ*$aRT|WQ2SS4?>I|!wZJn2e9}zg1s>IeCdl+KGBmG^xmn()NnMbOkeKB+(T7sZ`LBp11?l@YjyYNpE4W$|EFZK&6ALfR& zs!8Gf`jnZGYknaiOM#pQT~>ZkQCX$7NG}JX68e(w=)WD=e+IJlC}S+#z2DN<~nAaJdpe$q@ZGq z4(UVpk^wij_oQBwkEyY%4o95egvDlupejHIrm8u+YtLrQ35g3~M(@i@}UEN|nM;^mr#~CbgO`^mV z8G&sHPz?>{I#SIW2L9_9zuu!e%Mp@6iFzi0ko4Z4js!>iPzR@zle*rlq>%~*44?m$ z?yho;f*;m(Fk^@=$h@d)4U|*+UOu2hxL&iL4h+*$CIdV48&mH3ql+5cR~q5WU0ynRbn4tJagNB#DS zX~U@JJoR*EPm8VpD4BUwWJNjqu*<{Yy{H%$*{@GURu<|Cy3j8)WAL*);t%!7aMQtK z@qd^1^gf}pp_>a!FlN%U|N=_s#pOi=x- zm+`jA;kEJlmVcVjlCzXxP(y(taD1Y$*A^58%7!zrTl_mHQ0(kOEUVlDeUG`;Ck}TN z4G~~UBqxXKpzq~4*f?k7u&f3LjeZrW`WVG3+~^L zq#~Y(*1{@1|IzJ%akV}Kh5>w3WsUHbD0ad1A0OA&Ud)1sbbhyuQ;)|}fpPzU1nA|S zX|wBXS*8(C z^_Z!*%iFuV3#&;E$L$sR?>FAW!WwP0(dKgg`F7DQO{&U|IUoD~1CT&(zmK>*d&dud z_ty~$1m`ML6kAR?`22I`%WE#*eUI20T#Iu}SRmel`f%@$a~Q2@+$nBeU?;n{**>nA zV9P0mD|q+qulRER2}dt}!qxY$Se{+r?`q!t-7m1SJ!CTB;PG>wJ=tfua#VYVG-b(j zZx=f)nP|ho(Fs_X?UsD;^PeG7@$zr}9WVdtFMwbmQKEqxH5C zUQ3$2(rMMH%mH?2hVR^G5vBHh$d{3yGfK$hLxh4Y0;3qta}RfhcVobU25UZFSDOTA zqD@w?YVc`@NIYAU%1P#rbnu*N|F0jv(3a_R!em--dw0{lH}30w&|R)14W9`TxA&#> z<>g6Dw^_Cip(F(VU)tW}SC%9@^ZS{ZyNAbo$2Vl=%egWuvyxRLizPOPW~)f5MY7QV 
z!S00?0;IhJai_He=%3I=3qb%Eg4hrsH6TjeU8Gn=R(7$n#;lw(-~8U4Z%jVfpoN)x zc*Ko+-%YY*K<0}Z?%{6kZhnUEeCM1bi4j7gwe`7m>-SFh`z`PW(#hc?4jrwF`r>|;mhwR;Te3Uz`6>A^PW`(uw@(x$$WDTl z2~l^1jJlXIr^=?F6l&0CFdp&F*WTgR|LC6)kM}8O8Mat}sxaDOD=)ZfWu0z4&s*R6 z7BY#kBJuf6=f`xD9-U#IBu$Z>7%3E$Ggu+%47#4=u$W;gjkAt&krQ{iq{A(!a;oeM zr;Rrfg$c~Ky1T$~T{{;PiSxvf!C*vZFd|A~PLB`CX4C(FJR@r@QpnoBST682!SVOa z&$!NS*M55)lubJ80s7wXToE%bHRt3B3PkQZW6LSncY zMp{vHP_99BW9%RE3!6G@UR>8oL`^|9CIbEy}+ zaV>a*##$HPMAO>8?vZu%^=(qZo4u(Prt5jO%e&w7&$3uu@$}&X&W|3i{5tORdFd-( zMW$Vxa&!j+q9i6A^f-C&ghyX|#?Eth=8XZ-TFevQfbIUj%cF^@j~ zkbI$u;)w2GoBr-?qTztW^qhOY_>enqyv^6Y{Tux1Z~aHOw8L{>{TebIkk2M0O7rf& z`wO0Y_&)i;0f_@CEC-K1@%ORv|b zic)N~V3w7{-2vUv7EU;HdBVx#&nc>kBuYHl#Ff79QqrLmzCoWgK_0m!1J&V;ZL2kR zKmUlYjz%P%F{|*+)#J-~Mva5uHhKbZ(K}jVj-v6BF8l9Uis&i<&}_$re4gbC97Qq4 zT3;YggwivvR;NJ-3qh=6dc7W5mQj@+mZ-1HGmkb4@J;g&br4C2Mj45)ZD)I%P7*WE z3(n5HXCy<+XkDNqTEa-wwDfRKYh6}PL&^Y*tp>oa!@#S(iuyP8hyv5kv+33Zp%lyb z)mj%|rDU+q+tli1=h+oB@)8Jl)40sDw$?5^BWoKPdixn)Jk}Cgtw-ARFYa_Q4d1lN z6|KQxm(A_!`O7t>vNR6XS%90o*m@3ac(wgrc+aR4DlV9dY`WTG)Fnp`?{aqZbY=5+ zf0tkV?Qb)F?q%O;HBtsn{NPn6XLVXe&>w}E_zfT z#KkU{msLS;AG^Kna^pf$zp&1=l%De*xsia=96i3v$A&{tsok7>1=IdDuYmv_Qsfb&Tza*hUyDiP9mRZXX$?4ew*8G{Tlt9ea_B~IDhzv<0mH=V@L)m zMV4do1!tc<1B^4C=@tu5;ZA+!i}>kd=6OMq zbRf1=wxG(+h*TFDDQpF~491`FC^pezq&EtBW zQE&L)jkb4M@W(~=&pNZMc{QX8v7&$@aWUp_B?B#tO;+IgkRd2rvp-zeKZ+u(^@IU} zN8y8ROE*&rUsBzBMhhxkHIP^>>7)<{p)Odd?f=%V$GKI1LI7J^TXdp?MUiuQevWf4 z%+ZE($QFa|>ibd)-!?3{NhogmuI-7oYreJbXB?PWT5E3s)i_F5?9f?a(tt+k}xE}c%oWHJd?bTuhw-FvxM1!3); z%2lO$5lL_tj@v%YvtUxCe9P+kKGZD8I$^=-isJ`gaB{G!h`#d9FY(L&=r<^h#6$@~ zMZEFGo1h$@eEJbpRq)E!-r>2IU#CAFBe8t+!TUVEcb77sV#^$-eTf-sG>!`649*4R zBfME9t;1REl}juU#x^~T8l%DZeV}G)g?F?j6t#La2`PwFL=Ax=JPS-IMUq4ar6_el zo-aH#89|(MFi5)n5f+WJmeFtvjet&{@y(kgNe8WsCvOBJMyLpx_=a{Oj){~fSztY) zQE1N$jHDtGP~|fgXNR0Vz0cE!57Al0^faS0+U3TJFHjZ*GLgixpfsTAQ8@`_!S>EJ zTRZz?CZUumvuVX-HYcAhFu6tb#`LytQ|1dyK1Jx9yPti|D{p*_;r14T(KbmUDdrhZ z?>}NbE0CR-;dsbsG^E$>V6=x&_jboD7IVsCfr=C=0_zGoojw9zM#w49wgP8-J(UJ| 
z_te-LW$&0I;X$$3mS`~}^kAtkMjb^zjnl{B&t`vCQjPQk3!yJq; z&3#)D#Ot@?AK++Q)yL?{A)c|nz4%cyL<-mFiDk1l5n@FiO7`|{u(x-UCntyGlNs}~ zb54&BDCaXuqcQmcY=za~eULurA4P(smm=bney>lW6qBish@-!}J z9T$p|L_X_G2(0xnV4p0#Y}w+yCy#*C?~ub`oW|-If-X_o1spohP_be#-l5DgibYN_ znX$FMs6Lw#JgYDZdG9T_>idX2%2-V*s9c`1JpJ2NkVwIxe zKF|RXq0%m=`GV8)6JC1b9frF%sa(YACx1&>=FF#Oy!YkF0Rj%2$E7XwdQIM$Vf*Z&fu)Y1V)&sRgm=|SUol7}rV6Y+qAb*;;F_r4M})9H5rJx3*@CHD~3jv@GuzH4PYhl{%VvbD2P;XM3!2|=zGdh|)nZ3S{!d7=x)Uh{m`$GTVDUi;U9uRS}j zCjWXeh7%TpBSIjFm^^*T;gePR_Se7lE4=#Euk-lm3{*tFHy}wm5P6JEBsS}yRP32y#!v}|3bgSo z^BrVsOnhZ^IPdXFvHBdAFlTK5fDpc(qzJ6kN-uDgA?l@c2SWzqU5FB#6WFpsx^T`Y zA;Zu~V&cSNbxEXPw6)Db#zavLH5t9lbVsNEzf38as3$8RF+1pQ3 zD2SsDiRvM=VfOR^`TUH*_K5Dy7ZJNJAUX+g(npF8anwU4T`Hw;rKK!2MxWp!$zW?2 z_re_}PaYC?1|LHi9^2cw>v3w{J5$ zo8wf1&?TKXrW1ELIX)%YzQK)jfUZhTo<2gE5>sTjSdk1yREtvt#tS|Ln0jqmT>Tcx zCIhMNHHx%uBjIb)xOQ{BwsaJTWsS9Z&A4Sg#p=tm{C-{VwS%th@7nupQb?O#&; zEhn@xFW25CydZ3HiNZZtdMle>iIsaJVOcVDB^npzd2xMS)ggC%E!6GWH+fL&u-}*U z>il^eLM#iPI_EtKV+&@d$2@uTd9!~KNBrYI_(NWM>m7dn!RH_o`u!n^g0fst=>lD3 z6tg+=$tk5S$);!MD)W^M3Lo&6rSTOG!fjKLFJxjIph2T?0v2NK6-#&wi`krk69}VG z4qWTmtq&ymDvQQgQV zf8r}Jlx2xkifGUysSKqqF~%Y_IF~cmhR&eNV1G=vAG7F4viT``Izgcj9gQ>hc;q8~%d-GS2{V~I>+gL4`&L^JSAqu3 zE8_j^aV=%!0D{Z!>$=%Mp=O1-RljKM7j9iBv`5n)r}k<#Kogt(uE%A}s+H{?$HC_g z_?spPf^w-_{Qgq<24T+D$qSyhU^kz?9Ur(B8!!ki84kQoKfiKVV2#gqmB8AR>vYvv zQ7kNXO0>UUqYmb70iwgc=BMGN2I!f zKlm5_k{90mCGOolq_e%nP^L(2n4TXon`Jmtaenxe>Deg~FUYcNg0UsK(ttt=1H_Pn~zhA)EmLQQRTz56I@T<*IfU(dl&;SGK%X=jv4u`ejLp z)5fH1H4K?VMA3-uaKwBvBtAd&`A^!@)K!(CiyW5~pdHR>ifVzimMBS(y#Ym@VeWoP zuiNFh@hz&!DdQJjBN>l5IXs{$N)QewBcw4bo;+f4c%N8n%={F(b2^8+1zin;5@j6##8S|Qbo zWE~=EGs@Z);APbk*8jdPR(@Ttz!-xy24x(PTgwY|UXVKL1M|xH;F}8p(#Xjsi{Gt# z2UpDDvTKuoTK`SULtL?xSFg6&cf-aj$5jli<)PNR%)0&*0uw4%xb|-0aLsIMT*ZNN z5@QIszJidOA(JtZMVmg6!aW%E;At5CDouNZ-=IYG%hLD~c%X_V}lN z{Kvfb#ycEON|M148pCC%^<%blc-7Y)(w|HFK4{N~Hw$GBp7#>MY>cP3j9`GH3 zl{b)tBucu(ahGCthH#ZPmK6vUA>uA^(j`h1aiq|T1x`4UQA(7=$T&d@i_sM_mYzXo 
z4bBNvnzE&QRq=CqN-Psp9HVWCE_2^8HHuJ!nEu`#k(1;_L6$8@W)sT!0@s^i=X1)^ z;IyME3#=}YrbB1Ai;4#vADol@;ID|&ZRT0Q@VOV69Y3O@B9GFT3SmpK=_4ke{~WuR zp=?D~PU#HBbO$?FGoY9pF+Y63&wunT|HZfeTRypWpP&5YcOXi5^EZB-L6TynqjCx( zBcfQ4L={;vr^wE+reM(TQf39wU`Wy*!*q%g3Kd6~%3v`b5*3z6N`zZ|cj|D-mko3O zy<14E7ea-lr_Vpegn(B}OH00Pl;?$#H^mxwu{~w)8hBvN zT_h@Oy2kY(#A^BeB@W(FJeLTYRj;W9QC>L5vmbeTpOrZaeALPZ+Lm^|tRXZS;!+a} zYR{?_J6Yeh4DAG%)ERFn)%rmSus+k_s%5(Z#nqoh3-A*cA>(>(t7{|p@y+G7kLvSZ zd{VJ&?ccgi$4Z)6qZC(K&ObZ8Q|Ci%`u>8puaCK`AGGm0m!B=pDU1N+G_%tKo;>)t z*}lJboB!mG{{^ELU*`VAv%bzw54rcjk2raJpL{kYFE!F?RNX;K__pGe@$5EjAWmbH zO28RU3=#^Y0w*w~_KH>;-=&pM5Lj2BD_TqPDVY#cx3<8P;cj(4Fy} ze64L--AA#)=-_o4hm|E#dGp1uNsB2numgs>yF_V9m1V?0cQnAo3hkg1DbhH_8iTc( zD2gz~VJb}&#gxV(I*Q@`F2yXz8i&&cGtbDQh$!ulbo=xNyTozK$QX_vJtCjZNTP_v zR8v-!Z&;^dlGu?5h+IjPpJQBt>h7X#m*dAz$UDFA2u)cc2QfvF5t$4rAWk}z#SB?3 zh=wDg-T-4PD(>QhLaGj?$~pMrBYyC`@A5mp_fI)Hd_pA>rjv|#m@u1VWJSr=&X_?z zLff4A@e>|Dd_*ywbK~|c`n@jm#gy)F$VlJf^yn%5q=W5)E*;kR3h%D;t96-gB%9`; zt1J20<66pSsWiNBYS>_06v%C5vWd{J9yeNVov=;~Uq9|;(8iUXl#&=*QHCThDFe(C zBuXPc#IjsUeYD0t{!(yBI2hr0>-T5_joSJM>_)*_A8q3mUb>-m+$*QJftX-vlrUU` zO4ni6w#Tp;QVsCeY8b}5PqzK6FFxbeubU~`4N7@s`Yrf$d8}ouPlN=;HvB4TiYm)0 z<;}Gl%SNL?;!SjUkqFWr&t5yVe_Fr5tG-{oCd7vKLW*{Zg}~r2&QZ)xx&O)g&3;>> zEq?D${)DaDFY@$k!pY%No__uzPwsw5c6{Qqlbj&N_n|S)pj#DYon1nzLL^iglo24T zkAsFhIb)26#BJ^sxb=_~x)=A^63$v8EoxijAbz1;`Z||(;23G^1&x$#}hjJl&G74@PYRacS{UN{ihkwYo|I^>+hd+OW6_U(qPA4-y|L7M)z$2+~h zlg$=LsYnNXbT+56)u%8yYO1i#5cg7yi+w#oC9z5%ktm@sMtf8x1d#JfoZ>rr|lgphRthC!gPA`}@mjtB|P289;h7Ux;9QiXMCORD(D(fpOvukqaY$Zx>#NA5gSDZyqI6(DB?%Zyr8Nje5=9uPzo8$)rgwdF)q^vSjCV|1?J7k7@oE99{9+$#FYu{bP z^x8!IZVSG3R$cvhb-q9i)7StohS`S+7CL zSod~UX6{@=FsRu!?Q_?vuZc;)(hyj`=k34S-d8wEd#=kl*Eh;FJ1-T-^10S#K>hq} z!>III*Qaukkh2O|Z?b?!5_A9nAOJ~3K~xE@V$L=D)RZ)_Tn+1bY5hqEzy{aUIfrtV z`SBsghd!xw=f)nt`v-r*_~tD>`00=NT4gv$Z^j?PZ6;=cgMasW3DkkmkAmRj>bWur)OA>}#V{Cs&e{YBL zqsK%t!5K|L2VGUDPKtA&bw$$eBdlZc3=A`IFZva-bCn4O>C77KKy8SL*9#VJ!$ zVDbu#^>r(Sz&eQ(ia1J$(hij 
zg{~fr*uD@W+OTJR66}S0?XSJwGbpPsg?KN=W$e*5sb+n6rlDB>a`jJ^UQvC#+VHA} z&N3c<@d*HfVV7_I*6&bC&G*0i9lrSBeX7|x28|7HSj{*!*0NYEXu1QmhX^nxhB#6f zYY-yBN7y8FPYkO|q%M4g1t*ZA0;j|Kw@$ef6j_cfN-zd1TJI*H+8VH?bW4d+lBko? z>2J{)Y=h33A3vqi6~-E0VO%M1NIC~9CLZl066IS*P>`P(OTTe zDP^(1DT|XOMnYLiqTUT`b&un_AM)LQ{lD`E|M`E(%g?{g%vfY)8EkK%jp6oy>A-VQbw1wFWQ(tbd`cyCnsFRn%i84fAim0 zCC@snm#-Ft1rx0Brp}sk)B*L%dauZ_6mFM|6$@N=$kJXayn@wjEtOJeER`f++hGsu zHp4Cin95yD6TOaCv_iArFt2(Bc1=0f&+#e>@@3s=SDvfz*X^uCsHOEQqzfrst!oGt zxfQJScWYJDSbHX`aW@Ym`6}48Ma zENg?{UjcThO&UwvacVy^1!#_rpO6&=aUAjXH-3$ivl)-y{{<&U2fi?AoFXJdRq3n2 zJJ%e)DS%y%aBEcqdkTb#kw!H0Zel!GAKx(6$Cp&COneL~3WSxW4GET_oMZ7tM@4O) zgmYTsO9u%NpgSoM9Yt^Z2Hn99{qZh?-97TNW9FyF&037RT~rcN6$P%WFy(?OpU{zg zjMe12BuOeVBzJ%DKK;=aLc~ZcWj;s7{@KXIl&F*9j3J*E$VgDC6uGlU62)j!aQx_V zOqOAty;%IEBlciG>&!Hds7$HyPO&xb$#J01^4y#ChP>^}bzM-QH0 zvmD_p-E=`#mT2qf4sYQ~M=zEP#{2Y#dn9Q}lz=vx^ZA^!)5W#li+^whhRa~K3vrYU zp3$b!X5(#2lwB8&mnkAk5KIsjxTNW15OSpaCjYW1e4VNF#K!B;x4reh6uwPRzDc4C zb8^mShr2)t-Z1NnH)QECHTADA1m`cG=Q_coT@hl%^V{(A%W_}pdw$umH(o@uUkhSe zf!_Q-ror%9kgu~BezWPh*JX!oU~pTc1Fa~`fjTx!y4PffnezowZm6Zcw@jg zi-;2tans{V3V~Cx-`RK?H$r&Es`CoA_TG&(2ouy^aPK^9)w;r35!Lt8fie!9#zhJi zhJ`)GxpPu5vnH$o;ci}{R2wjjb{ z^9&g$h_s7xP|Y%gflhb8d|^mBDU9~%?e5Y|Vx}ht$kQXF=y+ybRnk#lv}Q4z`L)#G z)A;lf6?!8{PnMAav^H4fP_e{WP}U$+%y2m1?DP?8FeHl-;^Z|tTQ_)eI%9OJPd5>a zMq|`#uOrTHQJg)cI(~aVG6GGW&Dq`>BXorm zj&zjr{O}cSynKt}N00g8_kO_l{;z+-?OQhxF2hzi#X_U#u(LHrB`JwY*y+C+^35c( zvZ9EPGwV7VZ=Er>sRQWxG`%Qm z!$M2j(zSMU8!Y_x+@CeacD08U*hkt>o=t{K<<;N1V8PcA77+T($VPd!=Wtna@0Vxz zg*k*tUj{>6*YaP7l09& z5BfB7wbr3zh|`L|T?xu5mLQb$Z6feql{Oxe77ozhoWV(f$>!umMln6+{K-B4eN~|x zSZye0Gpeeh*XtpZF1_s@lyfi|)$REhv5kltY}g$Y|6eG$xz5X`Fbmt`Vr?3?_=i^#{17P9Q8Ole8M8rbdv%3?1bZk$IOpUnNOyiA0Kh{ z;m>*SlkfA$8PW(!6O;6Ba&!L_tUOS?jH&VwMU(Z<`2D8qO~p}p&HQ$vQdoDmg4Y4B#(q{jlf}0ZQ?c=U`NnifL>-FFaU%mgeViR?{`@l4tA0PN`p@0(!#Ue*U5i;$Q4)zdH z!s*Ff%wmSnzF=BS3^2yE;6WOUQ~**Y-rMYSu~Fjps~k>BoKh$pm=b3TjEO?XQ6Pnn 
zQA$K$)X@-4>YJ`wDP4675*zZ6R15T0%Kz3lN2Lu)7$Re^N>LV-S8mF8b@r_?H502=?mO)?A(>ZQ-Lf=`eQk+#@iHnMm{k^*8lu~G|R~W`k!ZskCrm8Zw_g*F! zG3nM#)aW*8++{L7CQ~uJH{L-G2K0qQ7)J~dBp7a3FZ70vnUV-q>AYcho~qbkzM-T9eSM(+jhpS zX-20rW7}rT2L<1Jt>X1JV|uqsUfI#S@L~k|6uzPOAO4$^AN-Hce06~9vgw?Uc?j^d zOfGhN-Siu{=q){K|MqB?+PP}K3rZ#Y&*B%hhpwH67o2yQ+oMiIY+EePhMq2gS_C-b zaUmDtPqlVxM6Gknj+^!MNZ*S3vINd&jZ0&1t!T7eW=(m|i~#vqLoM*_TH^9$3c5Bn zHr7*fb$*qvBJC%#d@f>L#5ekS8RrqgE|DsGQ6XHF*Hd4&ZM+VPtNDxdIyEq3G8qd5mpb~^oSnV6) zIU~^4)4in-_{X;_!z2i;vSrOat!qCLX!I6?04Xp!l#r=;4?>2wxV9}ESYL!$g#C;G zr!3Z$2y1YWL(k8c7c+{YK*vU7(w16pSv>aD#~Bcx7EQ za<0iqG`=l_w1}w7jhA0$A-gCOv32V?l8)f;;UQV|lzum2aPvi8{MK*s@>gHu@BZdJ zbT{G6U-}wn^PG>r`!kr@)pV6L@?Ew}+su*D8gD#M0fq9AR1? z*Yk{;mIvN_Zx^?`j#6sF6xU_owST+*-DRV(xg4$b;)+o%-1{Sk&gRD)MWoboIKq5Kp8Qugjvh^|1pYvZ4^3v($k_ z-*q>5D#8njrMq&uLfSN^_3fJ!d_6xGl0BugT;>?paTg~f*qpQD`&gjc8<7mhjCY?W zNfU~yVo_u)&W>Smjw-z(v7&w`-U?_F`TY6-p%lqri?}yJBdJV9{}I6|8WB_(C=0cE}mWJ>n$7YK;^MBL$>AA{*g!!PcO|d{MBlP~j^_)Ym8kR8>V3#f|YB zMRDLz2_H^!AZ^a{@C!P(-(WtSaOa(`;re4DVR`h!zvA@q0i;{l;RqK!X1jNjS6+IF zkKg?Pr=S0X&(0r!NN}eI==lVR#LT9A`tDz$q(a0UwjzO(2_!MjXcWFnu!_1w4$?@_ zzjYfqJ7+eVv0s+FRnFn<2+%+TA8Pm@hDQmE7vK!M+~LuC9qx}~zW1GS^CI3bzFciO zYgXRFnikp=(p=N@>oK48Y72E=muHP<6{Ga$1dtxLwia;MAULHr)p6F)CNH$-SkL>* zKx0jD(G5SjAg){xK4R7T@KM@}^Jy#M>kt*zbFwxKFM5KaX={SoTiD=YHjU33*H{Ft zbGt|0g8@7HH<8^gMP4DaBR@K!nokg>#5&&`ttRv|_oQ`iya6Fe24m8l9c*cQWdWUG z788_8h`Jr$Fi#t-4rl_#KczsZ@CWnvvvzV_>x6!Nx84E3;R1^;K!mjs5blJ-vj&qS zp)wU|r-QYYw5v$FU4#%6c}7-d=+fepKSrhf^AqJ9SC-ICiMu^S+M_$@GrqBljQW^L zqbDae`Djyv4 zELEr~jWL!u?ogHm!uMprg%nXGlt&llQ{-&Q?BIa?-CfR{=IGNeI6FL}JU^v8c#Lo* zC&f8WbjisRf@Hu9(zm~~R=g+$G@c*AOih-v9&N=J$K`Eu=Lv2$0o~po!HU-ZW9!`o&-vmM-x-4TPlc< zKYy8A4~nRT@;dU0oWc5oE3DZGvr`52CM3 zO)h1mSV^R*6@I%z>ryQSOkhcSc87sJ(9;}Evyg|;4CP#rbGSl>-&S22mE>n-?vz_rp@LV7-4-5J1Ikcpsjp-`t3 z=*rOTc1gP(k|ZJRMz~W$Zi~iPupV{A8dxkUTwWn2Q=*Q4#nsJgc>h*au>X7$Oc}p1Xq!s%&?()iS8SVi5;~_lCAs%x7$6bDU zx69exJN(7JPWj*<=l=Oqw&D?oMad%4l(|8M4!!;Dlq%O$*NnHqmy8)%;RBIPSH+g_ 
ztC=EjF_2rgpl%jf5RlKxPr8cL){ajsjk{LRoRG7Ehhv`c?ljJ*HR?+fImh$!T8^A@c^~-wNR}Jrs2GtmzXFi={78$+%kYqf@TE%2ukfuY5 z?3_h$hLhUIP(*0gA|0;PiEw4g0^&%agdm%q<8+BAOI%f=R6;V|L8%m7l{jY*&NSYB z?Y#;C(kYzK%{i7#Nt#@uWju`VxJ46JZLhO-Q7)wfaTKAoChc@6ijpXbh~t05T{2XE=DS{D}Bu%*)32yF70*v_{Esxsz*5v38S>D4!dY~} zarQHGMIs|PZpt$mGs}s07*EvS|w?dH*r;-$eNm5mbq^xx|*}&6_E?M7T%8b zeO(mB)-~Kjh&QQ5z|`e)zRbAbDY5Dm)$?~(9D9>`t%sCi6*OvlJ7IhKkqces8+8kG|vYF8Z z!)aL;arH#&M%TI$zjzHBphg6?Dwv)fVubHVtn-4>R;2wMP?E|RkO^rjs1^$daJ7}8 zR%7cb8ajysTb5XThOZ6l8rELYAEDyJH>ngFgkHu0gSrvUfiqZPydmM%=e@Mvm0KE% z;l~#ip0l2cZ-SW_DTOuK|Ev88k|d(r?VxpqwbnQNbG{^3+V3$Mj)-GLzuTuWnrt>_ zk!MWja~3)1aN0_P)etGTu^n?`dylRDfJA}7plWy;i3kf>y1E^@!$^rE@q{U1No7pF zm@|qZL@roZfpr37h0nRdVYQ_y3`#{9qrnItXOD%Bkeb1pDM#}&vUyIglTw^5=rYCF8IfAh??So1jb6;y+a57hn%pdi zjliNPi@7JzL@}nUxX$MZ(CaV$CMCLQzm0IBugfsCOG<1yYc0zpzH+l` zYrFM*FA?h3DG}A4-?N}J2ceZf$a!(KH?~fsxoEgN3reo{QA%Ba={6~-+DmL=hr)RQ zSe6_;xkr)B=#952OM}Fcc6$E4W58*Wq=SeAMK*8f;Pt#k>;Ef63I~{=tb%76MG0|| zf(?e13q7@K#Tf8URZlu>YI787&P2tsIeIXgu=rG*W<3ae>v!iG!=bL|V4YV$MP3rc zie9f9?yI4yO0q0NDPMqeI2=(qhsiUf6DTPej>nA0TinE=i;C%N%E@fPw6qLT#myVL z?2d*+ejy-4gcHg;D$bZ~-n~`!!AGj{wzfBG$U-g*Une4iwl za;w|n^%gu183GFui3(&V6LhAJH8^jn zxb>o)8{m_*IjvV2Xm4`>Cd^}l7gz6B!=;4mmoV4pK(GO}KSTCR3u&`eA2xmncpG{ab{J;hwFgfY>S0 zLCSD6;HJEd0n$hjIfJl)ofm4{DOKlW$)Jn^q%R?|j><@+h>%uLl$vQ)QE83V6|-5v zBDWMqvnUFr^TwwX0hW}Ku8oPMquYTrh8;O%Z`biV&pWoh;`rq)N9UD@*JDUuPvFo% zJmk3SW1ipR_^*5X`M-g8Pxr~5dz%-Ydz)ALFLM8tAs;_Hzbu=cfReY_lszHqclVuH@DGD&q_FgZEG0a;mcJUOSgb&H}bLHU~1&KQJK z=)A%f1;|#_#PFf#kUFqzmuqDGz%_(kcXJH|gmA5* z576x7jWp@KG!M@SRPCipK~Zb(Fedkry1DWjs8Zq8AlaczgUvkr7y# zl2S74IG$Gt`(4HD9m~tFc6eo5@X||`R|-ve(q;FXIomI57*vp3jvmCk_b}$YA4D9V z-Qq)P725y+AOJ~3K~#(Q8`!?9#FaekGD5#mgPw9{C*xwW>(c%?*d$0EDiL|rdnhVhueswkx5T7 z(|6Ek&_~jX%=9Fajm`Q%8;wlj45w$<&0ffE>_7v!+u5rsD>Gx!Lquj})v3DopxLH? 
zUDP>sa*2$L2zUSc-`$tg&)%mLg(@1FMWm@4P7aT`xT*2ZqX3LRbH7kbt0^INbgdv- zp$mg54>2gZ&aw zjv+T;_gMFne9%#|)iMm0^w8Aivu#**qki|bdp3HO{XiSa^az64Y@5urlzL00;K-bS zXS;32scqbu;jXd=-ix=(R*WsR{`XC*dUt}?+vA&Ked4I{D7RrW*|QF|Q%rrGe0BTU zaE%CU)6jOA0&En;bVdX`4y`S%b6APYug?j-A&T0_J~H@jM)29ITg0_1FrajqprlB) zw0kzh_}``y0ePK<5t%`as0r$}ecr>QK|r)7bPih1={lDNwzxS1gw`S{o$7 z$-xmul22z?_mb8cTF?SUbgFk0(;U>2m1jt$$_12)S%B(6kT%Y-Y8$TVC1)2)Zf0xkXMFd)Z*V_ap8WVfaQT(R)hB#- z{)|VD@16V|@5NR+m)%8Pm^y!s^9}vk(84>*t?xd|1*oE0%NNpgO=}Sv4yr z)}m(-+D?r<=$+@{LLkAzx_6i6vW_ZuCFP|8>-m^TmFuPQHmc3Ah~GO z9eh8-=DM{(+$E+*L!o$?N_Mn_OJOP*K)i zmfnfKZ+KtDoa+1O{klv=^dXgr4m&p3(a(c#C@6YBe?#8HbWjX2SK`eZ@~FYp;d)03y6VL=s>kReEgWRt1F&=`Hai2 zo^bTw9zh9Rbok|x?vDN+|chUifIO8R!5poAQAnQ$kO@9s@-~C0|IZG>&{g<45W( z!qJyu!(q*+M9x~s?l7W;BZALWH-Rhy(%)|Hl^LoFS-`AW8AAz(UP~UqYCUWkLfF-X zVWS@_0wJH(3dOUt=UAM0 zXCtdoBnk55fAIwGHvZNRa*XM?-p8r50!c;TVh#?5Fchtn{t6otD}B}9KM{`B({0#Q z@;wbelvLj-=6rMBqm8EX9&Ic|VKCWxxmjEfjJC=e4yRMJN$05yLyJm`Pop);Xi6ki zoQ=Y~^Z3<@vt~(Mchq&@vRTp9j`5<fvD<1um0}h_F z)IVwYpcT%n;WM$Ee{T8eXOXM+l#hO0^XzQJFP}TUx(KOqw((4jPHA-TNH{}VFm0gq zptR&dphA0nPV54cQnR{vpYu=dVGn=AO}D_U0?HIzfATZJ>Wur}_yMN}-=&(}r@a3V zzo>E38?Hb53I6<=*WdjEn)-~(Pe14V|NQT{|Mp}4;Lrb*7uQ#uU!7r!l7s01i^Vmj zsL~uzlBx$gSS@Sj^$mwV`~mmwozk5iaC-JSMjN_zi82MM(8xHV{}t_S^^E!-gthZ- z$L6sMBi|-6Z2OY7+a^u9-)tbp49mr2tqeKO`px1cZ4A5Za<<2qm_U-BRgD9a)Edr` z?G^ns+NJ>Rwf3Chc(~*U#|T@(bL)P$$>uL_Fa3Km3D&>n9bybk_CE}+hFcfhdb**&+$_Tf+#eM_0? 
zpK07;jwd63ZiL^CVA|X3BQZw1u0iKjTND)mjj{!%C{Z!c)^i%)FgZK{TM@iV!a_8I z_g5=yS)#R}TP`-Npw!9qg(yL(9F(OJ2}euvmq4a9XCWdn4nlHJI>uuVcR+$$uSNlV z9>U}+Qb>$BT~W%XSXe_@me`_TKA+Py9ZG9djNHF>4{cLSpHY(Y6M|6&rSr2yxa=Zl zH*=nyUvquEq|X4+_aTr3&-VpR?!!O*A^gcVg?Ii~aq_z2!6S=r1zB}`@RNd{d=CHb zzpSXvEFXV(#IO7jEdiwhrFd2`;*^EfFmDaQil91Nh?Ju7qznj<5~zyUB!tFuS9zju zSzWEL(?b-Vi?99*)oZ78IuffHb?X7o)h9p4`zFnoHZr+?f~hRUq0I3rkC&u4^7Xd7mqK3{$C z5p`43EoxeU;2gHFY0cgZ2L|q(fgx`mVbhN>hC*(~NUXKfmz90kZW|VjjOMGXH=PZt z^DFk6u^@ce#&pYbkayFKTlGGcfem*X4jQ*@ICK4C`m`^DopIaEKZ?8P#n<*7#=NFG zw_S5%E%Arn11YjT^Hl<3XBWdd%Dg{jTX6NeUE0p>z6f^p zRYdI-MElR#KQ0VCOTudqVEvm{sn;rGPgO{e*Eqx}gh23t5W(0Y`}G3FAi|hK5=ItVNxp2pFHF8*$YZrpsSLg71{^d#obDd!(9msy_TM1 zdA4)W!FsdV)_QkErSCgu`nCQ zGD1_dQd#hOLO*~(r)=CL&_GGnOmpr*6-HydH3a>9iX@2cHf3aOjSqdu;}lfHC=CJ1 za>AsV;JxGG>JslA%4%k_lIm!J=u~Q4$7Fe`Mc`_A#e1Pa$%5kpcg9wdpD8~0vf`(|HhfvXhy0s>3YJx+sj3OybjTx(_AMor4uw}M ztkd}5@CH=W1eeGXUKAoBm0~Xx_p1Y(5=1%{u1o&!eyu1?pC3^vn^?CYC8tJG0Z`qx zbgP=eST3J@!2R+eH!sea9iMXlt@ntl8@h`pH1iu4S1)jWMQ{sVd+YaE-OOpPF43{Y z>B!{Zi23}Q)#`@x^E2kFj%cAYE9Q$E%Bte|g(HgN_}(E}SDc)l@a)M~NQnIEgAaNA z?RQwl$i>Z)X1S!amT5V`Etfmq_Wt%;#TjkQ8(^&&?hEwW_4D}LUB65I-(Z;yI@;IM zTH&^k+|}nE1s^Y-;TKt3Mql^7`-vL2c-kZQE$-h1M2 zjn9oc`ZXwN!}DeG!#=1YMxZB#^blWIzgNibG(owm#SOuCpfsXOoDWz#!6+d#OI+9B zI)}u7Zx#gK4#t;Og0TkQb#!gFUOxHnJ(CO|4kg0%$oX%ET35t4l&}bqkt0$V&ym3i zH)_2eDLY<79Q|KwHTbq7P#VL*bc#}%>+5TlizQ0IbXswAct~kAIwtM)_026~4OWE8 zY8*MsOB9q%`%p6tT8DrN^Ol< zjOc6~S~Eq|ikpkCutz6pJeTLF@Ey$21JtC%oV-r4xTd)|Lzh#!<${azj}hmvx=68@ zxO%rCB4U0?I< zfBa|2#gbxn3X;mH`+7xm?spHOL}5aUq9E_w(v_qc|GhE67)HK3)?&1=GEwT zkG>dMTK=(D>AzOrAP#K7y(KKje!89rA^o$gpJ|shJhqIs-|_Zbc&VV-)$47mcFXs) z;W-Z*CvN<8&&B&QUjjz%=}WH+)Wv1S~D7TfmG#AHquAmio@p$KGx+Re6;TWxu zAWTe&4uQq>Iot(Eh8Ed%oiD%!~sAu`;Mq6SPLh35BcCqETX? 
z^b{gU3`__FYY=5p%vWC(SCU^bl9eurL@NZXKomYC0z`-@rIFSO2_ajO5v>*XPNpon zDfROUL=_lo(at3oh0fNUQiLv9|DqLkc8s=f(0%c1CQ4xrAMxcEf6v2{cQ}0SdraOu zK|lMF>Gd_MFMh@R^Iu@J#XtL)v+D~EA3Wmt@F5p38dST)Hyu`lu8S<5KjG{*A9H$g z#PjD@Os7*`fBXjTee+#@`ZxcX#pMzg8$#o$s)7fn_xRNpUobm8;^8~*aD8^p^>ZMs z=sHI$O-|c%xAyfaz2=#uj53|` z@^_a{OK&S*D|k;~Es!#6lvRcEft#Bfy!Xr|6Yf8F0Dx5nt0cYq7&!|k{4f8!;HkH? zs$epip|s&b98(2eA|YxNf)$O`9-|DR0!lh`0F?`TX;Eksi8z8pjLwQ+GLG;e=Ca~B zdwF(cDA5#FXq`vLE+>oDskoy=eCH{x;@yyvVVR(alH2!EUUWZ z+yC{S^FcdjdH$R?zVUn1D3)LRn)8GEoId^wPG5V>#TTDo4K(u`I;EK$JtVdbi?h#A zbz1xDOkN+6wfE(!HTmQ!T zttaBu+W3!ggbIvWzuE4~;;^w9=DV)NPyRM1SB-g{C9`|;S-;M|yZTDj53%+E^>D~7 zpUL)MjlSM)+bqIYCY{}J-mdGtY^;Yz*#Xn-gYEY9qYqX@HpR;J{RLobxy7_0_hUI8 zQz1FV`+tJ~Vy!^kRS<2D!FKwn;kbS0Z~Ys8mmKi9Kd6)^Fm(7BlCzr#3hMfrtLI-5 zn+D}T0;4h|GAt;qQ)U8Xb*8_k8zot#K^t^YCSg|;h%FIoL8}DYMF_zSW{&FdzYL9y zEvCQ*G79iYf^V;04SPUHdnI;iVrM*m@s#iV=#LP$iq#j~INE#=8W+KI*o&uV$aYI%W~9#b5> zMjIpb^)rYqXhYN`-b@G-;5@6R&-wgUKjZPE2lyCy^7IM8xBTG`zfG(!Xs<4~e)fd+ z`jU@+{SiO-(I4^YljlrkGhTc1P3}Ej{!;ugI;< z_bxEqJ|D-xFeE`Zi8`Hd=DT34JI89krL%|)yNmMew|cns{aAQxX!yB(U8r(=k5`H2 zO&`oFP|K~B9O>&=sAR=w%R~ANbmL~Z{B_<$f6M2BwXh$DEr?_`+D4u}8Q65gcY{z% z<<`eJEBa^l1bhhi5V2O_qsJBzzgQ8w226r^eCUW>Uj$RoN(ed8XXs;%`x}RBTWf=e zMZ|zaG?ElPlM?l66w1dzF#y4bE+xY>^|s{Slg8jOWXQsQDQ0nvu2 z5)^Nho4%NL#)MCClu8QZM~p<@iEL-)K5R-U}kvjjr)M6 zUak1<55B=SzVR9#{oSVsEz`2%m;dRnc<}I)Pgf0<`mcES;34&Di8-85UoZIZZ+}W# zFJ7^7Mswg!M@4c}|Jagv$wluFN3_mcmj1=je||WaLbbL&~wK5XKDKqjOOKj_%Zuk+u#yE}V###x;K1~r~9HpI# zpklVh`neo3#1(eD=a^SqjEKwWaiSyTyHbRh>J$O!$%N^{2S`y7qfV=6BZiIjAew?` zN@7u=$|<^AmEaf88RT4STyP9Z8;P`XP8?uMwx+! 
z&DcKDIAnATZBmAXquC)6VSaN%K&U2Djt&n|5-4mzL?K!yi$#;_43U^?gydDVQ78x) z2^1O%ZK+i9yBWzAh{VClK_?4KXDCm5uqdp`mQi1yEoNq;NwBW98lw%SsIY}amnEhw zDXJ=2Zd%aFV2wo=1=<+2(pXz^dU8ymbSnEELf(%8CXWN8oO(V&Xs&tk{(oYzI76C_ zFMj!Vl(Px3FmyLhIe-5bEWY@N?)n16BE;XrQe~+U#zl%LM z1)w=Q=db_8zheIEjHX`j?8O1AMxWS7jVp}2oY8`(WWivVgoH#VVTNYAG>#vrxr zVC*rh{B`_BYs+R03lDg5>^~w%Kwr!F$zl}ZfKjVzJ6oVF{MZql@vQgz@YQI zPW6aHQnFxW3_b^~>z>yh)B885l%7a~mz*3rY8)oTiG`4aUTFu5b!H0=W-D&yOSDe) z&Aj(H2sULKCnmCzmI$kJbnKw*s6T$6*~ux>M{ltB{8PgG47a>M)Rf}>eSEhdI`CmZ z42sF=yG%|V(Y6bWoics=T}oOGA3o&6fA_x;RyCjh;wR{XQzrHe&cA%Y$G`fFKl%M1 z@t6PnU-Rtgm&`cj!Gni%bwjwRdHT^O%&)GnlZv_7S__8Fjj_*B*3Ug3##!g&*6~9Lgejf+2omdI+W)^=(#TU5jco?shCsQah5IUh*q*c27Ev}qdId;EK{O$ZCl$sXp^YVE--V75X&XYjpsSaua(y=E)Ob;og;127{h?ob`B_vN zFM(T%UnKeTA~2m!S+y;z)rzvToF1J}DTDFJN26R;7&6OMj9Qm2R|)KllAt$b3`Q%o z7L-mQT3TmIHOKEri0A;K1!dARnJDNWbUt0n7$IssEVrN~6K(~i40w-oF{k&5NX}ZV zw8Dx_M3B&zp%AcZp1QqZ;yMNv@{DM@Tv*8zoNVpGYA&Ry~3 zr~iRBfAr_fyCw6pkI>Ax_uh}t$FH;e?C-ez@@FXqN?FXo5kV=;#Fdjvm~@ zmWGSp{FI9ie}?LASY4mmr6FMiG2j~?=m{^FnVfB&EV7Iej1-~UIPE$3l>Z11_t)Zwr{ksFbeA^ea{rmXw zc3&XRuRazirgs^35k7`f?kHTc^>LS@H?7G&t418UAeka2s;~FEW|$@K!aA$;_}#Bp z)%r8UZa@sQ5(+W6)e7Hq7;Ct`oTIHlTZ`y|s1>R#h_S==!VHMf;hW1;Z>Q_%T#I<0 zYACQS^`eUceQ=*@GQo$4cYzpuO2g#>5)x#kv;j$pM`O0Gvc8!zvX6Z%fQd0}$sGUy z03ZNKL_t&wjLr8xRDe!{Nm-;o-c?$M3xqo!PGw<=y5m6Kcc<&G~BmW^pG(}m^ zbRDg4G2NWA4}VEf8WxL7wDCj~<-rMVGE3G`b4Ab-;_8Ub&In?$$B*$|Q(twsS;z6a zKj89%pAmfrI41WWQ2Xaxytv?RfA({}`OahB`olkBGAViEoj3TKpZ%2f?27j0il%Eh zJUrs?-Z7tj@-g$P#gIMYF71w%QMS7!;*msq>^0CHI2ma;yBz0`Cg(8WHm;s(;q*pO zT>oLMwf75rhogO83o+(|l$lu-A%yXPH*_2^7yaWiE#*c?fbmRXycY&3vM&c)^J$v;Es07 zC$jao+g~QX#dy2|YYI28kzjP(mt7FsHVnEEme9Yen38TPQt<3}vs_y}G9126g_V0J z3^eDV2U%0!5Y)RQ@QyKw2%-W$`o!=Sh>~gG%NxqcA;rM~&3u8)7fNfC#AHzf zhZxE4X?-l&H)f*37A9pOuqh#HjFI3vy4DeM;#ltjeUycYf)_>9DG1mN6~I#VcbBb0 z=R9RmCJ0lhl5!$*X&qC9?s=E)M>c7(+oTdhC^s4uCXf1c_!nTBYNC}t(~Rm07b zC%pLVV~m~9)-}KS*)MTj&4X{g!*qH?b#TPN@l#d~nyX8?dQRw8{Q4I^=i~Q3WqEy* 
zl$ATSJ0B@IrX{G5N;nJ?I6F}D%o>O6ZOkr63u@!*G)r9;2;(G+GTLm$tzXc$H_i3N zGNy3%Yd*$xg4e)k!w(r-T|ODUbSRx^>O*d%M+}&HjfN$1(luW8j($2iz|yC3_kkq~pxb4p=3R9jtJU2*VLrfcNXL6P-~ig9xt^!Fzcf(-Mq7iQZf zF4p}FgeXjkW2*9qRkOq%970rdR~@GhU!w!MdX=spBUTxVwM0B_StBkYFtd>jKw&nWyB|1ht9%C#bl0P-Z_6S1CfNgSO(y@0jZjE{W ze8aok@~djC2d8~q*C-K=Pft@nL$ctMlEDu)d}b-CAVF+Gk8Y-HtxLb(Wf9R#6y>wFQA>5jLxq7$Pl!HUv5%oun~$zDt5b0&R$d;BW+73^?Zr zT_Ac-+a$JJP$ald45cwhNItdur$;QBmerzWzVbtCWY-0%sz_E_%;Vd39GHrlI>jdu zXzK-&vZ9_}&|W=Z{`?8$!?$>CdcaxKG|xW)@x*3{NQ1xlg1WhZVnR7NLQjt{C*MI| z{hXUm-{+%$_b>SNpZ<3gR#-0PeD%c{-}=GpJbv>LPoA97TwihV(feF{@j2#jf(@FV z{XakE=EWI0r1VU8ZFj6^wAmv4<~QE#N0Tn5Se=k(i`uh2_F#zYplaVPV7xnir~m91 z_Bi7Lq{$sfrmcsyb+PX`M+Q=e0eo_hrg}EF&wLsDGv0`I37T6mHCt_6izsPXNTT=> zc*!&Dy3VdXN`|*yJz3U$-z99)s$W|~@z!}`AIvQ>$FBSJJ?zhiB=vf-!Ja-2Pxgw< zZ~XK2TpCK#kNUAIEByL8IitspQYPC9Nn&8hXwAI5&yvk71)wdb4<2!T_7trQ%hd`m z$)WALkR0*i=%OQZHJx}0tCMrTEPxafAfw zyrdLFF|bJyG7maa(=!uq^gLr>yREqNl5Rt3uRA<&wb1A3j5A<$+2tcrs34k?Z>b_(^J zZZ&6Go)Ya52k*@Z8rszy=Mq7sP$|QQYi}R~riZUFee(w_fAe#E1Ks&^+QkJvM7qTS zSD$kB?2HhE`MhTS>(=6*-YiD2k8UzN#I`l9v zr!6=_MxQ=1V5mK#Mup8|pudFN`es1PavMV&yR44wSdoEE71u&+>^R=2B)i8_0U^lk zZ?{`71Yczxz)J2v;kMOshk)H(^I}`z?2G@}RBpD!(MZY4Ukr05_U8PaA>ZheB+FJZ{5bB< zE=48w*G&U}ElZ9bJRo?7TP=_f>AIHS6Z(4=Iwnux+< zv{0FXSy^&0ols6Iiqc?=Mp=s%jVD#jjdVn3@xH?oS#^=7X=y@-3xUoh^rdTS+SU^S zI3I{UqKr?8csrloZwM@wE4tP*olPi|qA-TTgBh3CH*|ij(1nz}HuFzN5Z)n&{s5J^tQ1 zl&A0DtRb`xr2-OKv??i!g5Vv#3(V&$E}lQ-`s_=bYw;q?PEUCH>ilozoA>z zG>hw3&V|0+9i~&H$B#Lgr>LO>c`9zzqr}-#k+okUmG;foiECoO=Dsk(=yq(*yC9hH z_q!lF^8I&GbjLT3eLlMJ2Apx_ejnQ7nC}@=+x^(3^$r%>c=wO_*o-5)oJm<^EeK`D zUbnHi#uU2^No9zV+~d2qg!Q-#+iiv;`{26G>=`K+mr}BqoDjmG>u=s^1e5Kv3itT{ z@5)+y*>U6N*oJk-f8Xr%=-R#C@YX%z`f=6zt6P`ahLT4bRv$~ZEkJjT!SJax)%2Lv z;taZ)(0STU@|P-6XgoRuL>vTzHYV9o-lKfLnuu}{Ekb1r4yKmr zq$Dbj&Vf&D+t9TwD<4@kj=E`RW27Z=cC%!$s9CK#mWvfWMuLQO=9ZwTn~u(Tyz^*n zp-cOvmSm*{BC+w*?KP8C$HXSudS&2XQgXFSezO=;^4Rpa$pTILoT4ZSh$Omy2>8`C z`rv>lo_PKlHy{2NX5X0-ePHt7Ehgt*5zanA`G)#(LEA2ovS8jVpt{fDgU3j5OkAFE 
zb#uk}XP@$|KlyK&JE1r_!5rNqXv^h`r`$aIg4lH^ZFu9Y$6R0Bu)4WIx)%R60$$;+ z!YKA-zmxO34_4bK6EGZ`eo~Q~W)Z|E)2QDb6^Spfo~~n)T{eCPc5Y^4Ys7oMG1>Q( zMen!Feeok>C~aLKwcob;K8>T}$H#15XIC5hwS4f|&Cr*qPVo!F<~_dCCi=B$Y4ya0 zEI&ifoQ>m@o12#3u=mBZh@%PwxecL>&$-o!jpm2r`cub3t!HSxEGxzB`~G%qTgdJ6 zC+)w$NUH`mYAe9j5X6Bhzk6&_>43h4%>E;v5<}dW6a6v!=BMPCn4*|rY(cx2)7CfG zN`o~B2GIsx6rc@_YdJWW(g}1;o2(nHi5@x+MRmyOqk<6HWTgYj7F4B9xzO~4wuyL2 zIq}M}pqL)g*b5fdm-ud#*L5P5WG`B>Y?R7=sWmjdZ58jT_m#*3k)xw2nvfvE5Q!pm zF=D&|(F6e(CB;W6@XDj2MM7ey8J)^E#O%M(I%VxsJrz54Xf%aYC~GJSL(vERx|XZu z4SpeXtD4X@EIQBAi%Z%lX>7a5)AMV(&fz-8a@pW~&ai+d3TS0n)+@YEt67NXoNrI5 zaG3RF~h<#UQW1n*_ z#iwxgHaAZ{Lro5Ft4myWjrATCT{0GwrYtL1Ub4EfSX&VR@8IhE8Bf1_#^k{xOgSMc zA*jG&zCbw$K2jbZ@y5H~qFsHMNF1vcZ!EOyG9|BSccyj+7y{ff`afDEGI+An+)|9U z=$QWyWlPL#t+_sudlxV^HtV;Yp1axxG(++~Fk0|0YdzzypF-Q6ja#1~WEK^>bJ30a zqDE(qOGs?6?Lt6loq~c#$HWkaH2xtbT6Q17A4kUo*2zGHraE9M{C#$<)xs%=yE^cGH=rGC~?*I#e27y2e_Q+m8lbmV@J zduzE5bnNHK#+>P)U6Qh2ON_iir$AVf%9uAzjSqqIvuCtkC`992$MtH(lbah{NPd|`8}PnBfMw@dMMsQ* z)pEhISs^;H_)`~VX5F9~3Yl~JT+PAR0 z#9-<2GO8~ZtmZf9(+4b8OX@CQwa{L@z|H4)Exhyn-=jg(2BDfBpl8QCI6Y!@dBx4M zFZV9Dal2y)2^ny;*uzXh3g{cLe)=uYkK+vUgbZ*?_^1(0s#h>$gy7fHS?k^Ylramb z@545Ep>Hw9ArD@Xcl+U*xFhhix02Klq?7xPB82R~mNi7Vs|}hjYpUx`pGp>kDf&MB z92mhme&k1qYwK+DUV)7(4-BtOf#3)1be&2l9rki*bQ^M8pSkAB?`BPHx8Icz+I8Js z7REjwP2YOxEQClzrMzah4voV*OhdTNo3OLVCd`|l?)06~qjW*+9*>Zz;X-DHxdh1t zt{^3mw5n>g5vGwpeXZWx(Wlp(E%R zQ92O3M~4C>HqQk`lq`(9K#)iX9xVo2TFOweShaah3kL@W)ODQ|Pr+s36e75;ODK!$ zSS)I0rw?hCXSC-Zr8&~pnAszAd7ox}ma<_eP#T(cMU+dd2~6)lX8Gh3EMUYlu?A^t zu0Q<*j3u-ymcRL&*wnbgBVPaJx4CINXBSsYA3f%K-~Jx)=!j=O{p;VcFk17uP+733 z2r{*~*ICQ5p{Xq&%=V_T!G0RSb+EbF?4(L(!7*+d*nYn- zw&1#6pyh+s=l1S{9X_JI4I4}jsO+8bamydE_CGP0qfw3GvPO@xi#4~`vf0q=8LX-y zx+v_9`4~f9Vb@sh>_F1`zpjvZ6|->X-Fm;^9xM4(mh-M-QVPW6cT4XpgVIA~)9ov* z=ca6U>P8UMun0!aNYXXZ{J!mT?E5&pgE5&ygrcf2N+FVS#Ce^S>;~^vX~XMkx}dXG zt(i;;8WBuU5`BSJ4j(kiIVJ}NZA+rHsP2fit%-D$s=`O1ja`bra&=bh3=}0X28;^H zSK%T;BIJzotdA9uadwU|#NNL{tcl%RSZa{(50oQCKT0#${4iOD6KJCSlrwYwIZk-lum*3fl?P>Jc}0U 
zt7}3`W7|4M&B_q4DSsREA-@$s?e}JBQDPH z8k}pG6&3d22&XiqvPfAG{R$r(U%dZwij(_@(TJ*;&JIu_6c68Ia&*AAzWY6{7LlX- zkLXrc*rNlan$fL(*DzfWe2fE$a)5y1n!ed`-ZqaXdwO-)wqBrkv$KKbQ#uyLK!MuP?QCBQlVoQ;3{QJ%BWpyykAl% zg~xDkFk^abSzcXn)^_;AV;Y~xAS+i8#1KUjX9lG$#smthD2zghL7OrOS`Chr!9oXx z3oT7!=(-NyLE9zl&V`Q7Cx)CAQuWuE)>K_DxPN@a{Axwi{T7px1jkf9NQox870vY- zv(rZ`u4~X0<-<3b+O4XG5u*3ZpM6ERc+SE7 z2efUZt|qwphS&b^pYZ70@A2k)?{fLePcV}isxW!X1y;=+Wcej}{u!h;j-*5X(cKy8F_R*>P)(fj`|JBQ&y79R?ueooD^>Sy|CZ84S zTcyT#)cX+F2(sK3R0EBDn?l=LRQo#C%Yc+SdHf{~I#LM;Mp<4#PQ{mi~rd zRf@t`in3(UblKR~NuWhTC@isU@#X-1^Z*?rZnb2&s-cL8?h!-5<_PFl)XXzD{Uhyo75^F8ddxG~^YqCDp zXTQjmk+n{HQ0z0AN1S&_anOQN9lDKJ5mZR*UrkEYR1}uUv|>6fnI0aX4-Lw9`1zdJ zf)9p?g02vhiF8YcRt{5;u3c&L$sxx_huo|h>QzncJg9=83t~|r)=+py6+NX9Okoiz z5YgDGAXr7`BRUG5?^x9(b=^`gI*fHRO-s;8Kq3N4VU(tIo+4-Ra9u}H6x3Py3c({i zBi1KousETN0adU>LCKSd%*W#Mrb(~R1lP#m@gI2F| ziuwfN4O1;|y6LyuC|QEtj4`)-3U}3L@7h9KC(rD>-`IoqZDubkfAP}xlD#oz@`lQv zy_Jz}6Oa*OE5(<5joXeaGuc&j@RfbsgeQSD%PTv4iE)5u@6w;KA94G}oActf!+#&^ zZl3_!C&2E~cAb}eb%8!hX-ssm!5g;G<6g3?gB3{dT!0)A-wPZWosyFcrPZV$_OReE zCTh2jO%nJeF=gML%uZM}&ymnUgeaC^BD(Z=(b)1BTONYW~ z1=XZNYlCY$j99D@f_R9LPOrn@6Ne;`CxY{-a;r}Mqlia&kLiR~J5a)efG!keRZ&d~ z%4tD$FlBZ$!5$lYC%8t_)Gh9&BN~m&8{*0lf)IQpHeGT@VWi20m@CldC&+*F@KD69>^Gz1Z z1ASa|1u1fuyKJ_7u|wOi-sXK|=eg<2B#e4W^4>vRGql$OL>&1T!?1wI@3^;l)*~as z_w>&n*M7oYc}uZ;`5L)js+ghV1-@B8F++?+L=nWI3WX}lv|hDARTE?dv8(A8&v8bf zwV~7%kw{n1Y3mCLbpp0XV7!XxXb6&nG(>?IloZJ)6f=WObp+oeDIqcCTg%U>Fg_WfNde7!QQciq9kn)2rn+CBFmrH>tB0lE8+K_~XmjEH+>nr>ul@+0F5D)G33$C9`sisq^gL||} z5j#g*EhucHk%*q&r)aNf7cc0^WlifixHrWf9`NAXKVWur!fOxi@#TlVW_f*?;NF#| zZfk5=pzYnW_kRsxB$-&b-oVzpDb}kjq+f2QL+vNwW4l@-)qZ`^uk>9*f#UjXEo zN_&@b4li#>Aa7WIKIgmZg;P$0v@aHB*QU8E!@#be2PGhG7V8`PZ#H1QEn}vyn(DnD zVq>-wl8&3|qn!I|g`^$#5a}Apdi9uw(dORw`Bm?tuT!-vRn)$5z1`xF=wa8h?%Gth zsUNpc(9mBwwQW(m{k;o`F_zgY8`oVk?y`^7rb2G*Vood%LFF_z87}v2li*=(P{jnJ z3!23_(q0pd!6U31kG3=BXP*nAs71#F%b)* zWUXS8#L%}jX(5eqWedu(NUS!k>73)>V8(1(VwDi^w64Q@4LTB|;FO~a0Uwim)H$}% 
z1(!04i>K?7peU;X4oZr$;^1(~;o+3o=@HY(6g5?-svvd|cO0o=N87%@UDqs@E9Ne6 zwW{%L+6#QwqNB$a1!5GoEQmV$rHmmK8f5}5)CiW=DO4K>u0xcfa~&Z-=UQS^_%`{4 zqChAsq|lUB{eQH*S+gX^mFM^KEtbr@OVzCc3I%Wh2sSrxZ?<($k4ZB<(__}sjHy48 z^eObBXPL&-FM6C8jYhIjuiTq#0we*VNi0=$>u$Nk=I%!i9+4TDH*eJ~5OSdG<`Nm< z;oSVRWzAULuLS9s#0@BB)jY`SN z`N7L%`2i9;+UlGuWx4y{v*dRk5`9av_$`Yk26y*!WW`a+Z!+bENLN9%yrgI2?>+&DO>qm2lLax3S!Q^Vn{? z@uPjQhJEKB=PiAbc*MxTwehqdTXlkzQftV1N?cRIhs7Tk^BjnBDboTEG_!ck#03ZNKL_t(ynaz%P za&?Jr7X)R%73dI>rmkxwM51wMXH(KAsd};mPd$GO)dHIx65176F+*#EggWuR#lFB_ z^oRv335wiw3u4!#=NxKGOcX(bs|b<)#eM;vo`+zJ!8u2k736t>b1SXL4<-riX4Cx_ zKGKCq@CCkW&|O33J)QTP%~(R8hZR6YjE+cf=%^^1{H?>V~FS(R#sG zk+zP+Hh@h}I|5M)3WtOQWD*HPvqU>hUN{bpPjHixG6rg?k+!37mMd)t5)*N)B!k>I zhe($|A4+WszpG@)YLl+pwk?G*c;$%mGgR~x(<1<-1@39Ao(iD{vle@w7b zY;j0Fy-TQOG*>BBXcTBQ@cY#)KmGvq`Ok6s@j1(7Ls?D|K*Vaosv>IL*Ydhud$Hnr z$%8-`5Jr%+s2e8T*5)$&9X7)~eOx1EZb+8gWyjt+-wo|WESz0e-ECr6+YsxWO>ZZ7 ztyPaTl5NfVrW})9MubmlMef^-$D6l;b$Ji1(uot8<#I%c^pLY{ zvp}{jW07^<^e4K%F=TjXgK{!v=XmZIu3akq(;Vjqu|d=Ao$&h>g4+4Z0Q!+3#I!k` zrr%gqk6hjIJ@f|W#xG$2@&aWvp;$t8`y2qr`Gz&$1T(MkMoUK}#2r(uG3ztDzY7V9a{$Pq^1sF|NFRAAh zezD|ov0&A-T-Ghi6;yr5B{N`WIob3O6I*Q8qLoiImc(G3qRDc)4yH$UFhxcPC3r!r zh_xAnRbrFy8nJ!BL>1P4tAYAhNMMGh@oDKvN4vV9xN}128k7-=!xPl>0FxgkN+~-5 zRVH_6p;4xQ>=1=POCrFHqVVk!Vqkgsgs_|=`3#8>H!YL3Y6K^ODNFLa+_O;jw-*vd z(l3}zbbxhCVPg@f(KqW~UEjxsIK+-M^P<}Rvjok)4eEJq4Q0N;{dMj5(Q;qgg2SdH zqYZcvD0(FDL^j&-vyI8kZ+_1t(Op7W*nS4<&sBOIC1RdgZu~jd6Im1GZsx~^Eqpw- zyVl0DKqmXf;YN4f8g{*TeWPF<=}M8f^*of)JbU;z%nG}06O&VYrPghz*A#c5=15XA8Y8E4r6M(PS9ddsa6p!E=r0Qg@&jN=nOFiqb(8ws;dy4h(kl~ z8Vo`bh^j+;O}IS6cT0#Jf=zH}oh&#dB%!qKre-KPvv-JlkV=+2T;>vJ!3##`WTrrO zk~I1*SrkzxS7WC8R96Er>kA1wOH);ZRiNvHE-2bgAgsuYLda5FDTnO9;Ecj4Ay*oe zXBeGfTSwM-!fJ`CI<$|3uBK`m=5tv1MEdM3O#4TyLy8lHRZVCbmgiSgmo-)8dAbZN zI;aH7vSi-&h_VtmZ+Q9Wm>45hmmi_&a8*ZXV47))yrpV7syZ+|xr53I$kPL{87N)# zikl$@jc)_K^K01qz;CGlmt{Q^yafV&b%B}A38E;c#{^dpeWb1`0*d_ZD@ib^lBgXa z!r{>?RFCeWtEU7Mu?KYHT}>&4hi|h-(XJ^BA{< 
zmQvSqkkaSZ?{i4h)oS4XV}0VJH0aH9>bo^uHibUbC@ULKqsqg(_j`+BP9;e|?Wumb z6{Aucnw_y*Z|nFdXq{q5IxlFdjv_YbkQ4i2J~1YGA0-}XYY4HY0y1l2(UjNgaBhOr z8N`mL3@RE#0uoxf)j7@bDLQzJ(TGiOa8M|TM6FU;2ErE5LaW^}Fhk#akcp&1oxbVl zqGDQRgwWyZhORRxt!Oof7K~FEEKX~*31B=fQ+Vw#DwPNKK3Q#T7w{cqwncozYK_(r zS7v180oEn3gwZ+5I64(*y9SySv8`C1opX6{%2gMrA~XRy4{ZmXf>w7l-6hMj3W8?Q zG|cCos)ma`$MfN5?sD|aiVL-g?S(Gfv}C%^ju?%biee9RRvYa++K z#kPUAjkIk|K08F`4$&@IQwqFDVy2Nm#1KS!L1YLa<@HF6gy6Ad32i{fkm7q`g;Itn zmc^3~sTXI=Pk%?}8?xCk!DSegCq<=O5JX{JNm$Y6xIt4bs21l;M7a9+eN+~x<_j=6 z<{-h_yUIgc&^4cSo)L;3lbrGbR1e<-hzWJIUk%!5*_-dvo1yf;C*3Ufq4ZwJYS`m; ziXzu`l}`k+h&!*PPvjjXn~pld5bo`H6GxEw_(B49pD_A;Kq6yii(R+nvjmn_eGprz zQKjA_p_2iiuNfu-dXIj>U+?!iVlo1L?0^T3W#8Hd$Y2E)qhj7nF#}B0+gns4$&H++^+Qh>;YJ&>B<@eYuG7ZK|kD)~M*|q9EGRH61>Bf{JviFDD;0QCe)~(aMm!jNA#LJj!MGXy{Z# zY(Oi)sDM|n>fpTUIH|e>%MKAkPBu9t%W`s8VpWE*CBCru`~n*^GH>Y4PU)(SY-*V) zg6mWed9t|0h8aIl~vL|A!PcCZ8-@U_=o;`J}R&Vw($MKgWCZ=Zh5@7}vgij8mh z>5u*)lZ>!j(li0B1NCY}SFI?j3OzYU`$Oo^#NLJL3C0q20xpCYA^1_% z=0qJPfVNqx^cxbW$QC(1G!(uf4~k{Gz*>cv0^hC(VMQ^!OViaf^K+E6v=VUw)<74b zX)bWNW4W>v)jbaG-e*3)AmULzPxZDI6`im5u7!Q=h23;|y)wq%sNw(ZO=@TDsorFe z(O0AFft_RfbFQr?+4}ZbB_?jl&L55&7!K4BQyBj?{c!KOZ`#yvW1(!$uD0c}+Qb5e z%~kqf@Zc(Y(YBAG|E9i%v`uBL zu^m=2P#TFI-ON*5#FdE4(ivle4lRK$ZH5sfEbyU4+Z=5i5(9xmLF4QcCXwi?ejww} z8FW;A9819TRc}QR`)aJB6115J5y82XtZI!xJB1H|4-GNY_}F1W+m}MuR6eq-6Z+IB z$fr=al9>e)Jw;KIIgK&_d;$guiMlFwFkb{7oh^~3B~uy3W>{TfeIRp!o0Md;W7-g@ zmKS9CgiJJ{4jh&TFfFn3B`dRHl|`y$%c=rtQyd7A|EB?JAOk)ReDQw4og$}bDzbde zXYZfz!9RmcGdMa%mPMZMuGgwIE}6LVwD2GU~Vwd;0dg zHr{x~?8I;#)Q~=tWCF|R{;s({pWkl7Mt)PuepffsM~=TY@tMNx;~U5>d`eLYbd!w z_wGC}MS;m3-KrzTE(vvQ5$WN*s@I%{x>ce!zzn0%SfbHj3otHiVzI-=1|=4(LxmDV z(5f%j5<{}(wICsFfC>-;VaSu}y^X`&Xh0c*c#lzuZMte&VhltH;JcJ)?F`xB5wY%Q zt2qFjK)nEekn!3V|A3$W^vAf%n&X^=x|8He>mr0Ujg@cVvTa$kLI?^QHO6;XYcM7! 
z%Lo=nok`m=y)#4QQ%Y^I-N#%8;qYEb(|ML3UofAy)BzfS7}C9EzzK@GGfPmO zWxZnl=zZGD3l8oc@$mC+^1*xW!K4AJ(hq$LG4k5yUgqxclst!c=P(Hgt#Aj9Sa;B+ zcb$O;ZuQ#zsYT@tEc5DdpikvZF(y|CFKKV@-ZY zdficKpV>AGQr3QXy$%KsVy?G#^jP~#7wE-Sa*-rw|Ju!Sn@w{Z16r3}=-Y6mUFMSC zHwS9p?{6(bKlx>!+itS*o>fO@Y$-jfUDNW%e!cX_oVVDJ;>V> zn5;yZ9K@uoe@Sr46l)rCw-WKOmV}BF6%l;WkYi$tHp(P3h`5Gz%SaGO?^5wGtVgkH z6VtVqRD4cp&B5^j6z~Q_sQil!q_BMCmkk zI%8r!%hh7e#rzDxlNpB!8WUow&jeU)F@1=m@q&t*@iE5Wy-$oOD$TjJX_(zTVfm;+ zhd_OGPBuG1#Xw#d4({BgXzn6Frcp@$+OdOmISy#-o^02`y1vb?lYHYkWW*q-uAk?IUT)p*X6}BH=QKXm z_U%f4Y9Y&JtkmT?(NPJY4Px?iX`6$ja@9VxXf0qIm>FVw_-(tS^({sxt~0IDcq*mP zXo6Cyw1Z0IpISn{a0~{6BH|HgfXX~MFnwp>g{3i*?*IOmEhlnDBzGOr4dKd+kRDdt@5mi?hHAOp* zrbUGnRuoZ#bq4Dq*`eh?%_t5JkSY-G9gzEm&K3B>j8Zk^xx+lU;8@?G>pG@Z^Web) z?!NL8<>4{SVo7LfbY|(KLaUm4_h!8D%3ThR??T)1{%_u6RWH;1`-ln&B0rf;!C9hs z+T|72XiS!)@(CRZjiJa3z|*0@dz8{xlh)TojRV~5 zguEgduWr8 zo6%t<1^a`yX}&}fYeT3YR7ewuwnS7%6oqjH2`2HiMMsPZqb){RR7f?4l!+)~&_SW7 z`g8WynPv;YsED8~2!t3wDWVrLqnVYH#OtO!MjCSC`0T@1c(1Bxs|w0Vy6up-_>@Er zR69fH8tC6{^fBjN!O6Xe)%+B#;B3eTVZkwsW)yDmR$pjES6(=XdC|rpXuF zdvG6BFIhDUn(i1|958)&!tCxGzie4tF36+i#5pph@NJ8oICSCYqEH@Au$jZ!1B@$} zKRG3?D(=OK%P^;jk**P<*R-)kFccV$X9sjyhH5%u>rpDAtwD4Et+2}C`^gY`QrWQw zBhqs`O2ovDZn;2ZGeW(>>4450xI9&;tuDzXCD;Nj25gSfis0LIg8tU5jzR(_!D?}Z zwT7n5@tvVrEl_}uHL=}|#Xh^;N*KjH%AS&3xpA%C;tjW>mHL~8umvzmG? 
zoOZ}FT9;ht(P8WU^yR&W)95k!Yxr#R{x(|en&@-fx#40A5-s~E> z#yBz5bQ+(F>oE0C!-^W8Z(p+RKG)vfB*+_2uE(=~=ezNXVeX_LdWt^?5HQi)!n1b6 zgqE8KtzcutKWiL!1>OGR_a%2<@81nAEhA#osj_Hp@r?i}&Y)a@b_FU%Bs74H6t_|eWwK-e>9qEe5EBre;vgv#g`sL@LOqV}DV2>; z#w0qR!HuU9RGAnk$}H7tayiyI%7X)D(<7GW7pb;Rs8JzcyM`>ZqUu|igt5Ifqo#pV+%Cgl~o4s4*a4&Ah71C{wh|-035bs@_d&mJ(nJ*-b9bD*|E%BaZF^wiH?@7qx$!w} z$)Ra9vCm0ifIO!!K9#mx>H}^TLc8r*iNiNO#x1LPAEm+Wk5wA$(WbA)b#(Upe3OuQ z^Z07jmoL8&40Icp-3@bW4>Iw#ak-nip#bA@P;;bR5_Q1o3}rHqC0$qH%mk&1l(%CP zgay)7pc_o+5_L;xz^_uGb5^8r=$E|dbBm%RZaJe!pa)t62r3c{eSLKL`(ph@NVptl z93ggzwb~jcvqL7+1F~sB8v=1rLu{~4VKc>p+~6J@VuC|SvW{ZtP%*HaUlQt;Viviq zmUN9K7@^YOg6H1x3CH&iaR((fvsh9QI-Qls^q4?LtS>R4qbz2K&QoHr(O63Ky@=M4 z{PZbx9f{g9?`k>$Gs(%b0$nw@AP61S*Weqvx<;D}lUv9g+Ef@(NHnyu!w9Hlhl!Dx zY1Ax3J4Q!s1`I*HL2Sd1HCvwQ7*ltII>yY*AX?Iz^h??uHk z7D&Cdv<|M`5XXXK;jAI_!{5>E>=Tq@_6u3RXOF9CWW9~rO=G`1RySIiY>(CR1o;hO zX4t*f_cDyZ`qSYW!yII=i3VA$K|%`9tDYoaeL=OJcWLJB2*@=mX8p*AP zczANa?65=|g+XJLO=w=1Op(ym=(a+)E1b)i6$gp(#;?c^4k?Nw+~kx$a_BXMIwRK#m1SI2bCmCJnWO7Ev~_DGc5C|+Ffq_YpCJ1&(pGb_ z?2xuy;L0hkm{8A8F);&g@)h3_lzMiq(t{{mo0 zZOaH$FhFK*CgvQy94)eRcyF<&c*D;0{Prw5p?wAX1`YH@N?ZJ1O}4kZvEyQ&+iF1i zMcB4(hQnmk-<>^c-v%%7W?;uBY0pY9-WSK)8$-M4?a zD?(L4sc_{TP#HF61QCLTQgSRB9W=69A%&xAJnG^K>suNkzHpRxj{%LkTp^tYqsgox z)GdM{n@+H^5|`&Jt2td1bMJAU^I+1hKj1=ursbloSt z)8ifObFhwX;}Y3T1pTzyO>q>Z6zIO>^7`QCoNVL*xi({V+OsygXC)BB24(n31lo|4 zckP;+t8M=5xURHP$=uO>iR*oRm)QHu6Ng?(86DqDUvq0D6jdMieC8BT{U$2w-L&sw z5=7fWeUm$Gzt9=1pj}02eDAkzF>TpOBHB5MqdUDY^0d`CP6>FQz-M)Z?*yE|WH~W( zJuG`zM}3jMeyv;a3AhwJiX5#Xs8lgknV77Akn&u#!~_c$>7U~;-Xme(D^f-qFouZ6 zxdNjUv@KXqs}xqiio$qJXf=)Q2-=frjnS4;S&Gb(9o@xDEzx(B7b`?vAj=99Bl*m5 zr!Y*#ku`>Vl4=^6vZvN@j&5;9tXC+3=yO7)FcX8Glo%XM(~uF-rO?G0S_Cy$n8slj z9o75qq1p!JCP}!5KyZfq{s~rTx_U+Q5i0`~(G1aws1(+MTV3F*hCC{CW@(kiTZ^No zIq37TLX2cZK~NRBvmE3Ts?`!fljjACDskUQh-6ttj1k|jg7vnbIz$`9NN=zPirEoY zkAFv4JRvMCNQWl$aW#d}B@#VRMnXko?O`&;B$ulag0JX05(#fF6<#Da6JDT%#M&eO z`x+5y@H)luN7pa-%r!ZVCyaZyX=4F&TY<7~BUror(gC+R&-2c?+XjlI@VsLOKaoxr 
z&oz(H`1RfydV9FgQ1%>7U<*-fz#XP3~LY?Ps_?!%m=iU+I(E0oWd$xT>Dn5) zn(pcmh+5Yt!q*Tr!7EhPVPZ;LO$uSMBZpN5dgK;O39re%@)(t>|DuE_A(cY_G#wjP zzO0qY!8nxYQ7Dv-IHf5vo7{o4bZw35VuFxMBv*oS1$hQy0(zQCO=RVSVlpF}9YAT( zN)XN{+6M0|7NM9}rbW)gS#oPBCo^1LV51_-GBggYBdyUyN^}m^#ANXSMp+6S(Bd(9 zN#;t-#6qD_^9myic=(XjQJw)vT~+GLpsAwjJ5wAn7Gh$Mj>v9?S_6Gt_PM4;;e%JlPC0a0k4 zeuQ)fxIBY~zFdyFQHk8wZcUWB&tvo=ZM{DvxAWG|g)XMJti)vou?1FX2m$W}4TV1iJh>7-4lyKKVoYv1JR;t_2)9pqJV59)~4p|2jhz4}u4Tc_iP zdSz`(Grz#AlmP9FLt^6N+$9vasl9N_Xw#(^TXE~7W71aFk&(VKkUnj z-e}~h1xDECmXz4jS8chR_5oY2Dhyi^ zYxkWNLzYMyhzx9#Y1l0(uSlZwR_o?rsDA2dz}gI>6XSxxBff`|45<}S0FBk4bpU5^ zvqMxiK`DcXqH8^EQ#?@D;n(x+9pVP4CG8GbdGZwh0A6R=d?{txO{>X7U!lE zMMkhKG0V`CDK-XjZHXF8o`G_>k3MGc@e>vo3qrTVIfHE^!P}3H5UseXD_RK{ZCKVT zLhQJI=N?W}a(k{-Qq~MMF~)VeNAgl=v`WkqDhSe5grG=9j3b(qzoo4M5pKgmDW@gVvLuAa)jTmy zr|`8Va|WB|REre@Y}*(`S!C2zOBWPHS)hnCb&XbVba24r;E)glbx1B(%j?uD@K3u21NQJEve zCRs^hP*Pw_L8Jo`bxpc=ot~WtDrSJ&7*xG)R1l&uX;Y7qTs&Ky1bRL}mAHh`m<36N z{H#mCdLqs$Or|NyjAAxRCC`0=BzIj)7naa9cu};~oXQK5X9$sK5)a++Y|7EWglRdU zbQ5MS$C;w10rDsjf(I>z*5(9dP~spcr1M0rFp^<>Om4NJ#My!v0$HBnj|=RgplWK& z;Q{5*G0oW-^UE{**~etZFC)_f+|eN_)qcQQhjEI@bcSRlM8~~Xawd20aA$GVmwPa% zR)LSS%Zfl`Wi44|IGW73IG=NvIbJSHo?Km#$Dzl&zT3JLC_<;Cs<7wzUea1PgwS6tN2vP8{Lr_SJ7_AWk-N3x?0&Cz_ zl&VEu{Ey{iny5i!Sfb#MUw^c_Ue)NI zj6c{cZml)WSvudM*NSfHmD+ey5fNOLk!2ZmTM>Lh5PkjYZ}IQ`{LlIIuYN^S*S$ut z-Cld`HD3GdXLFFunduG!q-}~P8c>g!=BSG<{FTKeheeb(W$_aPx zo?w*W@#$lX)_mp5U*l@$TwO3!|&Ye4a{xhFradpM2 zsCa=Ew8BXrr;f>c{r(P_XFP2;5 zIxVOC@%O&Rd;k1C0B?Qes~pT`e02Hn*f--0g-+Jq7 z{Mow@D^}pucy%VMf zQzp}r^RtU>%e8OGb;#Bn*?cZIYsvGR<#LG-dm%73AP2#^Z7ppxc5h9}Yl3EL{_o7y zv?fNvN49^n8%r9FTf0eP#9E6|k?QIkbYHPkEMyj=HA)Fsjj|I|UoOI^L>`LEl5Q&v z5ufKGkdq>9{9UL${JfJ9Y^4#HU zhE@(0BC)BF80pZ6NmiZE^VTWBn8c2)l*MK_N*lC*(GKk#E-SFMASP?N)Gy2*h$ z3qvGvSf$R%weNtnd*)hx45WO5-lsV>bRzv`GTu<)4gW?WaZ4EX?vJ&FHg zFT?RkMmx$`#uwgroe%=w`ObH)Z3xmMOD2;E-+AjB{L#0+%O8ICyL|DBU&J{FfOC!q zuRK`a)^sxA>FGH?``It}r(gV>FMjz=ilRV(gTq4(4iA}MUGeju{fzm1jsW-W-Q(W< 
z`~2VsKj0@n`3Y^?^107{4rlGC&w@nA^O85;e4W4i%YR2^6{lxsM5j5qbB8xR^ErO> z;~(+;?|+~7e)B$m@-O~$JrKkFSZ8_o;6Xps!ok4-)7gxOV3gwc_?SQWlRxI+s}K3Z z?|zr#6I8 zkUx0+3rw};TW@`xNKD$EwtVjMuk-b9e3jQ9mbC-v(&fROTRbblr+> zwV?F@uQgQ=I%8>#q4f~FhBor@-9zpk9xPVNnP z3nr#gE>VTv4V+lMb8B%yVM4@Z1xioIVnJc1O!H%=lVc8!?=!o5mxFuvxc}O#Of!Y` z7nC+~GMSMD!4a|InW{*>TH)G?yi80JrOq&3h)*x5AANvYEqi7L$IKO!%1~qlO2N^c zW9k@D)-ov^lgy%e_J_6&y^tB^UL=Nqgrr zE3mzLt}G{rM7pj+BvoNB%Ho{EYJ=9QSMEX&WVl_sC4iw{87c_~z0U0z9r6i~X3(!e z886e+*7D`nF2|jC&fr3eBu!A*UfjEH)v%aVGBG6bl{f!@*B-vYM;|}tTi^N?|Md3T zeDv|hTS83hgg9C(mOOd-lp@QxcmJgn2U9&zL!|`FsM8`HVixop{JN|`jLfgDxSG$m zXg!QEXa${bh*8n`jt~OD`(%!Z3_0`vS^5_v?`SREJr(eIzJHL8&6eCQo18X$Za*ok(a_@w%eeEqi{P05-ixqEu z?aMrT?K3?7;3NL%yWivOci-dm>?uks-udOPI61z{8=wCIfA{^rXH_-nxrxxVEoV=j zvRW?t0DUU%w)Ugn05iQFGEC0;Dv3O*(VfG7|{;NC3fjR!AreB_wS9C=}rThJJEFqo(bTA{7O_WBVgDsF)J7Vn1-ZKiz=xhG20(E5-ez`~21a{#Tryo%6;QzQAW* zeU(RjP*?_^tO%>R=H2(-*s;+tEm6yqilBQ{R z=bc~CwJp{dnmUnOzWU$+FQ1(7>v!Mf;{2S`(^D=k&iKPW{2p!FaxgpO$3OmYg5VD0 zENK!nbtFu^-~7fm_{_ssdHVR2)6=K?_R)uY@7v#@?OKkH@9;N&^Vc9k zrVZcy&bPU^xZwEkh@b!bXAJSE`Fu{>DlC>50~hBPyz}-u{O0|4IXivID-T~G#(;B< zlamwvx4-%;PEVim2VeRU_wV25onO5V68hLh8vdXD^r!skzx)@J(+MEF{r20`b%oY2 zoz3`P|Koq;?ChMce)TJS_TfXGoIb&KEssBX#4q1{m&tUB(Hd^DfIV4qAF10=4`HnVZ- z4c3ClIzdXSQAgMSa1{C)^~?4(hvU+(q6TAAEI@?_F(AsKOok2-33dOw07|1mMC%R} zyFN*>?~gX4oUF9=qUsqebeG~R3PfpCOr8a;1W9OyRBB>KFh=9M0L9?!i*&I?M5987 z#3YmiFe=h1PgI6dJ1`pWGzx>&7Huucq^fT|MuKn16})=yE{C%c<1EG+vb-cO3aprf zC=`p1ptMObpti+jmbMXm)e?e7OMzHL)D5ixr!~eXoOM{^ATz`+)q+wn5SKMZdpyF? 
z;axl)$qXlxjOYVJXVN~TEJu^Ow9AN8D~$4(STVClG^L?wG)<;4r6Y7TYT4jB&%Ew< zbb5-NOgYX9r0sADCb|32l|iWoZzbCF?lC;lUwSt0fF60KMfOgN%SyX$BfJeg{PLtaj@> zBcKods}XMF`EA$)27mZ97u`)v2;*4Dx)U2e{4@JG{C+^GJ@Z$4&Z851_TPX1ADAx} z1cY~g{cDP%xW?W9@WBTk@aWMaeAf{};HN+N39Ho#i9%h~{O#ZVO)4rE;r;i2gNlK2 zddQO}Pk85@cY5MO#XE1m&4(X;*asbjs;W?0Q&kl|_`y#&JUYS{!(#CcSC^N`0N)wU zj~_ka^yyQ&R`}^penFO5jM0SX`TqC6&%L|H7~}ZmJ8yG#afwJ|v0CwWfA{xH%aZrn zcYD`aiUs`gSHDK%Kos7${Oo5x!+XzfKl}h=3};t!+Ag7Re)z*5aJ83Y9ooaO-4Mp z`XZt8C>o4*5VEv+yPU|1p;)-0cTH+gp-FT=p}nD#JL5K7wqKe;>XX)+YO(8s`q%@K z1g?@KgjI}52n}w2HJmuP%d~YEmk?-~XgXyOt8gYIntI>#&r3L(&Uo$RSIJcl48_+8S|?YN!Z^!WzTD(IJ-~eVANvq&SlFV1_7hi`ZEpJ?QW$3Zo=d zhYJWd<(_Q)VkhThU*hF9J=z;NxoM^q&0ly$GuMb>wu zrKhv+!Kz9p56#o7i**iA>pksa0XHZ9%|!g@0u5KdQ@S3S*&P*n}T`|v~V z+-Z68_%R^_tV(;qYgI1|w4fB@XebyUT!`}Zat7WUn&R>S#-w?uG%hR~N)*vgl ztgk^fDG;p*q0hkzHFOT`I+V@mi*v8@`)pAXl}Qf1&=3bClp3tZi0;ciK;ofO(Lt(S zL>gC(Q9>%{mr%4(XZ=RB352_TT`Ext?9jWAVxJ?EYUe~Wfe-^$gEAK7G$zj|rZY_D z5M%H$;8!hO>?G0_dzHGY&J+__m`{fvK@{(W53UVq4nA z6GdaP3?-hUqeH@Cg?2e&GlJ4MGbOZvay6$qH#9-0g6FqqXRMkQ7c{=AdF_>Z+@BWI z%@W_Xtm>Lf8#-bSfA1}0r6H#H&475-8fYR8Mcb^fdB*Esd5hov=C}Ot|M^eo<`Qjf z^12yP?JA`?I=ajG({n=Sv1`Wjn98J+QY!*s~J_lk2{ECH?B(ws(ZBw_8?H&~flc_Zv!rsA_ayFMjZGOTW?gi^&*7r`5d+6CSk5 z+dvolGoVNaL6M#=!!z)p1 zLXSiPkDv(aIkxUAJAr>#m+ek*0=40`8jk71je|x`KfY@#PqRjC4bC!RVgU|p=Gop^ z6Vv9Fh!Er^x$k~=evGN|9{NAd$4_5R@e8L zbFRmJopXM_-+lIjD|90b-Ix%mn1Bz2m>61!JQ_t1h?Q9Wp-K5FTT-T?iB*bPs3cWJ zEQk?HglG^8Oau`Xf+6bImp99AkXP7~gTZYqugAPCG^JGU5P(V9pAdk#TP7{_aVk!$}Wen_*pazPVZU zSvHV7P-Z`7uNSlqOg$<|7Rhuz9uW`8X>=KhWJ*AJ)RK5zUz4e5MR2B2VQRrx&3j5o zeX4`ZHyCBFc80=f^ww-Xr<|1Y^feNZkfw3{^4D z6i0=wok}QtPi#A?!jY0t*peU-S5>&#g0d`#=cm}xW2*(l$uUtZzVE3omK?24SQigz z*Dv$1Pu%YUJvFZHdHb90aksW~O-CCIkK0JoduG;Q3e7uG#(9S;s!95RQgg*5#33m* zc*M=v6enDym-y*_@goRpWIm^GGmLC0Tt!`0xT>N#J4Ip|Negy0)MQ8fH7>8g2YS2q zWJ#~)$}jnzy9IQeb~jx-x0RMQ%LteV+Z|?iu`Y1(PdLxi~3m?k=C3viYe; zOqTdwpQi%gN#MVM>3W)Ne~7u@+IH33zBdxngwsR)eUiQoO+<7Po`w`p6c01@3qZ#A 
zw12<-_jT>t8%?&W_U}z&G^IGYzCAf8cA5QP^eT9JnPD*+h|oo7Be4j$c?}{217=oYkB>;Ez@3TuGhI@?Hs^3d#Z(_KUW?0}_YL!{ zBXvEoPjstDy;>5BlFC&`8L8Y%S>j!(KBpA0u_r}?5H(pCi`asr=iWj!KVoe@itB2` z)kt}Skf@ErFJ|~zNp-ZqElVCgx=%EYwynN9SqhV1M^xh+4 zN5NpNA;w)8epOZYjFym8u_#jF#gBZH=KL||4?c*i3f$3KpiK~33@|&M6VlGQ(x8Za z-brJPmgjKFv6)P~yb;{kUKazyA?m1gwiA4=jJ zCdV*#mpRXd{vNve6(;RlyuTrw{nCfm_B-ZqW8knMVy3?urj)2x{EL{Y6Z?rb_oS3QoRfz62`L(o1Jh??9hcI9-D%0%tv$CX%9bL49 zxW&pHj1aWvx*3Vn*R3^JalGJw@RNZpP-}BQ~#| zQ`RqIj#nH#`z*&#JxyPhxUy!sx`(whT;CzKBjKs4xfaChdfhK#g|(6Hd_%c+11XAa@=n@Zm!za(SY+B7lDy?I+ za3qO@&SQ+r3H%DWJ7=)gkVNOY$rMraYc~Jxrx6SiAR0qeozVBMVCgl$!NA#OLx=%H z;eAs$s_ICs>l6TGiub{1=7uqg^Hy{GmSx9v!VLiohuUokHE-JNF4OHOJdmTBE;*Ek zd<<)LV#~5q(pA_PoXPc5CXix6eTg<}W7-@Bm-8rOG%u&h>}^XVjO{>f)8LCRX(xzo zY-YDf-_!=;(ImCTAy}@Gw;pPFM(pl1 zz*O1d+$U%?Ts2wtfJ0tuu?$DL19N|{q4$C%MAeMOwRL}a9d2?UsY99RZU$wmQ@8t(Tc+AT8%ysx(@HvGKwjZyr*yu zv2fmODNDn#pVRq3Aks<;eIWIYm3tEsh2XdNeuL>7QfdiFuvI}3msG2om8~hA;q2iV zMZKbUcFFnatDL_035u6q!r#A7c>a0ndv{o!tZ0&_sP8Z<3@J2NlW5CGSegmm4?D%fe}ZIL32ioElVQ001BWNklA*A#h@DJHDK;*3fo%<%C$>~kG&QQuKm@^|YH?b!flh=N}NHSnj_NxLWk_cc6gt=zl(_fnBLJXd& zs@0#Ali;0G8mqFZsLF!ES*&q5XDN(Te@@Cdbvg4tX4oi(KxrMm^ITYAUX^URo~CQ) z{ES(EE>uXrr8|9v*j_;25;j{%o=AhEpr~dPwxlRaih6+~;z%@IpqKP#a}F_{7<-E7HNr3ruRG^bzTp83 zucJNbf`Z8+&CrKT>leeGLThEvrUnpicbw#iIT`6@i}A!uk*ENVKybghc#Q@qNQ{`w z)-Z0ulL#rj{qFDSGfpjuE7sB^|6_<=6Fp$J9ayUs5-05t?aOszHofO{%;AU5d!ySv z58V*eIruJKTaewRUwcPPgq}X?uFB;VkCfL*R0#b@~P*zID5eQ!6yjuK0ZQkOO8W;Wkc`h%x5)KIU|Tr z&Q@Rqu>n(6xN=T372+fPcEhZm6GeIay!TjJkm#r@$K8_?Y78^q@aoo66$UmD-}X3X zxl_zIUvDu6N-NZb<$T@HrwFdZxssGRM0ORfMZ(_vGDf3=OKY_ntPv7cixAEVrTL16 zkT$wn^7M{qG-U8Sk~$KR*yp<4x$L-d4ZOiHUGDa+FFLf>BandIE|OrcfRY@rho&%8 z{x{_R${xs;7p+lz!6uEHnauB3EMIUekwhX^f|38{Q~^ z^Q0Eq*ylkr(wFiEbmY>{D_;y_ap|3ZDz_X2>tsAHQ~3;UXov>XJ7?uYa_JJeULZ}c zdFXxL^d7H%ryz9_p{mNsSWU-7z+@KZ3=wI!YBc2>20L0d&=4{zk5PCThT2a zW84Gc#~;%dbB>;UEA94z&1Q`}I){2j>9o3SkrEPmVx5>B&+%=GwSj0YDa}C)si-Iy z$N07H=LjNXW5gggj29(Wjrfm;Of@494mp}%H#Y+i)MYR&fPMmG8$4r~D9 
zCXsemxg5y%#6b+hbw&Ga^53r+z;I39rh@0P&*)dW4e*Pk9gY2gxPdpk*_>Z$?Oz14XvKsZ=%Z`5+*X$v`=H2Q)iX6J|kuQG+2xB1MBW32QaKMUv8BT~S|+GTg_M@G)wa zOrh&52`Q)rW(BY@E0<0oIV{*xOCCh6qmxq5p9VVdI4{^%7wn@xQEYpx-BQFzwW*oa z1=YMJZnQ>EQJ2IJ!6Z88sBMXpHBD9FW|sQtdn{&0*yWNsbgb4L{?s$CSJ?TAJ{{ zR7WQiXC1DJ6asY;s>;!?J6vI@E62L&=~Do47%b7myeAm!5bljyLz1l*K~qe~v!Sd? zs^tn}Av6urcB7S(M1~9#r3hkB#x?)bTIHxTWeL_$xRNL>vHukoMsJi>K5q32+>e^u zp8YTTCQ~Af+sh6V(|>UkPD9QFjG3^xYUL0#Fc_J7%`sAS3~msvLMqq6X?w>^%4jDB zUG{eS^L6B&hyH#e+9BraYnx1~&+f)(9FI?>ds7$8rE86;9g6aVfeYN2~o3L88;Pdq;Ns=Zc!e@d?|Di`=&XO3v)~CSqLF0-po?JbCG$+kS*tyS=hISXuXd)&Wf}V_8UlB`LprvI* zh)FaZBN}XxDVt0p^#LCuG3Yx8eI$jLOL;riZBI-cQ3Chtxvs0w;hP0V%aY|1ipnWt zh}Vh$wkk1&CUsiz*d`IzTSQviy}R6b`wQH=^AxibnXi@v+tbD^v*m)u&k5Ux_2vxw z*b)7b<-Ecaj-s4niiW1DFcxY7Qxv44#H50dJhSDJ!dRk9NV52@rKk*DlaQ*!m2+Hg zC`OeDJkS14D zX6Yvg*tpW+>GiW<{w1d+%@5kn3MZu1iyTaHq z%5p0ST%iT{Qp#b%nK0I<<%Nxyq!69OWtvQfNF*8qg{3h0?MOxjjPf>$Bqje9s{mm# z6h`%K1X7H|5Hs_K5IjV2IO_( zzBcgq(J8M!z93#)FgtBHTGSlBX~mm<{X2Qn+diFpC(n?!5wqP;bpcx}=&Qt|i#0I> z?8y<~sM4G)ZwP%yAW<()h}$)@r{*Mah;K*^WnJ*1WxQ=9rj8&z+xC>udrVyrQy}(R zT+)zg$@`H=V%v5sstS^_AcQV3D`tqOI&QkC7Lb}*IiO{~1j-O)heVPwxU$3)B_{S7 z#FZ<}jJ>M{qWsuj^|*a z{t~rB+GA39W7@u8ygj_T4>ezvbarywfx8LRWldTh6MPQ6o?bmkPShMGqY{7WC|HHz zQbLBXT^uHdOsV{FNUn@&OsOzl42a1BCaF>~);tq%cDELdksQ&bsRNRdQVmI_U&>>Z z$YpAx=D78~BZLeyr9=$Sgbguuln#Q8gdRHY*|vf9sHc|5u`?Xo3OBb@MZkuD-!?ot z-SSf(f5#QN1&*giVPE@lV?k}PMB z&M87-8CQsTM4CtDeZW-}U~nc-x`>y6lf-u0k!->Ri)k;2;R2sJy!0y2iL~bpeM+QO zOGBj65gqgdEy->T^SYwx6EQ?8VCE_=VvEHp#Fs?F?DPB1#0>j^FEPjIS0$mWu|@LALMhd5NP>CH^K#J-jNnJe);Oe-T1HTK}Pm_~16w2Cd zJws^ZE*LLey2|SkfqPBwVbv+7u`vgI7&lzunvZlNQ@y}}q}vM`rhw{xGimZ!nrzRP zL)IC9xdHsVezs1(v0SoV-;j1)RGeWfv%v0sr*wIQ4voasHuwNw;;R##Kh`;xt7B3U zwvS)Nx{72i&J+rLiH2)SGD8}sY9u78gMMi zn(*Ku4?h0$ESD!##uNH2U5ZEn-Z^~gNmxSY@O{u^-w;U2ki_C+rwrZJAkNVGp4IV^ zzKImh;d?(8yt77$k_`r9_w&}AF$&`)p{Q5b;Z{i9YO=!g;OsIxVALX>oK-Qxp!HblD8r)^3UCh$d$lc@=W4qm;;H z&@Bl|G`mPUH*6NDrbiO0SwTIYT}jmKeWK|)Bo3g@&UcbT_N)tq3-+6cP!$#Pd5xJQ 
zjST`!_X1_~_HhFC-=ghXR))o5Mb~fCnSAT3sHJJps!jQBnWhEOS5f; zTi&{l#u?`GIZfL^*s)Bgz!pkZP*oL8)9iA>l9n^br7u(V!)=Jm{fU7nMhNU|CJ0VMIre_feO7$RzF&3DJEiewi_e#Xy(5q z5l93P`V{GdCrZdE)w+&Ci1-NNBOxS~C8QK6VuNc9C5Ce>4~q?TGp7^J$6h?+esi<# z-T|Kk+E+SE|1z{a_ZBlwo_?NUIVaVyxHBikr)f89{OLn39z7<+L?kj}K?L0Dgj$4I zS+e$l^OrtBzun-TeGAN1gb=V*LEH9VEy-A-Nd&Rr96m%4t9ekS$5_j8&2#8rb3tfgMw#m<(*^#z3yD!0JI9x1gvh3`8k z3yOM0>@HNP5cLxds&o{=CPB==Uw8Zc>Gs0NB5XL z?yQSTw;z&FB|~TV-6=yu6jcllAS%E43x6Yj@DKh0sLoO<%~L{G)$_AYM&nVTASs_?e&mFyH#E-^xob zzjAvs_)_4*bI&}(U;PW;!gqbo_wa*1{7s+3G`{19{_lUrU;As{L6qAU*-F64>V*IPPyBKI`gis{}BC%^qa|LuJHxBm?upPt_0`MDJaD3V4;?=Mvoxe5jD)`dKQfwu398m@V{?;=83 zmdsWQHtVhWVax)o(}+Q9Fs1~PeLhCZBv@OFF&UOfluoa&Jw`}G0UIJEmLM|vcXv^u z(|!PwFt*V8H#?UtVwAAhNF+uTNP=h*XM_+EeTYP_g(W3FXA&XGE=@xKywDDCR39it zUC1zZ5x4C*ZF}xFdiNPn<>#t_IZ#zll$O*b%1kVBsk&u1i)+kbu>qV(Q$bm6Ln@F=c z^_@G2jrhL7ni9e3F=$1DAg~IayCTY%{)@L^71~+Tzm;LG<|=V-&=sX?0h_=f3l^`L%C*JAdul zzn#A8Nl9?d^1=%*@Q44*rbp(Z5eu@Kejog^Z{yWaWP zyyG38#h?2N-;zr}Sgcb`Qn0-A%6%@@YkjU;j^prHDJ9C%@eN=1yZNFo{!J`Tj`+!+ zejhJ?^5wnSQq$uvH=7~M^Yxk^`|+RPV;}o@M1;?I*SmPnSAGS5`CGq@P1g`o$A^FJ z=ecd30gUk?%;!sf&+mE<-~SJPkdJ)qBe_$%_wnhU{%QQqFaI+B_TTw#9-o~}-t+JP zCWk&wt}IvI=b`pm9Cc3yXC7+%b8%Wn>EO}V-eVd$XePf85a6Uf6PDTW(}eyL7#}h7 z0d`XF92OOB)xcPV`l4vPtD(GH zaDmzw>eBKSNmNN#)dj1nz*)nhDmj|Zu=5!qc{Zo#^k*AF2Q*-g;m)&fV)fjoQ9pGL zo6c$1r@ZvOpW^7bH?dfhT-Z*NMT>$iL|lSyy&(pJbp_SY9qjQ5?WSepJynq?N;^hry+^zI^MJ?)%|en>k&jxMnSU^mG7FRIrZU>e2D-3yZ#QRXG8rW#lXM&d%l;y^__p6Km6bRH~bHO_Ak);UNfL2 z?}x)XGP`UKj37~~!GzfJp^tuu|MhSDZ}^}Vg~3piN(vicEIVO#JJ8KTGvp#6eAe6F z#_#`q-^8E!)Bh7+`whRBFZ+@&<~#oOcaNQu{Sy!7L7C{exCs2i|LcdbfcKs+`I6tt zmweH0;je$^cd>0Z>O0tT)?f1;MHkIb)*vRpB(3X}Pt|cSBx9(Z!x^XwtEH?}O-9Cd z$mlz?X6Hl|@?82=h%m!pC2pE3aFzB*e_E^I(@Kh<51-Nw4H$@W%h{4U&%c$&FMR^U z;fkdO6qo|6Rmx$f#n+rINFkEM;H+-yDJesV@x!g(R)HgLE4aSh8o9UD4_65*K zVLqF2_wF$>A240RtX^<;F=vM6RxOH(qAuzChK)QTlnO^aeR9I*e*VA5v+wvUW+yA^ zS&1p1V*ZYI(YJx#H!R=!JTiMscfMiUxA@8tIw+lEUM#Scq3<@N=m=2|V=0m$_K}zp 
z{ko->PJj0S-vyd&&&Ky`+Madm*@nPH=)pPWrQuQ2A{eUD(KZcrp$lgdI#pm}#2HIK z$HNWm^9~|`YH^o(d53nhg*H%C6)vIGlubqGQ!a}ReY?f?TO`E1SK58#$f#(FfGtZ* zl#!$Gwb~OD*VB;aD`cT&n3lVCS#vy;ka_E}3FGxLtaa^vCI^M$^?5Rv@iz?&kfaNF z3Zq<=?`j3WZm zolx^W?wEn6@5yFI{teS=fE8|1a1P~0+*0P>svWd8^Lx9(R6gnFRVelFd$`<;F*&LY zOtoZn_f4E_w%DS^P-g#)MwseqHXsSb zz<2{~!lpn`&!`ti6tfwnNz99ad0A2i!H0&Udrwn4$DKDn$5U^5mS>)Op88}(J##n$ zZnh*GzmjM@`^-{PFb6-R`w*-f%MD@4daEskh*b70RGYL)rH*&1hO_yQ!{4x+!MFw&@- z;if}0#&z(*RM1R0+osEl1CcWl0 zWZ7NfI)ih!fMMsDDEOB@@&DofNaQo$@tOSWM?b8P(KMWQgj4ca)*?PauBY^l&wLxd z_v^oglyr3=w}0n%^62alfA{Zx zFTdwKU&R-E;TQ7Hf9(Gm1<186Z*0@YFey)Bo!)Q(x40txE~A z@A;~C^RCbR?403W;G;$Weg5bD`^;BME%T95Rx1XOPoW`T0%fM%-}$Jkj1}q~83g%s zGu4fM0%Lv(lIc**sex@9*=MtUtvX=(^-jrAMak~!c@mJ3#k6@^$%@hv z%F56_I;HD-jB|tll!mZvwVJFG+SDOwNtFf$1V;)9>*ka*$JzP}?>#;xn%1+u*b;*f z`b0>HWF0ZW%;k!+DUd9h7@Xo*@mnh%qiBzSbsD_lj4F`j@`$RcIJfiZp-u4-v!3%GBGk@S4zm8cwXS?nB)BoLj`RcFxMjkzWbSo6=s+0UuzsxX_yK1dd zY*~*ndnU&;hhd<5`jRjDjePcJy;C#f3rCC|W8kfC`81Y`1-ts&1M`3|I9zqMCWa(X zksU+CJ}Ws)1$RnX0$u%p`wGpVh&_pAzq`-j<28pS?T43}T_0bIxOG}mY3OJEcDQ@i zuN0=Wg_c(7+s8;1u*~UWkBxzpB1KHt&=68U!~X?~S8F$^kQva0NSeGP2IyKe8cVD|NJxk~^doDD##vn9)R68yf|e-{X`F5tF%;HO&kV*z zssQtv@Ia*b$76r@WnyOqu619qsPKfgb zwpo+<7AqB|T;Zjs?;S_aK1J{z*A@7XXgBIdvPLjIU=0M(%Cehg12}A1VbDCHKtPPa zcZsI$m64*-9Y>6{9xB^pD)JJzneRJy| zt0xmiNu{8*hlx0!WN}`$l&-tZu*n%y4xe1Q(#=k?T{(fPOajxhB$1O|?EzYteRFiD zFbh6t!kLq))@(RAIpNO938$~#$Hg7}kI0VFC{2V$O#4p$k9_3k`CVWC`)J#2I-3IT zRamCUrh2GNpL6>A^UpoYH+|FBBQfxszVM6qg8w*Soncm$eA+uclmFzmegWV6eLt`l zw{eIH=d!R%!*dNarch!~&GMgW^t;V<1Fkbc2VtY|eL_OZ5*uX`MTK*{^K?7){Yt(lWJd_#&Z=#O7k6l*I^0*=@ z(qseK`Sxbt7F$xFn=3PWmRASb&^Hry;$eFUFC>+kH;t)*0 z7^kHWqAIA-=(t*xDAd&zGgmU7*DU4>s(DGWfm8;p7^+lL)-$!f`<4sokXem$C!E|p zrYZ_jb3t+m>%{4xVE_Oi07*naRBEc~7;7rdU;Zdn7je%%OWC(c*JA{E^k^k*N2moNUq}O*-xXG*K8lYN@Xou05=f}&OuNX1{(q?B;4{I zUEg7PuLwj!BQJ{?sj6_c!c|ME#WCUR5x0$_ePYXqbh99NJ#FeBzgkmgCkU)PwD|YS zEbQf2yhfJ$bSX`?fyw{%D7!CIKJkqzJga?e2MqH?*mGoA-iWC z_=E;Rat-yhJ6=r-V&TDqGn%%+U|21VurAjh$yw~is*`cAPkSSip&m&%Yxv5q{Bq7N 
z)_lc(`TP0BPrgdmo>Ny9MN#rU{;Bu!P2cnl{K!xIOJ4ot{e$9t6g~sqFM`Er|3x>e zC_=OuO5v){Y3S?p9gpviVWS%=zB;x(@OXW$UP}QaVao#FM@-DleWT-%vJYb4N)slk z0A{beM&fF!R?6^F!k9Gr?xxnz)j)!q{7?HZ^-vmopk~?a?E5Oup(|b=b!QkN>(XEq zOs=>SNcjn);9RM93LV%2E1LM2k|#;7OT=KY+p%BMdkz9=0xU8RaNg8HOvFNAok5U_ zh$eL!o0aYCgG#~>0;VN7K&)1U9sbjICY09Dr|h?|NueX>z$u$RS=6|BO){d4-w}G> z<5Nv%ELBy|6WKK9cnh;=*_=D>e)(et9PoH3$ip~bSfbkKDi3{J55Slh%k|jjX%+?ycQ`e*J$bNC{Fzd{iG?l(7(`F(}`lL={9Cu@++}<_qR0&(rtk%q={4E7Jpa>O6t_kI;8ObfFs$<$>cJpMIv|lxJK(FO{Vo0F7 zWg9n$0S+5a*2T3;>)2gXaq8?fF^4w<{Yn>LT~(Z%oB+jW0^hlJhcA2gf5z|r`uFhO z_x>p!o?ehJh!M&`vo-3|NEQ$+zpQYaj9ce&b;R4>{sP8W?%lmdRaYc|?Pg2cb-euQ ztF%qKH*S~V(qVIV2HyT@Z{-jE{{Nak|CjzUANbjyrEN2{yHPFbhky7-`RcFwYQFf3 zehc68-G6@+62s=Zr|phT;a|#?IEVfo5=28gTvv$E`Wb)5EWl&h zn_yS0u0w$d*KYE{08yI!K1BxDa__3yI!su%6EG555+)1CwEqI!aE*aLRqr(fAPmXn z`N^;A;9l#!P?d(-2}3I&}NA>3Ig9a7Tp=W=d&=50&*_JVj0p{tnnbG-A2bCjWDeSS{uR#*Wag;as9 zEwO9pH!U$3IuGkE(DfaqFOkAQVe!7hM}v=PL`=j+@CgZ8#z8G1oevnv#qdC!P|ok* z?mR~ffv{~TV^YeYq&~s&>8DuTyF=Tw_^zYPT3g|?pwa*xZPCob7{W`ww+rAAS zRbwm4k~>F76s6@a{?Gpl|M;K%^PPYgG!=VE%S^h=EvnH52PsD0{Or?w|KI-}PEPK! 
zTI{q7L3rimm-#c_@~!;9Kl&kJ-{xnV>{MbH_%0E_8OQJV(sy%qcER`mqkoKuFsluT zS|Jc4{Njr*0`Omb{WtOt{?8wz>keFZIwrFJCl*fbp5%zCDReVfJ}Ll)G2AH&`?EFW zghny2ZCXvN6Q$J`z$Hsc3N3X_J8I>l1;a!bIH&N0u{Um*(?Eq?g#Ug@!8DNIGBPj~ z*u!TBqc{adQmIReR73S|vgGA3LVe{qc6~P!KbZ_pJQ5g#5jQz6M^O&@zR8fF8%l}~ zWVd-7CFJv_5mkrVB~U@1HR`5Sxe`PnL~CXA{a$6l*Pla*h`dijgRyxvJH`0a3mZ)sJ|(QLDRZ&{$kRCq@{KEWOh#2qyNJvCHF$SU-irEZFkD2C*9?#da( zY=J3yqQAIteoWgF3!}^0_igfk4h1+|u2^|pd%ZB=;2zioH|Ua7pWC3}PkkQ~crPDK zhJ`No&=|u%`KLdsk$0}j=Om1AJbd^nKlT3i@pB*f7+o86fHiCgi`7ukqu2pZwNl2=exc?=F;y|bs+hKI037I zMAzl8Dr96?P^)JrkoLwBwKBEByCd2(I`fS&l!YdmxuPKW4vZ#_x}s3}BGD9tB#v#< zaJIhS-qDQ8dyEl+725ThXBJCLd&Va|^blJ)V$rc#N1l4>X=d>lVbnH>m)2_D^red~OVY@ZXI5y%)W`=}w3ab@$gHOcJ zBRSdG7)$f`5k={Uy++hYiUgA|E@@bKQ7HSjGj!+o*`7bT^>lq=Vf3l^h;DKQztL@n zMVstc1!G;t!^c%W_GM7UthLGscUkkFVE@)yKKQ{8^T7{(c<*^KdQFHC!;rVJGr){t zsKY%rF&ds~3kyC{h~Z~{=I8jCfBQiI%Ch9>Xocw$ZSr&>@a`}DPx)=X?MtX@N3$Nc z8r?pGG13W8r*%yH#E<{K7z64lFi3`7B&oo9e75F?e&k2^4gcZivs$fq@ZiBNvDxfH zrs}U6ED_z%JkW)nmtT6BzHc#?u;T09{dK;suG#YUzV8Rd@f*%@`C1=;@e_Rf#ZT;6 zBhxoFz-tLcrQ_;dE`1_$(4sly-+gbCD#GC{V}@YlRRKE;D2&iBvO3 z*KR;ON5=~`ttWh^y}jV>*;}}CvLqQtRcxti$D&HaAh6wX+67YSuzpME zTD8PniJKk6ofFzuU%@(IR+sb-4gLKKWtOl?SuES0=%aGG6;2^H?-3kbAMnwr%Od4> zYd~;BWAIMTEsSC2N`keBvqW$4B%0o1XEO>{BcUaDkEs?kr;l>Rh+3CEcw$lpb8Ady z@J^W2({FddBm>;-ijtGN&jN;IEvb3T=`Va3o`B!+mldXaJ(W>!(_Y&;(}izfbgYxL zZD-7tZRs){%y9hB4p|At4Al7QB%kWB7LcjWAy>m3qo>Ry3T)8m^Dlkh^Z4N@Atjm| z*8G8=`2b)0weMj*n=!JoiIB2DbU7+;*n9?S9bp$FG3lQ{2qEz3(IZkyoE#lpo7Zx1 z3`D~_H`@&%PrlcahjZOIw^dxG*2um!bqL=&e6Pzv0)vIUvjXfv!EsXr*$@G?GeIVT zWYUePAiObr+zgh>WR8x1UKP$}68JN2?%4{zdi5QAx)(i{A-T&;^*1%S^mE!$(n6M{ zb=eo3wRazKI-ZOQo-sBvd}kUh8Q@p}6A?6YIRuYL)aX-ZGXq31S`Wr5jgU8<${Knp zsG>4M3O$vXLsinJpbX)@qb@8>pRiq`_dV5e!TncX<@B*(_22<_@7<%EIi5pkw;Rf+ zfdnj9#4d1gddkuntR%X}4YTL(;fhFEIHW9yJ|H2|U2KU`D)iNQEw(3-SXfe7kY+Wp z6?}+v!P7@AESN}$g+QiOicV?GT!q*5S7^?)0F^xF+^mckZVF&%}a#W^$JE%S9Jw}PQ+N%94yx1?1KlBv)Gmd`eWd#N}P?`DlGo1YT&jjZvXBDn66v@&wJ+`j!F=CUBMUa4)0&#|rdiogYy{GNH 
z@`aANLqwtwI3`ppd_Bk8n%+a-Gz9;c+9is`g4Mkf3h6Q1GuCZOwd^pVqbw3dS<>gw zcx!Yv=pg5OqQt1&8EYtAO`km0IJ&;2TCV8M9?+eiag)+_s6CM|8Wg$ZXDG4hUBw`5 z+OLoXxu={U^f*aI-)WoU5>>&;JLtf5%02z?7F?j9}s z*L3VIMb=@m7cvX0EB?2^wO~AT?kK