Compare commits


3 Commits

Author      SHA1        Message                   Date
sijie.sun   ba3f36d22b  remove lock on pipelines  2025-07-25 10:46:06 +08:00
sijie.sun   78d8848ede  fix cond of rpc encrypt   2025-07-25 09:13:42 +08:00
sijie.sun   601a0bf719  remove lock of routes     2025-07-25 09:11:05 +08:00
178 changed files with 1892 additions and 6057 deletions

View File

@@ -19,10 +19,6 @@ SYSROOT = "/usr/local/ohos-sdk/linux/native/sysroot"
linker = "aarch64-unknown-linux-musl-gcc" linker = "aarch64-unknown-linux-musl-gcc"
rustflags = ["-C", "target-feature=+crt-static"] rustflags = ["-C", "target-feature=+crt-static"]
[target.riscv64gc-unknown-linux-musl]
linker = "riscv64-unknown-linux-musl-gcc"
rustflags = ["-C", "target-feature=+crt-static"]
[target.'cfg(all(windows, target_env = "msvc"))'] [target.'cfg(all(windows, target_env = "msvc"))']
rustflags = ["-C", "target-feature=+crt-static"] rustflags = ["-C", "target-feature=+crt-static"]

View File

@@ -10,24 +10,8 @@ RUN ARTIFACT_ARCH=""; \
ARTIFACT_ARCH="x86_64"; \ ARTIFACT_ARCH="x86_64"; \
elif [ "$TARGETPLATFORM" = "linux/arm64" ]; then \ elif [ "$TARGETPLATFORM" = "linux/arm64" ]; then \
ARTIFACT_ARCH="aarch64"; \ ARTIFACT_ARCH="aarch64"; \
elif [ "$TARGETPLATFORM" = "linux/riscv64" ]; then \
ARTIFACT_ARCH="riscv64"; \
elif [ "$TARGETPLATFORM" = "linux/mips" ]; then \
ARTIFACT_ARCH="mips"; \
elif [ "$TARGETPLATFORM" = "linux/mipsel" ]; then \
ARTIFACT_ARCH="mipsel"; \
elif [ "$TARGETPLATFORM" = "linux/arm/v7" ]; then \
ARTIFACT_ARCH="armv7hf"; \
elif [ "$TARGETPLATFORM" = "linux/arm/v6" ]; then \
ARTIFACT_ARCH="armhf"; \
elif [ "$TARGETPLATFORM" = "linux/arm/v5" ]; then \
ARTIFACT_ARCH="arm"; \
elif [ "$TARGETPLATFORM" = "linux/arm" ]; then \
ARTIFACT_ARCH="armv7"; \
elif [ "$TARGETPLATFORM" = "linux/loong64" ]; then \
ARTIFACT_ARCH="loongarch64"; \
else \ else \
echo "Unsupported architecture: $TARGETPLATFORM"; \ echo "Unsupported architecture: $TARGETARCH"; \
exit 1; \ exit 1; \
fi; \ fi; \
cp /tmp/artifacts/easytier-linux-${ARTIFACT_ARCH}/* /tmp/output; cp /tmp/artifacts/easytier-linux-${ARTIFACT_ARCH}/* /tmp/output;

View File

@@ -83,9 +83,6 @@ jobs:
- TARGET: x86_64-unknown-linux-musl - TARGET: x86_64-unknown-linux-musl
OS: ubuntu-22.04 OS: ubuntu-22.04
ARTIFACT_NAME: linux-x86_64 ARTIFACT_NAME: linux-x86_64
- TARGET: riscv64gc-unknown-linux-musl
OS: ubuntu-22.04
ARTIFACT_NAME: linux-riscv64
- TARGET: mips-unknown-linux-musl - TARGET: mips-unknown-linux-musl
OS: ubuntu-22.04 OS: ubuntu-22.04
ARTIFACT_NAME: linux-mips ARTIFACT_NAME: linux-mips
@@ -192,8 +189,6 @@ jobs:
if [[ $OS =~ ^windows.*$ ]]; then if [[ $OS =~ ^windows.*$ ]]; then
SUFFIX=.exe SUFFIX=.exe
CORE_FEATURES="--features=mimalloc" CORE_FEATURES="--features=mimalloc"
elif [[ $TARGET =~ ^riscv64.*$ ]]; then
CORE_FEATURES="--features=mimalloc"
else else
CORE_FEATURES="--features=jemalloc" CORE_FEATURES="--features=jemalloc"
fi fi
@@ -229,8 +224,8 @@ jobs:
rustup set auto-self-update disable rustup set auto-self-update disable
rustup install 1.89 rustup install 1.87
rustup default 1.89 rustup default 1.87
export CC=clang export CC=clang
export CXX=clang++ export CXX=clang++
@@ -260,7 +255,7 @@ jobs:
TAG=$GITHUB_SHA TAG=$GITHUB_SHA
fi fi
if [[ $OS =~ ^ubuntu.*$ && ! $TARGET =~ ^.*freebsd$ && ! $TARGET =~ ^loongarch.*$ && ! $TARGET =~ ^riscv64.*$ ]]; then if [[ $OS =~ ^ubuntu.*$ && ! $TARGET =~ ^.*freebsd$ && ! $TARGET =~ ^loongarch.*$ ]]; then
UPX_VERSION=4.2.4 UPX_VERSION=4.2.4
curl -L https://github.com/upx/upx/releases/download/v${UPX_VERSION}/upx-${UPX_VERSION}-amd64_linux.tar.xz -s | tar xJvf - curl -L https://github.com/upx/upx/releases/download/v${UPX_VERSION}/upx-${UPX_VERSION}-amd64_linux.tar.xz -s | tar xJvf -
cp upx-${UPX_VERSION}-amd64_linux/upx . cp upx-${UPX_VERSION}-amd64_linux/upx .

View File

@@ -11,18 +11,13 @@ on:
image_tag: image_tag:
description: 'Tag for this image build' description: 'Tag for this image build'
type: string type: string
default: 'v2.4.2' default: 'v2.4.0'
required: true required: true
mark_latest: mark_latest:
description: 'Mark this image as latest' description: 'Mark this image as latest'
type: boolean type: boolean
default: false default: false
required: true required: true
mark_unstable:
description: 'Mark this image as unstable'
type: boolean
default: false
required: true
jobs: jobs:
docker: docker:
@@ -32,13 +27,6 @@ jobs:
- -
name: Checkout name: Checkout
uses: actions/checkout@v4 uses: actions/checkout@v4
-
name: Validate inputs
run: |
if [[ "${{ inputs.mark_latest }}" == "true" && "${{ inputs.mark_unstable }}" == "true" ]]; then
echo "Error: mark_latest and mark_unstable cannot both be true"
exit 1
fi
- -
name: Set up QEMU name: Set up QEMU
uses: docker/setup-qemu-action@v3 uses: docker/setup-qemu-action@v3
@@ -68,36 +56,14 @@ jobs:
- name: List files - name: List files
run: | run: |
ls -l -R . ls -l -R .
- name: Prepare Docker tags
id: tags
run: |
# Base tags with version
DOCKERHUB_TAGS="easytier/easytier:${{ inputs.image_tag }}"
GHCR_TAGS="ghcr.io/easytier/easytier:${{ inputs.image_tag }}"
# Add latest tags if requested
if [[ "${{ inputs.mark_latest }}" == "true" ]]; then
DOCKERHUB_TAGS="${DOCKERHUB_TAGS},easytier/easytier:latest"
GHCR_TAGS="${GHCR_TAGS},ghcr.io/easytier/easytier:latest"
fi
# Add unstable tags if requested
if [[ "${{ inputs.mark_unstable }}" == "true" ]]; then
DOCKERHUB_TAGS="${DOCKERHUB_TAGS},easytier/easytier:unstable"
GHCR_TAGS="${GHCR_TAGS},ghcr.io/easytier/easytier:unstable"
fi
# Combine all tags
ALL_TAGS="${DOCKERHUB_TAGS},${GHCR_TAGS}"
echo "tags=${ALL_TAGS}" >> $GITHUB_OUTPUT
echo "Generated tags: ${ALL_TAGS}"
- -
name: Build and push name: Build and push
uses: docker/build-push-action@v6 uses: docker/build-push-action@v6
with: with:
context: ./docker_context context: ./docker_context
platforms: linux/amd64,linux/arm64,linux/riscv64,linux/mips,linux/mipsel,linux/arm/v7,linux/arm/v6,linux/arm/v5,linux/arm,linux/loong64 platforms: linux/amd64,linux/arm64
push: true push: true
file: .github/workflows/Dockerfile file: .github/workflows/Dockerfile
tags: ${{ steps.tags.outputs.tags }} tags: |
easytier/easytier:${{ inputs.image_tag }}${{ inputs.mark_latest && ',easytier/easytier:latest' || '' }},
ghcr.io/easytier/easytier:${{ inputs.image_tag }}${{ inputs.mark_latest && ',easytier/easytier:latest' || '' }},

View File

@@ -29,7 +29,7 @@ jobs:
concurrent_skipping: 'same_content_newer' concurrent_skipping: 'same_content_newer'
skip_after_successful_duplicate: 'true' skip_after_successful_duplicate: 'true'
cancel_others: 'true' cancel_others: 'true'
paths: '["Cargo.toml", "Cargo.lock", "easytier/**", "easytier-gui/**", ".github/workflows/gui.yml", ".github/workflows/install_rust.sh", ".github/workflows/install_gui_dep.sh"]' paths: '["Cargo.toml", "Cargo.lock", "easytier/**", "easytier-gui/**", ".github/workflows/gui.yml", ".github/workflows/install_rust.sh"]'
build-gui: build-gui:
strategy: strategy:
fail-fast: false fail-fast: false
@@ -78,11 +78,20 @@ jobs:
needs: pre_job needs: pre_job
if: needs.pre_job.outputs.should_skip != 'true' if: needs.pre_job.outputs.should_skip != 'true'
steps: steps:
- uses: actions/checkout@v3
- name: Install GUI dependencies (x86 only) - name: Install GUI dependencies (x86 only)
if: ${{ matrix.TARGET == 'x86_64-unknown-linux-musl' }} if: ${{ matrix.TARGET == 'x86_64-unknown-linux-musl' }}
run: bash ./.github/workflows/install_gui_dep.sh run: |
sudo apt update
sudo apt install -qq libwebkit2gtk-4.1-dev \
build-essential \
curl \
wget \
file \
libgtk-3-dev \
librsvg2-dev \
libxdo-dev \
libssl-dev \
patchelf
- name: Install GUI cross compile (aarch64 only) - name: Install GUI cross compile (aarch64 only)
if: ${{ matrix.TARGET == 'aarch64-unknown-linux-musl' }} if: ${{ matrix.TARGET == 'aarch64-unknown-linux-musl' }}
@@ -119,6 +128,8 @@ jobs:
echo "PKG_CONFIG_SYSROOT_DIR=/usr/aarch64-linux-gnu/" >> "$GITHUB_ENV" echo "PKG_CONFIG_SYSROOT_DIR=/usr/aarch64-linux-gnu/" >> "$GITHUB_ENV"
echo "PKG_CONFIG_PATH=/usr/lib/aarch64-linux-gnu/pkgconfig/" >> "$GITHUB_ENV" echo "PKG_CONFIG_PATH=/usr/lib/aarch64-linux-gnu/pkgconfig/" >> "$GITHUB_ENV"
- uses: actions/checkout@v3
- name: Set current ref as env variable - name: Set current ref as env variable
run: | run: |
echo "GIT_DESC=$(git log -1 --format=%cd.%h --date=format:%Y-%m-%d_%H:%M:%S)" >> $GITHUB_ENV echo "GIT_DESC=$(git log -1 --format=%cd.%h --date=format:%Y-%m-%d_%H:%M:%S)" >> $GITHUB_ENV

View File

@@ -1,11 +0,0 @@
sudo apt update
sudo apt install -qq libwebkit2gtk-4.1-dev \
build-essential \
curl \
wget \
file \
libgtk-3-dev \
librsvg2-dev \
libxdo-dev \
libssl-dev \
patchelf

View File

@@ -15,8 +15,6 @@ if [[ $OS =~ ^ubuntu.*$ ]]; then
# if target is mips or mipsel, we should use soft-float version of musl # if target is mips or mipsel, we should use soft-float version of musl
if [[ $TARGET =~ ^mips.*$ || $TARGET =~ ^mipsel.*$ ]]; then if [[ $TARGET =~ ^mips.*$ || $TARGET =~ ^mipsel.*$ ]]; then
MUSL_TARGET=${TARGET}sf MUSL_TARGET=${TARGET}sf
elif [[ $TARGET =~ ^riscv64gc-.*$ ]]; then
MUSL_TARGET=${TARGET/#riscv64gc-/riscv64-}
fi fi
if [[ $MUSL_TARGET =~ musl ]]; then if [[ $MUSL_TARGET =~ musl ]]; then
mkdir -p ./musl_gcc mkdir -p ./musl_gcc
@@ -31,8 +29,8 @@ fi
# see https://github.com/rust-lang/rustup/issues/3709 # see https://github.com/rust-lang/rustup/issues/3709
rustup set auto-self-update disable rustup set auto-self-update disable
rustup install 1.89 rustup install 1.87
rustup default 1.89 rustup default 1.87
# mips/mipsel cannot add target from rustup, need compile by ourselves # mips/mipsel cannot add target from rustup, need compile by ourselves
if [[ $OS =~ ^ubuntu.*$ && $TARGET =~ ^mips.*$ ]]; then if [[ $OS =~ ^ubuntu.*$ && $TARGET =~ ^mips.*$ ]]; then

View File

@@ -21,7 +21,7 @@ on:
version: version:
description: 'Version for this release' description: 'Version for this release'
type: string type: string
default: 'v2.4.2' default: 'v2.4.0'
required: true required: true
make_latest: make_latest:
description: 'Mark this release as latest' description: 'Mark this release as latest'

View File

@@ -28,7 +28,7 @@ jobs:
# All of these options are optional, so you can remove them if you are happy with the defaults # All of these options are optional, so you can remove them if you are happy with the defaults
concurrent_skipping: 'never' concurrent_skipping: 'never'
skip_after_successful_duplicate: 'true' skip_after_successful_duplicate: 'true'
paths: '["Cargo.toml", "Cargo.lock", "easytier/**", ".github/workflows/test.yml", ".github/workflows/install_gui_dep.sh", ".github/workflows/install_rust.sh"]' paths: '["Cargo.toml", "Cargo.lock", "easytier/**", ".github/workflows/test.yml"]'
test: test:
runs-on: ubuntu-22.04 runs-on: ubuntu-22.04
needs: pre_job needs: pre_job
@@ -89,24 +89,6 @@ jobs:
./target ./target
key: ${{ runner.os }}-cargo-test-${{ hashFiles('**/Cargo.lock') }} key: ${{ runner.os }}-cargo-test-${{ hashFiles('**/Cargo.lock') }}
- name: Install GUI dependencies (Used by clippy)
run: |
bash ./.github/workflows/install_gui_dep.sh
bash ./.github/workflows/install_rust.sh
rustup component add rustfmt
rustup component add clippy
- name: Check formatting
if: ${{ !cancelled() }}
run: cargo fmt --all -- --check
- name: Check Clippy
if: ${{ !cancelled() }}
# NOTE: tauri need `dist` dir in build.rs
run: |
mkdir -p easytier-gui/dist
cargo clippy --all-targets --all-features --all -- -D warnings
- name: Run tests - name: Run tests
run: | run: |
sudo prlimit --pid $$ --nofile=1048576:1048576 sudo prlimit --pid $$ --nofile=1048576:1048576

.gitignore vendored
View File

@@ -12,7 +12,6 @@ target-*/
.vscode .vscode
/.idea /.idea
/.direnv/
# perf & flamegraph # perf & flamegraph
perf.data perf.data

View File

@@ -26,7 +26,7 @@ Thank you for your interest in contributing to EasyTier! This document provides
#### Required Tools #### Required Tools
- Node.js v21 or higher - Node.js v21 or higher
- pnpm v9 or higher - pnpm v9 or higher
- Rust toolchain (version 1.89) - Rust toolchain (version 1.87)
- LLVM and Clang - LLVM and Clang
- Protoc (Protocol Buffers compiler) - Protoc (Protocol Buffers compiler)
@@ -37,6 +37,7 @@ Thank you for your interest in contributing to EasyTier! This document provides
# Core build dependencies # Core build dependencies
sudo apt-get update && sudo apt-get install -y \ sudo apt-get update && sudo apt-get install -y \
musl-tools \ musl-tools \
libappindicator3-dev \
llvm \ llvm \
clang \ clang \
protobuf-compiler protobuf-compiler
@@ -52,7 +53,6 @@ sudo apt install -y \
librsvg2-dev \ librsvg2-dev \
libxdo-dev \ libxdo-dev \
libssl-dev \ libssl-dev \
libappindicator3-dev \
patchelf patchelf
# Testing dependencies # Testing dependencies
@@ -79,8 +79,8 @@ sudo apt install -y bridge-utils
2. Install dependencies: 2. Install dependencies:
```bash ```bash
# Install Rust toolchain # Install Rust toolchain
rustup install 1.89 rustup install 1.87
rustup default 1.89 rustup default 1.87
# Install project dependencies # Install project dependencies
pnpm -r install pnpm -r install

View File

@@ -34,7 +34,7 @@
#### 必需工具 #### 必需工具
- Node.js v21 或更高版本 - Node.js v21 或更高版本
- pnpm v9 或更高版本 - pnpm v9 或更高版本
- Rust 工具链(版本 1.89) - Rust 工具链(版本 1.87)
- LLVM 和 Clang - LLVM 和 Clang
- Protoc(Protocol Buffers 编译器) - Protoc(Protocol Buffers 编译器)
@@ -45,6 +45,7 @@
# 核心构建依赖 # 核心构建依赖
sudo apt-get update && sudo apt-get install -y \ sudo apt-get update && sudo apt-get install -y \
musl-tools \ musl-tools \
libappindicator3-dev \
llvm \ llvm \
clang \ clang \
protobuf-compiler protobuf-compiler
@@ -60,7 +61,6 @@ sudo apt install -y \
librsvg2-dev \ librsvg2-dev \
libxdo-dev \ libxdo-dev \
libssl-dev \ libssl-dev \
libappindicator3-dev \
patchelf patchelf
# 测试依赖 # 测试依赖
@@ -87,8 +87,8 @@ sudo apt install -y bridge-utils
2. 安装依赖: 2. 安装依赖:
```bash ```bash
# 安装 Rust 工具链 # 安装 Rust 工具链
rustup install 1.89 rustup install 1.87
rustup default 1.89 rustup default 1.87
# 安装项目依赖 # 安装项目依赖
pnpm -r install pnpm -r install

Cargo.lock generated
View File

@@ -1876,7 +1876,7 @@ dependencies = [
"libc", "libc",
"option-ext", "option-ext",
"redox_users 0.5.0", "redox_users 0.5.0",
"windows-sys 0.60.2", "windows-sys 0.59.0",
] ]
[[package]] [[package]]
@@ -1979,7 +1979,7 @@ checksum = "0d6ef0072f8a535281e4876be788938b528e9a1d43900b82c2569af7da799125"
[[package]] [[package]]
name = "easytier" name = "easytier"
version = "2.4.2" version = "2.4.0"
dependencies = [ dependencies = [
"aes-gcm", "aes-gcm",
"anyhow", "anyhow",
@@ -2033,7 +2033,6 @@ dependencies = [
"network-interface", "network-interface",
"nix 0.29.0", "nix 0.29.0",
"once_cell", "once_cell",
"openssl",
"parking_lot", "parking_lot",
"percent-encoding", "percent-encoding",
"petgraph 0.8.1", "petgraph 0.8.1",
@@ -2113,17 +2112,16 @@ dependencies = [
[[package]] [[package]]
name = "easytier-gui" name = "easytier-gui"
version = "2.4.2" version = "2.4.0"
dependencies = [ dependencies = [
"anyhow", "anyhow",
"chrono", "chrono",
"dashmap", "dashmap",
"dunce", "dunce",
"easytier", "easytier",
"elevated-command",
"gethostname 1.0.2", "gethostname 1.0.2",
"libc",
"once_cell", "once_cell",
"security-framework-sys",
"serde", "serde",
"serde_json", "serde_json",
"tauri", "tauri",
@@ -2139,8 +2137,6 @@ dependencies = [
"thunk-rs", "thunk-rs",
"tokio", "tokio",
"uuid", "uuid",
"winapi",
"windows 0.52.0",
] ]
[[package]] [[package]]
@@ -2163,7 +2159,7 @@ dependencies = [
[[package]] [[package]]
name = "easytier-web" name = "easytier-web"
version = "2.4.2" version = "2.4.0"
dependencies = [ dependencies = [
"anyhow", "anyhow",
"async-trait", "async-trait",
@@ -2211,6 +2207,20 @@ dependencies = [
"serde", "serde",
] ]
[[package]]
name = "elevated-command"
version = "1.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "54c410eccdcc5b759704fdb6a792afe6b01ab8a062e2c003ff2567e2697a94aa"
dependencies = [
"anyhow",
"base64 0.21.7",
"libc",
"log",
"winapi",
"windows 0.52.0",
]
[[package]] [[package]]
name = "embed-resource" name = "embed-resource"
version = "3.0.5" version = "3.0.5"
@@ -5235,15 +5245,6 @@ version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf"
[[package]]
name = "openssl-src"
version = "300.5.2+3.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d270b79e2926f5150189d475bc7e9d2c69f9c4697b185fa917d5a32b792d21b4"
dependencies = [
"cc",
]
[[package]] [[package]]
name = "openssl-sys" name = "openssl-sys"
version = "0.9.103" version = "0.9.103"
@@ -5252,7 +5253,6 @@ checksum = "7f9e8deee91df40a943c71b917e5874b951d32a802526c85721ce3b776c929d6"
dependencies = [ dependencies = [
"cc", "cc",
"libc", "libc",
"openssl-src",
"pkg-config", "pkg-config",
"vcpkg", "vcpkg",
] ]
@@ -7557,9 +7557,9 @@ dependencies = [
[[package]] [[package]]
name = "socket2" name = "socket2"
version = "0.5.10" version = "0.5.7"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e22376abed350d73dd1cd119b57ffccad95b4e585a7cda43e286245ce23c0678" checksum = "ce305eb0b4296696835b71df73eb912e0f1ffd2556a501fcede6e0c50349191c"
dependencies = [ dependencies = [
"libc", "libc",
"windows-sys 0.52.0", "windows-sys 0.52.0",

View File

@@ -44,7 +44,5 @@
"prettier.enable": false, "prettier.enable": false,
"editor.formatOnSave": true, "editor.formatOnSave": true,
"editor.formatOnSaveMode": "modifications", "editor.formatOnSaveMode": "modifications",
"editor.formatOnPaste": false,
"editor.formatOnType": true,
} }
} }

View File

@@ -105,9 +105,9 @@ After successful execution, you can check the network status using `easytier-cli
```text ```text
| ipv4 | hostname | cost | lat_ms | loss_rate | rx_bytes | tx_bytes | tunnel_proto | nat_type | id | version | | ipv4 | hostname | cost | lat_ms | loss_rate | rx_bytes | tx_bytes | tunnel_proto | nat_type | id | version |
| ------------ | -------------- | ----- | ------ | --------- | -------- | -------- | ------------ | -------- | ---------- | --------------- | | ------------ | -------------- | ----- | ------ | --------- | -------- | -------- | ------------ | -------- | ---------- | --------------- |
| 10.126.126.1 | abc-1 | Local | * | * | * | * | udp | FullCone | 439804259 | 2.4.2-70e69a38~ | | 10.126.126.1 | abc-1 | Local | * | * | * | * | udp | FullCone | 439804259 | 2.4.0-70e69a38~ |
| 10.126.126.2 | abc-2 | p2p | 3.452 | 0 | 17.33 kB | 20.42 kB | udp | FullCone | 390879727 | 2.4.2-70e69a38~ | | 10.126.126.2 | abc-2 | p2p | 3.452 | 0 | 17.33 kB | 20.42 kB | udp | FullCone | 390879727 | 2.4.0-70e69a38~ |
| | PublicServer_a | p2p | 27.796 | 0.000 | 50.01 kB | 67.46 kB | tcp | Unknown | 3771642457 | 2.4.2-70e69a38~ | | | PublicServer_a | p2p | 27.796 | 0.000 | 50.01 kB | 67.46 kB | tcp | Unknown | 3771642457 | 2.4.0-70e69a38~ |
``` ```
You can test connectivity between nodes: You can test connectivity between nodes:
@@ -302,18 +302,14 @@ CDN acceleration and security protection for this project are sponsored by Tence
</a> </a>
</p> </p>
Special thanks to [Langlang Cloud](https://langlangy.cn/?i26c5a5) and [RainCloud](https://www.rainyun.com/NjM0NzQ1_) for sponsoring our public servers. Special thanks to [Langlang Cloud](https://langlang.cloud/) for sponsoring our public servers.
<p align="center"> <p align="center">
<a href="https://langlangy.cn/?i26c5a5" target="_blank"> <a href="https://langlangy.cn/?i26c5a5" target="_blank">
<img src="assets/langlang.png" width="200"> <img src="assets/langlang.png" width="200">
</a> </a>
<a href="https://langlangy.cn/?i26c5a5" target="_blank">
<img src="assets/raincloud.png" width="200">
</a>
</p> </p>
If you find EasyTier helpful, please consider sponsoring us. Software development and maintenance require a lot of time and effort, and your sponsorship will help us better maintain and improve EasyTier. If you find EasyTier helpful, please consider sponsoring us. Software development and maintenance require a lot of time and effort, and your sponsorship will help us better maintain and improve EasyTier.
<p align="center"> <p align="center">

View File

@@ -106,9 +106,9 @@ sudo easytier-core -d --network-name abc --network-secret abc -p tcp://public.ea
```text ```text
| ipv4 | hostname | cost | lat_ms | loss_rate | rx_bytes | tx_bytes | tunnel_proto | nat_type | id | version | | ipv4 | hostname | cost | lat_ms | loss_rate | rx_bytes | tx_bytes | tunnel_proto | nat_type | id | version |
| ------------ | -------------- | ----- | ------ | --------- | -------- | -------- | ------------ | -------- | ---------- | --------------- | | ------------ | -------------- | ----- | ------ | --------- | -------- | -------- | ------------ | -------- | ---------- | --------------- |
| 10.126.126.1 | abc-1 | Local | * | * | * | * | udp | FullCone | 439804259 | 2.4.2-70e69a38~ | | 10.126.126.1 | abc-1 | Local | * | * | * | * | udp | FullCone | 439804259 | 2.4.0-70e69a38~ |
| 10.126.126.2 | abc-2 | p2p | 3.452 | 0 | 17.33 kB | 20.42 kB | udp | FullCone | 390879727 | 2.4.2-70e69a38~ | | 10.126.126.2 | abc-2 | p2p | 3.452 | 0 | 17.33 kB | 20.42 kB | udp | FullCone | 390879727 | 2.4.0-70e69a38~ |
| | PublicServer_a | p2p | 27.796 | 0.000 | 50.01 kB | 67.46 kB | tcp | Unknown | 3771642457 | 2.4.2-70e69a38~ | | | PublicServer_a | p2p | 27.796 | 0.000 | 50.01 kB | 67.46 kB | tcp | Unknown | 3771642457 | 2.4.0-70e69a38~ |
``` ```
您可以测试节点之间的连通性: 您可以测试节点之间的连通性:
@@ -303,15 +303,12 @@ EasyTier 在 [LGPL-3.0](https://github.com/EasyTier/EasyTier/blob/main/LICENSE)
</a> </a>
</p> </p>
特别感谢 [浪浪云](https://langlangy.cn/?i26c5a5) 和 [雨云](https://www.rainyun.com/NjM0NzQ1_) 赞助我们的公共服务器。 特别感谢 [浪浪云](https://langlang.cloud/) 赞助我们的公共服务器。
<p align="center"> <p align="center">
<a href="https://langlangy.cn/?i26c5a5" target="_blank"> <a href="https://langlangy.cn/?i26c5a5" target="_blank">
<img src="assets/langlang.png" width="200"> <img src="assets/langlang.png" width="200">
</a> </a>
<a href="https://langlangy.cn/?i26c5a5" target="_blank">
<img src="assets/raincloud.png" width="200">
</a>
</p> </p>
如果您觉得 EasyTier 有帮助,请考虑赞助我们。软件开发和维护需要大量的时间和精力,您的赞助将帮助我们更好地维护和改进 EasyTier。 如果您觉得 EasyTier 有帮助,请考虑赞助我们。软件开发和维护需要大量的时间和精力,您的赞助将帮助我们更好地维护和改进 EasyTier。

Binary image file not shown (37 KiB).

View File

@@ -29,10 +29,8 @@ fn set_error_msg(msg: &str) {
msg_buf[..len].copy_from_slice(bytes); msg_buf[..len].copy_from_slice(bytes);
} }
/// # Safety
/// Set the tun fd
#[no_mangle] #[no_mangle]
pub unsafe extern "C" fn set_tun_fd( pub extern "C" fn set_tun_fd(
inst_name: *const std::ffi::c_char, inst_name: *const std::ffi::c_char,
fd: std::ffi::c_int, fd: std::ffi::c_int,
) -> std::ffi::c_int { ) -> std::ffi::c_int {
@@ -45,23 +43,18 @@ pub unsafe extern "C" fn set_tun_fd(
if !INSTANCE_NAME_ID_MAP.contains_key(&inst_name) { if !INSTANCE_NAME_ID_MAP.contains_key(&inst_name) {
return -1; return -1;
} }
match INSTANCE_MANAGER.set_tun_fd(&INSTANCE_NAME_ID_MAP.get(&inst_name).unwrap().value(), fd) {
let inst_id = *INSTANCE_NAME_ID_MAP Ok(_) => {
.get(&inst_name) 0
.as_ref() }
.unwrap() Err(_) => {
.value(); -1
}
match INSTANCE_MANAGER.set_tun_fd(&inst_id, fd) {
Ok(_) => 0,
Err(_) => -1,
} }
} }
/// # Safety
/// Get the last error message
#[no_mangle] #[no_mangle]
pub unsafe extern "C" fn get_error_msg(out: *mut *const std::ffi::c_char) { pub extern "C" fn get_error_msg(out: *mut *const std::ffi::c_char) {
let msg_buf = ERROR_MSG.lock().unwrap(); let msg_buf = ERROR_MSG.lock().unwrap();
if msg_buf.is_empty() { if msg_buf.is_empty() {
unsafe { unsafe {
@@ -85,10 +78,8 @@ pub extern "C" fn free_string(s: *const std::ffi::c_char) {
} }
} }
/// # Safety
/// Parse the config
#[no_mangle] #[no_mangle]
pub unsafe extern "C" fn parse_config(cfg_str: *const std::ffi::c_char) -> std::ffi::c_int { pub extern "C" fn parse_config(cfg_str: *const std::ffi::c_char) -> std::ffi::c_int {
let cfg_str = unsafe { let cfg_str = unsafe {
assert!(!cfg_str.is_null()); assert!(!cfg_str.is_null());
std::ffi::CStr::from_ptr(cfg_str) std::ffi::CStr::from_ptr(cfg_str)
@@ -104,10 +95,8 @@ pub unsafe extern "C" fn parse_config(cfg_str: *const std::ffi::c_char) -> std::
0 0
} }
/// # Safety
/// Run the network instance
#[no_mangle] #[no_mangle]
pub unsafe extern "C" fn run_network_instance(cfg_str: *const std::ffi::c_char) -> std::ffi::c_int { pub extern "C" fn run_network_instance(cfg_str: *const std::ffi::c_char) -> std::ffi::c_int {
let cfg_str = unsafe { let cfg_str = unsafe {
assert!(!cfg_str.is_null()); assert!(!cfg_str.is_null());
std::ffi::CStr::from_ptr(cfg_str) std::ffi::CStr::from_ptr(cfg_str)
@@ -142,10 +131,8 @@ pub unsafe extern "C" fn run_network_instance(cfg_str: *const std::ffi::c_char)
0 0
} }
/// # Safety
/// Retain the network instance
#[no_mangle] #[no_mangle]
pub unsafe extern "C" fn retain_network_instance( pub extern "C" fn retain_network_instance(
inst_names: *const *const std::ffi::c_char, inst_names: *const *const std::ffi::c_char,
length: usize, length: usize,
) -> std::ffi::c_int { ) -> std::ffi::c_int {
@@ -181,15 +168,13 @@ pub unsafe extern "C" fn retain_network_instance(
return -1; return -1;
} }
INSTANCE_NAME_ID_MAP.retain(|k, _| inst_names.contains(k)); let _ = INSTANCE_NAME_ID_MAP.retain(|k, _| inst_names.contains(k));
0 0
} }
/// # Safety
/// Collect the network infos
#[no_mangle] #[no_mangle]
pub unsafe extern "C" fn collect_network_infos( pub extern "C" fn collect_network_infos(
infos: *mut KeyValuePair, infos: *mut KeyValuePair,
max_length: usize, max_length: usize,
) -> std::ffi::c_int { ) -> std::ffi::c_int {
@@ -248,9 +233,7 @@ mod tests {
network = "test_network" network = "test_network"
"#; "#;
let cstr = std::ffi::CString::new(cfg_str).unwrap(); let cstr = std::ffi::CString::new(cfg_str).unwrap();
unsafe { assert_eq!(parse_config(cstr.as_ptr()), 0);
assert_eq!(parse_config(cstr.as_ptr()), 0);
}
} }
#[test] #[test]
@@ -260,8 +243,6 @@ mod tests {
network = "test_network" network = "test_network"
"#; "#;
let cstr = std::ffi::CString::new(cfg_str).unwrap(); let cstr = std::ffi::CString::new(cfg_str).unwrap();
unsafe { assert_eq!(run_network_instance(cstr.as_ptr()), 0);
assert_eq!(run_network_instance(cstr.as_ptr()), 0);
}
} }
} }
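One side of the set_tun_fd hunk above copies the instance id out of the map entry before calling into the instance manager, instead of holding the guard returned by DashMap::get across the call. A minimal sketch of that pattern, assuming hypothetical stand-ins (`name_to_id` and a `set_tun_fd` callback) rather than the crate's real `INSTANCE_NAME_ID_MAP` / `INSTANCE_MANAGER` statics:

```rust
use dashmap::DashMap;
use uuid::Uuid;

// Illustrative only: `name_to_id` and `set_tun_fd` stand in for the statics
// used by the exported FFI functions in this file.
fn set_fd_without_holding_guard(
    name_to_id: &DashMap<String, Uuid>,
    set_tun_fd: impl Fn(&Uuid, i32) -> anyhow::Result<()>,
    inst_name: &str,
    fd: i32,
) -> i32 {
    // Copy the value out first; the read guard returned by `get` is dropped
    // at the end of this statement, so no map shard stays locked while the
    // callee runs.
    let Some(inst_id) = name_to_id.get(inst_name).map(|entry| *entry.value()) else {
        return -1;
    };
    match set_tun_fd(&inst_id, fd) {
        Ok(_) => 0,
        Err(_) => -1,
    }
}
```

Dropping the guard before the call keeps the lookup map free of contention while the fd is applied, which matches the "remove lock" theme of the commits in this compare.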

View File

@@ -1,6 +1,6 @@
id=easytier_magisk id=easytier_magisk
name=EasyTier_Magisk name=EasyTier_Magisk
version=v2.4.2 version=v2.4.0
versionCode=1 versionCode=1
author=EasyTier author=EasyTier
description=easytier magisk module @EasyTier(https://github.com/EasyTier/EasyTier) description=easytier magisk module @EasyTier(https://github.com/EasyTier/EasyTier)

View File

@@ -1010,7 +1010,7 @@ dependencies = [
[[package]] [[package]]
name = "easytier" name = "easytier"
version = "2.4.2" version = "2.4.0"
source = "git+https://github.com/EasyTier/EasyTier.git#a4bb555fac1046d0099c44676fa9d0d8cca55c99" source = "git+https://github.com/EasyTier/EasyTier.git#a4bb555fac1046d0099c44676fa9d0d8cca55c99"
dependencies = [ dependencies = [
"anyhow", "anyhow",

View File

@@ -1,7 +1,7 @@
{ {
"name": "easytier-gui", "name": "easytier-gui",
"type": "module", "type": "module",
"version": "2.4.2", "version": "2.4.0",
"private": true, "private": true,
"packageManager": "pnpm@9.12.1+sha512.e5a7e52a4183a02d5931057f7a0dbff9d5e9ce3161e33fa68ae392125b79282a8a8a470a51dfc8a0ed86221442eb2fb57019b0990ed24fab519bf0e1bc5ccfc4", "packageManager": "pnpm@9.12.1+sha512.e5a7e52a4183a02d5931057f7a0dbff9d5e9ce3161e33fa68ae392125b79282a8a8a470a51dfc8a0ed86221442eb2fb57019b0990ed24fab519bf0e1bc5ccfc4",
"scripts": { "scripts": {

View File

@@ -1,6 +1,6 @@
[package] [package]
name = "easytier-gui" name = "easytier-gui"
version = "2.4.2" version = "2.4.0"
description = "EasyTier GUI" description = "EasyTier GUI"
authors = ["you"] authors = ["you"]
edition = "2021" edition = "2021"
@@ -40,6 +40,7 @@ chrono = { version = "0.4.37", features = ["serde"] }
once_cell = "1.18.0" once_cell = "1.18.0"
dashmap = "6.0" dashmap = "6.0"
elevated-command = "1.1.2"
gethostname = "1.0.2" gethostname = "1.0.2"
dunce = "1.0.4" dunce = "1.0.4"
@@ -53,15 +54,6 @@ tauri-plugin-os = "2.3.0"
tauri-plugin-autostart = "2.5.0" tauri-plugin-autostart = "2.5.0"
uuid = "1.17.0" uuid = "1.17.0"
[target.'cfg(target_os = "windows")'.dependencies]
windows = { version = "0.52", features = ["Win32_Foundation", "Win32_UI_Shell", "Win32_UI_WindowsAndMessaging"] }
winapi = { version = "0.3.9", features = ["securitybaseapi", "processthreadsapi"] }
[target.'cfg(target_family = "unix")'.dependencies]
libc = "0.2"
[target.'cfg(target_os = "macos")'.dependencies]
security-framework-sys = "2.9.0"
[features] [features]
# This feature is used for production builds or when a dev server is not specified, DO NOT REMOVE!! # This feature is used for production builds or when a dev server is not specified, DO NOT REMOVE!!

View File

@@ -1,10 +1,6 @@
<?xml version="1.0" encoding="utf-8"?> <?xml version="1.0" encoding="utf-8"?>
<manifest xmlns:android="http://schemas.android.com/apk/res/android"> <manifest xmlns:android="http://schemas.android.com/apk/res/android">
<uses-permission android:name="android.permission.INTERNET" /> <uses-permission android:name="android.permission.INTERNET" />
<uses-permission android:name="android.permission.FOREGROUND_SERVICE" />
<uses-permission android:name="android.permission.POST_NOTIFICATIONS" />
<uses-permission android:name="android.permission.FOREGROUND_SERVICE_DATA_SYNC" />
<application <application
android:icon="@mipmap/ic_launcher" android:icon="@mipmap/ic_launcher"
android:label="@string/app_name" android:label="@string/app_name"
@@ -22,12 +18,6 @@
<category android:name="android.intent.category.LAUNCHER" /> <category android:name="android.intent.category.LAUNCHER" />
</intent-filter> </intent-filter>
</activity> </activity>
<service
android:name=".MainForegroundService"
android:foregroundServiceType="dataSync"
android:enabled="true"
android:exported="false">
</service>
<provider <provider
android:name="androidx.core.content.FileProvider" android:name="androidx.core.content.FileProvider"

View File

@@ -1,20 +1,3 @@
package com.kkrainbow.easytier package com.kkrainbow.easytier
import android.content.Intent class MainActivity : TauriActivity()
import android.os.Build
import android.os.Bundle
class MainActivity : TauriActivity() {
override fun onCreate(savedInstanceState: Bundle?) {
super.onCreate(savedInstanceState)
initService()
}
private fun initService() {
val serviceIntent = Intent(this, MainForegroundService::class.java)
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.O) {
startForegroundService(serviceIntent)
} else {
startService(serviceIntent)
}
}
}

View File

@@ -1,64 +0,0 @@
package com.kkrainbow.easytier
import android.app.Notification
import android.app.NotificationChannel
import android.app.NotificationManager
import android.app.Service
import android.content.Intent
import android.content.pm.ServiceInfo
import android.os.Build
import android.os.IBinder
import androidx.core.app.NotificationCompat
import android.util.Log
class MainForegroundService : Service() {
companion object {
const val CHANNEL_ID = "easytier_channel"
const val NOTIFICATION_ID = 1355
// You can add more constants if needed
}
override fun onCreate() {
super.onCreate()
}
override fun onStartCommand(intent: Intent?, flags: Int, startId: Int): Int {
createNotificationChannel()
val notification = NotificationCompat.Builder(this, CHANNEL_ID)
.setContentTitle("easytier Running")
.setContentText("easytier is available on localhost")
.setSmallIcon(android.R.drawable.ic_menu_manage)
.build()
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.Q) {
startForeground(
NOTIFICATION_ID,
notification,
ServiceInfo.FOREGROUND_SERVICE_TYPE_DATA_SYNC
)
} else {
startForeground(NOTIFICATION_ID, notification)
}
return START_STICKY
}
override fun onDestroy() {
super.onDestroy()
}
override fun onBind(intent: Intent?): IBinder? = null
private fun createNotificationChannel() {
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.O) {
try {
val channel = NotificationChannel(
CHANNEL_ID,
"easytier notice",
NotificationManager.IMPORTANCE_DEFAULT
)
val manager = getSystemService(NotificationManager::class.java)
manager?.createNotificationChannel(channel)
} catch (e: Exception) {
Log.e("MainForegroundService", "Failed to create notification channel", e)
}
}
}
}

View File

@@ -1,67 +0,0 @@
/*---------------------------------------------------------------------------------------------
* Copyright (c) Luis Liu. All rights reserved.
* Licensed under the MIT License. See License in the project root for license information.
*--------------------------------------------------------------------------------------------*/
use super::Command;
use anyhow::{anyhow, Result};
use std::env;
use std::ffi::OsStr;
use std::path::PathBuf;
use std::process::{Command as StdCommand, Output};
use std::str::FromStr;
/// The implementation of state check and elevated executing varies on each platform
impl Command {
/// Check the state of the current program
///
/// Return `true` if the program is running as root, otherwise false
pub fn is_elevated() -> bool {
let uid = unsafe { libc::getuid() };
uid == 0
}
/// Prompting the user with a graphical OS dialog for the root password,
/// excuting the command with escalated privileges, and return the output
pub fn output(&self) -> Result<Output> {
let pkexec = PathBuf::from_str("/bin/pkexec")?;
let mut command = StdCommand::new(pkexec);
let display = env::var("DISPLAY");
let xauthority = env::var("XAUTHORITY");
let home = env::var("HOME");
command.arg("--disable-internal-agent");
if display.is_ok() || xauthority.is_ok() || home.is_ok() {
command.arg("env");
if let Ok(display) = display {
command.arg(format!("DISPLAY={}", display));
}
if let Ok(xauthority) = xauthority {
command.arg(format!("XAUTHORITY={}", xauthority));
}
if let Ok(home) = home {
command.arg(format!("HOME={}", home));
}
} else if self.cmd.get_envs().any(|(_, v)| v.is_some()) {
command.arg("env");
}
for (k, v) in self.cmd.get_envs() {
if let Some(value) = v {
command.arg(format!(
"{}={}",
k.to_str().ok_or(anyhow!("invalid key"))?,
value.to_str().ok_or(anyhow!("invalid value"))?
));
}
}
command.arg(self.cmd.get_program());
let args: Vec<&OsStr> = self.cmd.get_args().collect();
if !args.is_empty() {
command.args(args);
}
let output = command.output()?;
Ok(output)
}
}

View File

@@ -1,182 +0,0 @@
/*---------------------------------------------------------------------------------------------
* Copyright (c) Luis Liu. All rights reserved.
* Licensed under the MIT License. See License in the project root for license information.
*--------------------------------------------------------------------------------------------*/
// Thanks to https://github.com/jorangreef/sudo-prompt/blob/master/index.js
// MIT License
//
// Copyright (c) 2015 Joran Dirk Greef
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// ...
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
use super::Command;
use anyhow::Result;
use std::env;
use std::path::PathBuf;
use std::process::{ExitStatus, Output};
use std::ffi::{CString, OsString};
use std::io;
use std::mem;
use std::os::unix::ffi::OsStrExt;
use std::path::Path;
use std::ptr;
use libc::{fcntl, fileno, waitpid, EINTR, F_GETOWN};
use security_framework_sys::authorization::{
errAuthorizationSuccess, kAuthorizationFlagDefaults, kAuthorizationFlagDestroyRights,
AuthorizationCreate, AuthorizationExecuteWithPrivileges, AuthorizationFree, AuthorizationRef,
};
const ENV_PATH: &str = "PATH";
fn get_exe_path<P: AsRef<Path>>(exe_name: P) -> Option<PathBuf> {
let exe_name = exe_name.as_ref();
if exe_name.has_root() {
return Some(exe_name.into());
}
if let Ok(abs_path) = exe_name.canonicalize() {
if abs_path.is_file() {
return Some(abs_path);
}
}
env::var_os(ENV_PATH).and_then(|paths| {
env::split_paths(&paths)
.filter_map(|dir| {
let full_path = dir.join(exe_name);
if full_path.is_file() {
Some(full_path)
} else {
None
}
})
.next()
})
}
macro_rules! make_cstring {
($s:expr) => {
match CString::new($s.as_bytes()) {
Ok(s) => s,
Err(_) => {
return Err(io::Error::new(io::ErrorKind::Other, "null byte in string"));
}
}
};
}
unsafe fn gui_runas(prog: *const i8, argv: *const *const i8) -> i32 {
let mut authref: AuthorizationRef = ptr::null_mut();
let mut pipe: *mut libc::FILE = ptr::null_mut();
if AuthorizationCreate(
ptr::null(),
ptr::null(),
kAuthorizationFlagDefaults,
&mut authref,
) != errAuthorizationSuccess
{
return -1;
}
if AuthorizationExecuteWithPrivileges(
authref,
prog,
kAuthorizationFlagDefaults,
argv as *const *mut _,
&mut pipe,
) != errAuthorizationSuccess
{
AuthorizationFree(authref, kAuthorizationFlagDestroyRights);
return -1;
}
let pid = fcntl(fileno(pipe), F_GETOWN, 0);
let mut status = 0;
loop {
let r = waitpid(pid, &mut status, 0);
if r == -1 && io::Error::last_os_error().raw_os_error() == Some(EINTR) {
continue;
} else {
break;
}
}
AuthorizationFree(authref, kAuthorizationFlagDestroyRights);
status
}
fn runas_root_gui(cmd: &Command) -> io::Result<ExitStatus> {
let exe: OsString = match get_exe_path(&cmd.cmd.get_program()) {
Some(exe) => exe.into(),
None => unsafe {
return Ok(mem::transmute(!0));
},
};
let prog = make_cstring!(exe);
let mut args = vec![];
for arg in cmd.cmd.get_args() {
args.push(make_cstring!(arg))
}
let mut argv: Vec<_> = args.iter().map(|x| x.as_ptr()).collect();
argv.push(ptr::null());
unsafe { Ok(mem::transmute(gui_runas(prog.as_ptr(), argv.as_ptr()))) }
}
/// The implementation of state check and elevated executing varies on each platform
impl Command {
/// Check the state of the current program
///
/// Return `true` if the program is running as root, otherwise false
///
/// # Examples
///
/// ```no_run
/// use elevated_command::Command;
///
/// fn main() {
/// let is_elevated = Command::is_elevated();
///
/// }
/// ```
pub fn is_elevated() -> bool {
let uid = unsafe { libc::getuid() };
let euid = unsafe { libc::geteuid() };
match (uid, euid) {
(0, 0) => true,
(_, 0) => true,
(_, _) => false,
}
}
/// Prompting the user with a graphical OS dialog for the root password,
/// executing the command with escalated privileges, and return the output
///
/// # Examples
///
/// ```no_run
/// use elevated_command::Command;
/// use std::process::Command as StdCommand;
///
/// fn main() {
/// let mut cmd = StdCommand::new("path to the application");
/// let elevated_cmd = Command::new(cmd);
/// let output = elevated_cmd.output().unwrap();
/// }
/// ```
pub fn output(&self) -> Result<Output> {
let status = runas_root_gui(self)?;
Ok(Output {
status,
stdout: Vec::new(),
stderr: Vec::new(),
})
}
}

View File

@@ -1,101 +0,0 @@
#![allow(dead_code)]
/*---------------------------------------------------------------------------------------------
* Copyright (c) Luis Liu. All rights reserved.
* Licensed under the MIT License. See License in the project root for license information.
*--------------------------------------------------------------------------------------------*/
use std::convert::From;
use std::process::Command as StdCommand;
/// Wrap of std::process::command and escalate privileges while executing
pub struct Command {
cmd: StdCommand,
#[allow(dead_code)]
icon: Option<Vec<u8>>,
#[allow(dead_code)]
name: Option<String>,
}
/// Command initialization shares the same logic across all the platforms
impl Command {
/// Constructs a new `Command` from a std::process::Command
/// instance, it would read the following configuration from
/// the instance while executing:
///
/// * The instance's path to the program
/// * The instance's arguments
/// * The instance's environment variables
///
/// So far, the new `Command` would only take the environment variables explicitly
/// set by std::process::Command::env and std::process::Command::env,
/// without the ones inherited from the parent process
///
/// And the environment variables would only be taken on Linux and MacOS,
/// they would be ignored on Windows
///
/// Current working directory would be the following while executing the command:
/// - %SystemRoot%\System32 on Windows
/// - /root on Linux
/// - $TMPDIR/sudo_prompt_applet/applet.app/Contents/MacOS on MacOS
///
/// To pass environment variables on Windows,
/// to inherit environment variables from the parent process and
/// to change the working directory will be supported in later versions
pub fn new(cmd: StdCommand) -> Self {
Self {
cmd,
icon: None,
name: None,
}
}
/// Consumes the `Take`, returning the wrapped std::process::Command
///
/// # Examples
pub fn into_inner(self) -> StdCommand {
self.cmd
}
/// Gets a mutable reference to the underlying std::process::Command
pub fn get_ref(&self) -> &StdCommand {
&self.cmd
}
/// Gets a reference to the underlying std::process::Command
pub fn get_mut(&mut self) -> &mut StdCommand {
&mut self.cmd
}
/// Set the `icon` for the pop-up graphical OS dialog
pub fn icon(&mut self, icon: Vec<u8>) -> &mut Self {
self.icon = Some(icon);
self
}
/// Set the name for the pop-up graphical OS dialog
///
/// This method is only applicable on `MacOS`
pub fn name(&mut self, name: String) -> &mut Self {
self.name = Some(name);
self
}
}
impl From<StdCommand> for Command {
/// Converts from a std::process::Command
///
/// It is similar to the construct method
fn from(cmd: StdCommand) -> Self {
Self {
cmd,
icon: None,
name: None,
}
}
}
#[cfg(target_os = "linux")]
mod linux;
#[cfg(target_os = "macos")]
mod macos;
#[cfg(target_os = "windows")]
mod windows;

View File

@@ -1,114 +0,0 @@
/*---------------------------------------------------------------------------------------------
* Copyright (c) Luis Liu. All rights reserved.
* Licensed under the MIT License. See License in the project root for license information.
*--------------------------------------------------------------------------------------------*/
use super::Command;
use anyhow::Result;
use std::mem;
use std::os::windows::process::ExitStatusExt;
use std::process::{ExitStatus, Output};
use winapi::shared::minwindef::{DWORD, LPVOID};
use winapi::um::processthreadsapi::{GetCurrentProcess, OpenProcessToken};
use winapi::um::securitybaseapi::GetTokenInformation;
use winapi::um::winnt::{TokenElevation, HANDLE, TOKEN_ELEVATION, TOKEN_QUERY};
use windows::core::{w, HSTRING, PCWSTR};
use windows::Win32::Foundation::HWND;
use windows::Win32::UI::Shell::ShellExecuteW;
use windows::Win32::UI::WindowsAndMessaging::SW_HIDE;
/// The implementation of state check and elevated executing varies on each platform
impl Command {
/// Check the state of the current program
///
/// Return `true` if the program is running as root, otherwise false
///
/// # Examples
///
/// ```no_run
/// use elevated_command::Command;
///
/// fn main() {
/// let is_elevated = Command::is_elevated();
///
/// }
/// ```
pub fn is_elevated() -> bool {
// Thanks to https://stackoverflow.com/a/8196291
unsafe {
let mut current_token_ptr: HANDLE = mem::zeroed();
let mut token_elevation: TOKEN_ELEVATION = mem::zeroed();
let token_elevation_type_ptr: *mut TOKEN_ELEVATION = &mut token_elevation;
let mut size: DWORD = 0;
let result = OpenProcessToken(GetCurrentProcess(), TOKEN_QUERY, &mut current_token_ptr);
if result != 0 {
let result = GetTokenInformation(
current_token_ptr,
TokenElevation,
token_elevation_type_ptr as LPVOID,
mem::size_of::<winapi::um::winnt::TOKEN_ELEVATION_TYPE>() as u32,
&mut size,
);
if result != 0 {
return token_elevation.TokenIsElevated != 0;
}
}
}
false
}
/// Prompting the user with a graphical OS dialog for the root password,
/// executing the command with escalated privileges, and return the output
///
/// On Windows, according to https://learn.microsoft.com/en-us/windows/win32/api/shellapi/nf-shellapi-shellexecutew#return-value,
/// Output.status.code() should be greater than 32 if the function succeeds,
/// otherwise the value indicates the cause of the failure
///
/// On Windows, Output.stdout and Output.stderr will always be empty as of now
///
/// # Examples
///
/// ```no_run
/// use elevated_command::Command;
/// use std::process::Command as StdCommand;
///
/// fn main() {
/// let mut cmd = StdCommand::new("path to the application");
/// let elevated_cmd = Command::new(cmd);
/// let output = elevated_cmd.output().unwrap();
/// }
/// ```
pub fn output(&self) -> Result<Output> {
let args = self
.cmd
.get_args()
.map(|c| c.to_str().unwrap().to_string())
.collect::<Vec<String>>();
let parameters = if args.is_empty() {
HSTRING::new()
} else {
let arg_str = args.join(" ");
HSTRING::from(arg_str)
};
// according to https://stackoverflow.com/a/38034535
// the cwd always points to %SystemRoot%\System32 and cannot be changed by setting lpdirectory param
let r = unsafe {
ShellExecuteW(
HWND(0),
w!("runas"),
&HSTRING::from(self.cmd.get_program()),
&HSTRING::from(parameters),
PCWSTR::null(),
SW_HIDE,
)
};
Ok(Output {
status: ExitStatus::from_raw(r.0 as u32),
stdout: Vec::<u8>::new(),
stderr: Vec::<u8>::new(),
})
}
}

View File

@@ -1,8 +1,6 @@
// Prevents additional console window on Windows in release, DO NOT REMOVE!! // Prevents additional console window on Windows in release, DO NOT REMOVE!!
#![cfg_attr(not(debug_assertions), windows_subsystem = "windows")] #![cfg_attr(not(debug_assertions), windows_subsystem = "windows")]
mod elevate;
use std::collections::BTreeMap; use std::collections::BTreeMap;
use easytier::{ use easytier::{
@@ -130,7 +128,7 @@ fn toggle_window_visibility<R: tauri::Runtime>(app: &tauri::AppHandle<R>) {
#[cfg(not(target_os = "android"))] #[cfg(not(target_os = "android"))]
fn check_sudo() -> bool { fn check_sudo() -> bool {
let is_elevated = elevate::Command::is_elevated(); let is_elevated = elevated_command::Command::is_elevated();
if !is_elevated { if !is_elevated {
let exe_path = std::env::var("APPIMAGE") let exe_path = std::env::var("APPIMAGE")
.ok() .ok()
@@ -141,7 +139,7 @@ fn check_sudo() -> bool {
if args.contains(&AUTOSTART_ARG.to_owned()) { if args.contains(&AUTOSTART_ARG.to_owned()) {
stdcmd.arg(AUTOSTART_ARG); stdcmd.arg(AUTOSTART_ARG);
} }
elevate::Command::new(stdcmd) elevated_command::Command::new(stdcmd)
.output() .output()
.expect("Failed to run elevated command"); .expect("Failed to run elevated command");
} }
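check_sudo() above re-launches the GUI through a privilege-escalation prompt when it is not already elevated; one side uses the vendored `elevate` module, the other the published `elevated-command` crate, with the same call shape. A hedged usage sketch built only from the API visible in this diff (`is_elevated()`, `Command::new(StdCommand)`, `output()`); the `relaunch_elevated` helper and the `--autostart` literal are assumptions for illustration, not the project's real helper or the actual value of `AUTOSTART_ARG`:

```rust
use elevated_command::Command;
use std::process::Command as StdCommand;

// Sketch: if not elevated, re-run the given binary through the platform's
// privilege-escalation dialog, forwarding the autostart flag when requested.
fn relaunch_elevated(exe_path: &str, forward_autostart: bool) -> anyhow::Result<()> {
    if Command::is_elevated() {
        return Ok(());
    }
    let mut stdcmd = StdCommand::new(exe_path);
    if forward_autostart {
        stdcmd.arg("--autostart"); // assumed placeholder for AUTOSTART_ARG
    }
    let elevated = Command::new(stdcmd);
    // Runs the command with escalated privileges and returns its Output.
    let _output = elevated.output()?;
    Ok(())
}
```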

View File

@@ -17,7 +17,7 @@
"createUpdaterArtifacts": false "createUpdaterArtifacts": false
}, },
"productName": "easytier-gui", "productName": "easytier-gui",
"version": "2.4.2", "version": "2.4.0",
"identifier": "com.kkrainbow.easytier", "identifier": "com.kkrainbow.easytier",
"plugins": {}, "plugins": {},
"app": { "app": {

View File

@@ -115,11 +115,6 @@ function getRoutesForVpn(routes: Route[]): string[] {
async function onNetworkInstanceChange() { async function onNetworkInstanceChange() {
console.error('vpn service watch network instance change ids', JSON.stringify(networkStore.networkInstanceIds)) console.error('vpn service watch network instance change ids', JSON.stringify(networkStore.networkInstanceIds))
const insts = networkStore.networkInstanceIds const insts = networkStore.networkInstanceIds
const no_tun = networkStore.isNoTunEnabled(insts[0])
if (no_tun) {
await doStopVpn()
return
}
if (!insts) { if (!insts) {
await doStopVpn() await doStopVpn()
return return
@@ -137,6 +132,14 @@ async function onNetworkInstanceChange() {
return return
} }
// if use no tun mode, stop the vpn service
const no_tun = networkStore.isNoTunEnabled(insts[0])
if (no_tun) {
console.error('no tun mode, stop vpn service')
await doStopVpn()
return
}
let network_length = curNetworkInfo?.my_node_info?.virtual_ipv4.network_length let network_length = curNetworkInfo?.my_node_info?.virtual_ipv4.network_length
if (!network_length) { if (!network_length) {
network_length = 24 network_length = 24
@@ -184,26 +187,12 @@ async function watchNetworkInstance() {
console.error('vpn service watch network instance') console.error('vpn service watch network instance')
} }
function isNoTunEnabled(instanceId: string | undefined) {
if (!instanceId) {
return false
}
const no_tun = networkStore.isNoTunEnabled(instanceId)
if (no_tun) {
return true
}
return false
}
export async function initMobileVpnService() { export async function initMobileVpnService() {
await registerVpnServiceListener() await registerVpnServiceListener()
await watchNetworkInstance() await watchNetworkInstance()
} }
export async function prepareVpnService(instanceId: string) { export async function prepareVpnService() {
if (isNoTunEnabled(instanceId)) {
return
}
console.log('prepare vpn') console.log('prepare vpn')
const prepare_ret = await prepare_vpn() const prepare_ret = await prepare_vpn()
console.log('prepare vpn', JSON.stringify((prepare_ret))) console.log('prepare vpn', JSON.stringify((prepare_ret)))

View File

@@ -102,7 +102,7 @@ networkStore.$subscribe(async () => {
async function runNetworkCb(cfg: NetworkTypes.NetworkConfig, cb: () => void) { async function runNetworkCb(cfg: NetworkTypes.NetworkConfig, cb: () => void) {
if (type() === 'android') { if (type() === 'android') {
await prepareVpnService(cfg.instance_id) await prepareVpnService()
networkStore.clearNetworkInstances() networkStore.clearNetworkInstances()
} }
else { else {

View File

@@ -8,7 +8,7 @@ repository = "https://github.com/EasyTier/EasyTier"
authors = ["kkrainbow"] authors = ["kkrainbow"]
keywords = ["vpn", "p2p", "network", "easytier"] keywords = ["vpn", "p2p", "network", "easytier"]
categories = ["network-programming", "command-line-utilities"] categories = ["network-programming", "command-line-utilities"]
rust-version = "1.89.0" rust-version = "1.87.0"
license-file = "LICENSE" license-file = "LICENSE"
readme = "README.md" readme = "README.md"

View File

@@ -14,11 +14,18 @@ const NAMESPACE: &str = "easytier::proto::rpc_types";
/// ///
/// See the crate-level documentation for more info. /// See the crate-level documentation for more info.
#[allow(missing_copy_implementations)] #[allow(missing_copy_implementations)]
#[derive(Clone, Debug, Default)] #[derive(Clone, Debug)]
pub struct ServiceGenerator { pub struct ServiceGenerator {
_private: (), _private: (),
} }
impl ServiceGenerator {
/// Create a new `ServiceGenerator` instance with the default options set.
pub fn new() -> ServiceGenerator {
ServiceGenerator { _private: () }
}
}
impl prost_build::ServiceGenerator for ServiceGenerator { impl prost_build::ServiceGenerator for ServiceGenerator {
fn generate(&mut self, service: prost_build::Service, mut buf: &mut String) { fn generate(&mut self, service: prost_build::Service, mut buf: &mut String) {
use std::fmt::Write; use std::fmt::Write;
@@ -71,7 +78,7 @@ impl prost_build::ServiceGenerator for ServiceGenerator {
enum_methods, enum_methods,
" {name} = {index},", " {name} = {index},",
name = method.proto_name, name = method.proto_name,
index = idx + 1 index = format!("{}", idx + 1)
) )
.unwrap(); .unwrap();
@@ -80,7 +87,7 @@ impl prost_build::ServiceGenerator for ServiceGenerator {
" {index} => Ok({service_name}MethodDescriptor::{name}),", " {index} => Ok({service_name}MethodDescriptor::{name}),",
service_name = service.name, service_name = service.name,
name = method.proto_name, name = method.proto_name,
index = idx + 1, index = format!("{}", idx + 1),
) )
.unwrap(); .unwrap();
@@ -95,12 +102,12 @@ impl prost_build::ServiceGenerator for ServiceGenerator {
writeln!( writeln!(
client_methods, client_methods,
r#" async fn {name}(&self, ctrl: H::Controller, input: {input_type}) -> {namespace}::error::Result<{output_type}> {{ r#" async fn {name}(&self, ctrl: H::Controller, input: {input_type}) -> {namespace}::error::Result<{output_type}> {{
{client_name}Client::{name}_inner(self.0.clone(), ctrl, input).await {client_name}::{name}_inner(self.0.clone(), ctrl, input).await
}}"#, }}"#,
name = method.name, name = method.name,
input_type = method.input_type, input_type = method.input_type,
output_type = method.output_type, output_type = method.output_type,
client_name = service.name, client_name = format!("{}Client", service.name),
namespace = NAMESPACE, namespace = NAMESPACE,
) )
.unwrap(); .unwrap();
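The ServiceGenerator hunk switches between `#[derive(Default)]` and a hand-written `new()`; for a struct whose only field is a private `_private: ()`, the two construction paths are interchangeable. Likewise, `write!`/`writeln!` named arguments accept any `Display` value, so passing `idx + 1` directly works without the `format!` wrapper. A minimal sketch, with `ServiceGeneratorSketch` as an illustrative type rather than the real generator:

```rust
/// Illustrative unit-like struct mirroring the shape of `ServiceGenerator`.
#[derive(Clone, Debug, Default)]
pub struct ServiceGeneratorSketch {
    _private: (),
}

impl ServiceGeneratorSketch {
    /// Hand-written constructor; with the derive above, `Self::default()`
    /// already produces the same value.
    pub fn new() -> Self {
        Self::default()
    }
}

fn main() {
    use std::fmt::Write;

    let (a, b) = (ServiceGeneratorSketch::new(), ServiceGeneratorSketch::default());
    println!("{a:?} {b:?}");

    // Named arguments take any Display value directly; no format!() needed.
    let mut buf = String::new();
    let idx = 0usize;
    writeln!(buf, "Method = {index},", index = idx + 1).unwrap();
    assert_eq!(buf.trim_end(), "Method = 1,");
}
```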

View File

@@ -1,6 +1,6 @@
[package] [package]
name = "easytier-web" name = "easytier-web"
version = "2.4.2" version = "2.4.0"
edition = "2021" edition = "2021"
description = "Config server for easytier. easytier-core gets config from this and web frontend use it as restful api server." description = "Config server for easytier. easytier-core gets config from this and web frontend use it as restful api server."

View File

@@ -1,10 +1,7 @@
fn main() { fn main() {
// enable thunk-rs when target os is windows and arch is x86_64 or i686 // enable thunk-rs when target os is windows and arch is x86_64 or i686
#[cfg(target_os = "windows")] #[cfg(target_os = "windows")]
if !std::env::var("TARGET") if !std::env::var("TARGET").unwrap_or_default().contains("aarch64"){
.unwrap_or_default() thunk::thunk();
.contains("aarch64") }
{ }
thunk::thunk();
}
}

View File

@@ -2,13 +2,7 @@
import InputGroup from 'primevue/inputgroup' import InputGroup from 'primevue/inputgroup'
import InputGroupAddon from 'primevue/inputgroupaddon' import InputGroupAddon from 'primevue/inputgroupaddon'
import { SelectButton, Checkbox, InputText, InputNumber, AutoComplete, Panel, Divider, ToggleButton, Button, Password } from 'primevue' import { SelectButton, Checkbox, InputText, InputNumber, AutoComplete, Panel, Divider, ToggleButton, Button, Password } from 'primevue'
import { import { DEFAULT_NETWORK_CONFIG, NetworkConfig, NetworkingMethod } from '../types/network'
addRow,
DEFAULT_NETWORK_CONFIG,
NetworkConfig,
NetworkingMethod,
removeRow
} from '../types/network'
import { defineProps, defineEmits, ref, } from 'vue' import { defineProps, defineEmits, ref, } from 'vue'
import { useI18n } from 'vue-i18n' import { useI18n } from 'vue-i18n'
@@ -169,8 +163,6 @@ const bool_flags: BoolFlag[] = [
{ field: 'enable_private_mode', help: 'enable_private_mode_help' }, { field: 'enable_private_mode', help: 'enable_private_mode_help' },
] ]
const portForwardProtocolOptions = ref(["tcp","udp"]);
</script> </script>
<template> <template>
@@ -424,73 +416,6 @@ const portForwardProtocolOptions = ref(["tcp","udp"]);
</div> </div>
</Panel> </Panel>
<Divider />
<Panel :header="t('port_forwards')" toggleable collapsed>
<div class="flex flex-col gap-y-2">
<div class="flex flex-row gap-x-9 flex-wrap w-full">
<div class="flex flex-col gap-2 grow p-fluid">
<div class="flex">
<label for="port_forwards">{{ t('port_forwards_help') }}</label>
</div>
<div v-for="(row, index) in curNetwork.port_forwards" class="form-row">
<div style="display: flex; gap: 0.5rem; align-items: flex-end;">
<SelectButton v-model="row.proto" :options="portForwardProtocolOptions" :allow-empty="false"/>
<div style="flex-grow: 4;">
<InputGroup>
<InputText
v-model="row.bind_ip"
:placeholder="t('port_forwards_bind_addr')"
/>
<InputGroupAddon>
<span style="font-weight: bold">:</span>
</InputGroupAddon>
<InputNumber v-model="row.bind_port" :format="false"
inputId="horizontal-buttons" :step="1" mode="decimal" :min="1"
:max="65535" fluid
class="max-w-20"/>
</InputGroup>
</div>
<div style="flex-grow: 4;">
<InputGroup>
<InputText
v-model="row.dst_ip"
:placeholder="t('port_forwards_dst_addr')"
/>
<InputGroupAddon>
<span style="font-weight: bold">:</span>
</InputGroupAddon>
<InputNumber v-model="row.dst_port" :format="false"
inputId="horizontal-buttons" :step="1" mode="decimal" :min="1"
:max="65535" fluid
class="max-w-20"/>
</InputGroup>
</div>
<div style="flex-grow: 1;">
<Button
v-if="curNetwork.port_forwards.length > 0"
icon="pi pi-trash"
severity="danger"
text
rounded
@click="removeRow(index,curNetwork.port_forwards)"
/>
</div>
</div>
</div>
<div class="flex justify-content-end mt-4">
<Button
icon="pi pi-plus"
:label="t('port_forwards_add_btn')"
severity="success"
@click="addRow(curNetwork.port_forwards)"
/>
</div>
</div>
</div>
</div>
</Panel>
<div class="flex pt-6 justify-center"> <div class="flex pt-6 justify-center">
<Button :label="t('run_network')" icon="pi pi-arrow-right" icon-pos="right" :disabled="configInvalid" <Button :label="t('run_network')" icon="pi pi-arrow-right" icon-pos="right" :disabled="configInvalid"
@click="$emit('runNetwork', curNetwork)" /> @click="$emit('runNetwork', curNetwork)" />

View File

@@ -150,12 +150,6 @@ socks5_help: |
exit_nodes: 出口节点列表 exit_nodes: 出口节点列表
exit_nodes_help: 转发所有流量的出口节点虚拟IPv4地址优先级由列表顺序决定 exit_nodes_help: 转发所有流量的出口节点虚拟IPv4地址优先级由列表顺序决定
port_forwards: 端口转发
port_forwards_help: "将本地端口转发到虚拟网络中的远程端口。例如udp://0.0.0.0:12345/10.126.126.1:23456表示将本地UDP端口12345转发到虚拟网络中的10.126.126.1:23456。可以指定多个。"
port_forwards_bind_addr: "绑定地址0.0.0.0"
port_forwards_dst_addr: "目标地址10.126.126.1"
port_forwards_add_btn: "添加"
mtu: MTU mtu: MTU
mtu_help: | mtu_help: |

View File

@@ -151,12 +151,6 @@ socks5_help: |
exit_nodes: Exit Nodes exit_nodes: Exit Nodes
exit_nodes_help: Exit nodes to forward all traffic to, a virtual ipv4 address, priority is determined by the order of the list exit_nodes_help: Exit nodes to forward all traffic to, a virtual ipv4 address, priority is determined by the order of the list
port_forwards: Port Forward
port_forwards_help: "forward local port to remote port in virtual network. e.g.: udp://0.0.0.0:12345/10.126.126.1:23456, means forward local udp port 12345 to 10.126.126.1:23456 in the virtual network. can specify multiple."
port_forwards_bind_addr: "Bind address, e.g.: 0.0.0.0"
port_forwards_dst_addr: "Destination address, e.g.: 10.126.126.1"
port_forwards_add_btn: "Add"
mtu: MTU mtu: MTU
mtu_help: | mtu_help: |
MTU of the TUN device, default is 1380 for non-encryption, 1360 for encryption. Range:400-1380 MTU of the TUN device, default is 1380 for non-encryption, 1360 for encryption. Range:400-1380
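The removed port_forwards_help strings above describe a forward spec of the form proto://bind_addr/dst_addr, e.g. udp://0.0.0.0:12345/10.126.126.1:23456. As a reading aid only (this is not EasyTier's actual parser), a minimal self-contained Rust sketch of splitting that shape into its parts:

    use std::net::SocketAddr;

    // Split "udp://0.0.0.0:12345/10.126.126.1:23456" into (proto, bind, dst).
    fn parse_forward(spec: &str) -> Option<(String, SocketAddr, SocketAddr)> {
        let (proto, rest) = spec.split_once("://")?;
        let (bind, dst) = rest.split_once('/')?;
        Some((proto.to_string(), bind.parse().ok()?, dst.parse().ok()?))
    }

    fn main() {
        let (proto, bind, dst) = parse_forward("udp://0.0.0.0:12345/10.126.126.1:23456").unwrap();
        assert_eq!(proto, "udp");
        assert_eq!(bind.port(), 12345);
        assert_eq!(dst.to_string(), "10.126.126.1:23456");
    }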
View File
@@ -70,8 +70,6 @@ export interface NetworkConfig {
enable_private_mode?: boolean enable_private_mode?: boolean
rpc_portal_whitelists: string[] rpc_portal_whitelists: string[]
port_forwards: PortForwardConfig[]
} }
export function DEFAULT_NETWORK_CONFIG(): NetworkConfig { export function DEFAULT_NETWORK_CONFIG(): NetworkConfig {
@@ -134,7 +132,6 @@ export function DEFAULT_NETWORK_CONFIG(): NetworkConfig {
enable_magic_dns: false, enable_magic_dns: false,
enable_private_mode: false, enable_private_mode: false,
rpc_portal_whitelists: [], rpc_portal_whitelists: [],
port_forwards: [],
} }
} }
@@ -258,30 +255,6 @@ export interface PeerConnStats {
latency_us: number latency_us: number
} }
export interface PortForwardConfig {
bind_ip: string,
bind_port: number,
dst_ip: string,
dst_port: number,
proto: string
}
// 添加新行
export const addRow = (rows: PortForwardConfig[]) => {
rows.push({
proto: 'tcp',
bind_ip: '',
bind_port: 65535,
dst_ip: '',
dst_port: 65535,
});
};
// 删除行
export const removeRow = (index: number, rows: PortForwardConfig[]) => {
rows.splice(index, 1);
};
export enum EventType { export enum EventType {
TunDeviceReady = 'TunDeviceReady', // string TunDeviceReady = 'TunDeviceReady', // string
TunDeviceError = 'TunDeviceError', // string TunDeviceError = 'TunDeviceError', // string
View File
@@ -25,7 +25,7 @@ fn load_geoip_db(geoip_db: Option<String>) -> Option<maxminddb::Reader<Vec<u8>>>
match maxminddb::Reader::open_readfile(&path) { match maxminddb::Reader::open_readfile(&path) {
Ok(reader) => { Ok(reader) => {
tracing::info!("Successfully loaded GeoIP2 database from {}", path); tracing::info!("Successfully loaded GeoIP2 database from {}", path);
Some(reader) return Some(reader);
} }
Err(err) => { Err(err) => {
tracing::debug!("Failed to load GeoIP2 database from {}: {}", path, err); tracing::debug!("Failed to load GeoIP2 database from {}: {}", path, err);
@@ -207,8 +207,10 @@ impl ClientManager {
let region = city.subdivisions.map(|r| { let region = city.subdivisions.map(|r| {
r.iter() r.iter()
.filter_map(|x| x.names.as_ref()) .map(|x| x.names.as_ref())
.filter_map(|x| x.get("zh-CN").or_else(|| x.get("en"))) .flatten()
.map(|x| x.get("zh-CN").or_else(|| x.get("en")))
.flatten()
.map(|x| x.to_string()) .map(|x| x.to_string())
.collect::<Vec<_>>() .collect::<Vec<_>>()
.join(",") .join(",")
View File
@@ -94,10 +94,14 @@ impl SessionRpcService {
return Ok(HeartbeatResponse {}); return Ok(HeartbeatResponse {});
}; };
let machine_id: uuid::Uuid = req.machine_id.map(Into::into).ok_or(anyhow::anyhow!( let machine_id: uuid::Uuid =
"Machine id is not set correctly, expect uuid but got: {:?}",
req.machine_id req.machine_id
))?; .clone()
.map(Into::into)
.ok_or(anyhow::anyhow!(
"Machine id is not set correctly, expect uuid but got: {:?}",
req.machine_id
))?;
let user_id = storage let user_id = storage
.db() .db()
@@ -117,7 +121,7 @@ impl SessionRpcService {
if data.req.replace(req.clone()).is_none() { if data.req.replace(req.clone()).is_none() {
assert!(data.storage_token.is_none()); assert!(data.storage_token.is_none());
data.storage_token = Some(StorageToken { data.storage_token = Some(StorageToken {
token: req.user_token.clone(), token: req.user_token.clone().into(),
client_url: data.client_url.clone(), client_url: data.client_url.clone(),
machine_id, machine_id,
user_id, user_id,
View File
@@ -34,7 +34,7 @@ impl TryFrom<WeakRefStorage> for Storage {
type Error = (); type Error = ();
fn try_from(weak: Weak<StorageInner>) -> Result<Self, Self::Error> { fn try_from(weak: Weak<StorageInner>) -> Result<Self, Self::Error> {
weak.upgrade().map(Storage).ok_or(()) weak.upgrade().map(|inner| Storage(inner)).ok_or(())
} }
} }
@@ -51,7 +51,9 @@ impl Storage {
machine_id: &uuid::Uuid, machine_id: &uuid::Uuid,
client_url: &url::Url, client_url: &url::Url,
) { ) {
map.remove_if(machine_id, |_, v| v.storage_token.client_url == *client_url); map.remove_if(&machine_id, |_, v| {
v.storage_token.client_url == *client_url
});
} }
fn update_mid_to_client_info_map( fn update_mid_to_client_info_map(
@@ -72,7 +74,11 @@ impl Storage {
} }
pub fn update_client(&self, stoken: StorageToken, report_time: i64) { pub fn update_client(&self, stoken: StorageToken, report_time: i64) {
let inner = self.0.user_clients_map.entry(stoken.user_id).or_default(); let inner = self
.0
.user_clients_map
.entry(stoken.user_id)
.or_insert_with(DashMap::new);
let client_info = ClientInfo { let client_info = ClientInfo {
storage_token: stoken.clone(), storage_token: stoken.clone(),
View File
@@ -151,7 +151,7 @@ async fn get_dual_stack_listener(
} else { } else {
None None
}; };
let v4_listener = if local_ipv4().await.is_ok() { let v4_listener = if let Ok(_) = local_ipv4().await {
get_listener_by_url(&format!("{}://0.0.0.0:{}", protocol, port).parse().unwrap()).ok() get_listener_by_url(&format!("{}://0.0.0.0:{}", protocol, port).parse().unwrap()).ok()
} else { } else {
None None
View File
@@ -137,7 +137,7 @@ mod post {
mod get { mod get {
use crate::restful::{ use crate::restful::{
captcha::{ captcha::{
builder::spec::SpecCaptcha, captcha::spec::SpecCaptcha,
extension::{axum_tower_sessions::CaptchaAxumTowerSessionExt as _, CaptchaUtil}, extension::{axum_tower_sessions::CaptchaAxumTowerSessionExt as _, CaptchaUtil},
NewCaptcha as _, NewCaptcha as _,
}, },
View File
@@ -46,22 +46,22 @@ pub(crate) struct Captcha {
/// 验证码文本类型 The character type of the captcha /// 验证码文本类型 The character type of the captcha
pub enum CaptchaType { pub enum CaptchaType {
/// 字母数字混合 /// 字母数字混合
Default = 1, TypeDefault = 1,
/// 纯数字 /// 纯数字
OnlyNumber, TypeOnlyNumber,
/// 纯字母 /// 纯字母
OnlyChar, TypeOnlyChar,
/// 纯大写字母 /// 纯大写字母
OnlyUpper, TypeOnlyUpper,
/// 纯小写字母 /// 纯小写字母
OnlyLower, TypeOnlyLower,
/// 数字大写字母 /// 数字大写字母
NumAndUpper, TypeNumAndUpper,
} }
/// 内置字体 Fonts shipped with the library /// 内置字体 Fonts shipped with the library
@@ -92,29 +92,29 @@ impl Captcha {
/// 生成随机验证码 /// 生成随机验证码
pub fn alphas(&mut self) -> Vec<char> { pub fn alphas(&mut self) -> Vec<char> {
let mut cs = vec!['\0'; self.len]; let mut cs = vec!['\0'; self.len];
for cs_i in cs.iter_mut() { for i in 0..self.len {
match self.char_type { match self.char_type {
CaptchaType::Default => *cs_i = self.randoms.alpha(), CaptchaType::TypeDefault => cs[i] = self.randoms.alpha(),
CaptchaType::OnlyNumber => { CaptchaType::TypeOnlyNumber => {
*cs_i = self.randoms.alpha_under(self.randoms.num_max_index) cs[i] = self.randoms.alpha_under(self.randoms.num_max_index)
} }
CaptchaType::OnlyChar => { CaptchaType::TypeOnlyChar => {
*cs_i = self cs[i] = self
.randoms .randoms
.alpha_between(self.randoms.char_min_index, self.randoms.char_max_index) .alpha_between(self.randoms.char_min_index, self.randoms.char_max_index)
} }
CaptchaType::OnlyUpper => { CaptchaType::TypeOnlyUpper => {
*cs_i = self cs[i] = self
.randoms .randoms
.alpha_between(self.randoms.upper_min_index, self.randoms.upper_max_index) .alpha_between(self.randoms.upper_min_index, self.randoms.upper_max_index)
} }
CaptchaType::OnlyLower => { CaptchaType::TypeOnlyLower => {
*cs_i = self cs[i] = self
.randoms .randoms
.alpha_between(self.randoms.lower_min_index, self.randoms.lower_max_index) .alpha_between(self.randoms.lower_min_index, self.randoms.lower_max_index)
} }
CaptchaType::NumAndUpper => { CaptchaType::TypeNumAndUpper => {
*cs_i = self.randoms.alpha_under(self.randoms.upper_max_index) cs[i] = self.randoms.alpha_under(self.randoms.upper_max_index)
} }
} }
} }
@@ -142,7 +142,7 @@ impl Captcha {
} }
} }
pub fn get_font(&'_ mut self) -> Arc<Font<'_>> { pub fn get_font(&mut self) -> Arc<Font> {
if let Some(font) = font::get_font(&self.font_name) { if let Some(font) = font::get_font(&self.font_name) {
font font
} else { } else {
@@ -185,7 +185,6 @@ where
/// 特别地/In particular: /// 特别地/In particular:
/// ///
/// - 对算术验证码[ArithmeticCaptcha](crate::captcha::arithmetic::ArithmeticCaptcha)而言,这里的`len`是验证码中数字的数量。 /// - 对算术验证码[ArithmeticCaptcha](crate::captcha::arithmetic::ArithmeticCaptcha)而言,这里的`len`是验证码中数字的数量。
///
/// For [ArithmeticCaptcha](crate::captcha::arithmetic::ArithmeticCaptcha), the `len` presents the count of the digits /// For [ArithmeticCaptcha](crate::captcha::arithmetic::ArithmeticCaptcha), the `len` presents the count of the digits
/// in the Captcha. /// in the Captcha.
fn with_size_and_len(width: i32, height: i32, len: usize) -> Self; fn with_size_and_len(width: i32, height: i32, len: usize) -> Self;
@@ -227,7 +226,7 @@ impl NewCaptcha for Captcha {
let len = 5; let len = 5;
let width = 130; let width = 130;
let height = 48; let height = 48;
let char_type = CaptchaType::Default; let char_type = CaptchaType::TypeDefault;
let chars = None; let chars = None;
Self { Self {
View File
@@ -1,4 +1,6 @@
use rand::random;
use rand::{random};
/// 随机数工具类 /// 随机数工具类
pub(crate) struct Randoms { pub(crate) struct Randoms {
View File
@@ -10,7 +10,7 @@ use axum::response::Response;
use std::fmt::Debug; use std::fmt::Debug;
use tower_sessions::Session; use tower_sessions::Session;
const CAPTCHA_KEY: &str = "ez-captcha"; const CAPTCHA_KEY: &'static str = "ez-captcha";
/// Axum & Tower_Sessions /// Axum & Tower_Sessions
#[async_trait] #[async_trait]
@@ -32,7 +32,7 @@ pub trait CaptchaAxumTowerSessionStaticExt {
/// Verify the Captcha code, and return whether user's code is correct. /// Verify the Captcha code, and return whether user's code is correct.
async fn ver(code: &str, session: &Session) -> bool { async fn ver(code: &str, session: &Session) -> bool {
match session.get::<String>(CAPTCHA_KEY).await { match session.get::<String>(CAPTCHA_KEY).await {
Ok(Some(ans)) => ans.eq_ignore_ascii_case(code), Ok(Some(ans)) => ans.to_ascii_lowercase() == code.to_ascii_lowercase(),
_ => false, _ => false,
} }
} }
View File
@@ -1,7 +1,7 @@
pub mod axum_tower_sessions; pub mod axum_tower_sessions;
use super::base::captcha::AbstractCaptcha; use super::base::captcha::AbstractCaptcha;
use super::builder::spec::SpecCaptcha; use super::captcha::spec::SpecCaptcha;
use super::{CaptchaFont, NewCaptcha}; use super::{CaptchaFont, NewCaptcha};
/// 验证码工具类 - Captcha Utils /// 验证码工具类 - Captcha Utils
View File
@@ -117,7 +117,7 @@
#![allow(dead_code)] #![allow(dead_code)]
pub(crate) mod base; pub(crate) mod base;
pub mod builder; pub mod captcha;
pub mod extension; pub mod extension;
mod utils; mod utils;
View File
@@ -32,24 +32,21 @@ impl From<(u8, u8, u8)> for Color {
} }
} }
impl From<Color> for (u8, u8, u8, u8) { impl Into<(u8, u8, u8, u8)> for Color {
fn from(val: Color) -> Self { fn into(self) -> (u8, u8, u8, u8) {
( (
(val.0 * 255.0) as u8, (self.0 * 255.0) as u8,
(val.1 * 255.0) as u8, (self.1 * 255.0) as u8,
(val.2 * 255.0) as u8, (self.2 * 255.0) as u8,
(val.3 * 255.0) as u8, (self.3 * 255.0) as u8,
) )
} }
} }
impl From<Color> for u32 { impl Into<u32> for Color {
fn from(val: Color) -> Self { fn into(self) -> u32 {
let color: (u8, u8, u8, u8) = val.into(); let color: (u8, u8, u8, u8) = self.into();
(color.0 as u32) (color.0 as u32) << 24 + (color.1 as u32) << 16 + (color.2 as u32) << 8 + (color.3 as u32)
<< (24 + (color.1 as u32))
<< (16 + (color.2 as u32))
<< (8 + (color.3 as u32))
} }
} }
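In the hunk above, both versions of the Color-to-u32 conversion fold the channel values into the shift amounts (the parenthesised form only spells out the precedence that a << b + c already has in Rust, where + binds tighter than <<). For comparison, the conventional 0xRRGGBBAA packing ORs independently shifted channels; a standalone sketch, offered as an illustration rather than as the crate's behavior:

    // Conventional 0xRRGGBBAA packing: shift each channel, then OR them together.
    fn pack_rgba(r: u8, g: u8, b: u8, a: u8) -> u32 {
        ((r as u32) << 24) | ((g as u32) << 16) | ((b as u32) << 8) | (a as u32)
    }

    fn main() {
        assert_eq!(pack_rgba(0x12, 0x34, 0x56, 0x78), 0x1234_5678);
    }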
View File
@@ -11,7 +11,7 @@ struct FontAssets;
// pub(crate) static ref FONTS: RwLock<HashMap<String, Arc<Font>>> = Default::default(); // pub(crate) static ref FONTS: RwLock<HashMap<String, Arc<Font>>> = Default::default();
// } // }
pub fn get_font(font_name: &'_ str) -> Option<Arc<Font<'_>>> { pub fn get_font(font_name: &str) -> Option<Arc<Font>> {
// let fonts_cell = FONTS.get_or_init(|| Default::default()); // let fonts_cell = FONTS.get_or_init(|| Default::default());
// let guard = fonts_cell.read(); // let guard = fonts_cell.read();
// //
@@ -31,7 +31,7 @@ pub fn get_font(font_name: &'_ str) -> Option<Arc<Font<'_>>> {
// } // }
} }
pub fn load_font(font_name: &'_ str) -> Result<Option<Font<'_>>, Box<dyn Error>> { pub fn load_font(font_name: &str) -> Result<Option<Font>, Box<dyn Error>> {
match FontAssets::get(font_name) { match FontAssets::get(font_name) {
Some(assets) => { Some(assets) => {
let font = Font::try_from_vec(Vec::from(assets.data)).unwrap(); let font = Font::try_from_vec(Vec::from(assets.data)).unwrap();
View File
@@ -143,7 +143,7 @@ impl RestfulServer {
return Err((StatusCode::UNAUTHORIZED, other_error("No such user").into())); return Err((StatusCode::UNAUTHORIZED, other_error("No such user").into()));
}; };
let machines = client_mgr.list_machine_by_user_id(user.id()).await; let machines = client_mgr.list_machine_by_user_id(user.id().clone()).await;
Ok(GetSummaryJsonResp { Ok(GetSummaryJsonResp {
device_count: machines.len() as u32, device_count: machines.len() as u32,
View File
@@ -8,7 +8,7 @@ use axum_login::AuthUser;
use easytier::launcher::NetworkConfig; use easytier::launcher::NetworkConfig;
use easytier::proto::common::Void; use easytier::proto::common::Void;
use easytier::proto::rpc_types::controller::BaseController; use easytier::proto::rpc_types::controller::BaseController;
use easytier::proto::{self, web::*}; use easytier::proto::web::*;
use crate::client_manager::session::{Location, Session}; use crate::client_manager::session::{Location, Session};
use crate::client_manager::ClientManager; use crate::client_manager::ClientManager;
@@ -85,7 +85,7 @@ impl NetworkApi {
let Some(user_id) = auth_session.user.as_ref().map(|x| x.id()) else { let Some(user_id) = auth_session.user.as_ref().map(|x| x.id()) else {
return Err(( return Err((
StatusCode::UNAUTHORIZED, StatusCode::UNAUTHORIZED,
other_error("No user id found".to_string()).into(), other_error(format!("No user id found")).into(),
)); ));
}; };
Ok(user_id) Ok(user_id)
@@ -108,7 +108,7 @@ impl NetworkApi {
let Some(token) = result.get_token().await else { let Some(token) = result.get_token().await else {
return Err(( return Err((
StatusCode::UNAUTHORIZED, StatusCode::UNAUTHORIZED,
other_error("No token reported".to_string()).into(), other_error(format!("No token reported")).into(),
)); ));
}; };
@@ -120,7 +120,7 @@ impl NetworkApi {
{ {
return Err(( return Err((
StatusCode::FORBIDDEN, StatusCode::FORBIDDEN,
other_error("Token mismatch".to_string()).into(), other_error(format!("Token mismatch")).into(),
)); ));
} }
@@ -177,7 +177,7 @@ impl NetworkApi {
.insert_or_update_user_network_config( .insert_or_update_user_network_config(
auth_session.user.as_ref().unwrap().id(), auth_session.user.as_ref().unwrap().id(),
machine_id, machine_id,
resp.inst_id.unwrap_or_default().into(), resp.inst_id.clone().unwrap_or_default().into(),
serde_json::to_string(&config).unwrap(), serde_json::to_string(&config).unwrap(),
) )
.await .await
@@ -248,7 +248,7 @@ impl NetworkApi {
.await .await
.map_err(convert_rpc_error)?; .map_err(convert_rpc_error)?;
let running_inst_ids = ret.inst_ids.clone().into_iter().collect(); let running_inst_ids = ret.inst_ids.clone().into_iter().map(Into::into).collect();
// collect networks that are disabled // collect networks that are disabled
let disabled_inst_ids = client_mgr let disabled_inst_ids = client_mgr
@@ -261,7 +261,7 @@ impl NetworkApi {
.await .await
.map_err(convert_db_error)? .map_err(convert_db_error)?
.iter() .iter()
.map(|x| Into::<proto::common::Uuid>::into(x.network_instance_id.clone())) .filter_map(|x| x.network_instance_id.clone().try_into().ok())
.collect::<Vec<_>>(); .collect::<Vec<_>>();
Ok(ListNetworkInstanceIdsJsonResp { Ok(ListNetworkInstanceIdsJsonResp {
@@ -330,8 +330,9 @@ impl NetworkApi {
// not implement disable all // not implement disable all
return Err(( return Err((
StatusCode::NOT_IMPLEMENTED, StatusCode::NOT_IMPLEMENTED,
other_error("Not implemented".to_string()).into(), other_error(format!("Not implemented")).into(),
)); ))
.into();
}; };
let sess = Self::get_session_by_machine_id(&auth_session, &client_mgr, &machine_id).await?; let sess = Self::get_session_by_machine_id(&auth_session, &client_mgr, &machine_id).await?;
View File
@@ -76,32 +76,32 @@ impl Backend {
pub async fn register_new_user(&self, new_user: &RegisterNewUser) -> anyhow::Result<()> { pub async fn register_new_user(&self, new_user: &RegisterNewUser) -> anyhow::Result<()> {
let hashed_password = password_auth::generate_hash(new_user.credentials.password.as_str()); let hashed_password = password_auth::generate_hash(new_user.credentials.password.as_str());
let txn = self.db.orm_db().begin().await?; let mut txn = self.db.orm_db().begin().await?;
entity::users::ActiveModel { entity::users::ActiveModel {
username: Set(new_user.credentials.username.clone()), username: Set(new_user.credentials.username.clone()),
password: Set(hashed_password.clone()), password: Set(hashed_password.clone()),
..Default::default() ..Default::default()
} }
.save(&txn) .save(&mut txn)
.await?; .await?;
entity::users_groups::ActiveModel { entity::users_groups::ActiveModel {
user_id: Set(entity::users::Entity::find() user_id: Set(entity::users::Entity::find()
.filter(entity::users::Column::Username.eq(new_user.credentials.username.as_str())) .filter(entity::users::Column::Username.eq(new_user.credentials.username.as_str()))
.one(&txn) .one(&mut txn)
.await? .await?
.unwrap() .unwrap()
.id), .id),
group_id: Set(entity::groups::Entity::find() group_id: Set(entity::groups::Entity::find()
.filter(entity::groups::Column::Name.eq("users")) .filter(entity::groups::Column::Name.eq("users"))
.one(&txn) .one(&mut txn)
.await? .await?
.unwrap() .unwrap()
.id), .id),
..Default::default() ..Default::default()
} }
.save(&txn) .save(&mut txn)
.await?; .await?;
txn.commit().await?; txn.commit().await?;
View File
@@ -52,7 +52,9 @@ pub fn build_router(api_host: Option<url::Url>) -> Router {
router router
}; };
router.fallback_service(service) let router = router.fallback_service(service);
router
} }
pub struct WebServer { pub struct WebServer {
View File
@@ -3,12 +3,12 @@ name = "easytier"
description = "A full meshed p2p VPN, connecting all your devices in one network with one command." description = "A full meshed p2p VPN, connecting all your devices in one network with one command."
homepage = "https://github.com/EasyTier/EasyTier" homepage = "https://github.com/EasyTier/EasyTier"
repository = "https://github.com/EasyTier/EasyTier" repository = "https://github.com/EasyTier/EasyTier"
version = "2.4.2" version = "2.4.0"
edition = "2021" edition = "2021"
authors = ["kkrainbow"] authors = ["kkrainbow"]
keywords = ["vpn", "p2p", "network", "easytier"] keywords = ["vpn", "p2p", "network", "easytier"]
categories = ["network-programming", "command-line-utilities"] categories = ["network-programming", "command-line-utilities"]
rust-version = "1.89.0" rust-version = "1.87.0"
license-file = "LICENSE" license-file = "LICENSE"
readme = "README.md" readme = "README.md"
@@ -115,7 +115,7 @@ byteorder = "1.5.0"
# for proxy # for proxy
cidr = { version = "0.2.2", features = ["serde"] } cidr = { version = "0.2.2", features = ["serde"] }
socket2 = { version = "0.5.10", features = ["all"] } socket2 = "0.5.5"
# for hole punching # for hole punching
stun_codec = "0.3.4" stun_codec = "0.3.4"
@@ -150,7 +150,6 @@ boringtun = { package = "boringtun-easytier", version = "0.6.1", optional = true
ring = { version = "0.17", optional = true } ring = { version = "0.17", optional = true }
bitflags = "2.5" bitflags = "2.5"
aes-gcm = { version = "0.10.3", optional = true } aes-gcm = { version = "0.10.3", optional = true }
openssl = { version = "0.10", optional = true, features = ["vendored"] }
# for cli # for cli
tabled = "0.16" tabled = "0.16"
@@ -250,9 +249,7 @@ windows-sys = { version = "0.52", features = [
winapi = { version = "0.3.9", features = ["impl-default"] } winapi = { version = "0.3.9", features = ["impl-default"] }
[target.'cfg(not(windows))'.dependencies] [target.'cfg(not(windows))'.dependencies]
jemallocator = { package = "tikv-jemallocator", version = "0.6.0", optional = true, features = [ jemallocator = { package = "tikv-jemallocator", version = "0.6.0", optional = true }
"unprefixed_malloc_on_supported_platforms"
] }
jemalloc-ctl = { package = "tikv-jemalloc-ctl", version = "0.6.0", optional = true, features = [ jemalloc-ctl = { package = "tikv-jemalloc-ctl", version = "0.6.0", optional = true, features = [
] } ] }
jemalloc-sys = { package = "tikv-jemalloc-sys", version = "0.6.0", features = [ jemalloc-sys = { package = "tikv-jemalloc-sys", version = "0.6.0", features = [
@@ -299,7 +296,6 @@ full = [
"websocket", "websocket",
"wireguard", "wireguard",
"aes-gcm", "aes-gcm",
"openssl-crypto", # need openssl-dev libs
"smoltcp", "smoltcp",
"tun", "tun",
"socks5", "socks5",
@@ -308,7 +304,6 @@ wireguard = ["dep:boringtun", "dep:ring"]
quic = ["dep:quinn", "dep:rustls", "dep:rcgen"] quic = ["dep:quinn", "dep:rustls", "dep:rcgen"]
mimalloc = ["dep:mimalloc"] mimalloc = ["dep:mimalloc"]
aes-gcm = ["dep:aes-gcm"] aes-gcm = ["dep:aes-gcm"]
openssl-crypto = ["dep:openssl"]
tun = ["dep:tun"] tun = ["dep:tun"]
websocket = [ websocket = [
"dep:tokio-websockets", "dep:tokio-websockets",
View File
@@ -116,7 +116,7 @@ fn check_locale() {
if let Ok(globs) = globwalk::glob(locale_path) { if let Ok(globs) = globwalk::glob(locale_path) {
for entry in globs { for entry in globs {
if let Err(e) = entry { if let Err(e) = entry {
println!("cargo:i18n-error={e}"); println!("cargo:i18n-error={}", e);
continue; continue;
} }
@@ -151,7 +151,7 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
]; ];
for proto_file in proto_files.iter().chain(proto_files_reflect.iter()) { for proto_file in proto_files.iter().chain(proto_files_reflect.iter()) {
println!("cargo:rerun-if-changed={proto_file}"); println!("cargo:rerun-if-changed={}", proto_file);
} }
let mut config = prost_build::Config::new(); let mut config = prost_build::Config::new();
@@ -173,7 +173,7 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
.field_attribute(".web.NetworkConfig", "#[serde(default)]") .field_attribute(".web.NetworkConfig", "#[serde(default)]")
.service_generator(Box::new(rpc_build::ServiceGenerator::new())) .service_generator(Box::new(rpc_build::ServiceGenerator::new()))
.btree_map(["."]) .btree_map(["."])
.skip_debug([".common.Ipv4Addr", ".common.Ipv6Addr", ".common.UUID"]); .skip_debug(&[".common.Ipv4Addr", ".common.Ipv6Addr", ".common.UUID"]);
config.compile_protos(&proto_files, &["src/proto/"])?; config.compile_protos(&proto_files, &["src/proto/"])?;
View File
@@ -95,9 +95,6 @@ core_clap:
disable_encryption: disable_encryption:
en: "disable encryption for peers communication, default is false, must be same with peers" en: "disable encryption for peers communication, default is false, must be same with peers"
zh-CN: "禁用对等节点通信的加密默认为false必须与对等节点相同" zh-CN: "禁用对等节点通信的加密默认为false必须与对等节点相同"
encryption_algorithm:
en: "encryption algorithm to use, supported: '', 'xor', 'chacha20', 'aes-gcm', 'aes-gcm-256', 'openssl-aes128-gcm', 'openssl-aes256-gcm', 'openssl-chacha20'. Empty string means default (aes-gcm)"
zh-CN: "要使用的加密算法,支持:''默认aes-gcm、'xor'、'chacha20'、'aes-gcm'、'aes-gcm-256'、'openssl-aes128-gcm'、'openssl-aes256-gcm'、'openssl-chacha20'"
multi_thread: multi_thread:
en: "use multi-thread runtime, default is single-thread" en: "use multi-thread runtime, default is single-thread"
zh-CN: "使用多线程运行时,默认为单线程" zh-CN: "使用多线程运行时,默认为单线程"
@@ -193,18 +190,6 @@ core_clap:
foreign_relay_bps_limit: foreign_relay_bps_limit:
en: "the maximum bps limit for foreign network relay, default is no limit. unit: BPS (bytes per second)" en: "the maximum bps limit for foreign network relay, default is no limit. unit: BPS (bytes per second)"
zh-CN: "作为共享节点时,限制非本地网络的流量转发速率,默认无限制,单位 BPS (字节每秒)" zh-CN: "作为共享节点时,限制非本地网络的流量转发速率,默认无限制,单位 BPS (字节每秒)"
tcp_whitelist:
en: "tcp port whitelist. Supports single ports (80) and ranges (8000-9000)"
zh-CN: "TCP 端口白名单。支持单个端口80和范围8000-9000"
udp_whitelist:
en: "udp port whitelist. Supports single ports (53) and ranges (5000-6000)"
zh-CN: "UDP 端口白名单。支持单个端口53和范围5000-6000"
disable_relay_kcp:
en: "if true, disable relay kcp packets. avoid consuming too many bandwidth. default is false"
zh-CN: "如果为true则禁止节点转发 KCP 数据包防止过度消耗流量。默认值为false"
enable_relay_foreign_network_kcp:
en: "if true, allow relay kcp packets from foreign network. default is false (not forward foreign network kcp packets)"
zh-CN: "如果为true则作为共享节点时也可以转发其他网络的 KCP 数据包。默认值为false不转发"
core_app: core_app:
panic_backtrace_save: panic_backtrace_save:
View File
@@ -6,9 +6,8 @@ use std::{
time::{Duration, SystemTime, UNIX_EPOCH}, time::{Duration, SystemTime, UNIX_EPOCH},
}; };
use crate::common::{config::ConfigLoader, global_ctx::ArcGlobalCtx, token_bucket::TokenBucket}; use crate::common::token_bucket::TokenBucket;
use crate::proto::acl::*; use crate::proto::acl::*;
use anyhow::Context as _;
use dashmap::DashMap; use dashmap::DashMap;
use tokio::task::JoinSet; use tokio::task::JoinSet;
@@ -178,12 +177,6 @@ impl AclLogContext {
} }
} }
pub type SharedState = (
Arc<DashMap<String, ConnTrackEntry>>,
Arc<DashMap<RateLimitKey, Arc<TokenBucket>>>,
Arc<DashMap<AclStatKey, u64>>,
);
// High-performance ACL processor - No more internal locks! // High-performance ACL processor - No more internal locks!
pub struct AclProcessor { pub struct AclProcessor {
// Immutable rule vectors - no locks needed since they're never modified after creation // Immutable rule vectors - no locks needed since they're never modified after creation
@@ -327,7 +320,7 @@ impl AclProcessor {
.rules .rules
.iter() .iter()
.filter(|rule| rule.enabled) .filter(|rule| rule.enabled)
.map(Self::convert_to_fast_lookup_rule) .map(|rule| Self::convert_to_fast_lookup_rule(rule))
.collect::<Vec<_>>(); .collect::<Vec<_>>();
// Sort by priority (higher priority first) // Sort by priority (higher priority first)
@@ -428,7 +421,7 @@ impl AclProcessor {
self.inc_cache_entry_stats(cache_entry, packet_info); self.inc_cache_entry_stats(cache_entry, packet_info);
cache_entry.acl_result.clone().unwrap() return cache_entry.acl_result.clone().unwrap();
} }
fn inc_cache_entry_stats(&self, cache_entry: &AclCacheEntry, packet_info: &PacketInfo) { fn inc_cache_entry_stats(&self, cache_entry: &AclCacheEntry, packet_info: &PacketInfo) {
@@ -477,7 +470,6 @@ impl AclProcessor {
let rules = match chain_type { let rules = match chain_type {
ChainType::Inbound => &self.inbound_rules, ChainType::Inbound => &self.inbound_rules,
ChainType::Outbound => &self.outbound_rules, ChainType::Outbound => &self.outbound_rules,
ChainType::Forward => &self.forward_rules,
_ => { _ => {
return AclResult { return AclResult {
action: Action::Drop, action: Action::Drop,
@@ -545,7 +537,7 @@ impl AclProcessor {
cache_entry.rule_stats_vec.push(rule.rule_stats.clone()); cache_entry.rule_stats_vec.push(rule.rule_stats.clone());
cache_entry.matched_rule = RuleId::Priority(rule.priority); cache_entry.matched_rule = RuleId::Priority(rule.priority);
cache_entry.acl_result = Some(AclResult { cache_entry.acl_result = Some(AclResult {
action: rule.action, action: rule.action.clone(),
matched_rule: Some(RuleId::Priority(rule.priority)), matched_rule: Some(RuleId::Priority(rule.priority)),
should_log: false, should_log: false,
log_context: Some(AclLogContext::RuleMatch { log_context: Some(AclLogContext::RuleMatch {
@@ -601,7 +593,13 @@ impl AclProcessor {
} }
/// Get shared state for preserving across hot reloads /// Get shared state for preserving across hot reloads
pub fn get_shared_state(&self) -> SharedState { pub fn get_shared_state(
&self,
) -> (
Arc<DashMap<String, ConnTrackEntry>>,
Arc<DashMap<RateLimitKey, Arc<TokenBucket>>>,
Arc<DashMap<AclStatKey, u64>>,
) {
( (
self.conn_track.clone(), self.conn_track.clone(),
self.rate_limiters.clone(), self.rate_limiters.clone(),
@@ -698,9 +696,9 @@ impl AclProcessor {
} }
/// Check connection state for stateful rules /// Check connection state for stateful rules
fn check_connection_state(&self, conn_track_key: &str, packet_info: &PacketInfo) { fn check_connection_state(&self, conn_track_key: &String, packet_info: &PacketInfo) {
self.conn_track self.conn_track
.entry(conn_track_key.to_string()) .entry(conn_track_key.clone())
.and_modify(|x| { .and_modify(|x| {
x.last_seen = SystemTime::now() x.last_seen = SystemTime::now()
.duration_since(UNIX_EPOCH) .duration_since(UNIX_EPOCH)
@@ -764,13 +762,13 @@ impl AclProcessor {
let src_ip_ranges = rule let src_ip_ranges = rule
.source_ips .source_ips
.iter() .iter()
.filter_map(|x| Self::convert_ip_inet_to_cidr(x.as_str())) .filter_map(|ip_inet| Self::convert_ip_inet_to_cidr(ip_inet))
.collect(); .collect();
let dst_ip_ranges = rule let dst_ip_ranges = rule
.destination_ips .destination_ips
.iter() .iter()
.filter_map(|x| Self::convert_ip_inet_to_cidr(x.as_str())) .filter_map(|ip_inet| Self::convert_ip_inet_to_cidr(ip_inet))
.collect(); .collect();
let src_port_ranges = rule let src_port_ranges = rule
@@ -820,8 +818,8 @@ impl AclProcessor {
} }
/// Convert IpInet to CIDR for fast lookup /// Convert IpInet to CIDR for fast lookup
fn convert_ip_inet_to_cidr(input: &str) -> Option<cidr::IpCidr> { fn convert_ip_inet_to_cidr(input: &String) -> Option<cidr::IpCidr> {
cidr::IpCidr::from_str(input).ok() cidr::IpCidr::from_str(input.as_str()).ok()
} }
/// Increment statistics counter /// Increment statistics counter
@@ -898,13 +896,17 @@ impl AclProcessor {
} }
// 新增辅助函数 // 新增辅助函数
fn parse_port_start(port_strs: &[String]) -> Option<u16> { fn parse_port_start(
port_strs: &::prost::alloc::vec::Vec<::prost::alloc::string::String>,
) -> Option<u16> {
port_strs port_strs
.iter() .iter()
.filter_map(|s| parse_port_range(s).map(|(start, _)| start)) .filter_map(|s| parse_port_range(s).map(|(start, _)| start))
.min() .min()
} }
fn parse_port_end(port_strs: &[String]) -> Option<u16> { fn parse_port_end(
port_strs: &::prost::alloc::vec::Vec<::prost::alloc::string::String>,
) -> Option<u16> {
port_strs port_strs
.iter() .iter()
.filter_map(|s| parse_port_range(s).map(|(_, end)| end)) .filter_map(|s| parse_port_range(s).map(|(_, end)| end))
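parse_port_start and parse_port_end above both delegate to a parse_port_range helper that falls outside this hunk. A hypothetical stand-in with the behavior those call sites assume — "80" yielding (80, 80) and "8000-9000" yielding (8000, 9000) — purely for illustration:

    // Hypothetical stand-in for parse_port_range: single port or "start-end" range.
    fn parse_port_range(s: &str) -> Option<(u16, u16)> {
        match s.split_once('-') {
            Some((start, end)) => Some((start.trim().parse().ok()?, end.trim().parse().ok()?)),
            None => {
                let port: u16 = s.trim().parse().ok()?;
                Some((port, port))
            }
        }
    }

    fn main() {
        assert_eq!(parse_port_range("80"), Some((80, 80)));
        assert_eq!(parse_port_range("8000-9000"), Some((8000, 9000)));
        assert_eq!(parse_port_range("not-a-port"), None);
    }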
@@ -990,146 +992,6 @@ impl AclStatKey {
} }
} }
pub struct AclRuleBuilder {
pub acl: Option<Acl>,
pub tcp_whitelist: Vec<String>,
pub udp_whitelist: Vec<String>,
pub whitelist_priority: Option<u32>,
}
impl AclRuleBuilder {
fn parse_port_list(port_list: &[String]) -> anyhow::Result<Vec<String>> {
let mut ports = Vec::new();
for port_spec in port_list {
if port_spec.contains('-') {
// Handle port range like "8000-9000"
let parts: Vec<&str> = port_spec.split('-').collect();
if parts.len() != 2 {
return Err(anyhow::anyhow!("Invalid port range format: {}", port_spec));
}
let start: u16 = parts[0]
.parse()
.with_context(|| format!("Invalid start port in range: {}", port_spec))?;
let end: u16 = parts[1]
.parse()
.with_context(|| format!("Invalid end port in range: {}", port_spec))?;
if start > end {
return Err(anyhow::anyhow!(
"Start port must be <= end port in range: {}",
port_spec
));
}
// acl can handle port range
ports.push(port_spec.clone());
} else {
// Handle single port
let port: u16 = port_spec
.parse()
.with_context(|| format!("Invalid port number: {}", port_spec))?;
ports.push(port.to_string());
}
}
Ok(ports)
}
fn generate_acl_from_whitelists(&mut self) -> anyhow::Result<()> {
if self.tcp_whitelist.is_empty() && self.udp_whitelist.is_empty() {
return Ok(());
}
// Create inbound chain for whitelist rules
let mut inbound_chain = Chain {
name: "inbound_whitelist".to_string(),
chain_type: ChainType::Inbound as i32,
description: "Auto-generated inbound whitelist from CLI".to_string(),
enabled: true,
rules: vec![],
default_action: Action::Drop as i32, // Default deny
};
let mut rule_priority = self.whitelist_priority.unwrap_or(1000u32);
// Add TCP whitelist rules
if !self.tcp_whitelist.is_empty() {
let tcp_ports = Self::parse_port_list(&self.tcp_whitelist)?;
let tcp_rule = Rule {
name: "tcp_whitelist".to_string(),
description: "Auto-generated TCP whitelist rule".to_string(),
priority: rule_priority,
enabled: true,
protocol: Protocol::Tcp as i32,
ports: tcp_ports,
source_ips: vec![],
destination_ips: vec![],
source_ports: vec![],
action: Action::Allow as i32,
rate_limit: 0,
burst_limit: 0,
stateful: true,
};
inbound_chain.rules.push(tcp_rule);
rule_priority -= 1;
}
// Add UDP whitelist rules
if !self.udp_whitelist.is_empty() {
let udp_ports = Self::parse_port_list(&self.udp_whitelist)?;
let udp_rule = Rule {
name: "udp_whitelist".to_string(),
description: "Auto-generated UDP whitelist rule".to_string(),
priority: rule_priority,
enabled: true,
protocol: Protocol::Udp as i32,
ports: udp_ports,
source_ips: vec![],
destination_ips: vec![],
source_ports: vec![],
action: Action::Allow as i32,
rate_limit: 0,
burst_limit: 0,
stateful: false,
};
inbound_chain.rules.push(udp_rule);
}
if self.acl.is_none() {
self.acl = Some(Acl::default());
}
let acl = self.acl.as_mut().unwrap();
if let Some(ref mut acl_v1) = acl.acl_v1 {
acl_v1.chains.push(inbound_chain);
} else {
acl.acl_v1 = Some(AclV1 {
chains: vec![inbound_chain],
});
}
Ok(())
}
fn do_build(mut self) -> anyhow::Result<Option<Acl>> {
self.generate_acl_from_whitelists()?;
Ok(self.acl.clone())
}
pub fn build(global_ctx: &ArcGlobalCtx) -> anyhow::Result<Option<Acl>> {
let builder = AclRuleBuilder {
acl: global_ctx.config.get_acl(),
tcp_whitelist: global_ctx.config.get_tcp_whitelist(),
udp_whitelist: global_ctx.config.get_udp_whitelist(),
whitelist_priority: None,
};
builder.do_build()
}
}
#[derive(Debug, Clone, Copy)] #[derive(Debug, Clone, Copy)]
pub enum AclStatType { pub enum AclStatType {
Total, Total,
@@ -1150,22 +1012,18 @@ mod tests {
let mut acl_v1 = AclV1::default(); let mut acl_v1 = AclV1::default();
// Create inbound chain // Create inbound chain
let mut chain = Chain { let mut chain = Chain::default();
name: "test_inbound".to_string(), chain.name = "test_inbound".to_string();
chain_type: ChainType::Inbound as i32, chain.chain_type = ChainType::Inbound as i32;
enabled: true, chain.enabled = true;
..Default::default()
};
// Allow all rule // Allow all rule
let rule = Rule { let mut rule = Rule::default();
name: "allow_all".to_string(), rule.name = "allow_all".to_string();
priority: 100, rule.priority = 100;
enabled: true, rule.enabled = true;
action: Action::Allow as i32, rule.action = Action::Allow as i32;
protocol: Protocol::Any as i32, rule.protocol = Protocol::Any as i32;
..Default::default()
};
chain.rules.push(rule); chain.rules.push(rule);
acl_v1.chains.push(chain); acl_v1.chains.push(chain);
@@ -1278,14 +1136,12 @@ mod tests {
// 创建新配置(模拟热加载) // 创建新配置(模拟热加载)
let mut new_config = create_test_acl_config(); let mut new_config = create_test_acl_config();
if let Some(ref mut acl_v1) = new_config.acl_v1 { if let Some(ref mut acl_v1) = new_config.acl_v1 {
let drop_rule = Rule { let mut drop_rule = Rule::default();
name: "drop_all".to_string(), drop_rule.name = "drop_all".to_string();
priority: 200, drop_rule.priority = 200;
enabled: true, drop_rule.enabled = true;
action: Action::Drop as i32, drop_rule.action = Action::Drop as i32;
protocol: Protocol::Any as i32, drop_rule.protocol = Protocol::Any as i32;
..Default::default()
};
acl_v1.chains[0].rules.push(drop_rule); acl_v1.chains[0].rules.push(drop_rule);
} }
@@ -1323,48 +1179,40 @@ mod tests {
let mut acl_config = Acl::default(); let mut acl_config = Acl::default();
let mut acl_v1 = AclV1::default(); let mut acl_v1 = AclV1::default();
let mut chain = Chain { let mut chain = Chain::default();
name: "performance_test".to_string(), chain.name = "performance_test".to_string();
chain_type: ChainType::Inbound as i32, chain.chain_type = ChainType::Inbound as i32;
enabled: true, chain.enabled = true;
..Default::default()
};
// 1. High-priority simple rule for UDP (can be cached efficiently) // 1. High-priority simple rule for UDP (can be cached efficiently)
let simple_rule = Rule { let mut simple_rule = Rule::default();
name: "simple_udp".to_string(), simple_rule.name = "simple_udp".to_string();
priority: 300, simple_rule.priority = 300;
enabled: true, simple_rule.enabled = true;
action: Action::Allow as i32, simple_rule.action = Action::Allow as i32;
protocol: Protocol::Udp as i32, simple_rule.protocol = Protocol::Udp as i32;
..Default::default()
};
// No stateful or rate limit - can benefit from full cache optimization // No stateful or rate limit - can benefit from full cache optimization
chain.rules.push(simple_rule); chain.rules.push(simple_rule);
// 2. Medium-priority stateful + rate-limited rule for TCP (security critical) // 2. Medium-priority stateful + rate-limited rule for TCP (security critical)
let security_rule = Rule { let mut security_rule = Rule::default();
name: "security_tcp".to_string(), security_rule.name = "security_tcp".to_string();
priority: 200, security_rule.priority = 200;
enabled: true, security_rule.enabled = true;
action: Action::Allow as i32, security_rule.action = Action::Allow as i32;
protocol: Protocol::Tcp as i32, security_rule.protocol = Protocol::Tcp as i32;
stateful: true, security_rule.stateful = true;
rate_limit: 100, security_rule.rate_limit = 100; // 100 packets/sec
burst_limit: 200, security_rule.burst_limit = 200;
..Default::default()
};
chain.rules.push(security_rule); chain.rules.push(security_rule);
// 3. Low-priority default allow rule for Any // 3. Low-priority default allow rule for Any
let default_rule = Rule { let mut default_rule = Rule::default();
name: "default_allow".to_string(), default_rule.name = "default_allow".to_string();
priority: 100, default_rule.priority = 100;
enabled: true, default_rule.enabled = true;
action: Action::Allow as i32, default_rule.action = Action::Allow as i32;
protocol: Protocol::Any as i32, default_rule.protocol = Protocol::Any as i32;
..Default::default()
};
chain.rules.push(default_rule); chain.rules.push(default_rule);
acl_v1.chains.push(chain); acl_v1.chains.push(chain);
@@ -1451,16 +1299,15 @@ mod tests {
// Create a very restrictive rate-limited rule // Create a very restrictive rate-limited rule
if let Some(ref mut acl_v1) = acl_config.acl_v1 { if let Some(ref mut acl_v1) = acl_config.acl_v1 {
let rule = Rule { let mut rule = Rule::default();
name: "strict_rate_limit".to_string(), rule.name = "strict_rate_limit".to_string();
priority: 200, rule.priority = 200;
enabled: true, rule.enabled = true;
action: Action::Allow as i32, rule.action = Action::Allow as i32;
protocol: Protocol::Any as i32, rule.protocol = Protocol::Any as i32;
rate_limit: 1, // Allow only 1 packet per second rule.rate_limit = 1; // Allow only 1 packet per second
burst_limit: 1, // Burst of 1 packet rule.burst_limit = 1; // Burst of 1 packet
..Default::default()
};
acl_v1.chains[0].rules.push(rule); acl_v1.chains[0].rules.push(rule);
} }
View File
@@ -21,12 +21,6 @@ pub trait Compressor {
pub struct DefaultCompressor {} pub struct DefaultCompressor {}
impl Default for DefaultCompressor {
fn default() -> Self {
Self::new()
}
}
impl DefaultCompressor { impl DefaultCompressor {
pub fn new() -> Self { pub fn new() -> Self {
DefaultCompressor {} DefaultCompressor {}
@@ -201,11 +195,11 @@ pub mod tests {
packet, packet,
packet.payload_len() packet.payload_len()
); );
assert!(packet.peer_manager_header().unwrap().is_compressed()); assert_eq!(packet.peer_manager_header().unwrap().is_compressed(), true);
compressor.decompress(&mut packet).await.unwrap(); compressor.decompress(&mut packet).await.unwrap();
assert_eq!(packet.payload(), text); assert_eq!(packet.payload(), text);
assert!(!packet.peer_manager_header().unwrap().is_compressed()); assert_eq!(packet.peer_manager_header().unwrap().is_compressed(), false);
} }
#[tokio::test] #[tokio::test]
@@ -221,10 +215,10 @@ pub mod tests {
.compress(&mut packet, CompressorAlgo::ZstdDefault) .compress(&mut packet, CompressorAlgo::ZstdDefault)
.await .await
.unwrap(); .unwrap();
assert!(!packet.peer_manager_header().unwrap().is_compressed()); assert_eq!(packet.peer_manager_header().unwrap().is_compressed(), false);
compressor.decompress(&mut packet).await.unwrap(); compressor.decompress(&mut packet).await.unwrap();
assert_eq!(packet.payload(), text); assert_eq!(packet.payload(), text);
assert!(!packet.peer_manager_header().unwrap().is_compressed()); assert_eq!(packet.peer_manager_header().unwrap().is_compressed(), false);
} }
} }
View File
@@ -1,8 +1,8 @@
use std::{ use std::{
hash::Hasher, net::{Ipv4Addr, SocketAddr},
net::{IpAddr, SocketAddr},
path::PathBuf, path::PathBuf,
sync::{Arc, Mutex}, sync::{Arc, Mutex},
u64,
}; };
use anyhow::Context; use anyhow::Context;
@@ -40,87 +40,16 @@ pub fn gen_default_flags() -> Flags {
bind_device: true, bind_device: true,
enable_kcp_proxy: false, enable_kcp_proxy: false,
disable_kcp_input: false, disable_kcp_input: false,
disable_relay_kcp: false, disable_relay_kcp: true,
enable_relay_foreign_network_kcp: false,
accept_dns: false, accept_dns: false,
private_mode: false, private_mode: false,
enable_quic_proxy: false, enable_quic_proxy: false,
disable_quic_input: false, disable_quic_input: false,
foreign_relay_bps_limit: u64::MAX, foreign_relay_bps_limit: u64::MAX,
multi_thread_count: 2, multi_thread_count: 2,
encryption_algorithm: "aes-gcm".to_string(),
} }
} }
pub enum EncryptionAlgorithm {
AesGcm,
Aes256Gcm,
Xor,
#[cfg(feature = "wireguard")]
ChaCha20,
#[cfg(feature = "openssl-crypto")]
OpensslAesGcm,
#[cfg(feature = "openssl-crypto")]
OpensslChacha20,
#[cfg(feature = "openssl-crypto")]
OpensslAes256Gcm,
}
impl std::fmt::Display for EncryptionAlgorithm {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Self::AesGcm => write!(f, "aes-gcm"),
Self::Aes256Gcm => write!(f, "aes-256-gcm"),
Self::Xor => write!(f, "xor"),
#[cfg(feature = "wireguard")]
Self::ChaCha20 => write!(f, "chacha20"),
#[cfg(feature = "openssl-crypto")]
Self::OpensslAesGcm => write!(f, "openssl-aes-gcm"),
#[cfg(feature = "openssl-crypto")]
Self::OpensslChacha20 => write!(f, "openssl-chacha20"),
#[cfg(feature = "openssl-crypto")]
Self::OpensslAes256Gcm => write!(f, "openssl-aes-256-gcm"),
}
}
}
impl TryFrom<&str> for EncryptionAlgorithm {
type Error = anyhow::Error;
fn try_from(value: &str) -> Result<Self, Self::Error> {
match value {
"aes-gcm" => Ok(Self::AesGcm),
"aes-256-gcm" => Ok(Self::Aes256Gcm),
"xor" => Ok(Self::Xor),
#[cfg(feature = "wireguard")]
"chacha20" => Ok(Self::ChaCha20),
#[cfg(feature = "openssl-crypto")]
"openssl-aes-gcm" => Ok(Self::OpensslAesGcm),
#[cfg(feature = "openssl-crypto")]
"openssl-chacha20" => Ok(Self::OpensslChacha20),
#[cfg(feature = "openssl-crypto")]
"openssl-aes-256-gcm" => Ok(Self::OpensslAes256Gcm),
_ => Err(anyhow::anyhow!("invalid encryption algorithm")),
}
}
}
pub fn get_avaliable_encrypt_methods() -> Vec<&'static str> {
let mut r = vec!["aes-gcm", "aes-256-gcm", "xor"];
if cfg!(feature = "wireguard") {
r.push("chacha20");
}
if cfg!(feature = "openssl-crypto") {
r.extend(vec![
"openssl-aes-gcm",
"openssl-chacha20",
"openssl-aes-256-gcm",
]);
}
r
}
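The removed EncryptionAlgorithm block above maps CLI names onto algorithms through TryFrom<&str>, with the chacha20 and openssl-* variants gated behind cargo features. A standalone sketch of the name check for the always-available subset (names taken from the hunk; the real implementation returns an error for unknown names rather than a bool):

    // Accept only the algorithm names available without optional features.
    fn is_supported(algo: &str) -> bool {
        matches!(algo, "aes-gcm" | "aes-256-gcm" | "xor")
    }

    fn main() {
        assert!(is_supported("aes-gcm")); // also the default set in gen_default_flags
        assert!(!is_supported("des"));    // unknown names are rejected
    }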
#[auto_impl::auto_impl(Box, &)] #[auto_impl::auto_impl(Box, &)]
pub trait ConfigLoader: Send + Sync { pub trait ConfigLoader: Send + Sync {
fn get_id(&self) -> uuid::Uuid; fn get_id(&self) -> uuid::Uuid;
@@ -178,8 +107,8 @@ pub trait ConfigLoader: Send + Sync {
fn get_flags(&self) -> Flags; fn get_flags(&self) -> Flags;
fn set_flags(&self, flags: Flags); fn set_flags(&self, flags: Flags);
fn get_exit_nodes(&self) -> Vec<IpAddr>; fn get_exit_nodes(&self) -> Vec<Ipv4Addr>;
fn set_exit_nodes(&self, nodes: Vec<IpAddr>); fn set_exit_nodes(&self, nodes: Vec<Ipv4Addr>);
fn get_routes(&self) -> Option<Vec<cidr::Ipv4Cidr>>; fn get_routes(&self) -> Option<Vec<cidr::Ipv4Cidr>>;
fn set_routes(&self, routes: Option<Vec<cidr::Ipv4Cidr>>); fn set_routes(&self, routes: Option<Vec<cidr::Ipv4Cidr>>);
@@ -193,12 +122,6 @@ pub trait ConfigLoader: Send + Sync {
fn get_acl(&self) -> Option<Acl>; fn get_acl(&self) -> Option<Acl>;
fn set_acl(&self, acl: Option<Acl>); fn set_acl(&self, acl: Option<Acl>);
fn get_tcp_whitelist(&self) -> Vec<String>;
fn set_tcp_whitelist(&self, whitelist: Vec<String>);
fn get_udp_whitelist(&self) -> Vec<String>;
fn set_udp_whitelist(&self, whitelist: Vec<String>);
fn dump(&self) -> String; fn dump(&self) -> String;
} }
@@ -210,7 +133,7 @@ pub trait LoggingConfigLoader {
pub type NetworkSecretDigest = [u8; 32]; pub type NetworkSecretDigest = [u8; 32];
#[derive(Debug, Clone, Deserialize, Serialize)] #[derive(Debug, Clone, Deserialize, Serialize, Default, Eq, Hash)]
pub struct NetworkIdentity { pub struct NetworkIdentity {
pub network_name: String, pub network_name: String,
pub network_secret: Option<String>, pub network_secret: Option<String>,
@@ -218,53 +141,27 @@ pub struct NetworkIdentity {
pub network_secret_digest: Option<NetworkSecretDigest>, pub network_secret_digest: Option<NetworkSecretDigest>,
} }
#[derive(Eq, PartialEq, Hash)]
struct NetworkIdentityWithOnlyDigest {
network_name: String,
network_secret_digest: Option<NetworkSecretDigest>,
}
impl From<NetworkIdentity> for NetworkIdentityWithOnlyDigest {
fn from(identity: NetworkIdentity) -> Self {
if identity.network_secret_digest.is_some() {
Self {
network_name: identity.network_name,
network_secret_digest: identity.network_secret_digest,
}
} else if identity.network_secret.is_some() {
let mut network_secret_digest = [0u8; 32];
generate_digest_from_str(
&identity.network_name,
identity.network_secret.as_ref().unwrap(),
&mut network_secret_digest,
);
Self {
network_name: identity.network_name,
network_secret_digest: Some(network_secret_digest),
}
} else {
Self {
network_name: identity.network_name,
network_secret_digest: None,
}
}
}
}
impl PartialEq for NetworkIdentity { impl PartialEq for NetworkIdentity {
fn eq(&self, other: &Self) -> bool { fn eq(&self, other: &Self) -> bool {
let self_with_digest = NetworkIdentityWithOnlyDigest::from(self.clone()); if self.network_name != other.network_name {
let other_with_digest = NetworkIdentityWithOnlyDigest::from(other.clone()); return false;
self_with_digest == other_with_digest }
}
}
impl Eq for NetworkIdentity {} if self.network_secret.is_some()
&& other.network_secret.is_some()
&& self.network_secret != other.network_secret
{
return false;
}
impl std::hash::Hash for NetworkIdentity { if self.network_secret_digest.is_some()
fn hash<H: Hasher>(&self, state: &mut H) { && other.network_secret_digest.is_some()
let self_with_digest = NetworkIdentityWithOnlyDigest::from(self.clone()); && self.network_secret_digest != other.network_secret_digest
self_with_digest.hash(state); {
return false;
}
return true;
} }
} }
@@ -279,10 +176,8 @@ impl NetworkIdentity {
network_secret_digest: Some(network_secret_digest), network_secret_digest: Some(network_secret_digest),
} }
} }
}
impl Default for NetworkIdentity { pub fn default() -> Self {
fn default() -> Self {
Self::new("default".to_string(), "".to_string()) Self::new("default".to_string(), "".to_string())
} }
} }
@@ -335,7 +230,7 @@ pub struct VpnPortalConfig {
pub wireguard_listen: SocketAddr, pub wireguard_listen: SocketAddr,
} }
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq, Hash)] #[derive(Debug, Clone, Deserialize, Serialize, PartialEq)]
pub struct PortForwardConfig { pub struct PortForwardConfig {
pub bind_addr: SocketAddr, pub bind_addr: SocketAddr,
pub dst_addr: SocketAddr, pub dst_addr: SocketAddr,
@@ -356,12 +251,12 @@ impl From<PortForwardConfigPb> for PortForwardConfig {
} }
} }
impl From<PortForwardConfig> for PortForwardConfigPb { impl Into<PortForwardConfigPb> for PortForwardConfig {
fn from(val: PortForwardConfig) -> Self { fn into(self) -> PortForwardConfigPb {
PortForwardConfigPb { PortForwardConfigPb {
bind_addr: Some(val.bind_addr.into()), bind_addr: Some(self.bind_addr.into()),
dst_addr: Some(val.dst_addr.into()), dst_addr: Some(self.dst_addr.into()),
socket_type: match val.proto.to_lowercase().as_str() { socket_type: match self.proto.to_lowercase().as_str() {
"tcp" => SocketType::Tcp as i32, "tcp" => SocketType::Tcp as i32,
"udp" => SocketType::Udp as i32, "udp" => SocketType::Udp as i32,
_ => SocketType::Tcp as i32, _ => SocketType::Tcp as i32,
@@ -382,7 +277,7 @@ struct Config {
network_identity: Option<NetworkIdentity>, network_identity: Option<NetworkIdentity>,
listeners: Option<Vec<url::Url>>, listeners: Option<Vec<url::Url>>,
mapped_listeners: Option<Vec<url::Url>>, mapped_listeners: Option<Vec<url::Url>>,
exit_nodes: Option<Vec<IpAddr>>, exit_nodes: Option<Vec<Ipv4Addr>>,
peer: Option<Vec<PeerConfig>>, peer: Option<Vec<PeerConfig>>,
proxy_network: Option<Vec<ProxyNetworkConfig>>, proxy_network: Option<Vec<ProxyNetworkConfig>>,
@@ -404,9 +299,6 @@ struct Config {
flags_struct: Option<Flags>, flags_struct: Option<Flags>,
acl: Option<Acl>, acl: Option<Acl>,
tcp_whitelist: Option<Vec<String>>,
udp_whitelist: Option<Vec<String>>,
} }
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
@@ -521,7 +413,8 @@ impl ConfigLoader for TomlConfigLoader {
locked_config locked_config
.ipv4 .ipv4
.as_ref() .as_ref()
.and_then(|s| s.parse().ok()) .map(|s| s.parse().ok())
.flatten()
.map(|c: cidr::Ipv4Inet| { .map(|c: cidr::Ipv4Inet| {
if c.network_length() == 32 { if c.network_length() == 32 {
cidr::Ipv4Inet::new(c.address(), 24).unwrap() cidr::Ipv4Inet::new(c.address(), 24).unwrap()
@@ -532,16 +425,28 @@ impl ConfigLoader for TomlConfigLoader {
} }
fn set_ipv4(&self, addr: Option<cidr::Ipv4Inet>) { fn set_ipv4(&self, addr: Option<cidr::Ipv4Inet>) {
self.config.lock().unwrap().ipv4 = addr.map(|addr| addr.to_string()); self.config.lock().unwrap().ipv4 = if let Some(addr) = addr {
Some(addr.to_string())
} else {
None
};
} }
fn get_ipv6(&self) -> Option<cidr::Ipv6Inet> { fn get_ipv6(&self) -> Option<cidr::Ipv6Inet> {
let locked_config = self.config.lock().unwrap(); let locked_config = self.config.lock().unwrap();
locked_config.ipv6.as_ref().and_then(|s| s.parse().ok()) locked_config
.ipv6
.as_ref()
.map(|s| s.parse().ok())
.flatten()
} }
fn set_ipv6(&self, addr: Option<cidr::Ipv6Inet>) { fn set_ipv6(&self, addr: Option<cidr::Ipv6Inet>) {
self.config.lock().unwrap().ipv6 = addr.map(|addr| addr.to_string()); self.config.lock().unwrap().ipv6 = if let Some(addr) = addr {
Some(addr.to_string())
} else {
None
};
} }
fn get_dhcp(&self) -> bool { fn get_dhcp(&self) -> bool {
@@ -615,7 +520,7 @@ impl ConfigLoader for TomlConfigLoader {
locked_config.instance_id = Some(id); locked_config.instance_id = Some(id);
id id
} else { } else {
*locked_config.instance_id.as_ref().unwrap() locked_config.instance_id.as_ref().unwrap().clone()
} }
} }
@@ -629,7 +534,7 @@ impl ConfigLoader for TomlConfigLoader {
.unwrap() .unwrap()
.network_identity .network_identity
.clone() .clone()
.unwrap_or_default() .unwrap_or_else(NetworkIdentity::default)
} }
fn set_network_identity(&self, identity: NetworkIdentity) { fn set_network_identity(&self, identity: NetworkIdentity) {
@@ -710,7 +615,7 @@ impl ConfigLoader for TomlConfigLoader {
self.config.lock().unwrap().flags_struct = Some(flags); self.config.lock().unwrap().flags_struct = Some(flags);
} }
fn get_exit_nodes(&self) -> Vec<IpAddr> { fn get_exit_nodes(&self) -> Vec<Ipv4Addr> {
self.config self.config
.lock() .lock()
.unwrap() .unwrap()
@@ -719,7 +624,7 @@ impl ConfigLoader for TomlConfigLoader {
.unwrap_or_default() .unwrap_or_default()
} }
fn set_exit_nodes(&self, nodes: Vec<IpAddr>) { fn set_exit_nodes(&self, nodes: Vec<Ipv4Addr>) {
self.config.lock().unwrap().exit_nodes = Some(nodes); self.config.lock().unwrap().exit_nodes = Some(nodes);
} }
@@ -760,32 +665,6 @@ impl ConfigLoader for TomlConfigLoader {
self.config.lock().unwrap().acl = acl; self.config.lock().unwrap().acl = acl;
} }
fn get_tcp_whitelist(&self) -> Vec<String> {
self.config
.lock()
.unwrap()
.tcp_whitelist
.clone()
.unwrap_or_default()
}
fn set_tcp_whitelist(&self, whitelist: Vec<String>) {
self.config.lock().unwrap().tcp_whitelist = Some(whitelist);
}
fn get_udp_whitelist(&self) -> Vec<String> {
self.config
.lock()
.unwrap()
.udp_whitelist
.clone()
.unwrap_or_default()
}
fn set_udp_whitelist(&self, whitelist: Vec<String>) {
self.config.lock().unwrap().udp_whitelist = Some(whitelist);
}
fn dump(&self) -> String { fn dump(&self) -> String {
let default_flags_json = serde_json::to_string(&gen_default_flags()).unwrap(); let default_flags_json = serde_json::to_string(&gen_default_flags()).unwrap();
let default_flags_hashmap = let default_flags_hashmap =
View File
@@ -8,14 +8,14 @@ macro_rules! define_global_var {
#[macro_export] #[macro_export]
macro_rules! use_global_var { macro_rules! use_global_var {
($name:ident) => { ($name:ident) => {
$crate::common::constants::$name.lock().unwrap().to_owned() crate::common::constants::$name.lock().unwrap().to_owned()
}; };
} }
#[macro_export] #[macro_export]
macro_rules! set_global_var { macro_rules! set_global_var {
($name:ident, $val:expr) => { ($name:ident, $val:expr) => {
*$crate::common::constants::$name.lock().unwrap() = $val *crate::common::constants::$name.lock().unwrap() = $val
}; };
} }
View File
@@ -12,9 +12,7 @@ impl<F: FnOnce()> Defer<F> {
impl<F: FnOnce()> Drop for Defer<F> { impl<F: FnOnce()> Drop for Defer<F> {
fn drop(&mut self) { fn drop(&mut self) {
if let Some(f) = self.func.take() { self.func.take().map(|f| f());
f()
}
} }
} }
View File
@@ -48,15 +48,19 @@ pub static RESOLVER: Lazy<Arc<Resolver<GenericConnector<TokioRuntimeProvider>>>>
pub async fn resolve_txt_record(domain_name: &str) -> Result<String, Error> { pub async fn resolve_txt_record(domain_name: &str) -> Result<String, Error> {
let r = RESOLVER.clone(); let r = RESOLVER.clone();
let response = r let response = r.txt_lookup(domain_name).await.with_context(|| {
.txt_lookup(domain_name) format!(
.await "txt_lookup failed, domain_name: {}",
.with_context(|| format!("txt_lookup failed, domain_name: {}", domain_name))?; domain_name.to_string()
)
})?;
let txt_record = response let txt_record = response.iter().next().with_context(|| {
.iter() format!(
.next() "no txt record found, domain_name: {}",
.with_context(|| format!("no txt record found, domain_name: {}", domain_name))?; domain_name.to_string()
)
})?;
let txt_data = String::from_utf8_lossy(&txt_record.txt_data()[0]); let txt_data = String::from_utf8_lossy(&txt_record.txt_data()[0]);
tracing::info!(?txt_data, ?domain_name, "get txt record"); tracing::info!(?txt_data, ?domain_name, "get txt record");
View File
@@ -5,7 +5,6 @@ use std::{
}; };
use crate::common::config::ProxyNetworkConfig; use crate::common::config::ProxyNetworkConfig;
use crate::common::stats_manager::StatsManager;
use crate::common::token_bucket::TokenBucketManager; use crate::common::token_bucket::TokenBucketManager;
use crate::peers::acl_filter::AclFilter; use crate::peers::acl_filter::AclFilter;
use crate::proto::cli::PeerConnInfo; use crate::proto::cli::PeerConnInfo;
@@ -84,8 +83,6 @@ pub struct GlobalCtx {
token_bucket_manager: TokenBucketManager, token_bucket_manager: TokenBucketManager,
stats_manager: Arc<StatsManager>,
acl_filter: Arc<AclFilter>, acl_filter: Arc<AclFilter>,
} }
@@ -104,7 +101,7 @@ impl std::fmt::Debug for GlobalCtx {
pub type ArcGlobalCtx = std::sync::Arc<GlobalCtx>; pub type ArcGlobalCtx = std::sync::Arc<GlobalCtx>;
impl GlobalCtx { impl GlobalCtx {
pub fn new(config_fs: impl ConfigLoader + 'static) -> Self { pub fn new(config_fs: impl ConfigLoader + 'static + Send + Sync) -> Self {
let id = config_fs.get_id(); let id = config_fs.get_id();
let network = config_fs.get_network_identity(); let network = config_fs.get_network_identity();
let net_ns = NetNS::new(config_fs.get_netns()); let net_ns = NetNS::new(config_fs.get_netns());
@@ -118,11 +115,9 @@ impl GlobalCtx {
let proxy_forward_by_system = config_fs.get_flags().proxy_forward_by_system; let proxy_forward_by_system = config_fs.get_flags().proxy_forward_by_system;
let no_tun = config_fs.get_flags().no_tun; let no_tun = config_fs.get_flags().no_tun;
let feature_flags = PeerFeatureFlag { let mut feature_flags = PeerFeatureFlag::default();
kcp_input: !config_fs.get_flags().disable_kcp_input, feature_flags.kcp_input = !config_fs.get_flags().disable_kcp_input;
no_relay_kcp: config_fs.get_flags().disable_relay_kcp, feature_flags.no_relay_kcp = config_fs.get_flags().disable_relay_kcp;
..Default::default()
};
GlobalCtx { GlobalCtx {
inst_name: config_fs.get_inst_name(), inst_name: config_fs.get_inst_name(),
@@ -156,8 +151,6 @@ impl GlobalCtx {
token_bucket_manager: TokenBucketManager::new(), token_bucket_manager: TokenBucketManager::new(),
stats_manager: Arc::new(StatsManager::new()),
acl_filter: Arc::new(AclFilter::new()), acl_filter: Arc::new(AclFilter::new()),
} }
} }
@@ -187,7 +180,7 @@ impl GlobalCtx {
{ {
Ok(()) Ok(())
} else { } else {
Err(anyhow::anyhow!("network {} not in whitelist", network_name)) Err(anyhow::anyhow!("network {} not in whitelist", network_name).into())
} }
} }
@@ -196,8 +189,8 @@ impl GlobalCtx {
return Some(ret); return Some(ret);
} }
let addr = self.config.get_ipv4(); let addr = self.config.get_ipv4();
self.cached_ipv4.store(addr); self.cached_ipv4.store(addr.clone());
addr return addr;
} }
pub fn set_ipv4(&self, addr: Option<cidr::Ipv4Inet>) { pub fn set_ipv4(&self, addr: Option<cidr::Ipv4Inet>) {
@@ -210,8 +203,8 @@ impl GlobalCtx {
return Some(ret); return Some(ret);
} }
let addr = self.config.get_ipv6(); let addr = self.config.get_ipv6();
self.cached_ipv6.store(addr); self.cached_ipv6.store(addr.clone());
addr return addr;
} }
pub fn set_ipv6(&self, addr: Option<cidr::Ipv6Inet>) { pub fn set_ipv6(&self, addr: Option<cidr::Ipv6Inet>) {
@@ -298,29 +291,6 @@ impl GlobalCtx {
key key
} }
pub fn get_256_key(&self) -> [u8; 32] {
let mut key = [0u8; 32];
let secret = self
.config
.get_network_identity()
.network_secret
.unwrap_or_default();
// fill key according to network secret
let mut hasher = DefaultHasher::new();
hasher.write(secret.as_bytes());
hasher.write(b"easytier-256bit-key"); // 添加固定盐值以区分128位和256位密钥
// 生成32字节密钥
for i in 0..4 {
let chunk_start = i * 8;
let chunk_end = chunk_start + 8;
hasher.write(&key[0..chunk_start]);
hasher.write(&[i as u8]); // 添加索引以确保每个8字节块都不同
key[chunk_start..chunk_end].copy_from_slice(&hasher.finish().to_be_bytes());
}
key
}
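The removed `get_256_key` stretches the hashed network secret into 32 bytes by re-hashing the already-filled key prefix together with a chunk index for each 8-byte block. A standalone sketch of the same derivation, lifted out of `GlobalCtx` purely for illustration:

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::Hasher;

fn derive_256_key(secret: &str) -> [u8; 32] {
    let mut key = [0u8; 32];
    let mut hasher = DefaultHasher::new();
    hasher.write(secret.as_bytes());
    hasher.write(b"easytier-256bit-key"); // fixed salt, mirrors the removed code
    for i in 0..4 {
        let chunk_start = i * 8;
        let chunk_end = chunk_start + 8;
        hasher.write(&key[0..chunk_start]); // mix in the bytes derived so far
        hasher.write(&[i as u8]); // per-chunk index keeps each 8-byte block distinct
        key[chunk_start..chunk_end].copy_from_slice(&hasher.finish().to_be_bytes());
    }
    key
}

fn main() {
    // The derivation is deterministic: the same secret always yields the same key.
    assert_eq!(derive_256_key("demo"), derive_256_key("demo"));
    println!("{:02x?}", derive_256_key("demo"));
}
```

Note that std's `DefaultHasher` is not a cryptographic KDF; the loop mainly guarantees deterministic, mutually distinct blocks.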
pub fn enable_exit_node(&self) -> bool { pub fn enable_exit_node(&self) -> bool {
self.enable_exit_node self.enable_exit_node
} }
@@ -353,10 +323,6 @@ impl GlobalCtx {
&self.token_bucket_manager &self.token_bucket_manager
} }
pub fn stats_manager(&self) -> &Arc<StatsManager> {
&self.stats_manager
}
pub fn get_acl_filter(&self) -> &Arc<AclFilter> { pub fn get_acl_filter(&self) -> &Arc<AclFilter> {
&self.acl_filter &self.acl_filter
} }
@@ -378,18 +344,18 @@ pub mod tests {
let mut subscriber = global_ctx.subscribe(); let mut subscriber = global_ctx.subscribe();
let peer_id = new_peer_id(); let peer_id = new_peer_id();
global_ctx.issue_event(GlobalCtxEvent::PeerAdded(peer_id)); global_ctx.issue_event(GlobalCtxEvent::PeerAdded(peer_id.clone()));
global_ctx.issue_event(GlobalCtxEvent::PeerRemoved(peer_id)); global_ctx.issue_event(GlobalCtxEvent::PeerRemoved(peer_id.clone()));
global_ctx.issue_event(GlobalCtxEvent::PeerConnAdded(PeerConnInfo::default())); global_ctx.issue_event(GlobalCtxEvent::PeerConnAdded(PeerConnInfo::default()));
global_ctx.issue_event(GlobalCtxEvent::PeerConnRemoved(PeerConnInfo::default())); global_ctx.issue_event(GlobalCtxEvent::PeerConnRemoved(PeerConnInfo::default()));
assert_eq!( assert_eq!(
subscriber.recv().await.unwrap(), subscriber.recv().await.unwrap(),
GlobalCtxEvent::PeerAdded(peer_id) GlobalCtxEvent::PeerAdded(peer_id.clone())
); );
assert_eq!( assert_eq!(
subscriber.recv().await.unwrap(), subscriber.recv().await.unwrap(),
GlobalCtxEvent::PeerRemoved(peer_id) GlobalCtxEvent::PeerRemoved(peer_id.clone())
); );
assert_eq!( assert_eq!(
subscriber.recv().await.unwrap(), subscriber.recv().await.unwrap(),
@@ -406,7 +372,7 @@ pub mod tests {
) -> ArcGlobalCtx { ) -> ArcGlobalCtx {
let config_fs = TomlConfigLoader::default(); let config_fs = TomlConfigLoader::default();
config_fs.set_inst_name(format!("test_{}", config_fs.get_id())); config_fs.set_inst_name(format!("test_{}", config_fs.get_id()));
config_fs.set_network_identity(network_identy.unwrap_or_default()); config_fs.set_network_identity(network_identy.unwrap_or(NetworkIdentity::default()));
let ctx = Arc::new(GlobalCtx::new(config_fs)); let ctx = Arc::new(GlobalCtx::new(config_fs));
ctx.replace_stun_info_collector(Box::new(MockStunInfoCollector { ctx.replace_stun_info_collector(Box::new(MockStunInfoCollector {

View File

@@ -1,6 +1,6 @@
#[cfg(any(target_os = "macos", target_os = "freebsd"))] #[cfg(any(target_os = "macos", target_os = "freebsd"))]
mod darwin; mod darwin;
#[cfg(target_os = "linux")] #[cfg(any(target_os = "linux"))]
mod netlink; mod netlink;
#[cfg(target_os = "windows")] #[cfg(target_os = "windows")]
mod win; mod win;
@@ -141,7 +141,7 @@ pub struct DummyIfConfiger {}
#[async_trait] #[async_trait]
impl IfConfiguerTrait for DummyIfConfiger {} impl IfConfiguerTrait for DummyIfConfiger {}
#[cfg(target_os = "linux")] #[cfg(any(target_os = "linux"))]
pub type IfConfiger = netlink::NetlinkIfConfiger; pub type IfConfiger = netlink::NetlinkIfConfiger;
#[cfg(any(target_os = "macos", target_os = "freebsd"))] #[cfg(any(target_os = "macos", target_os = "freebsd"))]

View File

@@ -85,14 +85,14 @@ fn send_netlink_req_and_wait_one_resp<T: NetlinkDeserializable + NetlinkSerializ
match ret.payload { match ret.payload {
NetlinkPayload::Error(e) => { NetlinkPayload::Error(e) => {
if e.code == NonZero::new(0) { if e.code == NonZero::new(0) {
Ok(()) return Ok(());
} else { } else {
Err(e.to_io().into()) return Err(e.to_io().into());
} }
} }
p => { p => {
tracing::error!("Unexpected netlink response: {:?}", p); tracing::error!("Unexpected netlink response: {:?}", p);
Err(anyhow::anyhow!("Unexpected netlink response").into()) return Err(anyhow::anyhow!("Unexpected netlink response").into());
} }
} }
} }
@@ -263,8 +263,8 @@ impl NetlinkIfConfiger {
let (address, netmask) = match (address.family(), netmask.family()) { let (address, netmask) = match (address.family(), netmask.family()) {
(Some(Inet), Some(Inet)) => ( (Some(Inet), Some(Inet)) => (
IpAddr::V4(address.as_sockaddr_in().unwrap().ip()), IpAddr::V4(address.as_sockaddr_in().unwrap().ip().into()),
IpAddr::V4(netmask.as_sockaddr_in().unwrap().ip()), IpAddr::V4(netmask.as_sockaddr_in().unwrap().ip().into()),
), ),
(Some(Inet6), Some(Inet6)) => ( (Some(Inet6), Some(Inet6)) => (
IpAddr::V6(address.as_sockaddr_in6().unwrap().ip()), IpAddr::V6(address.as_sockaddr_in6().unwrap().ip()),
@@ -333,7 +333,7 @@ impl NetlinkIfConfiger {
let mut resp = Vec::<u8>::new(); let mut resp = Vec::<u8>::new();
loop { loop {
if resp.is_empty() { if resp.len() == 0 {
let (new_resp, _) = s.recv_from_full()?; let (new_resp, _) = s.recv_from_full()?;
resp = new_resp; resp = new_resp;
} }

View File

@@ -727,7 +727,7 @@ impl InterfaceLuid {
if family == (AF_INET6 as ADDRESS_FAMILY) { if family == (AF_INET6 as ADDRESS_FAMILY) {
// ipv6 mtu must be at least 1280 // ipv6 mtu must be at least 1280
mtu = 1280.max(mtu); mtu = 1280.max(mtu);
} }
// https://stackoverflow.com/questions/54857292/setipinterfaceentry-returns-error-invalid-parameter // https://stackoverflow.com/questions/54857292/setipinterfaceentry-returns-error-invalid-parameter
row.SitePrefixLength = 0; row.SitePrefixLength = 0;

View File

@@ -1,3 +1,3 @@
pub mod luid;
pub mod netsh; pub mod netsh;
pub mod types; pub mod types;
pub mod luid;

View File

@@ -115,4 +115,4 @@ pub fn add_dns_ipv6(if_index: u32, dnses: &[Ipv6Addr]) -> Result<(), String> {
} }
let dnses_str: Vec<String> = dnses.iter().map(|addr| addr.to_string()).collect(); let dnses_str: Vec<String> = dnses.iter().map(|addr| addr.to_string()).collect();
add_dns(AF_INET6 as _, if_index, &dnses_str) add_dns(AF_INET6 as _, if_index, &dnses_str)
} }

View File

@@ -100,4 +100,4 @@ pub fn u16_ptr_to_string(ptr: *const u16) -> String {
let slice = unsafe { std::slice::from_raw_parts(ptr, len) }; let slice = unsafe { std::slice::from_raw_parts(ptr, len) };
String::from_utf16_lossy(slice) String::from_utf16_lossy(slice)
} }

View File

@@ -22,7 +22,6 @@ pub mod ifcfg;
pub mod netns; pub mod netns;
pub mod network; pub mod network;
pub mod scoped_task; pub mod scoped_task;
pub mod stats_manager;
pub mod stun; pub mod stun;
pub mod stun_codec_ext; pub mod stun_codec_ext;
pub mod token_bucket; pub mod token_bucket;
@@ -140,8 +139,8 @@ pub fn get_machine_id() -> uuid::Uuid {
)))] )))]
let gen_mid = None; let gen_mid = None;
if let Some(mid) = gen_mid { if gen_mid.is_some() {
return mid; return gen_mid.unwrap();
} }
let gen_mid = uuid::Uuid::new_v4(); let gen_mid = uuid::Uuid::new_v4();

View File

@@ -34,12 +34,13 @@ impl NetNSGuard {
return; return;
} }
let ns_path: String;
let name = name.unwrap(); let name = name.unwrap();
let ns_path: String = if name == ROOT_NETNS_NAME { if name == ROOT_NETNS_NAME {
"/proc/1/ns/net".to_string() ns_path = "/proc/1/ns/net".to_string();
} else { } else {
format!("/var/run/netns/{}", name) ns_path = format!("/var/run/netns/{}", name);
}; }
let ns = std::fs::File::open(ns_path).unwrap(); let ns = std::fs::File::open(ns_path).unwrap();
tracing::info!( tracing::info!(

View File

@@ -211,7 +211,7 @@ impl IPCollector {
cached_ip_list.read().await.public_ipv6 cached_ip_list.read().await.public_ipv6
); );
let sleep_sec = if cached_ip_list.read().await.public_ipv4.is_some() { let sleep_sec = if !cached_ip_list.read().await.public_ipv4.is_none() {
CACHED_IP_LIST_TIMEOUT_SEC CACHED_IP_LIST_TIMEOUT_SEC
} else { } else {
3 3
@@ -252,11 +252,14 @@ impl IPCollector {
for iface in ifaces { for iface in ifaces {
for ip in iface.ips { for ip in iface.ips {
let ip: std::net::IpAddr = ip.ip(); let ip: std::net::IpAddr = ip.ip();
if let std::net::IpAddr::V4(v4) = ip { match ip {
if ip.is_loopback() || ip.is_multicast() { std::net::IpAddr::V4(v4) => {
continue; if ip.is_loopback() || ip.is_multicast() {
continue;
}
ret.interface_ipv4s.push(v4.into());
} }
ret.interface_ipv4s.push(v4.into()); _ => {}
} }
} }
} }
@@ -266,11 +269,14 @@ impl IPCollector {
for iface in ifaces { for iface in ifaces {
for ip in iface.ips { for ip in iface.ips {
let ip: std::net::IpAddr = ip.ip(); let ip: std::net::IpAddr = ip.ip();
if let std::net::IpAddr::V6(v6) = ip { match ip {
if v6.is_multicast() || v6.is_loopback() || v6.is_unicast_link_local() { std::net::IpAddr::V6(v6) => {
continue; if v6.is_multicast() || v6.is_loopback() || v6.is_unicast_link_local() {
continue;
}
ret.interface_ipv6s.push(v6.into());
} }
ret.interface_ipv6s.push(v6.into()); _ => {}
} }
} }
} }

View File

@@ -1,886 +0,0 @@
use dashmap::DashMap;
use serde::{Deserialize, Serialize};
use std::cell::UnsafeCell;
use std::fmt;
use std::sync::Arc;
use std::time::{Duration, Instant};
use tokio::time::interval;
use crate::common::scoped_task::ScopedTask;
/// Predefined metric names for type safety
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub enum MetricName {
/// RPC calls sent to peers
PeerRpcClientTx,
/// RPC calls received from peers
PeerRpcClientRx,
/// RPC calls sent to peers
PeerRpcServerTx,
/// RPC calls received from peers
PeerRpcServerRx,
/// RPC call duration in milliseconds
PeerRpcDuration,
/// RPC errors
PeerRpcErrors,
/// Traffic bytes sent
TrafficBytesTx,
/// Traffic bytes received
TrafficBytesRx,
/// Traffic bytes forwarded
TrafficBytesForwarded,
/// Traffic bytes sent to self
TrafficBytesSelfTx,
/// Traffic bytes received from self
TrafficBytesSelfRx,
/// Traffic bytes forwarded for foreign network, rx to local
TrafficBytesForeignForwardRx,
/// Traffic bytes forwarded for foreign network, tx from local
TrafficBytesForeignForwardTx,
/// Traffic bytes forwarded for foreign network, forward
TrafficBytesForeignForwardForwarded,
/// Traffic packets sent
TrafficPacketsTx,
/// Traffic packets received
TrafficPacketsRx,
/// Traffic packets forwarded
TrafficPacketsForwarded,
/// Traffic packets sent to self
TrafficPacketsSelfTx,
/// Traffic packets received from self
TrafficPacketsSelfRx,
/// Traffic packets forwarded for foreign network, rx to local
TrafficPacketsForeignForwardRx,
/// Traffic packets forwarded for foreign network, tx from local
TrafficPacketsForeignForwardTx,
/// Traffic packets forwarded for foreign network, forward
TrafficPacketsForeignForwardForwarded,
/// Compression bytes before compression
CompressionBytesRxBefore,
/// Compression bytes after compression
CompressionBytesRxAfter,
/// Compression bytes before compression
CompressionBytesTxBefore,
/// Compression bytes after compression
CompressionBytesTxAfter,
}
impl fmt::Display for MetricName {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
MetricName::PeerRpcClientTx => write!(f, "peer_rpc_client_tx"),
MetricName::PeerRpcClientRx => write!(f, "peer_rpc_client_rx"),
MetricName::PeerRpcServerTx => write!(f, "peer_rpc_server_tx"),
MetricName::PeerRpcServerRx => write!(f, "peer_rpc_server_rx"),
MetricName::PeerRpcDuration => write!(f, "peer_rpc_duration_ms"),
MetricName::PeerRpcErrors => write!(f, "peer_rpc_errors"),
MetricName::TrafficBytesTx => write!(f, "traffic_bytes_tx"),
MetricName::TrafficBytesRx => write!(f, "traffic_bytes_rx"),
MetricName::TrafficBytesForwarded => write!(f, "traffic_bytes_forwarded"),
MetricName::TrafficBytesSelfTx => write!(f, "traffic_bytes_self_tx"),
MetricName::TrafficBytesSelfRx => write!(f, "traffic_bytes_self_rx"),
MetricName::TrafficBytesForeignForwardRx => {
write!(f, "traffic_bytes_foreign_forward_rx")
}
MetricName::TrafficBytesForeignForwardTx => {
write!(f, "traffic_bytes_foreign_forward_tx")
}
MetricName::TrafficBytesForeignForwardForwarded => {
write!(f, "traffic_bytes_foreign_forward_forwarded")
}
MetricName::TrafficPacketsTx => write!(f, "traffic_packets_tx"),
MetricName::TrafficPacketsRx => write!(f, "traffic_packets_rx"),
MetricName::TrafficPacketsForwarded => write!(f, "traffic_packets_forwarded"),
MetricName::TrafficPacketsSelfTx => write!(f, "traffic_packets_self_tx"),
MetricName::TrafficPacketsSelfRx => write!(f, "traffic_packets_self_rx"),
MetricName::TrafficPacketsForeignForwardRx => {
write!(f, "traffic_packets_foreign_forward_rx")
}
MetricName::TrafficPacketsForeignForwardTx => {
write!(f, "traffic_packets_foreign_forward_tx")
}
MetricName::TrafficPacketsForeignForwardForwarded => {
write!(f, "traffic_packets_foreign_forward_forwarded")
}
MetricName::CompressionBytesRxBefore => write!(f, "compression_bytes_rx_before"),
MetricName::CompressionBytesRxAfter => write!(f, "compression_bytes_rx_after"),
MetricName::CompressionBytesTxBefore => write!(f, "compression_bytes_tx_before"),
MetricName::CompressionBytesTxAfter => write!(f, "compression_bytes_tx_after"),
}
}
}
/// Predefined label types for type safety
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub enum LabelType {
/// Network Name
NetworkName(String),
/// Source peer ID
SrcPeerId(u32),
/// Destination peer ID
DstPeerId(u32),
/// Service name
ServiceName(String),
/// Method name
MethodName(String),
/// Protocol type
Protocol(String),
/// Direction (tx/rx)
Direction(String),
/// Compression algorithm
CompressionAlgo(String),
/// Error type
ErrorType(String),
/// Status
Status(String),
}
impl fmt::Display for LabelType {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
LabelType::NetworkName(name) => write!(f, "network_name={}", name),
LabelType::SrcPeerId(id) => write!(f, "src_peer_id={}", id),
LabelType::DstPeerId(id) => write!(f, "dst_peer_id={}", id),
LabelType::ServiceName(name) => write!(f, "service_name={}", name),
LabelType::MethodName(name) => write!(f, "method_name={}", name),
LabelType::Protocol(proto) => write!(f, "protocol={}", proto),
LabelType::Direction(dir) => write!(f, "direction={}", dir),
LabelType::CompressionAlgo(algo) => write!(f, "compression_algo={}", algo),
LabelType::ErrorType(err) => write!(f, "error_type={}", err),
LabelType::Status(status) => write!(f, "status={}", status),
}
}
}
impl LabelType {
pub fn key(&self) -> &'static str {
match self {
LabelType::NetworkName(_) => "network_name",
LabelType::SrcPeerId(_) => "src_peer_id",
LabelType::DstPeerId(_) => "dst_peer_id",
LabelType::ServiceName(_) => "service_name",
LabelType::MethodName(_) => "method_name",
LabelType::Protocol(_) => "protocol",
LabelType::Direction(_) => "direction",
LabelType::CompressionAlgo(_) => "compression_algo",
LabelType::ErrorType(_) => "error_type",
LabelType::Status(_) => "status",
}
}
pub fn value(&self) -> String {
match self {
LabelType::NetworkName(name) => name.clone(),
LabelType::SrcPeerId(id) => id.to_string(),
LabelType::DstPeerId(id) => id.to_string(),
LabelType::ServiceName(name) => name.clone(),
LabelType::MethodName(name) => name.clone(),
LabelType::Protocol(proto) => proto.clone(),
LabelType::Direction(dir) => dir.clone(),
LabelType::CompressionAlgo(algo) => algo.clone(),
LabelType::ErrorType(err) => err.clone(),
LabelType::Status(status) => status.clone(),
}
}
}
/// Label represents a key-value pair for metric identification
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct Label {
pub key: String,
pub value: String,
}
impl Label {
pub fn new(key: impl Into<String>, value: impl Into<String>) -> Self {
Self {
key: key.into(),
value: value.into(),
}
}
pub fn from_label_type(label_type: &LabelType) -> Self {
Self {
key: label_type.key().to_string(),
value: label_type.value(),
}
}
}
/// LabelSet represents a collection of labels for a metric
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct LabelSet {
labels: Vec<Label>,
}
impl LabelSet {
pub fn new() -> Self {
Self { labels: Vec::new() }
}
pub fn with_label(mut self, key: impl Into<String>, value: impl Into<String>) -> Self {
self.labels.push(Label::new(key, value));
self.labels.sort_by(|a, b| a.key.cmp(&b.key)); // Keep labels sorted for consistent hashing
self
}
/// Add a typed label to the set
pub fn with_label_type(mut self, label_type: LabelType) -> Self {
self.labels.push(Label::from_label_type(&label_type));
self.labels.sort_by(|a, b| a.key.cmp(&b.key)); // Keep labels sorted for consistent hashing
self
}
/// Create a LabelSet from multiple LabelTypes
pub fn from_label_types(label_types: &[LabelType]) -> Self {
let mut labels = Vec::new();
for label_type in label_types {
labels.push(Label::from_label_type(label_type));
}
labels.sort_by(|a, b| a.key.cmp(&b.key)); // Keep labels sorted for consistent hashing
Self { labels }
}
pub fn labels(&self) -> &[Label] {
&self.labels
}
/// Generate a string key for this label set
pub fn to_key(&self) -> String {
if self.labels.is_empty() {
return String::new();
}
let mut parts = Vec::with_capacity(self.labels.len());
for label in &self.labels {
parts.push(format!("{}={}", label.key, label.value));
}
parts.join(",")
}
}
impl Default for LabelSet {
fn default() -> Self {
Self::new()
}
}
/// UnsafeCounter provides a high-performance counter using UnsafeCell
#[derive(Debug)]
pub struct UnsafeCounter {
value: UnsafeCell<u64>,
}
impl Default for UnsafeCounter {
fn default() -> Self {
Self::new()
}
}
impl UnsafeCounter {
pub fn new() -> Self {
Self {
value: UnsafeCell::new(0),
}
}
pub fn new_with_value(initial: u64) -> Self {
Self {
value: UnsafeCell::new(initial),
}
}
/// Increment the counter by the given amount
/// # Safety
/// This method is unsafe because it uses UnsafeCell. The caller must ensure
/// that no other thread is accessing this counter simultaneously.
pub unsafe fn add(&self, delta: u64) {
let ptr = self.value.get();
*ptr = (*ptr).saturating_add(delta);
}
/// Increment the counter by 1
/// # Safety
/// This method is unsafe because it uses UnsafeCell. The caller must ensure
/// that no other thread is accessing this counter simultaneously.
pub unsafe fn inc(&self) {
self.add(1);
}
/// Get the current value of the counter
/// # Safety
/// This method is unsafe because it uses UnsafeCell. The caller must ensure
/// that no other thread is modifying this counter simultaneously.
pub unsafe fn get(&self) -> u64 {
let ptr = self.value.get();
*ptr
}
/// Reset the counter to zero
/// # Safety
/// This method is unsafe because it uses UnsafeCell. The caller must ensure
/// that no other thread is accessing this counter simultaneously.
pub unsafe fn reset(&self) {
let ptr = self.value.get();
*ptr = 0;
}
/// Set the counter to a specific value
/// # Safety
/// This method is unsafe because it uses UnsafeCell. The caller must ensure
/// that no other thread is accessing this counter simultaneously.
pub unsafe fn set(&self, value: u64) {
let ptr = self.value.get();
*ptr = value;
}
}
// UnsafeCounter is Send + Sync because the safety is guaranteed by the caller
unsafe impl Send for UnsafeCounter {}
unsafe impl Sync for UnsafeCounter {}
/// MetricData contains both the counter and last update timestamp
/// Uses UnsafeCell for lock-free access
#[derive(Debug)]
struct MetricData {
counter: UnsafeCounter,
last_updated: UnsafeCell<Instant>,
}
impl MetricData {
fn new() -> Self {
Self {
counter: UnsafeCounter::new(),
last_updated: UnsafeCell::new(Instant::now()),
}
}
fn new_with_value(initial: u64) -> Self {
Self {
counter: UnsafeCounter::new_with_value(initial),
last_updated: UnsafeCell::new(Instant::now()),
}
}
/// Update the last_updated timestamp
/// # Safety
/// This method is unsafe because it uses UnsafeCell. The caller must ensure
/// that no other thread is accessing this timestamp simultaneously.
unsafe fn touch(&self) {
let ptr = self.last_updated.get();
*ptr = Instant::now();
}
/// Get the last updated timestamp
/// # Safety
/// This method is unsafe because it uses UnsafeCell. The caller must ensure
/// that no other thread is modifying this timestamp simultaneously.
unsafe fn get_last_updated(&self) -> Instant {
let ptr = self.last_updated.get();
*ptr
}
}
// MetricData is Send + Sync because the safety is guaranteed by the caller
unsafe impl Send for MetricData {}
unsafe impl Sync for MetricData {}
/// MetricKey uniquely identifies a metric with its name and labels
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
struct MetricKey {
name: MetricName,
labels: LabelSet,
}
impl MetricKey {
fn new(name: MetricName, labels: LabelSet) -> Self {
Self { name, labels }
}
}
impl fmt::Display for MetricKey {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let label_str = self.labels.to_key();
if label_str.is_empty() {
f.write_str(self.name.to_string().as_str())
} else {
f.write_str(format!("{}[{}]", self.name, label_str).as_str())
}
}
}
/// CounterHandle provides a safe interface to a MetricData
/// It ensures thread-local access patterns for performance
#[derive(Clone)]
pub struct CounterHandle {
metric_data: Arc<MetricData>,
_key: MetricKey, // Keep key for debugging purposes
}
impl CounterHandle {
fn new(metric_data: Arc<MetricData>, key: MetricKey) -> Self {
Self {
metric_data,
_key: key,
}
}
/// Increment the counter by the given amount
pub fn add(&self, delta: u64) {
unsafe {
self.metric_data.counter.add(delta);
self.metric_data.touch();
}
}
/// Increment the counter by 1
pub fn inc(&self) {
unsafe {
self.metric_data.counter.inc();
self.metric_data.touch();
}
}
/// Get the current value of the counter
pub fn get(&self) -> u64 {
unsafe { self.metric_data.counter.get() }
}
/// Reset the counter to zero
pub fn reset(&self) {
unsafe {
self.metric_data.counter.reset();
self.metric_data.touch();
}
}
/// Set the counter to a specific value
pub fn set(&self, value: u64) {
unsafe {
self.metric_data.counter.set(value);
self.metric_data.touch();
}
}
}
/// MetricSnapshot represents a point-in-time view of a metric
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MetricSnapshot {
pub name: MetricName,
pub labels: LabelSet,
pub value: u64,
}
impl MetricSnapshot {
pub fn name_str(&self) -> String {
self.name.to_string()
}
}
/// StatsManager manages global statistics with high performance counters
pub struct StatsManager {
counters: Arc<DashMap<MetricKey, Arc<MetricData>>>,
cleanup_task: ScopedTask<()>,
}
impl StatsManager {
/// Create a new StatsManager
pub fn new() -> Self {
let counters = Arc::new(DashMap::new());
// Start cleanup task only if we're in a tokio runtime
let counters_clone = Arc::downgrade(&counters.clone());
let cleanup_task = tokio::spawn(async move {
let mut interval = interval(Duration::from_secs(60)); // Check every minute
loop {
interval.tick().await;
let cutoff_time = Instant::now() - Duration::from_secs(180); // 3 minutes
let Some(counters) = counters_clone.upgrade() else {
break;
};
// Remove entries that haven't been updated for 3 minutes
counters.retain(|_, metric_data: &mut Arc<MetricData>| unsafe {
metric_data.get_last_updated() > cutoff_time
});
}
});
Self {
counters,
cleanup_task: cleanup_task.into(),
}
}
/// Get or create a counter with the given name and labels
pub fn get_counter(&self, name: MetricName, labels: LabelSet) -> CounterHandle {
let key = MetricKey::new(name, labels);
let metric_data = self
.counters
.entry(key.clone())
.or_insert_with(|| Arc::new(MetricData::new()))
.clone();
CounterHandle::new(metric_data, key)
}
/// Get a counter with no labels
pub fn get_simple_counter(&self, name: MetricName) -> CounterHandle {
self.get_counter(name, LabelSet::new())
}
/// Get all metric snapshots
pub fn get_all_metrics(&self) -> Vec<MetricSnapshot> {
let mut metrics = Vec::new();
for entry in self.counters.iter() {
let key = entry.key();
let metric_data = entry.value();
let value = unsafe { metric_data.counter.get() };
metrics.push(MetricSnapshot {
name: key.name,
labels: key.labels.clone(),
value,
});
}
// Sort by metric name and then by labels for consistent output
metrics.sort_by(|a, b| {
a.name
.to_string()
.cmp(&b.name.to_string())
.then_with(|| a.labels.to_key().cmp(&b.labels.to_key()))
});
metrics
}
/// Get metrics filtered by name prefix
pub fn get_metrics_by_prefix(&self, prefix: &str) -> Vec<MetricSnapshot> {
self.get_all_metrics()
.into_iter()
.filter(|m| m.name.to_string().starts_with(prefix))
.collect()
}
/// Get a specific metric by name and labels
pub fn get_metric(&self, name: MetricName, labels: &LabelSet) -> Option<MetricSnapshot> {
let key = MetricKey::new(name, labels.clone());
if let Some(metric_data) = self.counters.get(&key) {
let value = unsafe { metric_data.counter.get() };
Some(MetricSnapshot {
name,
labels: labels.clone(),
value,
})
} else {
None
}
}
/// Clear all metrics
pub fn clear(&self) {
self.counters.clear();
}
/// Get the number of tracked metrics
pub fn metric_count(&self) -> usize {
self.counters.len()
}
/// Export metrics in Prometheus format
pub fn export_prometheus(&self) -> String {
let metrics = self.get_all_metrics();
let mut output = String::new();
let mut current_metric = String::new();
for metric in metrics {
let metric_name_str = metric.name.to_string();
if metric_name_str != current_metric {
if !current_metric.is_empty() {
output.push('\n');
}
output.push_str(&format!("# TYPE {} counter\n", metric_name_str));
current_metric = metric_name_str.clone();
}
if metric.labels.labels().is_empty() {
output.push_str(&format!("{} {}\n", metric_name_str, metric.value));
} else {
let label_str = metric
.labels
.labels()
.iter()
.map(|l| format!("{}=\"{}\"", l.key, l.value))
.collect::<Vec<_>>()
.join(",");
output.push_str(&format!(
"{}{{{}}} {}\n",
metric_name_str, label_str, metric.value
));
}
}
output
}
}
impl Default for StatsManager {
fn default() -> Self {
Self::new()
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::common::stats_manager::{LabelSet, LabelType, MetricName, StatsManager};
use crate::proto::cli::{
GetPrometheusStatsRequest, GetPrometheusStatsResponse, GetStatsRequest, GetStatsResponse,
};
use std::collections::BTreeMap;
#[tokio::test]
async fn test_label_set() {
let labels = LabelSet::new()
.with_label("peer_id", "peer1")
.with_label("method", "ping");
assert_eq!(labels.to_key(), "method=ping,peer_id=peer1");
}
#[tokio::test]
async fn test_unsafe_counter() {
let counter = UnsafeCounter::new();
unsafe {
assert_eq!(counter.get(), 0);
counter.inc();
assert_eq!(counter.get(), 1);
counter.add(5);
assert_eq!(counter.get(), 6);
counter.set(10);
assert_eq!(counter.get(), 10);
counter.reset();
assert_eq!(counter.get(), 0);
}
}
#[tokio::test]
async fn test_stats_manager() {
let stats = StatsManager::new();
// Test simple counter
let counter1 = stats.get_simple_counter(MetricName::PeerRpcClientTx);
counter1.inc();
counter1.add(5);
// Test counter with labels
let labels = LabelSet::new()
.with_label("peer_id", "peer1")
.with_label("method", "ping");
let counter2 = stats.get_counter(MetricName::PeerRpcClientTx, labels.clone());
counter2.add(3);
// Check metrics
let metrics = stats.get_all_metrics();
assert_eq!(metrics.len(), 2);
// Find the simple counter
let simple_metric = metrics
.iter()
.find(|m| m.labels.labels().is_empty())
.unwrap();
assert_eq!(simple_metric.name, MetricName::PeerRpcClientTx);
assert_eq!(simple_metric.value, 6);
// Find the labeled counter
let labeled_metric = metrics
.iter()
.find(|m| !m.labels.labels().is_empty())
.unwrap();
assert_eq!(labeled_metric.name, MetricName::PeerRpcClientTx);
assert_eq!(labeled_metric.value, 3);
assert_eq!(labeled_metric.labels, labels);
}
#[tokio::test]
async fn test_prometheus_export() {
let stats = StatsManager::new();
let counter1 = stats.get_simple_counter(MetricName::TrafficBytesTx);
counter1.set(100);
let labels = LabelSet::new().with_label("status", "success");
let counter2 = stats.get_counter(MetricName::PeerRpcClientTx, labels);
counter2.set(50);
let prometheus_output = stats.export_prometheus();
assert!(prometheus_output.contains("# TYPE peer_rpc_client_tx counter"));
assert!(prometheus_output.contains("peer_rpc_client_tx{status=\"success\"} 50"));
assert!(prometheus_output.contains("# TYPE traffic_bytes_tx counter"));
assert!(prometheus_output.contains("traffic_bytes_tx 100"));
}
#[tokio::test]
async fn test_get_metric() {
let stats = StatsManager::new();
let labels = LabelSet::new().with_label("peer", "test");
let counter = stats.get_counter(MetricName::PeerRpcClientTx, labels.clone());
counter.set(42);
let metric = stats
.get_metric(MetricName::PeerRpcClientTx, &labels)
.unwrap();
assert_eq!(metric.value, 42);
let non_existent = stats.get_metric(MetricName::PeerRpcErrors, &LabelSet::new());
assert!(non_existent.is_none());
}
#[tokio::test]
async fn test_metrics_by_prefix() {
let stats = StatsManager::new();
stats
.get_simple_counter(MetricName::PeerRpcClientTx)
.set(10);
stats.get_simple_counter(MetricName::PeerRpcErrors).set(2);
stats
.get_simple_counter(MetricName::TrafficBytesTx)
.set(100);
let rpc_metrics = stats.get_metrics_by_prefix("peer_rpc");
assert_eq!(rpc_metrics.len(), 2);
let traffic_metrics = stats.get_metrics_by_prefix("traffic_");
assert_eq!(traffic_metrics.len(), 1);
}
#[tokio::test]
async fn test_cleanup_mechanism() {
let stats = StatsManager::new();
// Create some counters
let counter1 = stats.get_simple_counter(MetricName::PeerRpcClientTx);
counter1.set(10);
let labels = LabelSet::new().with_label("test", "value");
let counter2 = stats.get_counter(MetricName::TrafficBytesTx, labels);
counter2.set(20);
// Verify the counters exist
assert_eq!(stats.metric_count(), 2);
// Note: a real cleanup test would need to wait 3 minutes, which is impractical in a unit test.
// Here we only verify that the basic structure of the cleanup mechanism is correct.
// The cleanup logic runs in a background task and automatically removes entries not updated for 3 minutes.
// Verify the counters still work as expected.
counter1.inc();
assert_eq!(counter1.get(), 11);
counter2.add(5);
assert_eq!(counter2.get(), 25);
}
#[tokio::test]
async fn test_stats_rpc_data_structures() {
// Test GetStatsRequest
let request = GetStatsRequest {};
assert_eq!(request, GetStatsRequest {});
// Test GetStatsResponse
let response = GetStatsResponse { metrics: vec![] };
assert!(response.metrics.is_empty());
// Test GetPrometheusStatsRequest
let prometheus_request = GetPrometheusStatsRequest {};
assert_eq!(prometheus_request, GetPrometheusStatsRequest {});
// Test GetPrometheusStatsResponse
let prometheus_response = GetPrometheusStatsResponse {
prometheus_text: "# Test metrics\n".to_string(),
};
assert_eq!(prometheus_response.prometheus_text, "# Test metrics\n");
}
#[tokio::test]
async fn test_metric_snapshot_creation() {
let stats_manager = StatsManager::new();
// Create some test metrics
let counter1 = stats_manager.get_counter(
MetricName::PeerRpcClientTx,
LabelSet::new()
.with_label_type(LabelType::SrcPeerId(123))
.with_label_type(LabelType::ServiceName("test_service".to_string())),
);
counter1.add(100);
let counter2 = stats_manager.get_counter(
MetricName::TrafficBytesTx,
LabelSet::new().with_label_type(LabelType::Protocol("tcp".to_string())),
);
counter2.add(1024);
// Get all metrics
let metrics = stats_manager.get_all_metrics();
assert_eq!(metrics.len(), 2);
// Verify the metrics can be converted to the format expected by RPC
for metric in metrics {
let mut labels = BTreeMap::new();
for label in metric.labels.labels() {
labels.insert(label.key.clone(), label.value.clone());
}
// This simulates what the RPC service would do
let _metric_snapshot = crate::proto::cli::MetricSnapshot {
name: metric.name.to_string(),
value: metric.value,
labels,
};
}
}
#[tokio::test]
async fn test_prometheus_export_format() {
let stats_manager = StatsManager::new();
// Create test metrics
let counter = stats_manager.get_counter(
MetricName::PeerRpcClientTx,
LabelSet::new()
.with_label_type(LabelType::SrcPeerId(123))
.with_label_type(LabelType::ServiceName("test".to_string())),
);
counter.add(42);
// Export to Prometheus format
let prometheus_text = stats_manager.export_prometheus();
println!("{}", prometheus_text);
// Verify the format
assert!(prometheus_text.contains("peer_rpc_client_tx"));
assert!(prometheus_text.contains("42"));
assert!(prometheus_text.contains("src_peer_id=\"123\""));
assert!(prometheus_text.contains("service_name=\"test\""));
}
}
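For reference, this is roughly how the counters in the deleted `stats_manager.rs` were consumed, pieced together from the module's own tests above; the crate path (`easytier::common::stats_manager`) is an assumption, and `StatsManager::new()` spawns its cleanup task, so it has to run inside a tokio runtime:

```rust
use easytier::common::stats_manager::{LabelSet, LabelType, MetricName, StatsManager};

#[tokio::main]
async fn main() {
    let stats = StatsManager::new();

    // A counter with no labels.
    let tx_bytes = stats.get_simple_counter(MetricName::TrafficBytesTx);
    tx_bytes.add(1024);

    // A counter with typed labels.
    let rpc_tx = stats.get_counter(
        MetricName::PeerRpcClientTx,
        LabelSet::new()
            .with_label_type(LabelType::SrcPeerId(123))
            .with_label_type(LabelType::ServiceName("sync".to_string())),
    );
    rpc_tx.inc();

    // Snapshot and export everything in Prometheus text format.
    println!("{}", stats.export_prometheus());
}
```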

View File

@@ -282,7 +282,9 @@ impl StunClient {
.with_context(|| "encode stun message")?; .with_context(|| "encode stun message")?;
tids.push(tid); tids.push(tid);
tracing::trace!(?message, ?msg, tid, "send stun request"); tracing::trace!(?message, ?msg, tid, "send stun request");
self.socket.send_to(msg.as_slice(), &stun_host).await?; self.socket
.send_to(msg.as_slice().into(), &stun_host)
.await?;
} }
let now = Instant::now(); let now = Instant::now();
@@ -370,7 +372,7 @@ impl StunClientBuilder {
pub async fn stop(&mut self) { pub async fn stop(&mut self) {
self.task_set.abort_all(); self.task_set.abort_all();
while self.task_set.join_next().await.is_some() {} while let Some(_) = self.task_set.join_next().await {}
} }
} }
@@ -415,7 +417,7 @@ impl UdpNatTypeDetectResult {
return true; return true;
} }
} }
false return false;
} }
fn is_pat(&self) -> bool { fn is_pat(&self) -> bool {
@@ -455,16 +457,16 @@ impl UdpNatTypeDetectResult {
if self.is_cone() { if self.is_cone() {
if self.has_ip_changed_resp() { if self.has_ip_changed_resp() {
if self.is_open_internet() { if self.is_open_internet() {
NatType::OpenInternet return NatType::OpenInternet;
} else if self.is_pat() { } else if self.is_pat() {
NatType::NoPat return NatType::NoPat;
} else { } else {
NatType::FullCone return NatType::FullCone;
} }
} else if self.has_port_changed_resp() { } else if self.has_port_changed_resp() {
NatType::Restricted return NatType::Restricted;
} else { } else {
NatType::PortRestricted return NatType::PortRestricted;
} }
} else if !self.stun_resps.is_empty() { } else if !self.stun_resps.is_empty() {
if self.public_ips().len() != 1 if self.public_ips().len() != 1
@@ -478,7 +480,7 @@ impl UdpNatTypeDetectResult {
.mapped_socket_addr .mapped_socket_addr
.is_none() .is_none()
{ {
NatType::Symmetric return NatType::Symmetric;
} else { } else {
let extra_bind_test = self.extra_bind_test.as_ref().unwrap(); let extra_bind_test = self.extra_bind_test.as_ref().unwrap();
let extra_port = extra_bind_test.mapped_socket_addr.unwrap().port(); let extra_port = extra_bind_test.mapped_socket_addr.unwrap().port();
@@ -486,15 +488,15 @@ impl UdpNatTypeDetectResult {
let max_port_diff = extra_port.saturating_sub(self.max_port()); let max_port_diff = extra_port.saturating_sub(self.max_port());
let min_port_diff = self.min_port().saturating_sub(extra_port); let min_port_diff = self.min_port().saturating_sub(extra_port);
if max_port_diff != 0 && max_port_diff < 100 { if max_port_diff != 0 && max_port_diff < 100 {
NatType::SymmetricEasyInc return NatType::SymmetricEasyInc;
} else if min_port_diff != 0 && min_port_diff < 100 { } else if min_port_diff != 0 && min_port_diff < 100 {
NatType::SymmetricEasyDec return NatType::SymmetricEasyDec;
} else { } else {
NatType::Symmetric return NatType::Symmetric;
} }
} }
} else { } else {
NatType::Unknown return NatType::Unknown;
} }
} }
@@ -677,7 +679,7 @@ impl StunInfoCollectorTrait for StunInfoCollector {
.unwrap() .unwrap()
.clone() .clone()
.map(|x| x.collect_available_stun_server()) .map(|x| x.collect_available_stun_server())
.unwrap_or_default(); .unwrap_or(vec![]);
if stun_servers.is_empty() { if stun_servers.is_empty() {
let mut host_resolver = let mut host_resolver =
@@ -738,7 +740,7 @@ impl StunInfoCollector {
pub fn get_default_servers() -> Vec<String> { pub fn get_default_servers() -> Vec<String> {
// NOTICE: we may need to choose stun stun server based on geo location // NOTICE: we may need to choose stun stun server based on geo location
// stun server cross nation may return a external ip address with high latency and loss rate // stun server cross nation may return a external ip address with high latency and loss rate
[ vec![
"txt:stun.easytier.cn", "txt:stun.easytier.cn",
"stun.miwifi.com", "stun.miwifi.com",
"stun.chat.bilibili.com", "stun.chat.bilibili.com",
@@ -750,16 +752,16 @@ impl StunInfoCollector {
} }
pub fn get_default_servers_v6() -> Vec<String> { pub fn get_default_servers_v6() -> Vec<String> {
["txt:stun-v6.easytier.cn"] vec!["txt:stun-v6.easytier.cn"]
.iter() .iter()
.map(|x| x.to_string()) .map(|x| x.to_string())
.collect() .collect()
} }
async fn get_public_ipv6(servers: &[String]) -> Option<Ipv6Addr> { async fn get_public_ipv6(servers: &Vec<String>) -> Option<Ipv6Addr> {
let mut ips = HostResolverIter::new(servers.to_vec(), 10, true); let mut ips = HostResolverIter::new(servers.to_vec(), 10, true);
while let Some(ip) = ips.next().await { while let Some(ip) = ips.next().await {
let Ok(udp_socket) = UdpSocket::bind("[::]:0".to_string()).await else { let Ok(udp_socket) = UdpSocket::bind(format!("[::]:0")).await else {
break; break;
}; };
let udp = Arc::new(udp_socket); let udp = Arc::new(udp_socket);
@@ -768,8 +770,11 @@ impl StunInfoCollector {
.bind_request(false, false) .bind_request(false, false)
.await; .await;
tracing::debug!(?ret, "finish ipv6 udp nat type detect"); tracing::debug!(?ret, "finish ipv6 udp nat type detect");
if let Ok(Some(IpAddr::V6(v6))) = ret.map(|x| x.mapped_socket_addr.map(|x| x.ip())) { match ret.map(|x| x.mapped_socket_addr.map(|x| x.ip())) {
return Some(v6); Ok(Some(IpAddr::V6(v6))) => {
return Some(v6);
}
_ => {}
} }
} }
None None
@@ -849,9 +854,9 @@ impl StunInfoCollector {
self.tasks.lock().unwrap().spawn(async move { self.tasks.lock().unwrap().spawn(async move {
loop { loop {
let servers = stun_servers.read().unwrap().clone(); let servers = stun_servers.read().unwrap().clone();
if let Some(x) = Self::get_public_ipv6(&servers).await { Self::get_public_ipv6(&servers)
stored_ipv6.store(Some(x)) .await
} .map(|x| stored_ipv6.store(Some(x)));
let sleep_sec = if stored_ipv6.load().is_none() { let sleep_sec = if stored_ipv6.load().is_none() {
60 60

View File

@@ -34,7 +34,7 @@ impl From<LimiterConfig> for BucketConfig {
.unwrap_or(Duration::from_millis(10)); .unwrap_or(Duration::from_millis(10));
BucketConfig { BucketConfig {
capacity: burst_rate * fill_rate, capacity: burst_rate * fill_rate,
fill_rate, fill_rate: fill_rate,
refill_interval, refill_interval,
} }
} }
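Read concretely, the conversion above sizes the bucket as `burst_rate * fill_rate` and falls back to a 10 ms refill interval when none is configured; for example, a fill rate of 1,000,000 bytes/s with a burst multiplier of 4 yields a 4 MB bucket. A rough sketch of that mapping, with the `LimiterConfig` fields modeled as plain `Option` values and placeholder defaults, since their exact definition is not shown in this hunk:

```rust
use std::time::Duration;

struct BucketConfig {
    capacity: u64,
    fill_rate: u64,
    refill_interval: Duration,
}

struct LimiterConfig {
    fill_rate: Option<u64>,       // bytes per second
    burst_rate: Option<u64>,      // seconds of traffic the bucket can hold
    refill_interval_ms: Option<u64>,
}

impl From<LimiterConfig> for BucketConfig {
    fn from(cfg: LimiterConfig) -> Self {
        // Placeholder defaults for illustration only, not EasyTier's real defaults.
        let fill_rate = cfg.fill_rate.unwrap_or(1_000_000);
        let burst_rate = cfg.burst_rate.unwrap_or(4);
        let refill_interval = cfg
            .refill_interval_ms
            .map(Duration::from_millis)
            .unwrap_or(Duration::from_millis(10));
        BucketConfig {
            capacity: burst_rate * fill_rate,
            fill_rate,
            refill_interval,
        }
    }
}

fn main() {
    let bucket: BucketConfig = LimiterConfig {
        fill_rate: Some(1_000_000),
        burst_rate: Some(4),
        refill_interval_ms: None,
    }
    .into();
    assert_eq!(bucket.capacity, 4_000_000);
    println!("capacity = {} bytes", bucket.capacity);
}
```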
@@ -162,12 +162,6 @@ pub struct TokenBucketManager {
retain_task: ScopedTask<()>, retain_task: ScopedTask<()>,
} }
impl Default for TokenBucketManager {
fn default() -> Self {
Self::new()
}
}
impl TokenBucketManager { impl TokenBucketManager {
/// Creates a new TokenBucketManager /// Creates a new TokenBucketManager
pub fn new() -> Self { pub fn new() -> Self {
@@ -324,7 +318,7 @@ mod tests {
// Should have accumulated about 100 tokens (10,000 tokens/s * 0.001s) // Should have accumulated about 100 tokens (10,000 tokens/s * 0.001s)
let tokens = bucket.available_tokens.load(Ordering::Relaxed); let tokens = bucket.available_tokens.load(Ordering::Relaxed);
assert!( assert!(
(100..=200).contains(&tokens), tokens >= 100 && tokens <= 200,
"Unexpected token count: {}", "Unexpected token count: {}",
tokens tokens
); );
@@ -361,7 +355,8 @@ mod tests {
.list_foreign_networks() .list_foreign_networks()
.await .await
.foreign_networks .foreign_networks
.is_empty() .len()
== 0
}, },
Duration::from_secs(5), Duration::from_secs(5),
) )
@@ -375,7 +370,8 @@ mod tests {
.get_global_ctx() .get_global_ctx()
.token_bucket_manager() .token_bucket_manager()
.buckets .buckets
.is_empty() .len()
== 0
}, },
Duration::from_secs(10), Duration::from_secs(10),
) )

View File

@@ -16,7 +16,6 @@ use crate::{
dns::socket_addrs, error::Error, global_ctx::ArcGlobalCtx, stun::StunInfoCollectorTrait, dns::socket_addrs, error::Error, global_ctx::ArcGlobalCtx, stun::StunInfoCollectorTrait,
PeerId, PeerId,
}, },
connector::udp_hole_punch::handle_rpc_result,
peers::{ peers::{
peer_conn::PeerConnId, peer_conn::PeerConnId,
peer_manager::PeerManager, peer_manager::PeerManager,
@@ -92,7 +91,6 @@ struct DirectConnectorManagerData {
global_ctx: ArcGlobalCtx, global_ctx: ArcGlobalCtx,
peer_manager: Arc<PeerManager>, peer_manager: Arc<PeerManager>,
dst_listener_blacklist: timedmap::TimedMap<DstListenerUrlBlackListItem, ()>, dst_listener_blacklist: timedmap::TimedMap<DstListenerUrlBlackListItem, ()>,
peer_black_list: timedmap::TimedMap<PeerId, ()>,
} }
impl DirectConnectorManagerData { impl DirectConnectorManagerData {
@@ -101,7 +99,6 @@ impl DirectConnectorManagerData {
global_ctx, global_ctx,
peer_manager, peer_manager,
dst_listener_blacklist: timedmap::TimedMap::new(), dst_listener_blacklist: timedmap::TimedMap::new(),
peer_black_list: timedmap::TimedMap::new(),
} }
} }
@@ -180,13 +177,16 @@ impl DirectConnectorManagerData {
// ask remote to send v6 hole punch packet // ask remote to send v6 hole punch packet
// and no matter what the result is, continue to connect // and no matter what the result is, continue to connect
let _ = self let _ = self
.remote_send_v6_hole_punch_packet(dst_peer_id, &local_socket, remote_url) .remote_send_v6_hole_punch_packet(dst_peer_id, &local_socket, &remote_url)
.await; .await;
let udp_connector = UdpTunnelConnector::new(remote_url.clone()); let udp_connector = UdpTunnelConnector::new(remote_url.clone());
let remote_addr = let remote_addr = super::check_scheme_and_get_socket_addr::<SocketAddr>(
super::check_scheme_and_get_socket_addr::<SocketAddr>(remote_url, "udp", IpVersion::V6) &remote_url,
.await?; "udp",
IpVersion::V6,
)
.await?;
let ret = udp_connector let ret = udp_connector
.try_connect_with_socket(local_socket, remote_addr) .try_connect_with_socket(local_socket, remote_addr)
.await?; .await?;
@@ -230,8 +230,8 @@ impl DirectConnectorManagerData {
dst_peer_id: PeerId, dst_peer_id: PeerId,
addr: String, addr: String,
) -> Result<(), Error> { ) -> Result<(), Error> {
let mut rand_gen = rand::rngs::OsRng; let mut rand_gen = rand::rngs::OsRng::default();
let backoff_ms = [1000, 2000, 4000]; let backoff_ms = vec![1000, 2000, 4000];
let mut backoff_idx = 0; let mut backoff_idx = 0;
tracing::debug!(?dst_peer_id, ?addr, "try_connect_to_ip start"); tracing::debug!(?dst_peer_id, ?addr, "try_connect_to_ip start");
@@ -240,7 +240,10 @@ impl DirectConnectorManagerData {
if self if self
.dst_listener_blacklist .dst_listener_blacklist
.contains(&DstListenerUrlBlackListItem(dst_peer_id, addr.clone())) .contains(&DstListenerUrlBlackListItem(
dst_peer_id.clone(),
addr.clone(),
))
{ {
return Err(Error::UrlInBlacklist); return Err(Error::UrlInBlacklist);
} }
@@ -275,7 +278,7 @@ impl DirectConnectorManagerData {
continue; continue;
} else { } else {
self.dst_listener_blacklist.insert( self.dst_listener_blacklist.insert(
DstListenerUrlBlackListItem(dst_peer_id, addr), DstListenerUrlBlackListItem(dst_peer_id.clone(), addr),
(), (),
std::time::Duration::from_secs(DIRECT_CONNECTOR_BLACKLIST_TIMEOUT_SEC), std::time::Duration::from_secs(DIRECT_CONNECTOR_BLACKLIST_TIMEOUT_SEC),
); );
@@ -309,7 +312,7 @@ impl DirectConnectorManagerData {
if addr.set_host(Some(ip.to_string().as_str())).is_ok() { if addr.set_host(Some(ip.to_string().as_str())).is_ok() {
tasks.spawn(Self::try_connect_to_ip( tasks.spawn(Self::try_connect_to_ip(
self.clone(), self.clone(),
dst_peer_id, dst_peer_id.clone(),
addr.to_string(), addr.to_string(),
)); ));
} else { } else {
@@ -324,7 +327,7 @@ impl DirectConnectorManagerData {
} else if !s_addr.ip().is_loopback() || TESTING.load(Ordering::Relaxed) { } else if !s_addr.ip().is_loopback() || TESTING.load(Ordering::Relaxed) {
tasks.spawn(Self::try_connect_to_ip( tasks.spawn(Self::try_connect_to_ip(
self.clone(), self.clone(),
dst_peer_id, dst_peer_id.clone(),
listener.to_string(), listener.to_string(),
)); ));
} }
@@ -349,10 +352,13 @@ impl DirectConnectorManagerData {
.iter() .iter()
.for_each(|ip| { .for_each(|ip| {
let mut addr = (*listener).clone(); let mut addr = (*listener).clone();
if addr.set_host(Some(format!("[{}]", ip).as_str())).is_ok() { if addr
.set_host(Some(format!("[{}]", ip.to_string()).as_str()))
.is_ok()
{
tasks.spawn(Self::try_connect_to_ip( tasks.spawn(Self::try_connect_to_ip(
self.clone(), self.clone(),
dst_peer_id, dst_peer_id.clone(),
addr.to_string(), addr.to_string(),
)); ));
} else { } else {
@@ -367,7 +373,7 @@ impl DirectConnectorManagerData {
} else if !s_addr.ip().is_loopback() || TESTING.load(Ordering::Relaxed) { } else if !s_addr.ip().is_loopback() || TESTING.load(Ordering::Relaxed) {
tasks.spawn(Self::try_connect_to_ip( tasks.spawn(Self::try_connect_to_ip(
self.clone(), self.clone(),
dst_peer_id, dst_peer_id.clone(),
listener.to_string(), listener.to_string(),
)); ));
} }
@@ -427,8 +433,13 @@ impl DirectConnectorManagerData {
} }
tracing::debug!("try direct connect to peer with listener: {}", listener); tracing::debug!("try direct connect to peer with listener: {}", listener);
self.spawn_direct_connect_task(dst_peer_id, &ip_list, listener, &mut tasks) self.spawn_direct_connect_task(
.await; dst_peer_id.clone(),
&ip_list,
&listener,
&mut tasks,
)
.await;
listener_list.push(listener.clone().to_string()); listener_list.push(listener.clone().to_string());
available_listeners.pop(); available_listeners.pop();
@@ -462,17 +473,7 @@ impl DirectConnectorManagerData {
) -> Result<(), Error> { ) -> Result<(), Error> {
let mut backoff = let mut backoff =
udp_hole_punch::BackOff::new(vec![1000, 2000, 2000, 5000, 5000, 10000, 30000, 60000]); udp_hole_punch::BackOff::new(vec![1000, 2000, 2000, 5000, 5000, 10000, 30000, 60000]);
let mut attempt = 0;
loop { loop {
if self.peer_black_list.contains(&dst_peer_id) {
return Err(anyhow::anyhow!("peer {} is blacklisted", dst_peer_id).into());
}
if attempt > 0 {
tokio::time::sleep(Duration::from_millis(backoff.next_backoff())).await;
}
attempt += 1;
let peer_manager = self.peer_manager.clone(); let peer_manager = self.peer_manager.clone();
tracing::debug!("try direct connect to peer: {}", dst_peer_id); tracing::debug!("try direct connect to peer: {}", dst_peer_id);
@@ -485,11 +486,17 @@ impl DirectConnectorManagerData {
self.global_ctx.get_network_name(), self.global_ctx.get_network_name(),
); );
let ip_list = rpc_stub let ip_list = match rpc_stub
.get_ip_list(BaseController::default(), GetIpListRequest {}) .get_ip_list(BaseController::default(), GetIpListRequest {})
.await; .await
let ip_list = handle_rpc_result(ip_list, dst_peer_id, &self.peer_black_list) .with_context(|| format!("get ip list from peer {}", dst_peer_id))
.with_context(|| format!("get ip list from peer {}", dst_peer_id))?; {
Ok(ip_list) => ip_list,
Err(e) => {
tracing::error!(?e, "failed to get ip list from peer");
continue;
}
};
tracing::info!(ip_list = ?ip_list, dst_peer_id = ?dst_peer_id, "got ip list"); tracing::info!(ip_list = ?ip_list, dst_peer_id = ?dst_peer_id, "got ip list");
@@ -505,6 +512,8 @@ impl DirectConnectorManagerData {
); );
return Ok(()); return Ok(());
} }
tokio::time::sleep(Duration::from_millis(backoff.next_backoff())).await;
} }
} }
} }
@@ -538,16 +547,13 @@ impl PeerTaskLauncher for DirectConnectorLauncher {
} }
async fn collect_peers_need_task(&self, data: &Self::Data) -> Vec<Self::CollectPeerItem> { async fn collect_peers_need_task(&self, data: &Self::Data) -> Vec<Self::CollectPeerItem> {
data.peer_black_list.cleanup();
let my_peer_id = data.peer_manager.my_peer_id(); let my_peer_id = data.peer_manager.my_peer_id();
data.peer_manager data.peer_manager
.list_peers() .list_peers()
.await .await
.into_iter() .into_iter()
.filter(|peer_id| { .filter(|peer_id| {
*peer_id != my_peer_id *peer_id != my_peer_id && !data.peer_manager.has_directly_connected_conn(*peer_id)
&& !data.peer_manager.has_directly_connected_conn(*peer_id)
&& !data.peer_black_list.contains(peer_id)
}) })
.collect() .collect()
} }

View File

@@ -124,11 +124,11 @@ impl DNSTunnelConnector {
let responses = responses.clone(); let responses = responses.clone();
async move { async move {
let response = resolver.srv_lookup(srv_domain).await.with_context(|| { let response = resolver.srv_lookup(srv_domain).await.with_context(|| {
format!("srv_lookup failed, srv_domain: {}", srv_domain) format!("srv_lookup failed, srv_domain: {}", srv_domain.to_string())
})?; })?;
tracing::info!(?response, ?srv_domain, "srv_lookup response"); tracing::info!(?response, ?srv_domain, "srv_lookup response");
for record in response.iter() { for record in response.iter() {
let parsed_record = Self::handle_one_srv_record(record, protocol); let parsed_record = Self::handle_one_srv_record(record, &protocol);
tracing::info!(?parsed_record, ?srv_domain, "parsed_record"); tracing::info!(?parsed_record, ?srv_domain, "parsed_record");
if parsed_record.is_err() { if parsed_record.is_err() {
eprintln!( eprintln!(
@@ -153,7 +153,8 @@ impl DNSTunnelConnector {
let url = weighted_choice(srv_records.as_slice()).with_context(|| { let url = weighted_choice(srv_records.as_slice()).with_context(|| {
format!( format!(
"failed to choose a srv record, domain_name: {}, srv_records: {:?}", "failed to choose a srv record, domain_name: {}, srv_records: {:?}",
domain_name, srv_records domain_name.to_string(),
srv_records
) )
})?; })?;

View File

@@ -93,7 +93,7 @@ impl HttpTunnelConnector {
tracing::info!("try to create connector by url: {}", query[0]); tracing::info!("try to create connector by url: {}", query[0]);
self.redirect_type = HttpRedirectType::RedirectToQuery; self.redirect_type = HttpRedirectType::RedirectToQuery;
return create_connector_by_url( return create_connector_by_url(
query[0].as_ref(), &query[0].to_string(),
&self.global_ctx, &self.global_ctx,
self.ip_version, self.ip_version,
) )
@@ -193,7 +193,7 @@ impl HttpTunnelConnector {
.ok_or_else(|| Error::InvalidUrl("no redirect address found".to_string()))?; .ok_or_else(|| Error::InvalidUrl("no redirect address found".to_string()))?;
let new_url = url::Url::parse(redirect_url.as_str()) let new_url = url::Url::parse(redirect_url.as_str())
.with_context(|| format!("parsing redirect url failed. url: {}", redirect_url))?; .with_context(|| format!("parsing redirect url failed. url: {}", redirect_url))?;
return self.handle_302_redirect(new_url, redirect_url).await; return self.handle_302_redirect(new_url, &redirect_url).await;
} else if res.status_code().is_success() { } else if res.status_code().is_success() {
return self.handle_200_success(&body).await; return self.handle_200_success(&body).await;
} else { } else {

View File

@@ -4,11 +4,11 @@ use std::{
}; };
use anyhow::Context; use anyhow::Context;
use dashmap::DashSet; use dashmap::{DashMap, DashSet};
use tokio::{ use tokio::{
sync::{ sync::{
broadcast::{error::RecvError, Receiver}, broadcast::{error::RecvError, Receiver},
mpsc, mpsc, Mutex,
}, },
task::JoinSet, task::JoinSet,
time::timeout, time::timeout,
@@ -32,6 +32,7 @@ use crate::{
global_ctx::{ArcGlobalCtx, GlobalCtxEvent}, global_ctx::{ArcGlobalCtx, GlobalCtxEvent},
netns::NetNS, netns::NetNS,
}, },
connector::set_bind_addr_for_peer_connector,
peers::peer_manager::PeerManager, peers::peer_manager::PeerManager,
proto::cli::{ proto::cli::{
Connector, ConnectorManageRpc, ConnectorStatus, ListConnectorRequest, Connector, ConnectorManageRpc, ConnectorStatus, ListConnectorRequest,
@@ -42,7 +43,8 @@ use crate::{
use super::create_connector_by_url; use super::create_connector_by_url;
type ConnectorMap = Arc<DashSet<String>>; type MutexConnector = Arc<Mutex<Box<dyn TunnelConnector>>>;
type ConnectorMap = Arc<DashMap<String, MutexConnector>>;
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
struct ReconnResult { struct ReconnResult {
@@ -70,7 +72,7 @@ pub struct ManualConnectorManager {
impl ManualConnectorManager { impl ManualConnectorManager {
pub fn new(global_ctx: ArcGlobalCtx, peer_manager: Arc<PeerManager>) -> Self { pub fn new(global_ctx: ArcGlobalCtx, peer_manager: Arc<PeerManager>) -> Self {
let connectors = Arc::new(DashSet::new()); let connectors = Arc::new(DashMap::new());
let tasks = JoinSet::new(); let tasks = JoinSet::new();
let event_subscriber = global_ctx.subscribe(); let event_subscriber = global_ctx.subscribe();
@@ -103,11 +105,14 @@ impl ManualConnectorManager {
T: TunnelConnector + 'static, T: TunnelConnector + 'static,
{ {
tracing::info!("add_connector: {}", connector.remote_url()); tracing::info!("add_connector: {}", connector.remote_url());
self.data.connectors.insert(connector.remote_url().into()); self.data.connectors.insert(
connector.remote_url().into(),
Arc::new(Mutex::new(Box::new(connector))),
);
} }
pub async fn add_connector_by_url(&self, url: &str) -> Result<(), Error> { pub async fn add_connector_by_url(&self, url: &str) -> Result<(), Error> {
self.data.connectors.insert(url.to_owned()); self.add_connector(create_connector_by_url(url, &self.global_ctx, IpVersion::Both).await?);
Ok(()) Ok(())
} }
@@ -131,7 +136,7 @@ impl ManualConnectorManager {
.data .data
.connectors .connectors
.iter() .iter()
.map(|x| x.key().clone()) .map(|x| x.key().clone().into())
.collect(); .collect();
let dead_urls: BTreeSet<String> = Self::collect_dead_conns(self.data.clone()) let dead_urls: BTreeSet<String> = Self::collect_dead_conns(self.data.clone())
@@ -155,8 +160,12 @@ impl ManualConnectorManager {
); );
} }
let reconnecting_urls: BTreeSet<String> = let reconnecting_urls: BTreeSet<String> = self
self.data.reconnecting.iter().map(|x| x.clone()).collect(); .data
.reconnecting
.iter()
.map(|x| x.clone().into())
.collect();
for conn_url in reconnecting_urls { for conn_url in reconnecting_urls {
ret.insert( ret.insert(
@@ -227,16 +236,16 @@ impl ManualConnectorManager {
for dead_url in dead_urls { for dead_url in dead_urls {
let data_clone = data.clone(); let data_clone = data.clone();
let sender = reconn_result_send.clone(); let sender = reconn_result_send.clone();
data.connectors.remove(&dead_url).unwrap(); let (_, connector) = data.connectors.remove(&dead_url).unwrap();
let insert_succ = data.reconnecting.insert(dead_url.clone()); let insert_succ = data.reconnecting.insert(dead_url.clone());
assert!(insert_succ); assert!(insert_succ);
tasks.lock().unwrap().spawn(async move { tasks.lock().unwrap().spawn(async move {
let reconn_ret = Self::conn_reconnect(data_clone.clone(), dead_url.clone() ).await; let reconn_ret = Self::conn_reconnect(data_clone.clone(), dead_url.clone(), connector.clone()).await;
let _ = sender.send(reconn_ret).await; let _ = sender.send(reconn_ret).await;
data_clone.reconnecting.remove(&dead_url).unwrap(); data_clone.reconnecting.remove(&dead_url).unwrap();
data_clone.connectors.insert(dead_url.clone()); data_clone.connectors.insert(dead_url.clone(), connector);
}); });
} }
tracing::info!("reconn_interval tick, done"); tracing::info!("reconn_interval tick, done");
@@ -278,7 +287,7 @@ impl ManualConnectorManager {
let remove_later = DashSet::new(); let remove_later = DashSet::new();
for it in data.removed_conn_urls.iter() { for it in data.removed_conn_urls.iter() {
let url = it.key(); let url = it.key();
if data.connectors.remove(url).is_some() { if let Some(_) = data.connectors.remove(url) {
tracing::warn!("connector: {}, removed", url); tracing::warn!("connector: {}, removed", url);
continue; continue;
} else if data.reconnecting.contains(url) { } else if data.reconnecting.contains(url) {
@@ -297,7 +306,11 @@ impl ManualConnectorManager {
async fn collect_dead_conns(data: Arc<ConnectorManagerData>) -> BTreeSet<String> { async fn collect_dead_conns(data: Arc<ConnectorManagerData>) -> BTreeSet<String> {
Self::handle_remove_connector(data.clone()); Self::handle_remove_connector(data.clone());
let all_urls: BTreeSet<String> = data.connectors.iter().map(|x| x.key().clone()).collect(); let all_urls: BTreeSet<String> = data
.connectors
.iter()
.map(|x| x.key().clone().into())
.collect();
let mut ret = BTreeSet::new(); let mut ret = BTreeSet::new();
for url in all_urls.iter() { for url in all_urls.iter() {
if !data.alive_conn_urls.contains(url) { if !data.alive_conn_urls.contains(url) {
@@ -310,13 +323,25 @@ impl ManualConnectorManager {
async fn conn_reconnect_with_ip_version( async fn conn_reconnect_with_ip_version(
data: Arc<ConnectorManagerData>, data: Arc<ConnectorManagerData>,
dead_url: String, dead_url: String,
connector: MutexConnector,
ip_version: IpVersion, ip_version: IpVersion,
) -> Result<ReconnResult, Error> { ) -> Result<ReconnResult, Error> {
let connector = let ip_collector = data.global_ctx.get_ip_collector();
create_connector_by_url(&dead_url, &data.global_ctx.clone(), ip_version).await?;
data.global_ctx connector.lock().await.set_ip_version(ip_version);
.issue_event(GlobalCtxEvent::Connecting(connector.remote_url().clone()));
if data.global_ctx.config.get_flags().bind_device {
set_bind_addr_for_peer_connector(
connector.lock().await.as_mut(),
ip_version == IpVersion::V4,
&ip_collector,
)
.await;
}
data.global_ctx.issue_event(GlobalCtxEvent::Connecting(
connector.lock().await.remote_url().clone(),
));
tracing::info!("reconnect try connect... conn: {:?}", connector); tracing::info!("reconnect try connect... conn: {:?}", connector);
let Some(pm) = data.peer_manager.upgrade() else { let Some(pm) = data.peer_manager.upgrade() else {
return Err(Error::AnyhowError(anyhow::anyhow!( return Err(Error::AnyhowError(anyhow::anyhow!(
@@ -324,7 +349,9 @@ impl ManualConnectorManager {
))); )));
}; };
let (peer_id, conn_id) = pm.try_direct_connect(connector).await?; let (peer_id, conn_id) = pm
.try_direct_connect(connector.lock().await.as_mut())
.await?;
tracing::info!("reconnect succ: {} {} {}", peer_id, conn_id, dead_url); tracing::info!("reconnect succ: {} {} {}", peer_id, conn_id, dead_url);
Ok(ReconnResult { Ok(ReconnResult {
dead_url, dead_url,
@@ -336,6 +363,7 @@ impl ManualConnectorManager {
async fn conn_reconnect( async fn conn_reconnect(
data: Arc<ConnectorManagerData>, data: Arc<ConnectorManagerData>,
dead_url: String, dead_url: String,
connector: MutexConnector,
) -> Result<ReconnResult, Error> { ) -> Result<ReconnResult, Error> {
tracing::info!("reconnect: {}", dead_url); tracing::info!("reconnect: {}", dead_url);
@@ -387,33 +415,31 @@ impl ManualConnectorManager {
let ret = timeout( let ret = timeout(
// allow http connector to wait longer // allow http connector to wait longer
std::time::Duration::from_secs(if use_long_timeout { 20 } else { 2 }), std::time::Duration::from_secs(if use_long_timeout { 20 } else { 2 }),
Self::conn_reconnect_with_ip_version(data.clone(), dead_url.clone(), ip_version), Self::conn_reconnect_with_ip_version(
data.clone(),
dead_url.clone(),
connector.clone(),
ip_version,
),
) )
.await; .await;
tracing::info!("reconnect: {} done, ret: {:?}", dead_url, ret); tracing::info!("reconnect: {} done, ret: {:?}", dead_url, ret);
match ret { if ret.is_ok() && ret.as_ref().unwrap().is_ok() {
Ok(Ok(_)) => { reconn_ret = ret.unwrap();
// 外层和内层都成功:解包并跳出 break;
reconn_ret = ret.unwrap(); } else {
break; if ret.is_err() {
} reconn_ret = Err(ret.unwrap_err().into());
Ok(Err(e)) => { } else if ret.as_ref().unwrap().is_err() {
// outer timeout succeeded, inner connect failed reconn_ret = Err(ret.unwrap().unwrap_err());
reconn_ret = Err(e);
}
Err(e) => {
// the outer timeout failed
reconn_ret = Err(e.into());
} }
data.global_ctx.issue_event(GlobalCtxEvent::ConnectError(
dead_url.clone(),
format!("{:?}", ip_version),
format!("{:?}", reconn_ret),
));
} }
// issue the connect-error event (only reached when the loop did not break)
data.global_ctx.issue_event(GlobalCtxEvent::ConnectError(
dead_url.clone(),
format!("{:?}", ip_version),
format!("{:?}", reconn_ret),
));
} }
reconn_ret reconn_ret
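The hunk above mostly comes down to unpacking a nested Result: tokio::time::timeout wraps the connect attempt's own Result, so the loop must distinguish timeout, connect error, and success. A minimal standalone sketch of that control flow (illustrative names, assuming the tokio crate; not the crate's actual connector API):

use std::time::Duration;
use tokio::time::timeout;

// Pretend connect attempt: succeeds only for the "good" target.
async fn try_connect(target: &str) -> Result<String, String> {
    if target == "good" {
        Ok(format!("tunnel to {target}"))
    } else {
        Err(format!("refused by {target}"))
    }
}

#[tokio::main]
async fn main() {
    for target in ["good", "bad"] {
        // timeout() wraps the inner Result, so we match on Result<Result<_, _>, Elapsed>.
        let ret = timeout(Duration::from_secs(2), try_connect(target)).await;
        match ret {
            Ok(Ok(tunnel)) => println!("connected: {tunnel}"),      // outer and inner both ok
            Ok(Err(e)) => println!("connect error: {e}"),           // outer ok, inner failed
            Err(_elapsed) => println!("connect attempt timed out"), // outer timed out
        }
    }
}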

View File

@@ -260,7 +260,7 @@ impl PunchBothEasySymHoleClient {
) )
.await; .await;
let remote_ret = handle_rpc_result(remote_ret, dst_peer_id, &self.blacklist)?; let remote_ret = handle_rpc_result(remote_ret, dst_peer_id, self.blacklist.clone())?;
if remote_ret.is_busy { if remote_ret.is_busy {
*is_busy = true; *is_busy = true;
@@ -314,12 +314,8 @@ impl PunchBothEasySymHoleClient {
); );
for _ in 0..2 { for _ in 0..2 {
match try_connect_with_socket( match try_connect_with_socket(socket.socket.clone(), remote_mapped_addr.into())
global_ctx.clone(), .await
socket.socket.clone(),
remote_mapped_addr.into(),
)
.await
{ {
Ok(tunnel) => { Ok(tunnel) => {
return Ok(Some(tunnel)); return Ok(Some(tunnel));
@@ -389,7 +385,7 @@ pub mod tests {
let udp1 = Arc::new(UdpSocket::bind("0.0.0.0:40164").await.unwrap()); let udp1 = Arc::new(UdpSocket::bind("0.0.0.0:40164").await.unwrap());
// 144 - DST_PORT_OFFSET = 124 // 144 - DST_PORT_OFFSET = 124
let udp2 = Arc::new(UdpSocket::bind("0.0.0.0:40124").await.unwrap()); let udp2 = Arc::new(UdpSocket::bind("0.0.0.0:40124").await.unwrap());
let udps = [udp1, udp2]; let udps = vec![udp1, udp2];
let counter = Arc::new(AtomicU32::new(0)); let counter = Arc::new(AtomicU32::new(0));

View File

@@ -1,5 +1,5 @@
use std::{ use std::{
net::{IpAddr, Ipv4Addr, SocketAddr, SocketAddrV4}, net::{Ipv4Addr, SocketAddr, SocketAddrV4},
sync::Arc, sync::Arc,
time::Duration, time::Duration,
}; };
@@ -67,9 +67,9 @@ impl From<NatType> for UdpNatType {
} }
} }
impl From<UdpNatType> for NatType { impl Into<NatType> for UdpNatType {
fn from(val: UdpNatType) -> Self { fn into(self) -> NatType {
match val { match self {
UdpNatType::Unknown => NatType::Unknown, UdpNatType::Unknown => NatType::Unknown,
UdpNatType::Open(nat_type) => nat_type, UdpNatType::Open(nat_type) => nat_type,
UdpNatType::Cone(nat_type) => nat_type, UdpNatType::Cone(nat_type) => nat_type,
@@ -249,7 +249,7 @@ impl UdpSocketArray {
tracing::info!(?addr, ?tid, "got hole punching packet with interesting tid"); tracing::info!(?addr, ?tid, "got hole punching packet with interesting tid");
tid_to_socket tid_to_socket
.entry(tid) .entry(tid)
.or_default() .or_insert_with(Vec::new)
.push(PunchedUdpSocket { .push(PunchedUdpSocket {
socket: socket.clone(), socket: socket.clone(),
tid, tid,
@@ -556,7 +556,7 @@ impl PunchHoleServerCommon {
#[tracing::instrument(err, ret(level=Level::DEBUG), skip(ports))] #[tracing::instrument(err, ret(level=Level::DEBUG), skip(ports))]
pub(crate) async fn send_symmetric_hole_punch_packet( pub(crate) async fn send_symmetric_hole_punch_packet(
ports: &[u16], ports: &Vec<u16>,
udp: Arc<UdpSocket>, udp: Arc<UdpSocket>,
transaction_id: u32, transaction_id: u32,
public_ips: &Vec<Ipv4Addr>, public_ips: &Vec<Ipv4Addr>,
@@ -582,33 +582,7 @@ pub(crate) async fn send_symmetric_hole_punch_packet(
Ok(cur_port_idx % ports.len()) Ok(cur_port_idx % ports.len())
} }
async fn check_udp_socket_local_addr(
global_ctx: ArcGlobalCtx,
remote_mapped_addr: SocketAddr,
) -> Result<(), Error> {
let socket = UdpSocket::bind("0.0.0.0:0").await?;
socket.connect(remote_mapped_addr).await?;
if let Ok(local_addr) = socket.local_addr() {
// local_addr should not be equal to virtual ipv4 or virtual ipv6
match local_addr.ip() {
IpAddr::V4(ip) => {
if global_ctx.get_ipv4().map(|ip| ip.address()) == Some(ip) {
return Err(anyhow::anyhow!("local address is virtual ipv4").into());
}
}
IpAddr::V6(ip) => {
if global_ctx.get_ipv6().map(|ip| ip.address()) == Some(ip) {
return Err(anyhow::anyhow!("local address is virtual ipv6").into());
}
}
}
}
Ok(())
}
pub(crate) async fn try_connect_with_socket( pub(crate) async fn try_connect_with_socket(
global_ctx: ArcGlobalCtx,
socket: Arc<UdpSocket>, socket: Arc<UdpSocket>,
remote_mapped_addr: SocketAddr, remote_mapped_addr: SocketAddr,
) -> Result<Box<dyn Tunnel>, Error> { ) -> Result<Box<dyn Tunnel>, Error> {
@@ -622,11 +596,8 @@ pub(crate) async fn try_connect_with_socket(
.parse() .parse()
.unwrap(), .unwrap(),
); );
check_udp_socket_local_addr(global_ctx, remote_mapped_addr).await?;
connector connector
.try_connect_with_socket(socket, remote_mapped_addr) .try_connect_with_socket(socket, remote_mapped_addr)
.await .await
.map_err(Error::from) .map_err(|e| Error::from(e))
} }
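One change in this file swaps a hand-written Into impl for a From impl. Implementing From is generally preferred because the standard library's blanket impl then provides Into for free, so both call directions compile. A self-contained sketch with stand-in enums (not the project's real NatType definitions):

#[derive(Debug, Clone, Copy)]
enum NatType { Unknown, FullCone }

#[derive(Debug, Clone, Copy)]
enum UdpNatType { Unknown, Open(NatType) }

// From<UdpNatType> for NatType also yields Into<NatType> for UdpNatType
// through the blanket impl, so callers can keep writing `x.into()`.
impl From<UdpNatType> for NatType {
    fn from(val: UdpNatType) -> Self {
        match val {
            UdpNatType::Unknown => NatType::Unknown,
            UdpNatType::Open(inner) => inner,
        }
    }
}

fn main() {
    for udp in [UdpNatType::Unknown, UdpNatType::Open(NatType::FullCone)] {
        let as_nat: NatType = udp.into();
        println!("{:?} -> {:?}", udp, as_nat);
    }
}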

View File

@@ -154,7 +154,7 @@ impl PunchConeHoleClient {
) )
.await; .await;
let resp = handle_rpc_result(resp, dst_peer_id, &self.blacklist)?; let resp = handle_rpc_result(resp, dst_peer_id, self.blacklist.clone())?;
let remote_mapped_addr = resp.listener_mapped_addr.ok_or(anyhow::anyhow!( let remote_mapped_addr = resp.listener_mapped_addr.ok_or(anyhow::anyhow!(
"select_punch_listener response missing listener_mapped_addr" "select_punch_listener response missing listener_mapped_addr"
@@ -172,7 +172,7 @@ impl PunchConeHoleClient {
udp_array udp_array
.send_with_all( .send_with_all(
&new_hole_punch_packet(tid, HOLE_PUNCH_PACKET_BODY_LEN).into_bytes(), &new_hole_punch_packet(tid, HOLE_PUNCH_PACKET_BODY_LEN).into_bytes(),
remote_mapped_addr.into(), remote_mapped_addr.clone().into(),
) )
.await .await
.with_context(|| "failed to send hole punch packet from local") .with_context(|| "failed to send hole punch packet from local")
@@ -188,7 +188,7 @@ impl PunchConeHoleClient {
..Default::default() ..Default::default()
}, },
SendPunchPacketConeRequest { SendPunchPacketConeRequest {
listener_mapped_addr: Some(remote_mapped_addr), listener_mapped_addr: Some(remote_mapped_addr.into()),
dest_addr: Some(local_mapped_addr.into()), dest_addr: Some(local_mapped_addr.into()),
transaction_id: tid, transaction_id: tid,
packet_count_per_batch: 2, packet_count_per_batch: 2,
@@ -223,12 +223,8 @@ impl PunchConeHoleClient {
tracing::debug!(?socket, ?tid, "punched socket found, try connect with it"); tracing::debug!(?socket, ?tid, "punched socket found, try connect with it");
for _ in 0..2 { for _ in 0..2 {
match try_connect_with_socket( match try_connect_with_socket(socket.socket.clone(), remote_mapped_addr.into())
global_ctx.clone(), .await
socket.socket.clone(),
remote_mapped_addr.into(),
)
.await
{ {
Ok(tunnel) => { Ok(tunnel) => {
tracing::info!(?tunnel, "hole punched"); tracing::info!(?tunnel, "hole punched");
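The cone-NAT client above keys every punch attempt on a transaction id so replies can be matched back to the attempt that sent them. A toy packet layout demonstrating the idea (the real wire format is the project's own; this 4-byte magic plus tid is purely illustrative):

// Toy punch packet: 4-byte magic, 4-byte transaction id, zero padding.
fn new_hole_punch_packet(tid: u32, body_len: usize) -> Vec<u8> {
    let mut buf = Vec::with_capacity(8 + body_len);
    buf.extend_from_slice(b"PNCH");
    buf.extend_from_slice(&tid.to_be_bytes());
    buf.resize(8 + body_len, 0);
    buf
}

// Returns the transaction id if the packet carries the expected magic.
fn parse_tid(pkt: &[u8]) -> Option<u32> {
    if pkt.len() < 8 || &pkt[..4] != b"PNCH" {
        return None;
    }
    Some(u32::from_be_bytes(pkt[4..8].try_into().ok()?))
}

fn main() {
    let pkt = new_hole_punch_packet(7, 16);
    assert_eq!(parse_tid(&pkt), Some(7));
    println!("tid = {:?}", parse_tid(&pkt));
}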

View File

@@ -39,7 +39,7 @@ pub(crate) mod cone;
pub(crate) mod sym_to_cone; pub(crate) mod sym_to_cone;
// sym punch should be serialized // sym punch should be serialized
static SYM_PUNCH_LOCK: Lazy<DashMap<PeerId, Arc<Mutex<()>>>> = Lazy::new(DashMap::new); static SYM_PUNCH_LOCK: Lazy<DashMap<PeerId, Arc<Mutex<()>>>> = Lazy::new(|| DashMap::new());
pub static RUN_TESTING: Lazy<AtomicBool> = Lazy::new(|| AtomicBool::new(false)); pub static RUN_TESTING: Lazy<AtomicBool> = Lazy::new(|| AtomicBool::new(false));
// Blacklist timeout in seconds // Blacklist timeout in seconds
@@ -183,7 +183,7 @@ impl BackOff {
pub fn handle_rpc_result<T>( pub fn handle_rpc_result<T>(
ret: Result<T, rpc_types::error::Error>, ret: Result<T, rpc_types::error::Error>,
dst_peer_id: PeerId, dst_peer_id: PeerId,
blacklist: &timedmap::TimedMap<PeerId, ()>, blacklist: Arc<timedmap::TimedMap<PeerId, ()>>,
) -> Result<T, rpc_types::error::Error> { ) -> Result<T, rpc_types::error::Error> {
match ret { match ret {
Ok(ret) => Ok(ret), Ok(ret) => Ok(ret),
@@ -223,7 +223,7 @@ impl UdpHoePunchConnectorData {
#[tracing::instrument(skip(self))] #[tracing::instrument(skip(self))]
async fn handle_punch_result( async fn handle_punch_result(
&self, self: &Self,
ret: Result<Option<Box<dyn Tunnel>>, Error>, ret: Result<Option<Box<dyn Tunnel>>, Error>,
backoff: Option<&mut BackOff>, backoff: Option<&mut BackOff>,
round: Option<&mut u32>, round: Option<&mut u32>,
@@ -236,8 +236,10 @@ impl UdpHoePunchConnectorData {
if let Some(round) = round { if let Some(round) = round {
*round = round.saturating_sub(1); *round = round.saturating_sub(1);
} }
} else if let Some(round) = round { } else {
*round += 1; if let Some(round) = round {
*round += 1;
}
} }
}; };
@@ -462,7 +464,7 @@ impl PeerTaskLauncher for UdpHolePunchPeerTaskLauncher {
} }
let conns = data.peer_mgr.list_peer_conns(peer_id).await; let conns = data.peer_mgr.list_peer_conns(peer_id).await;
if conns.is_some() && !conns.unwrap().is_empty() { if conns.is_some() && conns.unwrap().len() > 0 {
continue; continue;
} }
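The comment in this hunk notes that symmetric punching must be serialized. One way to read the SYM_PUNCH_LOCK line is a lazily built map of per-peer async mutexes; a minimal sketch of that pattern, assuming the dashmap, once_cell and tokio crates (simplified, not the module's real code):

use std::sync::Arc;
use dashmap::DashMap;
use once_cell::sync::Lazy;
use tokio::sync::Mutex;

type PeerId = u32;

// One async mutex per peer: punch attempts toward the same peer run one at a
// time, while attempts toward different peers stay concurrent.
static SYM_PUNCH_LOCK: Lazy<DashMap<PeerId, Arc<Mutex<()>>>> = Lazy::new(DashMap::new);

fn lock_for(peer: PeerId) -> Arc<Mutex<()>> {
    SYM_PUNCH_LOCK.entry(peer).or_default().clone()
}

#[tokio::main]
async fn main() {
    let lock = lock_for(42);
    let _guard = lock.lock().await;
    // ... run the punch attempt for peer 42 while holding the guard ...
    println!("punching peer 42 exclusively");
}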

View File

@@ -14,15 +14,11 @@ use tokio::{net::UdpSocket, sync::RwLock};
use tracing::Level; use tracing::Level;
use crate::{ use crate::{
common::{ common::{scoped_task::ScopedTask, stun::StunInfoCollectorTrait, PeerId},
global_ctx::ArcGlobalCtx, scoped_task::ScopedTask, stun::StunInfoCollectorTrait, PeerId, connector::udp_hole_punch::common::{
}, send_symmetric_hole_punch_packet, try_connect_with_socket, HOLE_PUNCH_PACKET_BODY_LEN,
connector::udp_hole_punch::{
common::{
send_symmetric_hole_punch_packet, try_connect_with_socket, HOLE_PUNCH_PACKET_BODY_LEN,
},
handle_rpc_result,
}, },
connector::udp_hole_punch::handle_rpc_result,
defer, defer,
peers::peer_manager::PeerManager, peers::peer_manager::PeerManager,
proto::{ proto::{
@@ -80,9 +76,9 @@ impl PunchSymToConeHoleServer {
let public_ips = request let public_ips = request
.public_ips .public_ips
.into_iter() .into_iter()
.map(std::net::Ipv4Addr::from) .map(|ip| std::net::Ipv4Addr::from(ip))
.collect::<Vec<_>>(); .collect::<Vec<_>>();
if public_ips.is_empty() { if public_ips.len() == 0 {
tracing::warn!("send_punch_packet_easy_sym got zero len public ip"); tracing::warn!("send_punch_packet_easy_sym got zero len public ip");
return Err( return Err(
anyhow::anyhow!("send_punch_packet_easy_sym got zero len public ip").into(), anyhow::anyhow!("send_punch_packet_easy_sym got zero len public ip").into(),
@@ -158,9 +154,9 @@ impl PunchSymToConeHoleServer {
let public_ips = request let public_ips = request
.public_ips .public_ips
.into_iter() .into_iter()
.map(std::net::Ipv4Addr::from) .map(|ip| std::net::Ipv4Addr::from(ip))
.collect::<Vec<_>>(); .collect::<Vec<_>>();
if public_ips.is_empty() { if public_ips.len() == 0 {
tracing::warn!("try_punch_symmetric got zero len public ip"); tracing::warn!("try_punch_symmetric got zero len public ip");
return Err(anyhow::anyhow!("try_punch_symmetric got zero len public ip").into()); return Err(anyhow::anyhow!("try_punch_symmetric got zero len public ip").into());
} }
@@ -281,7 +277,7 @@ impl PunchSymToConeHoleClient {
return; return;
}; };
let req = SendPunchPacketEasySymRequest { let req = SendPunchPacketEasySymRequest {
listener_mapped_addr: remote_mapped_addr.into(), listener_mapped_addr: remote_mapped_addr.clone().into(),
public_ips: public_ips.clone().into_iter().map(|x| x.into()).collect(), public_ips: public_ips.clone().into_iter().map(|x| x.into()).collect(),
transaction_id: tid, transaction_id: tid,
base_port_num: base_port_for_easy_sym.unwrap() as u32, base_port_num: base_port_for_easy_sym.unwrap() as u32,
@@ -313,7 +309,7 @@ impl PunchSymToConeHoleClient {
port_index: u32, port_index: u32,
) -> Option<u32> { ) -> Option<u32> {
let req = SendPunchPacketHardSymRequest { let req = SendPunchPacketHardSymRequest {
listener_mapped_addr: remote_mapped_addr.into(), listener_mapped_addr: remote_mapped_addr.clone().into(),
public_ips: public_ips.clone().into_iter().map(|x| x.into()).collect(), public_ips: public_ips.clone().into_iter().map(|x| x.into()).collect(),
transaction_id: tid, transaction_id: tid,
round, round,
@@ -333,16 +329,16 @@ impl PunchSymToConeHoleClient {
{ {
Err(e) => { Err(e) => {
tracing::error!(?e, "failed to send punch packet for hard sym"); tracing::error!(?e, "failed to send punch packet for hard sym");
None return None;
} }
Ok(resp) => Some(resp.next_port_index), Ok(resp) => return Some(resp.next_port_index),
} }
} }
async fn get_rpc_stub( async fn get_rpc_stub(
&self, &self,
dst_peer_id: PeerId, dst_peer_id: PeerId,
) -> Box<dyn UdpHolePunchRpc<Controller = BaseController> + std::marker::Send + 'static> { ) -> Box<(dyn UdpHolePunchRpc<Controller = BaseController> + std::marker::Send + 'static)> {
self.peer_mgr self.peer_mgr
.get_peer_rpc_mgr() .get_peer_rpc_mgr()
.rpc_client() .rpc_client()
@@ -354,7 +350,6 @@ impl PunchSymToConeHoleClient {
} }
async fn check_hole_punch_result<T>( async fn check_hole_punch_result<T>(
global_ctx: ArcGlobalCtx,
udp_array: &Arc<UdpSocketArray>, udp_array: &Arc<UdpSocketArray>,
packet: &[u8], packet: &[u8],
tid: u32, tid: u32,
@@ -366,7 +361,7 @@ impl PunchSymToConeHoleClient {
let mut finish_time: Option<Instant> = None; let mut finish_time: Option<Instant> = None;
while finish_time.is_none() || finish_time.as_ref().unwrap().elapsed().as_millis() < 1000 { while finish_time.is_none() || finish_time.as_ref().unwrap().elapsed().as_millis() < 1000 {
udp_array udp_array
.send_with_all(packet, remote_mapped_addr.into()) .send_with_all(&packet, remote_mapped_addr.into())
.await?; .await?;
tokio::time::sleep(Duration::from_millis(200)).await; tokio::time::sleep(Duration::from_millis(200)).await;
@@ -381,13 +376,7 @@ impl PunchSymToConeHoleClient {
}; };
// if hole punched but tunnel creation failed, need to retry entire process. // if hole punched but tunnel creation failed, need to retry entire process.
match try_connect_with_socket( match try_connect_with_socket(socket.socket.clone(), remote_mapped_addr.into()).await {
global_ctx.clone(),
socket.socket.clone(),
remote_mapped_addr.into(),
)
.await
{
Ok(tunnel) => { Ok(tunnel) => {
ret_tunnel.replace(tunnel); ret_tunnel.replace(tunnel);
break; break;
@@ -437,7 +426,7 @@ impl PunchSymToConeHoleClient {
) )
.await; .await;
let resp = handle_rpc_result(resp, dst_peer_id, &self.blacklist)?; let resp = handle_rpc_result(resp, dst_peer_id, self.blacklist.clone())?;
let remote_mapped_addr = resp.listener_mapped_addr.ok_or(anyhow::anyhow!( let remote_mapped_addr = resp.listener_mapped_addr.ok_or(anyhow::anyhow!(
"select_punch_listener response missing listener_mapped_addr" "select_punch_listener response missing listener_mapped_addr"
@@ -446,7 +435,6 @@ impl PunchSymToConeHoleClient {
// try direct connect first // try direct connect first
if self.try_direct_connect.load(Ordering::Relaxed) { if self.try_direct_connect.load(Ordering::Relaxed) {
if let Ok(tunnel) = try_connect_with_socket( if let Ok(tunnel) = try_connect_with_socket(
global_ctx.clone(),
Arc::new(UdpSocket::bind("0.0.0.0:0").await?), Arc::new(UdpSocket::bind("0.0.0.0:0").await?),
remote_mapped_addr.into(), remote_mapped_addr.into(),
) )
@@ -484,17 +472,16 @@ impl PunchSymToConeHoleClient {
rpc_stub, rpc_stub,
base_port_for_easy_sym, base_port_for_easy_sym,
my_nat_info, my_nat_info,
remote_mapped_addr, remote_mapped_addr.clone(),
public_ips.clone(), public_ips.clone(),
tid, tid,
)) ))
.into(); .into();
let ret_tunnel = Self::check_hole_punch_result( let ret_tunnel = Self::check_hole_punch_result(
global_ctx.clone(),
&udp_array, &udp_array,
&packet, &packet,
tid, tid,
remote_mapped_addr, remote_mapped_addr.clone(),
&scoped_punch_task, &scoped_punch_task,
) )
.await?; .await?;
@@ -510,7 +497,7 @@ impl PunchSymToConeHoleClient {
let scoped_punch_task: ScopedTask<Option<u32>> = let scoped_punch_task: ScopedTask<Option<u32>> =
tokio::spawn(Self::remote_send_hole_punch_packet_random( tokio::spawn(Self::remote_send_hole_punch_packet_random(
rpc_stub, rpc_stub,
remote_mapped_addr, remote_mapped_addr.clone(),
public_ips.clone(), public_ips.clone(),
tid, tid,
round, round,
@@ -518,11 +505,10 @@ impl PunchSymToConeHoleClient {
)) ))
.into(); .into();
let ret_tunnel = Self::check_hole_punch_result( let ret_tunnel = Self::check_hole_punch_result(
global_ctx,
&udp_array, &udp_array,
&packet, &packet,
tid, tid,
remote_mapped_addr, remote_mapped_addr.clone(),
&scoped_punch_task, &scoped_punch_task,
) )
.await?; .await?;
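check_hole_punch_result above repeatedly calls send_with_all on a socket array. The underlying idea is to fire the same payload from every local socket, since on a symmetric NAT each socket maps to a different public port. A reduced sketch of that helper (assuming tokio; not the UdpSocketArray implementation itself):

use std::net::SocketAddr;
use std::sync::Arc;
use tokio::net::UdpSocket;

// Send one payload from every local socket to the remote mapped address.
async fn send_with_all(
    sockets: &[Arc<UdpSocket>],
    payload: &[u8],
    remote: SocketAddr,
) -> std::io::Result<()> {
    for s in sockets {
        s.send_to(payload, remote).await?;
    }
    Ok(())
}

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let sockets = vec![
        Arc::new(UdpSocket::bind("0.0.0.0:0").await?),
        Arc::new(UdpSocket::bind("0.0.0.0:0").await?),
    ];
    let remote: SocketAddr = "127.0.0.1:40124".parse().unwrap();
    send_with_all(&sockets, b"punch", remote).await
}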

View File

@@ -4,6 +4,7 @@ use std::{
net::{IpAddr, SocketAddr}, net::{IpAddr, SocketAddr},
path::PathBuf, path::PathBuf,
str::FromStr, str::FromStr,
sync::Mutex,
time::Duration, time::Duration,
vec, vec,
}; };
@@ -21,26 +22,23 @@ use tokio::time::timeout;
use easytier::{ use easytier::{
common::{ common::{
config::PortForwardConfig,
constants::EASYTIER_VERSION, constants::EASYTIER_VERSION,
stun::{StunInfoCollector, StunInfoCollectorTrait}, stun::{StunInfoCollector, StunInfoCollectorTrait},
}, },
proto::{ proto::{
cli::{ cli::{
list_peer_route_pair, AclManageRpc, AclManageRpcClientFactory, AddPortForwardRequest, list_peer_route_pair, AclManageRpc, AclManageRpcClientFactory, ConnectorManageRpc,
ConnectorManageRpc, ConnectorManageRpcClientFactory, DumpRouteRequest, ConnectorManageRpcClientFactory, DumpRouteRequest, GetAclStatsRequest,
GetAclStatsRequest, GetPrometheusStatsRequest, GetStatsRequest, GetVpnPortalInfoRequest, ListConnectorRequest, ListForeignNetworkRequest,
GetVpnPortalInfoRequest, GetWhitelistRequest, ListConnectorRequest, ListGlobalForeignNetworkRequest, ListMappedListenerRequest, ListPeerRequest,
ListForeignNetworkRequest, ListGlobalForeignNetworkRequest, ListMappedListenerRequest, ListPeerResponse, ListRouteRequest, ListRouteResponse, ManageMappedListenerRequest,
ListPeerRequest, ListPeerResponse, ListPortForwardRequest, ListRouteRequest, MappedListenerManageAction, MappedListenerManageRpc,
ListRouteResponse, ManageMappedListenerRequest, MappedListenerManageAction, MappedListenerManageRpcClientFactory, NodeInfo, PeerManageRpc,
MappedListenerManageRpc, MappedListenerManageRpcClientFactory, NodeInfo, PeerManageRpc, PeerManageRpcClientFactory, ShowNodeInfoRequest, TcpProxyEntryState,
PeerManageRpcClientFactory, PortForwardManageRpc, PortForwardManageRpcClientFactory, TcpProxyEntryTransportType, TcpProxyRpc, TcpProxyRpcClientFactory, VpnPortalRpc,
RemovePortForwardRequest, SetWhitelistRequest, ShowNodeInfoRequest, StatsRpc, VpnPortalRpcClientFactory,
StatsRpcClientFactory, TcpProxyEntryState, TcpProxyEntryTransportType, TcpProxyRpc,
TcpProxyRpcClientFactory, VpnPortalRpc, VpnPortalRpcClientFactory,
}, },
common::{NatType, SocketType}, common::NatType,
peer_rpc::{GetGlobalPeerMapRequest, PeerCenterRpc, PeerCenterRpcClientFactory}, peer_rpc::{GetGlobalPeerMapRequest, PeerCenterRpc, PeerCenterRpcClientFactory},
rpc_impl::standalone::StandAloneClient, rpc_impl::standalone::StandAloneClient,
rpc_types::controller::BaseController, rpc_types::controller::BaseController,
@@ -98,12 +96,6 @@ enum SubCommand {
Proxy, Proxy,
#[command(about = "show ACL rules statistics")] #[command(about = "show ACL rules statistics")]
Acl(AclArgs), Acl(AclArgs),
#[command(about = "manage port forwarding")]
PortForward(PortForwardArgs),
#[command(about = "manage TCP/UDP whitelist")]
Whitelist(WhitelistArgs),
#[command(about = "show statistics information")]
Stats(StatsArgs),
#[command(about = t!("core_clap.generate_completions").to_string())] #[command(about = t!("core_clap.generate_completions").to_string())]
GenAutocomplete { shell: Shell }, GenAutocomplete { shell: Shell },
} }
@@ -201,76 +193,6 @@ enum AclSubCommand {
Stats, Stats,
} }
#[derive(Args, Debug)]
struct PortForwardArgs {
#[command(subcommand)]
sub_command: Option<PortForwardSubCommand>,
}
#[derive(Subcommand, Debug)]
enum PortForwardSubCommand {
/// Add port forward rule
Add {
#[arg(help = "Protocol (tcp/udp)")]
protocol: String,
#[arg(help = "Local bind address (e.g., 0.0.0.0:8080)")]
bind_addr: String,
#[arg(help = "Destination address (e.g., 10.1.1.1:80)")]
dst_addr: String,
},
/// Remove port forward rule
Remove {
#[arg(help = "Protocol (tcp/udp)")]
protocol: String,
#[arg(help = "Local bind address (e.g., 0.0.0.0:8080)")]
bind_addr: String,
#[arg(help = "Optional Destination address (e.g., 10.1.1.1:80)")]
dst_addr: Option<String>,
},
/// List port forward rules
List,
}
#[derive(Args, Debug)]
struct WhitelistArgs {
#[command(subcommand)]
sub_command: Option<WhitelistSubCommand>,
}
#[derive(Subcommand, Debug)]
enum WhitelistSubCommand {
/// Set TCP port whitelist
SetTcp {
#[arg(help = "TCP ports (e.g., 80,443,8000-9000)")]
ports: String,
},
/// Set UDP port whitelist
SetUdp {
#[arg(help = "UDP ports (e.g., 53,5000-6000)")]
ports: String,
},
/// Clear TCP whitelist
ClearTcp,
/// Clear UDP whitelist
ClearUdp,
/// Show current whitelist configuration
Show,
}
#[derive(Args, Debug)]
struct StatsArgs {
#[command(subcommand)]
sub_command: Option<StatsSubCommand>,
}
#[derive(Subcommand, Debug)]
enum StatsSubCommand {
/// Show general statistics
Show,
/// Show statistics in Prometheus format
Prometheus,
}
#[derive(Args, Debug)] #[derive(Args, Debug)]
struct ServiceArgs { struct ServiceArgs {
#[arg(short, long, default_value = env!("CARGO_PKG_NAME"), help = "service name")] #[arg(short, long, default_value = env!("CARGO_PKG_NAME"), help = "service name")]
@@ -325,7 +247,7 @@ struct InstallArgs {
type Error = anyhow::Error; type Error = anyhow::Error;
struct CommandHandler<'a> { struct CommandHandler<'a> {
client: tokio::sync::Mutex<RpcClient>, client: Mutex<RpcClient>,
verbose: bool, verbose: bool,
output_format: &'a OutputFormat, output_format: &'a OutputFormat,
} }
@@ -339,7 +261,7 @@ impl CommandHandler<'_> {
Ok(self Ok(self
.client .client
.lock() .lock()
.await .unwrap()
.scoped_client::<PeerManageRpcClientFactory<BaseController>>("".to_string()) .scoped_client::<PeerManageRpcClientFactory<BaseController>>("".to_string())
.await .await
.with_context(|| "failed to get peer manager client")?) .with_context(|| "failed to get peer manager client")?)
@@ -351,7 +273,7 @@ impl CommandHandler<'_> {
Ok(self Ok(self
.client .client
.lock() .lock()
.await .unwrap()
.scoped_client::<ConnectorManageRpcClientFactory<BaseController>>("".to_string()) .scoped_client::<ConnectorManageRpcClientFactory<BaseController>>("".to_string())
.await .await
.with_context(|| "failed to get connector manager client")?) .with_context(|| "failed to get connector manager client")?)
@@ -363,7 +285,7 @@ impl CommandHandler<'_> {
Ok(self Ok(self
.client .client
.lock() .lock()
.await .unwrap()
.scoped_client::<MappedListenerManageRpcClientFactory<BaseController>>("".to_string()) .scoped_client::<MappedListenerManageRpcClientFactory<BaseController>>("".to_string())
.await .await
.with_context(|| "failed to get mapped listener manager client")?) .with_context(|| "failed to get mapped listener manager client")?)
@@ -375,7 +297,7 @@ impl CommandHandler<'_> {
Ok(self Ok(self
.client .client
.lock() .lock()
.await .unwrap()
.scoped_client::<PeerCenterRpcClientFactory<BaseController>>("".to_string()) .scoped_client::<PeerCenterRpcClientFactory<BaseController>>("".to_string())
.await .await
.with_context(|| "failed to get peer center client")?) .with_context(|| "failed to get peer center client")?)
@@ -387,7 +309,7 @@ impl CommandHandler<'_> {
Ok(self Ok(self
.client .client
.lock() .lock()
.await .unwrap()
.scoped_client::<VpnPortalRpcClientFactory<BaseController>>("".to_string()) .scoped_client::<VpnPortalRpcClientFactory<BaseController>>("".to_string())
.await .await
.with_context(|| "failed to get vpn portal client")?) .with_context(|| "failed to get vpn portal client")?)
@@ -399,7 +321,7 @@ impl CommandHandler<'_> {
Ok(self Ok(self
.client .client
.lock() .lock()
.await .unwrap()
.scoped_client::<AclManageRpcClientFactory<BaseController>>("".to_string()) .scoped_client::<AclManageRpcClientFactory<BaseController>>("".to_string())
.await .await
.with_context(|| "failed to get acl manager client")?) .with_context(|| "failed to get acl manager client")?)
@@ -412,36 +334,12 @@ impl CommandHandler<'_> {
Ok(self Ok(self
.client .client
.lock() .lock()
.await .unwrap()
.scoped_client::<TcpProxyRpcClientFactory<BaseController>>(transport_type.to_string()) .scoped_client::<TcpProxyRpcClientFactory<BaseController>>(transport_type.to_string())
.await .await
.with_context(|| "failed to get vpn portal client")?) .with_context(|| "failed to get vpn portal client")?)
} }
async fn get_port_forward_manager_client(
&self,
) -> Result<Box<dyn PortForwardManageRpc<Controller = BaseController>>, Error> {
Ok(self
.client
.lock()
.await
.scoped_client::<PortForwardManageRpcClientFactory<BaseController>>("".to_string())
.await
.with_context(|| "failed to get port forward manager client")?)
}
async fn get_stats_client(
&self,
) -> Result<Box<dyn StatsRpc<Controller = BaseController>>, Error> {
Ok(self
.client
.lock()
.await
.scoped_client::<StatsRpcClientFactory<BaseController>>("".to_string())
.await
.with_context(|| "failed to get stats client")?)
}
async fn list_peers(&self) -> Result<ListPeerResponse, Error> { async fn list_peers(&self) -> Result<ListPeerResponse, Error> {
let client = self.get_peer_manager_client().await?; let client = self.get_peer_manager_client().await?;
let request = ListPeerRequest::default(); let request = ListPeerRequest::default();
@@ -573,18 +471,6 @@ impl CommandHandler<'_> {
items.push(p.into()); items.push(p.into());
} }
// Sort items by ipv4 (using IpAddr for proper numeric comparison) first, then by hostname
items.sort_by(|a, b| {
use std::net::{IpAddr, Ipv4Addr};
use std::str::FromStr;
let a_ip = IpAddr::from_str(&a.ipv4).unwrap_or(IpAddr::V4(Ipv4Addr::UNSPECIFIED));
let b_ip = IpAddr::from_str(&b.ipv4).unwrap_or(IpAddr::V4(Ipv4Addr::UNSPECIFIED));
match a_ip.cmp(&b_ip) {
std::cmp::Ordering::Equal => a.hostname.cmp(&b.hostname),
other => other,
}
});
print_output(&items, self.output_format)?; print_output(&items, self.output_format)?;
Ok(()) Ok(())
@@ -865,7 +751,7 @@ impl CommandHandler<'_> {
Ok(()) Ok(())
} }
async fn handle_mapped_listener_add(&self, url: &str) -> Result<(), Error> { async fn handle_mapped_listener_add(&self, url: &String) -> Result<(), Error> {
let url = Self::mapped_listener_validate_url(url)?; let url = Self::mapped_listener_validate_url(url)?;
let client = self.get_mapped_listener_manager_client().await?; let client = self.get_mapped_listener_manager_client().await?;
let request = ManageMappedListenerRequest { let request = ManageMappedListenerRequest {
@@ -878,7 +764,7 @@ impl CommandHandler<'_> {
Ok(()) Ok(())
} }
async fn handle_mapped_listener_remove(&self, url: &str) -> Result<(), Error> { async fn handle_mapped_listener_remove(&self, url: &String) -> Result<(), Error> {
let url = Self::mapped_listener_validate_url(url)?; let url = Self::mapped_listener_validate_url(url)?;
let client = self.get_mapped_listener_manager_client().await?; let client = self.get_mapped_listener_manager_client().await?;
let request = ManageMappedListenerRequest { let request = ManageMappedListenerRequest {
@@ -891,7 +777,7 @@ impl CommandHandler<'_> {
Ok(()) Ok(())
} }
fn mapped_listener_validate_url(url: &str) -> Result<url::Url, Error> { fn mapped_listener_validate_url(url: &String) -> Result<url::Url, Error> {
let url = url::Url::parse(url)?; let url = url::Url::parse(url)?;
if url.scheme() != "tcp" && url.scheme() != "udp" { if url.scheme() != "tcp" && url.scheme() != "udp" {
return Err(anyhow::anyhow!( return Err(anyhow::anyhow!(
@@ -902,264 +788,6 @@ impl CommandHandler<'_> {
} }
Ok(url) Ok(url)
} }
async fn handle_port_forward_add(
&self,
protocol: &str,
bind_addr: &str,
dst_addr: &str,
) -> Result<(), Error> {
let bind_addr: std::net::SocketAddr = bind_addr
.parse()
.with_context(|| format!("Invalid bind address: {}", bind_addr))?;
let dst_addr: std::net::SocketAddr = dst_addr
.parse()
.with_context(|| format!("Invalid destination address: {}", dst_addr))?;
if protocol != "tcp" && protocol != "udp" {
return Err(anyhow::anyhow!("Protocol must be 'tcp' or 'udp'"));
}
let client = self.get_port_forward_manager_client().await?;
let request = AddPortForwardRequest {
cfg: Some(
PortForwardConfig {
proto: protocol.to_string(),
bind_addr,
dst_addr,
}
.into(),
),
};
client
.add_port_forward(BaseController::default(), request)
.await?;
println!(
"Port forward rule added: {} {} -> {}",
protocol, bind_addr, dst_addr
);
Ok(())
}
async fn handle_port_forward_remove(
&self,
protocol: &str,
bind_addr: &str,
dst_addr: Option<&str>,
) -> Result<(), Error> {
let bind_addr: std::net::SocketAddr = bind_addr
.parse()
.with_context(|| format!("Invalid bind address: {}", bind_addr))?;
if protocol != "tcp" && protocol != "udp" {
return Err(anyhow::anyhow!("Protocol must be 'tcp' or 'udp'"));
}
let client = self.get_port_forward_manager_client().await?;
let request = RemovePortForwardRequest {
cfg: Some(
PortForwardConfig {
proto: protocol.to_string(),
bind_addr,
dst_addr: dst_addr
.map(|s| s.parse::<SocketAddr>().unwrap())
.unwrap_or("0.0.0.0:0".parse::<SocketAddr>().unwrap()),
}
.into(),
),
};
client
.remove_port_forward(BaseController::default(), request)
.await?;
println!("Port forward rule removed: {} {}", protocol, bind_addr);
Ok(())
}
async fn handle_port_forward_list(&self) -> Result<(), Error> {
let client = self.get_port_forward_manager_client().await?;
let request = ListPortForwardRequest::default();
let response = client
.list_port_forward(BaseController::default(), request)
.await?;
if self.verbose || *self.output_format == OutputFormat::Json {
println!("{}", serde_json::to_string_pretty(&response)?);
return Ok(());
}
#[derive(tabled::Tabled, serde::Serialize)]
struct PortForwardTableItem {
protocol: String,
bind_addr: String,
dst_addr: String,
}
let items: Vec<PortForwardTableItem> = response
.cfgs
.into_iter()
.map(|rule| PortForwardTableItem {
protocol: format!(
"{:?}",
SocketType::try_from(rule.socket_type).unwrap_or(SocketType::Tcp)
),
bind_addr: rule
.bind_addr
.map(|addr| addr.to_string())
.unwrap_or_default(),
dst_addr: rule
.dst_addr
.map(|addr| addr.to_string())
.unwrap_or_default(),
})
.collect();
print_output(&items, self.output_format)?;
Ok(())
}
async fn handle_whitelist_set_tcp(&self, ports: &str) -> Result<(), Error> {
let tcp_ports = Self::parse_port_list(ports)?;
let client = self.get_acl_manager_client().await?;
// Get current UDP ports to preserve them
let current = client
.get_whitelist(BaseController::default(), GetWhitelistRequest::default())
.await?;
let request = SetWhitelistRequest {
tcp_ports,
udp_ports: current.udp_ports,
};
client
.set_whitelist(BaseController::default(), request)
.await?;
println!("TCP whitelist updated: {}", ports);
Ok(())
}
async fn handle_whitelist_set_udp(&self, ports: &str) -> Result<(), Error> {
let udp_ports = Self::parse_port_list(ports)?;
let client = self.get_acl_manager_client().await?;
// Get current TCP ports to preserve them
let current = client
.get_whitelist(BaseController::default(), GetWhitelistRequest::default())
.await?;
let request = SetWhitelistRequest {
tcp_ports: current.tcp_ports,
udp_ports,
};
client
.set_whitelist(BaseController::default(), request)
.await?;
println!("UDP whitelist updated: {}", ports);
Ok(())
}
async fn handle_whitelist_clear_tcp(&self) -> Result<(), Error> {
let client = self.get_acl_manager_client().await?;
// Get current UDP ports to preserve them
let current = client
.get_whitelist(BaseController::default(), GetWhitelistRequest::default())
.await?;
let request = SetWhitelistRequest {
tcp_ports: vec![],
udp_ports: current.udp_ports,
};
client
.set_whitelist(BaseController::default(), request)
.await?;
println!("TCP whitelist cleared");
Ok(())
}
async fn handle_whitelist_clear_udp(&self) -> Result<(), Error> {
let client = self.get_acl_manager_client().await?;
// Get current TCP ports to preserve them
let current = client
.get_whitelist(BaseController::default(), GetWhitelistRequest::default())
.await?;
let request = SetWhitelistRequest {
tcp_ports: current.tcp_ports,
udp_ports: vec![],
};
client
.set_whitelist(BaseController::default(), request)
.await?;
println!("UDP whitelist cleared");
Ok(())
}
async fn handle_whitelist_show(&self) -> Result<(), Error> {
let client = self.get_acl_manager_client().await?;
let request = GetWhitelistRequest::default();
let response = client
.get_whitelist(BaseController::default(), request)
.await?;
if self.verbose || *self.output_format == OutputFormat::Json {
println!("{}", serde_json::to_string_pretty(&response)?);
return Ok(());
}
println!(
"TCP Whitelist: {}",
if response.tcp_ports.is_empty() {
"None".to_string()
} else {
response.tcp_ports.join(", ")
}
);
println!(
"UDP Whitelist: {}",
if response.udp_ports.is_empty() {
"None".to_string()
} else {
response.udp_ports.join(", ")
}
);
Ok(())
}
fn parse_port_list(ports_str: &str) -> Result<Vec<String>, Error> {
let mut ports = Vec::new();
for port_spec in ports_str.split(',') {
let port_spec = port_spec.trim();
if port_spec.contains('-') {
// Handle port range
let parts: Vec<&str> = port_spec.split('-').collect();
if parts.len() != 2 {
return Err(anyhow::anyhow!("Invalid port range: {}", port_spec));
}
let start: u16 = parts[0]
.parse()
.with_context(|| format!("Invalid start port: {}", parts[0]))?;
let end: u16 = parts[1]
.parse()
.with_context(|| format!("Invalid end port: {}", parts[1]))?;
if start > end {
return Err(anyhow::anyhow!("Invalid port range: start > end"));
}
ports.push(format!("{}-{}", start, end));
} else {
// Handle single port
let port: u16 = port_spec
.parse()
.with_context(|| format!("Invalid port number: {}", port_spec))?;
ports.push(port.to_string());
}
}
Ok(ports)
}
} }
#[derive(Debug)] #[derive(Debug)]
@@ -1457,7 +1085,7 @@ async fn main() -> Result<(), Error> {
.unwrap(), .unwrap(),
)); ));
let handler = CommandHandler { let handler = CommandHandler {
client: tokio::sync::Mutex::new(client), client: Mutex::new(client),
verbose: cli.verbose, verbose: cli.verbose,
output_format: &cli.output_format, output_format: &cli.output_format,
}; };
@@ -1715,10 +1343,16 @@ async fn main() -> Result<(), Error> {
format!("{:?}", stun_info.udp_nat_type()).as_str(), format!("{:?}", stun_info.udp_nat_type()).as_str(),
]); ]);
ip_list.interface_ipv4s.iter().for_each(|ip| { ip_list.interface_ipv4s.iter().for_each(|ip| {
builder.push_record(vec!["Interface IPv4", ip.to_string().as_str()]); builder.push_record(vec![
"Interface IPv4",
format!("{}", ip.to_string()).as_str(),
]);
}); });
ip_list.interface_ipv6s.iter().for_each(|ip| { ip_list.interface_ipv6s.iter().for_each(|ip| {
builder.push_record(vec!["Interface IPv6", ip.to_string().as_str()]); builder.push_record(vec![
"Interface IPv6",
format!("{}", ip.to_string()).as_str(),
]);
}); });
for (idx, l) in node_info.listeners.iter().enumerate() { for (idx, l) in node_info.listeners.iter().enumerate() {
if l.starts_with("ring") { if l.starts_with("ring") {
@@ -1860,109 +1494,6 @@ async fn main() -> Result<(), Error> {
handler.handle_acl_stats().await?; handler.handle_acl_stats().await?;
} }
}, },
SubCommand::PortForward(port_forward_args) => match &port_forward_args.sub_command {
Some(PortForwardSubCommand::Add {
protocol,
bind_addr,
dst_addr,
}) => {
handler
.handle_port_forward_add(protocol, bind_addr, dst_addr)
.await?;
}
Some(PortForwardSubCommand::Remove {
protocol,
bind_addr,
dst_addr,
}) => {
handler
.handle_port_forward_remove(protocol, bind_addr, dst_addr.as_deref())
.await?;
}
Some(PortForwardSubCommand::List) | None => {
handler.handle_port_forward_list().await?;
}
},
SubCommand::Whitelist(whitelist_args) => match &whitelist_args.sub_command {
Some(WhitelistSubCommand::SetTcp { ports }) => {
handler.handle_whitelist_set_tcp(ports).await?;
}
Some(WhitelistSubCommand::SetUdp { ports }) => {
handler.handle_whitelist_set_udp(ports).await?;
}
Some(WhitelistSubCommand::ClearTcp) => {
handler.handle_whitelist_clear_tcp().await?;
}
Some(WhitelistSubCommand::ClearUdp) => {
handler.handle_whitelist_clear_udp().await?;
}
Some(WhitelistSubCommand::Show) | None => {
handler.handle_whitelist_show().await?;
}
},
SubCommand::Stats(stats_args) => match &stats_args.sub_command {
Some(StatsSubCommand::Show) | None => {
let client = handler.get_stats_client().await?;
let request = GetStatsRequest {};
let response = client.get_stats(BaseController::default(), request).await?;
if cli.output_format == OutputFormat::Json {
println!("{}", serde_json::to_string_pretty(&response.metrics)?);
} else {
#[derive(tabled::Tabled, serde::Serialize)]
struct StatsTableRow {
#[tabled(rename = "Metric Name")]
name: String,
#[tabled(rename = "Value")]
value: String,
#[tabled(rename = "Labels")]
labels: String,
}
let table_rows: Vec<StatsTableRow> = response
.metrics
.iter()
.map(|metric| {
let labels_str = if metric.labels.is_empty() {
"-".to_string()
} else {
metric
.labels
.iter()
.map(|(k, v)| format!("{}={}", k, v))
.collect::<Vec<_>>()
.join(", ")
};
let formatted_value = if metric.name.contains("bytes") {
format_size(metric.value, humansize::BINARY)
} else if metric.name.contains("duration") {
format!("{} ms", metric.value)
} else {
metric.value.to_string()
};
StatsTableRow {
name: metric.name.clone(),
value: formatted_value,
labels: labels_str,
}
})
.collect();
print_output(&table_rows, &cli.output_format)?
}
}
Some(StatsSubCommand::Prometheus) => {
let client = handler.get_stats_client().await?;
let request = GetPrometheusStatsRequest {};
let response = client
.get_prometheus_stats(BaseController::default(), request)
.await?;
println!("{}", response.prometheus_text);
}
},
SubCommand::GenAutocomplete { shell } => { SubCommand::GenAutocomplete { shell } => {
let mut cmd = Cli::command(); let mut cmd = Cli::command();
easytier::print_completions(shell, &mut cmd, "easytier-cli"); easytier::print_completions(shell, &mut cmd, "easytier-cli");
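The CLI hunk above touches a whitelist/port-forward surface that includes a small port-list parser ("80,443,8000-9000"). For reference, a standalone sketch of such a parser, assuming the anyhow crate (behavior approximated, not copied from the hunk):

use anyhow::{anyhow, Context, Result};

// Parse "80,443,8000-9000" into normalized port specs, keeping ranges intact.
fn parse_port_list(ports_str: &str) -> Result<Vec<String>> {
    let mut ports = Vec::new();
    for spec in ports_str.split(',').map(str::trim) {
        if let Some((start, end)) = spec.split_once('-') {
            let start: u16 = start.parse().with_context(|| format!("invalid start port in {spec}"))?;
            let end: u16 = end.parse().with_context(|| format!("invalid end port in {spec}"))?;
            if start > end {
                return Err(anyhow!("invalid range {spec}: start > end"));
            }
            ports.push(format!("{start}-{end}"));
        } else {
            let port: u16 = spec.parse().with_context(|| format!("invalid port: {spec}"))?;
            ports.push(port.to_string());
        }
    }
    Ok(ports)
}

fn main() -> Result<()> {
    println!("{:?}", parse_port_list("80, 443, 8000-9000")?);
    Ok(())
}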

View File

@@ -4,7 +4,7 @@
extern crate rust_i18n; extern crate rust_i18n;
use std::{ use std::{
net::{IpAddr, SocketAddr}, net::{Ipv4Addr, SocketAddr},
path::PathBuf, path::PathBuf,
process::ExitCode, process::ExitCode,
sync::Arc, sync::Arc,
@@ -18,9 +18,8 @@ use clap_complete::Shell;
use easytier::{ use easytier::{
common::{ common::{
config::{ config::{
get_avaliable_encrypt_methods, ConfigLoader, ConsoleLoggerConfig, FileLoggerConfig, ConfigLoader, ConsoleLoggerConfig, FileLoggerConfig, LoggingConfigLoader,
LoggingConfigLoader, NetworkIdentity, PeerConfig, PortForwardConfig, TomlConfigLoader, NetworkIdentity, PeerConfig, PortForwardConfig, TomlConfigLoader, VpnPortalConfig,
VpnPortalConfig,
}, },
constants::EASYTIER_VERSION, constants::EASYTIER_VERSION,
global_ctx::GlobalCtx, global_ctx::GlobalCtx,
@@ -30,7 +29,10 @@ use easytier::{
connector::create_connector_by_url, connector::create_connector_by_url,
instance_manager::NetworkInstanceManager, instance_manager::NetworkInstanceManager,
launcher::{add_proxy_network_to_config, ConfigSource}, launcher::{add_proxy_network_to_config, ConfigSource},
proto::common::{CompressionAlgoPb, NatType}, proto::{
acl::{Acl, AclV1, Action, Chain, ChainType, Protocol, Rule},
common::{CompressionAlgoPb, NatType},
},
tunnel::{IpVersion, PROTO_PORT_OFFSET}, tunnel::{IpVersion, PROTO_PORT_OFFSET},
utils::{init_logger, setup_panic_handler}, utils::{init_logger, setup_panic_handler},
web_client, web_client,
@@ -53,15 +55,10 @@ use jemalloc_ctl::{epoch, stats, Access as _, AsName as _};
#[global_allocator] #[global_allocator]
static ALLOC: jemallocator::Jemalloc = jemallocator::Jemalloc; static ALLOC: jemallocator::Jemalloc = jemallocator::Jemalloc;
#[cfg(feature = "jemalloc-prof")]
#[allow(non_upper_case_globals)]
#[export_name = "malloc_conf"]
pub static malloc_conf: &[u8] = b"prof:true,prof_active:true,lg_prof_sample:19\0";
fn set_prof_active(_active: bool) { fn set_prof_active(_active: bool) {
#[cfg(feature = "jemalloc-prof")] #[cfg(feature = "jemalloc-prof")]
{ {
const PROF_ACTIVE: &[u8] = b"prof.active\0"; const PROF_ACTIVE: &'static [u8] = b"prof.active\0";
let name = PROF_ACTIVE.name(); let name = PROF_ACTIVE.name();
name.write(_active).expect("Should succeed to set prof"); name.write(_active).expect("Should succeed to set prof");
} }
@@ -70,7 +67,7 @@ fn set_prof_active(_active: bool) {
fn dump_profile(_cur_allocated: usize) { fn dump_profile(_cur_allocated: usize) {
#[cfg(feature = "jemalloc-prof")] #[cfg(feature = "jemalloc-prof")]
{ {
const PROF_DUMP: &[u8] = b"prof.dump\0"; const PROF_DUMP: &'static [u8] = b"prof.dump\0";
static mut PROF_DUMP_FILE_NAME: [u8; 128] = [0; 128]; static mut PROF_DUMP_FILE_NAME: [u8; 128] = [0; 128];
let file_name_str = format!( let file_name_str = format!(
"profile-{}-{}.out", "profile-{}-{}.out",
@@ -284,15 +281,6 @@ struct NetworkOptions {
)] )]
disable_encryption: Option<bool>, disable_encryption: Option<bool>,
#[arg(
long,
env = "ET_ENCRYPTION_ALGORITHM",
help = t!("core_clap.encryption_algorithm").to_string(),
default_value = "aes-gcm",
value_parser = get_avaliable_encrypt_methods()
)]
encryption_algorithm: Option<String>,
#[arg( #[arg(
long, long,
env = "ET_MULTI_THREAD", env = "ET_MULTI_THREAD",
@@ -348,7 +336,7 @@ struct NetworkOptions {
help = t!("core_clap.exit_nodes").to_string(), help = t!("core_clap.exit_nodes").to_string(),
num_args = 0.. num_args = 0..
)] )]
exit_nodes: Vec<IpAddr>, exit_nodes: Vec<Ipv4Addr>,
#[arg( #[arg(
long, long,
@@ -525,7 +513,7 @@ struct NetworkOptions {
#[arg( #[arg(
long, long,
value_delimiter = ',', value_delimiter = ',',
help = t!("core_clap.tcp_whitelist").to_string(), help = "TCP port whitelist. Supports single ports (80) and ranges (8000-9000)",
num_args = 0.. num_args = 0..
)] )]
tcp_whitelist: Vec<String>, tcp_whitelist: Vec<String>,
@@ -533,28 +521,10 @@ struct NetworkOptions {
#[arg( #[arg(
long, long,
value_delimiter = ',', value_delimiter = ',',
help = t!("core_clap.udp_whitelist").to_string(), help = "UDP port whitelist. Supports single ports (53) and ranges (5000-6000)",
num_args = 0.. num_args = 0..
)] )]
udp_whitelist: Vec<String>, udp_whitelist: Vec<String>,
#[arg(
long,
env = "ET_DISABLE_RELAY_KCP",
help = t!("core_clap.disable_relay_kcp").to_string(),
num_args = 0..=1,
default_missing_value = "true"
)]
disable_relay_kcp: Option<bool>,
#[arg(
long,
env = "ET_ENABLE_RELAY_FOREIGN_NETWORK_KCP",
help = t!("core_clap.enable_relay_foreign_network_kcp").to_string(),
num_args = 0..=1,
default_missing_value = "true"
)]
enable_relay_foreign_network_kcp: Option<bool>,
} }
#[derive(Parser, Debug)] #[derive(Parser, Debug)]
@@ -652,6 +622,117 @@ impl NetworkOptions {
false false
} }
fn parse_port_list(port_list: &[String]) -> anyhow::Result<Vec<String>> {
let mut ports = Vec::new();
for port_spec in port_list {
if port_spec.contains('-') {
// Handle port range like "8000-9000"
let parts: Vec<&str> = port_spec.split('-').collect();
if parts.len() != 2 {
return Err(anyhow::anyhow!("Invalid port range format: {}", port_spec));
}
let start: u16 = parts[0]
.parse()
.with_context(|| format!("Invalid start port in range: {}", port_spec))?;
let end: u16 = parts[1]
.parse()
.with_context(|| format!("Invalid end port in range: {}", port_spec))?;
if start > end {
return Err(anyhow::anyhow!(
"Start port must be <= end port in range: {}",
port_spec
));
}
// Add individual ports in the range
for port in start..=end {
ports.push(port.to_string());
}
} else {
// Handle single port
let port: u16 = port_spec
.parse()
.with_context(|| format!("Invalid port number: {}", port_spec))?;
ports.push(port.to_string());
}
}
Ok(ports)
}
fn generate_acl_from_whitelists(&self) -> anyhow::Result<Option<Acl>> {
if self.tcp_whitelist.is_empty() && self.udp_whitelist.is_empty() {
return Ok(None);
}
let mut acl = Acl {
acl_v1: Some(AclV1 { chains: vec![] }),
};
let acl_v1 = acl.acl_v1.as_mut().unwrap();
// Create inbound chain for whitelist rules
let mut inbound_chain = Chain {
name: "inbound_whitelist".to_string(),
chain_type: ChainType::Inbound as i32,
description: "Auto-generated inbound whitelist from CLI".to_string(),
enabled: true,
rules: vec![],
default_action: Action::Drop as i32, // Default deny
};
let mut rule_priority = 1000u32;
// Add TCP whitelist rules
if !self.tcp_whitelist.is_empty() {
let tcp_ports = Self::parse_port_list(&self.tcp_whitelist)?;
let tcp_rule = Rule {
name: "tcp_whitelist".to_string(),
description: "Auto-generated TCP whitelist rule".to_string(),
priority: rule_priority,
enabled: true,
protocol: Protocol::Tcp as i32,
ports: tcp_ports,
source_ips: vec![],
destination_ips: vec![],
source_ports: vec![],
action: Action::Allow as i32,
rate_limit: 0,
burst_limit: 0,
stateful: true,
};
inbound_chain.rules.push(tcp_rule);
rule_priority -= 1;
}
// Add UDP whitelist rules
if !self.udp_whitelist.is_empty() {
let udp_ports = Self::parse_port_list(&self.udp_whitelist)?;
let udp_rule = Rule {
name: "udp_whitelist".to_string(),
description: "Auto-generated UDP whitelist rule".to_string(),
priority: rule_priority,
enabled: true,
protocol: Protocol::Udp as i32,
ports: udp_ports,
source_ips: vec![],
destination_ips: vec![],
source_ports: vec![],
action: Action::Allow as i32,
rate_limit: 0,
burst_limit: 0,
stateful: false,
};
inbound_chain.rules.push(udp_rule);
}
acl_v1.chains.push(inbound_chain);
Ok(Some(acl))
}
fn merge_into(&self, cfg: &mut TomlConfigLoader) -> anyhow::Result<()> { fn merge_into(&self, cfg: &mut TomlConfigLoader) -> anyhow::Result<()> {
if self.hostname.is_some() { if self.hostname.is_some() {
cfg.set_hostname(self.hostname.clone()); cfg.set_hostname(self.hostname.clone());
@@ -701,7 +782,7 @@ impl NetworkOptions {
.map(|s| s.parse().unwrap()) .map(|s| s.parse().unwrap())
.collect(), .collect(),
); );
} else if cfg.get_listeners().is_none() { } else if cfg.get_listeners() == None {
cfg.set_listeners( cfg.set_listeners(
Cli::parse_listeners(false, vec!["11010".to_string()])? Cli::parse_listeners(false, vec!["11010".to_string()])?
.into_iter() .into_iter()
@@ -740,7 +821,7 @@ impl NetworkOptions {
} }
for n in self.proxy_networks.iter() { for n in self.proxy_networks.iter() {
add_proxy_network_to_config(n, cfg)?; add_proxy_network_to_config(n, &cfg)?;
} }
let rpc_portal = if let Some(r) = &self.rpc_portal { let rpc_portal = if let Some(r) = &self.rpc_portal {
@@ -754,9 +835,9 @@ impl NetworkOptions {
cfg.set_rpc_portal(rpc_portal); cfg.set_rpc_portal(rpc_portal);
if let Some(rpc_portal_whitelist) = &self.rpc_portal_whitelist { if let Some(rpc_portal_whitelist) = &self.rpc_portal_whitelist {
let mut whitelist = cfg.get_rpc_portal_whitelist().unwrap_or_default(); let mut whitelist = cfg.get_rpc_portal_whitelist().unwrap_or_else(|| Vec::new());
for cidr in rpc_portal_whitelist { for cidr in rpc_portal_whitelist {
whitelist.push(*cidr); whitelist.push((*cidr).clone());
} }
cfg.set_rpc_portal_whitelist(Some(whitelist)); cfg.set_rpc_portal_whitelist(Some(whitelist));
} }
@@ -825,18 +906,18 @@ impl NetworkOptions {
port_forward.port().expect("local bind port is missing") port_forward.port().expect("local bind port is missing")
) )
.parse() .parse()
.unwrap_or_else(|_| panic!("failed to parse local bind addr {}", example_str)); .expect(format!("failed to parse local bind addr {}", example_str).as_str());
let dst_addr = port_forward let dst_addr = format!(
.path_segments() "{}",
.unwrap_or_else(|| panic!("remote destination addr is missing {}", example_str)) port_forward
.next() .path_segments()
.unwrap_or_else(|| panic!("remote destination addr is missing {}", example_str)) .expect(format!("remote destination addr is missing {}", example_str).as_str())
.to_string() .next()
.parse() .expect(format!("remote destination addr is missing {}", example_str).as_str())
.unwrap_or_else(|_| { )
panic!("failed to parse remote destination addr {}", example_str) .parse()
}); .expect(format!("failed to parse remote destination addr {}", example_str).as_str());
let port_forward_item = PortForwardConfig { let port_forward_item = PortForwardConfig {
bind_addr, bind_addr,
@@ -856,9 +937,6 @@ impl NetworkOptions {
if let Some(v) = self.disable_encryption { if let Some(v) = self.disable_encryption {
f.enable_encryption = !v; f.enable_encryption = !v;
} }
if let Some(algorithm) = &self.encryption_algorithm {
f.encryption_algorithm = algorithm.clone();
}
if let Some(v) = self.disable_ipv6 { if let Some(v) = self.disable_ipv6 {
f.enable_ipv6 = !v; f.enable_ipv6 = !v;
} }
@@ -906,23 +984,16 @@ impl NetworkOptions {
.foreign_relay_bps_limit .foreign_relay_bps_limit
.unwrap_or(f.foreign_relay_bps_limit); .unwrap_or(f.foreign_relay_bps_limit);
f.multi_thread_count = self.multi_thread_count.unwrap_or(f.multi_thread_count); f.multi_thread_count = self.multi_thread_count.unwrap_or(f.multi_thread_count);
f.disable_relay_kcp = self.disable_relay_kcp.unwrap_or(f.disable_relay_kcp);
f.enable_relay_foreign_network_kcp = self
.enable_relay_foreign_network_kcp
.unwrap_or(f.enable_relay_foreign_network_kcp);
cfg.set_flags(f); cfg.set_flags(f);
if !self.exit_nodes.is_empty() { if !self.exit_nodes.is_empty() {
cfg.set_exit_nodes(self.exit_nodes.clone()); cfg.set_exit_nodes(self.exit_nodes.clone());
} }
let mut old_tcp_whitelist = cfg.get_tcp_whitelist(); // Handle port whitelists by generating ACL configuration
old_tcp_whitelist.extend(self.tcp_whitelist.clone()); if let Some(acl) = self.generate_acl_from_whitelists()? {
cfg.set_tcp_whitelist(old_tcp_whitelist); cfg.set_acl(Some(acl));
}
let mut old_udp_whitelist = cfg.get_udp_whitelist();
old_udp_whitelist.extend(self.udp_whitelist.clone());
cfg.set_udp_whitelist(old_udp_whitelist);
Ok(()) Ok(())
} }
@@ -1141,7 +1212,7 @@ async fn run_main(cli: Cli) -> anyhow::Result<()> {
let mut cfg = TomlConfigLoader::default(); let mut cfg = TomlConfigLoader::default();
cli.network_options cli.network_options
.merge_into(&mut cfg) .merge_into(&mut cfg)
.with_context(|| "failed to create config from cli".to_string())?; .with_context(|| format!("failed to create config from cli"))?;
println!("Starting easytier from cli with config:"); println!("Starting easytier from cli with config:");
println!("############### TOML ###############\n"); println!("############### TOML ###############\n");
println!("{}", cfg.dump()); println!("{}", cfg.dump());
@@ -1151,14 +1222,6 @@ async fn run_main(cli: Cli) -> anyhow::Result<()> {
tokio::select! { tokio::select! {
_ = manager.wait() => { _ = manager.wait() => {
let infos = manager.collect_network_infos()?;
let errs = infos
.into_values()
.filter_map(|info| info.error_msg)
.collect::<Vec<_>>();
if !errs.is_empty() {
return Err(anyhow::anyhow!("some instances stopped with errors"));
}
} }
_ = tokio::signal::ctrl_c() => { _ = tokio::signal::ctrl_c() => {
println!("ctrl-c received, exiting..."); println!("ctrl-c received, exiting...");

View File

@@ -294,7 +294,7 @@ pub fn new_udp_header<T: ToTargetAddr>(target_addr: T) -> Result<Vec<u8>> {
} }
/// Parse data from UDP client on raw buffer, return (frag, target_addr, payload). /// Parse data from UDP client on raw buffer, return (frag, target_addr, payload).
pub async fn parse_udp_request(mut req: &[u8]) -> Result<(u8, TargetAddr, &[u8])> { pub async fn parse_udp_request<'a>(mut req: &'a [u8]) -> Result<(u8, TargetAddr, &'a [u8])> {
let rsv = read_exact!(req, [0u8; 2]).context("Malformed request")?; let rsv = read_exact!(req, [0u8; 2]).context("Malformed request")?;
if !rsv.eq(&[0u8; 2]) { if !rsv.eq(&[0u8; 2]) {

View File

@@ -455,16 +455,16 @@ impl<T: AsyncRead + AsyncWrite + Unpin, A: Authentication, C: AsyncTcpConnector>
info!("User logged successfully."); info!("User logged successfully.");
Ok(credentials) return Ok(credentials);
} else { } else {
self.inner self.inner
.write_all(&[1, consts::SOCKS5_AUTH_METHOD_NOT_ACCEPTABLE]) .write_all(&[1, consts::SOCKS5_AUTH_METHOD_NOT_ACCEPTABLE])
.await .await
.context("Can't reply with auth method not acceptable.")?; .context("Can't reply with auth method not acceptable.")?;
Err(SocksError::AuthenticationRejected( return Err(SocksError::AuthenticationRejected(format!(
"Authentication, rejected.".to_string(), "Authentication, rejected."
)) )));
} }
} }

View File

@@ -72,7 +72,10 @@ impl TargetAddr {
} }
pub fn is_ip(&self) -> bool { pub fn is_ip(&self) -> bool {
matches!(self, TargetAddr::Ip(_)) match self {
TargetAddr::Ip(_) => true,
_ => false,
}
} }
pub fn is_domain(&self) -> bool { pub fn is_domain(&self) -> bool {
@@ -101,7 +104,7 @@ impl TargetAddr {
} }
TargetAddr::Domain(ref domain, port) => { TargetAddr::Domain(ref domain, port) => {
debug!("TargetAddr::Domain"); debug!("TargetAddr::Domain");
if domain.len() > u8::MAX as usize { if domain.len() > u8::max_value() as usize {
return Err(SocksError::ExceededMaxDomainLen(domain.len()).into()); return Err(SocksError::ExceededMaxDomainLen(domain.len()).into());
} }
buf.extend_from_slice(&[consts::SOCKS5_ADDR_TYPE_DOMAIN_NAME, domain.len() as u8]); buf.extend_from_slice(&[consts::SOCKS5_ADDR_TYPE_DOMAIN_NAME, domain.len() as u8]);
@@ -122,7 +125,8 @@ impl std::net::ToSocketAddrs for TargetAddr {
fn to_socket_addrs(&self) -> io::Result<IntoIter<SocketAddr>> { fn to_socket_addrs(&self) -> io::Result<IntoIter<SocketAddr>> {
match *self { match *self {
TargetAddr::Ip(addr) => Ok(vec![addr].into_iter()), TargetAddr::Ip(addr) => Ok(vec![addr].into_iter()),
TargetAddr::Domain(_, _) => Err(io::Error::other( TargetAddr::Domain(_, _) => Err(io::Error::new(
io::ErrorKind::Other,
"Domain name has to be explicitly resolved, please use TargetAddr::resolve_dns().", "Domain name has to be explicitly resolved, please use TargetAddr::resolve_dns().",
)), )),
} }
@@ -145,7 +149,7 @@ pub trait ToTargetAddr {
fn to_target_addr(&self) -> io::Result<TargetAddr>; fn to_target_addr(&self) -> io::Result<TargetAddr>;
} }
impl ToTargetAddr for (&str, u16) { impl<'a> ToTargetAddr for (&'a str, u16) {
fn to_target_addr(&self) -> io::Result<TargetAddr> { fn to_target_addr(&self) -> io::Result<TargetAddr> {
// try to parse as an IP first // try to parse as an IP first
if let Ok(addr) = self.0.parse::<Ipv4Addr>() { if let Ok(addr) = self.0.parse::<Ipv4Addr>() {
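The is_ip change in this file is the usual clippy rewrite of a boolean match into matches!. A tiny self-contained version of the same pattern (stand-in type, not the socks5 module's full TargetAddr):

use std::net::SocketAddr;

#[derive(Debug)]
enum TargetAddr {
    Ip(SocketAddr),
    Domain(String, u16),
}

impl TargetAddr {
    // matches! collapses the bool-returning match into a single expression.
    fn is_ip(&self) -> bool {
        matches!(self, TargetAddr::Ip(_))
    }
}

fn main() {
    let ip = TargetAddr::Ip("127.0.0.1:1080".parse().unwrap());
    let dom = TargetAddr::Domain("example.com".into(), 443);
    println!("{:?} is_ip={} / {:?} is_ip={}", ip, ip.is_ip(), dom, dom.is_ip());
}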

View File

@@ -23,7 +23,6 @@ use tracing::Instrument;
use crate::{ use crate::{
common::{error::Error, global_ctx::ArcGlobalCtx, PeerId}, common::{error::Error, global_ctx::ArcGlobalCtx, PeerId},
gateway::ip_reassembler::ComposeIpv4PacketArgs,
peers::{peer_manager::PeerManager, PeerPacketFilter}, peers::{peer_manager::PeerManager, PeerPacketFilter},
tunnel::packet_def::{PacketType, ZCPacket}, tunnel::packet_def::{PacketType, ZCPacket},
}; };
@@ -119,7 +118,7 @@ fn socket_recv_loop(
} }
}; };
if len == 0 { if len <= 0 {
tracing::error!("recv empty packet, len: {}", len); tracing::error!("recv empty packet, len: {}", len);
return; return;
} }
@@ -159,18 +158,20 @@ fn socket_recv_loop(
let payload_len = len - ipv4_packet.get_header_length() as usize * 4; let payload_len = len - ipv4_packet.get_header_length() as usize * 4;
let id = ipv4_packet.get_identification(); let id = ipv4_packet.get_identification();
let _ = compose_ipv4_packet( let _ = compose_ipv4_packet(
ComposeIpv4PacketArgs { &mut buf[..],
buf: &mut buf[..], &v.mapped_dst_ip,
src_v4: &v.mapped_dst_ip, &dest_ip,
dst_v4: &dest_ip, IpNextHeaderProtocols::Icmp,
next_protocol: IpNextHeaderProtocols::Icmp, payload_len,
payload_len, 1200,
payload_mtu: 1200, id,
ip_id: id,
},
|buf| { |buf| {
let mut p = ZCPacket::new_with_payload(buf); let mut p = ZCPacket::new_with_payload(buf);
p.fill_peer_manager_hdr(v.my_peer_id, v.src_peer_id, PacketType::Data as u8); p.fill_peer_manager_hdr(
v.my_peer_id.into(),
v.src_peer_id.into(),
PacketType::Data as u8,
);
p.mut_peer_manager_header().unwrap().set_no_proxy(true); p.mut_peer_manager_header().unwrap().set_no_proxy(true);
if let Err(e) = sender.send(p) { if let Err(e) = sender.send(p) {
@@ -185,7 +186,7 @@ fn socket_recv_loop(
#[async_trait::async_trait] #[async_trait::async_trait]
impl PeerPacketFilter for IcmpProxy { impl PeerPacketFilter for IcmpProxy {
async fn try_process_packet_from_peer(&self, packet: ZCPacket) -> Option<ZCPacket> { async fn try_process_packet_from_peer(&self, packet: ZCPacket) -> Option<ZCPacket> {
if self.try_handle_peer_packet(&packet).await.is_some() { if let Some(_) = self.try_handle_peer_packet(&packet).await {
return None; return None;
} else { } else {
return Some(packet); return Some(packet);
@@ -305,7 +306,7 @@ impl IcmpProxy {
return Err(anyhow::anyhow!("peer manager is gone").into()); return Err(anyhow::anyhow!("peer manager is gone").into());
}; };
pm.add_packet_process_pipeline(Box::new(self.clone())).await; pm.add_packet_process_pipeline(self.clone()).await;
Ok(()) Ok(())
} }
@@ -319,7 +320,10 @@ impl IcmpProxy {
.unwrap() .unwrap()
.as_ref() .as_ref()
.with_context(|| "icmp socket not created")? .with_context(|| "icmp socket not created")?
.send_to(icmp_packet.packet(), &SocketAddrV4::new(dst_ip, 0).into())?; .send_to(
icmp_packet.packet(),
&SocketAddrV4::new(dst_ip.into(), 0).into(),
)?;
Ok(()) Ok(())
} }
@@ -345,15 +349,13 @@ impl IcmpProxy {
let len = buf.len() - 20; let len = buf.len() - 20;
let _ = compose_ipv4_packet( let _ = compose_ipv4_packet(
ComposeIpv4PacketArgs { &mut buf[..],
buf: &mut buf[..], src_ip,
src_v4: src_ip, dst_ip,
dst_v4: dst_ip, IpNextHeaderProtocols::Icmp,
next_protocol: IpNextHeaderProtocols::Icmp, len,
payload_len: len, 1200,
payload_mtu: 1200, rand::random(),
ip_id: rand::random(),
},
|buf| { |buf| {
let mut packet = ZCPacket::new_with_payload(buf); let mut packet = ZCPacket::new_with_payload(buf);
packet.fill_peer_manager_hdr(src_peer_id, dst_peer_id, PacketType::Data as u8); packet.fill_peer_manager_hdr(src_peer_id, dst_peer_id, PacketType::Data as u8);
@@ -385,7 +387,7 @@ impl IcmpProxy {
return None; return None;
}; };
let ipv4 = Ipv4Packet::new(packet.payload())?; let ipv4 = Ipv4Packet::new(&packet.payload())?;
if ipv4.get_version() != 4 || ipv4.get_next_level_protocol() != IpNextHeaderProtocols::Icmp if ipv4.get_version() != 4 || ipv4.get_next_level_protocol() != IpNextHeaderProtocols::Icmp
{ {
@@ -394,17 +396,17 @@ impl IcmpProxy {
let mut real_dst_ip = ipv4.get_destination(); let mut real_dst_ip = ipv4.get_destination();
if !(self if !self
.cidr_set .cidr_set
.contains_v4(ipv4.get_destination(), &mut real_dst_ip) .contains_v4(ipv4.get_destination(), &mut real_dst_ip)
|| is_exit_node && !is_exit_node
|| (self.global_ctx.no_tun() && !(self.global_ctx.no_tun()
&& Some(ipv4.get_destination()) && Some(ipv4.get_destination())
== self == self
.global_ctx .global_ctx
.get_ipv4() .get_ipv4()
.as_ref() .as_ref()
.map(cidr::Ipv4Inet::address))) .map(cidr::Ipv4Inet::address))
{ {
return None; return None;
} }
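The rewritten guard above is the same predicate on both sides: one spells it !(a || b || c), the other !a && !b && !c, and De Morgan's law makes those equal while the left-to-right evaluation order of the three operands stays the same. A self-contained exhaustive check:

    // Checks all eight input combinations; both spellings always agree.
    fn main() {
        for bits in 0u8..8 {
            let (a, b, c) = (bits & 1 != 0, bits & 2 != 0, bits & 4 != 0);
            assert_eq!(!(a || b || c), !a && !b && !c);
        }
        println!("equivalent for all 8 combinations");
    }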
@@ -414,10 +416,12 @@ impl IcmpProxy {
resembled_buf = resembled_buf =
self.ip_resemmbler self.ip_resemmbler
.add_fragment(ipv4.get_source(), ipv4.get_destination(), &ipv4); .add_fragment(ipv4.get_source(), ipv4.get_destination(), &ipv4);
resembled_buf.as_ref()?; if resembled_buf.is_none() {
return None;
};
icmp::echo_request::EchoRequestPacket::new(resembled_buf.as_ref().unwrap())? icmp::echo_request::EchoRequestPacket::new(resembled_buf.as_ref().unwrap())?
} else { } else {
icmp::echo_request::EchoRequestPacket::new(ipv4.payload())? icmp::echo_request::EchoRequestPacket::new(&ipv4.payload())?
}; };
if icmp_packet.get_icmp_type() != IcmpTypes::EchoRequest { if icmp_packet.get_icmp_type() != IcmpTypes::EchoRequest {
@@ -480,9 +484,10 @@ impl Drop for IcmpProxy {
"dropping icmp proxy, {:?}", "dropping icmp proxy, {:?}",
self.socket.lock().unwrap().as_ref() self.socket.lock().unwrap().as_ref()
); );
if let Some(s) = self.socket.lock().unwrap().as_ref() { self.socket.lock().unwrap().as_ref().and_then(|s| {
tracing::info!("shutting down icmp socket"); tracing::info!("shutting down icmp socket");
let _ = s.shutdown(std::net::Shutdown::Both); let _ = s.shutdown(std::net::Shutdown::Both);
} Some(())
});
} }
} }

View File

@@ -190,36 +190,33 @@ impl IpReassembler {
} }
} }
pub struct ComposeIpv4PacketArgs<'a> {
pub buf: &'a mut [u8],
pub src_v4: &'a Ipv4Addr,
pub dst_v4: &'a Ipv4Addr,
pub next_protocol: IpNextHeaderProtocol,
pub payload_len: usize,
pub payload_mtu: usize,
pub ip_id: u16,
}
// ip payload should be in buf[20..] // ip payload should be in buf[20..]
pub fn compose_ipv4_packet<F>(args: ComposeIpv4PacketArgs, cb: F) -> Result<(), Error> pub fn compose_ipv4_packet<F>(
buf: &mut [u8],
src_v4: &Ipv4Addr,
dst_v4: &Ipv4Addr,
next_protocol: IpNextHeaderProtocol,
payload_len: usize,
payload_mtu: usize,
ip_id: u16,
cb: F,
) -> Result<(), Error>
where where
F: Fn(&[u8]) -> Result<(), Error>, F: Fn(&[u8]) -> Result<(), Error>,
{ {
let total_pieces = args.payload_len.div_ceil(args.payload_mtu); let total_pieces = (payload_len + payload_mtu - 1) / payload_mtu;
let mut buf_offset = 0; let mut buf_offset = 0;
let mut fragment_offset = 0; let mut fragment_offset = 0;
let mut cur_piece = 0; let mut cur_piece = 0;
while fragment_offset < args.payload_len { while fragment_offset < payload_len {
let next_fragment_offset = let next_fragment_offset = std::cmp::min(fragment_offset + payload_mtu, payload_len);
std::cmp::min(fragment_offset + args.payload_mtu, args.payload_len);
let fragment_len = next_fragment_offset - fragment_offset; let fragment_len = next_fragment_offset - fragment_offset;
let mut ipv4_packet = let mut ipv4_packet =
MutableIpv4Packet::new(&mut args.buf[buf_offset..buf_offset + fragment_len + 20]) MutableIpv4Packet::new(&mut buf[buf_offset..buf_offset + fragment_len + 20]).unwrap();
.unwrap();
ipv4_packet.set_version(4); ipv4_packet.set_version(4);
ipv4_packet.set_header_length(5); ipv4_packet.set_header_length(5);
ipv4_packet.set_total_length((fragment_len + 20) as u16); ipv4_packet.set_total_length((fragment_len + 20) as u16);
ipv4_packet.set_identification(args.ip_id); ipv4_packet.set_identification(ip_id);
if total_pieces > 1 { if total_pieces > 1 {
if cur_piece != total_pieces - 1 { if cur_piece != total_pieces - 1 {
ipv4_packet.set_flags(Ipv4Flags::MoreFragments); ipv4_packet.set_flags(Ipv4Flags::MoreFragments);
@@ -235,9 +232,9 @@ where
ipv4_packet.set_ecn(0); ipv4_packet.set_ecn(0);
ipv4_packet.set_dscp(0); ipv4_packet.set_dscp(0);
ipv4_packet.set_ttl(32); ipv4_packet.set_ttl(32);
ipv4_packet.set_source(*args.src_v4); ipv4_packet.set_source(src_v4.clone());
ipv4_packet.set_destination(*args.dst_v4); ipv4_packet.set_destination(dst_v4.clone());
ipv4_packet.set_next_level_protocol(args.next_protocol); ipv4_packet.set_next_level_protocol(next_protocol);
ipv4_packet.set_checksum(ipv4::checksum(&ipv4_packet.to_immutable())); ipv4_packet.set_checksum(ipv4::checksum(&ipv4_packet.to_immutable()));
tracing::trace!(?ipv4_packet, "udp nat packet response send"); tracing::trace!(?ipv4_packet, "udp nat packet response send");
@@ -257,7 +254,7 @@ mod tests {
#[test] #[test]
fn resembler() { fn resembler() {
let raw_packets = [ let raw_packets = vec![
// last packet // last packet
vec![ vec![
0x45, 0x00, 0x00, 0x1c, 0x1c, 0x46, 0x20, 0x01, 0x40, 0x06, 0xb1, 0xe6, 0xc0, 0xa8, 0x45, 0x00, 0x00, 0x1c, 0x1c, 0x46, 0x20, 0x01, 0x40, 0x06, 0xb1, 0xe6, 0xc0, 0xa8,
@@ -285,7 +282,7 @@ mod tests {
let resembler = IpReassembler::new(Duration::from_secs(1)); let resembler = IpReassembler::new(Duration::from_secs(1));
for (idx, raw_packet) in raw_packets.iter().enumerate() { for (idx, raw_packet) in raw_packets.iter().enumerate() {
if let Some(packet) = Ipv4Packet::new(raw_packet) { if let Some(packet) = Ipv4Packet::new(&raw_packet) {
let ret = resembler.add_fragment(source, destination, &packet); let ret = resembler.add_fragment(source, destination, &packet);
if idx != 2 { if idx != 2 {
assert!(ret.is_none()); assert!(ret.is_none());
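Two shape changes dominate this file: compose_ipv4_packet's long positional parameter list is gathered into a ComposeIpv4PacketArgs struct on one side, and the manual (len + mtu - 1) / mtu rounding is replaced by usize::div_ceil (stable since Rust 1.73). A reduced sketch of both ideas, with hypothetical names:

    // Hypothetical, stripped-down version of the parameter-struct idea; only the
    // pieces needed to show the rounding logic are kept.
    struct FragmentArgs<'a> {
        buf: &'a mut [u8],
        payload_len: usize,
        payload_mtu: usize,
    }

    fn fragments_needed(args: &mut FragmentArgs) -> usize {
        args.buf[0] = 0x45; // version 4, IHL 5 — just to show the struct carrying the buffer
        // div_ceil rounds up like (len + mtu - 1) / mtu, without the risk of the
        // intermediate addition overflowing.
        args.payload_len.div_ceil(args.payload_mtu)
    }

    fn main() {
        let mut buf = [0u8; 64];
        let mut args = FragmentArgs { buf: &mut buf, payload_len: 2500, payload_mtu: 1200 };
        assert_eq!(fragments_needed(&mut args), 3);
    }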

View File

@@ -20,12 +20,7 @@ use pnet::packet::{
Packet as _, Packet as _,
}; };
use prost::Message; use prost::Message;
use tokio::{ use tokio::{io::copy_bidirectional, select, task::JoinSet};
io::{copy_bidirectional, AsyncRead, AsyncWrite},
select,
task::JoinSet,
};
use tokio_util::io::InspectReader;
use super::{ use super::{
tcp_proxy::{NatDstConnector, NatDstTcpConnector, TcpProxy}, tcp_proxy::{NatDstConnector, NatDstTcpConnector, TcpProxy},
@@ -33,13 +28,11 @@ use super::{
}; };
use crate::{ use crate::{
common::{ common::{
acl_processor::PacketInfo,
error::Result, error::Result,
global_ctx::{ArcGlobalCtx, GlobalCtx}, global_ctx::{ArcGlobalCtx, GlobalCtx},
}, },
peers::{acl_filter::AclFilter, peer_manager::PeerManager, NicPacketFilter, PeerPacketFilter}, peers::{peer_manager::PeerManager, NicPacketFilter, PeerPacketFilter},
proto::{ proto::{
acl::{Action, ChainType, Protocol},
cli::{ cli::{
ListTcpProxyEntryRequest, ListTcpProxyEntryResponse, TcpProxyEntry, TcpProxyEntryState, ListTcpProxyEntryRequest, ListTcpProxyEntryResponse, TcpProxyEntry, TcpProxyEntryState,
TcpProxyEntryTransportType, TcpProxyRpc, TcpProxyEntryTransportType, TcpProxyRpc,
@@ -70,9 +63,7 @@ impl PeerPacketFilter for KcpEndpointFilter {
async fn try_process_packet_from_peer(&self, packet: ZCPacket) -> Option<ZCPacket> { async fn try_process_packet_from_peer(&self, packet: ZCPacket) -> Option<ZCPacket> {
let t = packet.peer_manager_header().unwrap().packet_type; let t = packet.peer_manager_header().unwrap().packet_type;
if t == PacketType::KcpSrc as u8 && !self.is_src { if t == PacketType::KcpSrc as u8 && !self.is_src {
// src packet, but we are dst
} else if t == PacketType::KcpDst as u8 && self.is_src { } else if t == PacketType::KcpDst as u8 && self.is_src {
// dst packet, but we are src
} else { } else {
return Some(packet); return Some(packet);
} }
@@ -105,7 +96,7 @@ async fn handle_kcp_output(
PacketType::KcpDst as u8 PacketType::KcpDst as u8
}; };
let mut packet = ZCPacket::new_with_payload(&packet.inner().freeze()); let mut packet = ZCPacket::new_with_payload(&packet.inner().freeze());
packet.fill_peer_manager_hdr(peer_mgr.my_peer_id(), dst_peer_id, packet_type); packet.fill_peer_manager_hdr(peer_mgr.my_peer_id(), dst_peer_id, packet_type as u8);
if let Err(e) = peer_mgr.send_msg(packet, dst_peer_id).await { if let Err(e) = peer_mgr.send_msg(packet, dst_peer_id).await {
tracing::error!("failed to send kcp packet to peer: {:?}", e); tracing::error!("failed to send kcp packet to peer: {:?}", e);
@@ -173,7 +164,7 @@ impl NatDstConnector for NatDstKcpConnector {
let kcp_endpoint = self.kcp_endpoint.clone(); let kcp_endpoint = self.kcp_endpoint.clone();
let my_peer_id = peer_mgr.my_peer_id(); let my_peer_id = peer_mgr.my_peer_id();
let conn_data_clone = conn_data; let conn_data_clone = conn_data.clone();
connect_tasks.spawn(async move { connect_tasks.spawn(async move {
kcp_endpoint kcp_endpoint
@@ -184,7 +175,9 @@ impl NatDstConnector for NatDstKcpConnector {
Bytes::from(conn_data_clone.encode_to_vec()), Bytes::from(conn_data_clone.encode_to_vec()),
) )
.await .await
.with_context(|| format!("failed to connect to nat dst: {}", nat_dst)) .with_context(|| {
format!("failed to connect to nat dst: {}", nat_dst.to_string())
})
}); });
} }
@@ -203,7 +196,7 @@ impl NatDstConnector for NatDstKcpConnector {
_ipv4: &Ipv4Packet, _ipv4: &Ipv4Packet,
_real_dst_ip: &mut Ipv4Addr, _real_dst_ip: &mut Ipv4Addr,
) -> bool { ) -> bool {
hdr.from_peer_id == hdr.to_peer_id && hdr.is_kcp_src_modified() return hdr.from_peer_id == hdr.to_peer_id && hdr.is_kcp_src_modified();
} }
fn transport_type(&self) -> TcpProxyEntryTransportType { fn transport_type(&self) -> TcpProxyEntryTransportType {
@@ -230,10 +223,15 @@ impl TcpProxyForKcpSrcTrait for TcpProxyForKcpSrc {
} }
async fn check_dst_allow_kcp_input(&self, dst_ip: &Ipv4Addr) -> bool { async fn check_dst_allow_kcp_input(&self, dst_ip: &Ipv4Addr) -> bool {
self.0 let peer_map: Arc<crate::peers::peer_map::PeerMap> =
.get_peer_manager() self.0.get_peer_manager().get_peer_map();
.check_allow_kcp_to_dst(&IpAddr::V4(*dst_ip)) let Some(dst_peer_id) = peer_map.get_peer_id_by_ipv4(dst_ip).await else {
.await return false;
};
let Some(peer_info) = peer_map.get_route_peer_info(dst_peer_id).await else {
return false;
};
peer_info.feature_flag.map(|x| x.kcp_input).unwrap_or(false)
} }
} }
@@ -343,13 +341,13 @@ impl KcpProxySrc {
pub async fn start(&self) { pub async fn start(&self) {
self.peer_manager self.peer_manager
.add_nic_packet_process_pipeline(Box::new(self.tcp_proxy.clone())) .add_nic_packet_process_pipeline(Arc::new(self.tcp_proxy.clone()))
.await; .await;
self.peer_manager self.peer_manager
.add_packet_process_pipeline(Box::new(self.tcp_proxy.0.clone())) .add_packet_process_pipeline(Arc::new(self.tcp_proxy.0.clone()))
.await; .await;
self.peer_manager self.peer_manager
.add_packet_process_pipeline(Box::new(KcpEndpointFilter { .add_packet_process_pipeline(Arc::new(KcpEndpointFilter {
kcp_endpoint: self.kcp_endpoint.clone(), kcp_endpoint: self.kcp_endpoint.clone(),
is_src: true, is_src: true,
})) }))
@@ -374,50 +372,6 @@ pub struct KcpProxyDst {
tasks: JoinSet<()>, tasks: JoinSet<()>,
} }
#[derive(Clone)]
pub struct ProxyAclHandler {
pub acl_filter: Arc<AclFilter>,
pub packet_info: PacketInfo,
pub chain_type: ChainType,
}
impl ProxyAclHandler {
pub fn handle_packet(&self, buf: &[u8]) -> Result<()> {
let mut packet_info = self.packet_info.clone();
packet_info.packet_size = buf.len();
let ret = self
.acl_filter
.get_processor()
.process_packet(&packet_info, self.chain_type);
self.acl_filter.handle_acl_result(
&ret,
&packet_info,
self.chain_type,
&self.acl_filter.get_processor(),
);
if !matches!(ret.action, Action::Allow) {
return Err(anyhow::anyhow!("acl denied").into());
}
Ok(())
}
pub async fn copy_bidirection_with_acl(
&self,
src: impl AsyncRead + AsyncWrite + Unpin,
mut dst: impl AsyncRead + AsyncWrite + Unpin,
) -> Result<()> {
let (src_reader, src_writer) = tokio::io::split(src);
let src_reader = InspectReader::new(src_reader, |buf| {
let _ = self.handle_packet(buf);
});
let mut src = tokio::io::join(src_reader, src_writer);
copy_bidirectional(&mut src, &mut dst).await?;
Ok(())
}
}
impl KcpProxyDst { impl KcpProxyDst {
pub async fn new(peer_manager: Arc<PeerManager>) -> Self { pub async fn new(peer_manager: Arc<PeerManager>) -> Self {
let mut kcp_endpoint = create_kcp_endpoint(); let mut kcp_endpoint = create_kcp_endpoint();
@@ -442,7 +396,7 @@ impl KcpProxyDst {
#[tracing::instrument(ret)] #[tracing::instrument(ret)]
async fn handle_one_in_stream( async fn handle_one_in_stream(
kcp_stream: KcpStream, mut kcp_stream: KcpStream,
global_ctx: ArcGlobalCtx, global_ctx: ArcGlobalCtx,
proxy_entries: Arc<DashMap<ConnId, TcpProxyEntry>>, proxy_entries: Arc<DashMap<ConnId, TcpProxyEntry>>,
cidr_set: Arc<CidrSet>, cidr_set: Arc<CidrSet>,
@@ -457,13 +411,15 @@ impl KcpProxyDst {
parsed_conn_data parsed_conn_data
))? ))?
.into(); .into();
let src_socket: SocketAddr = parsed_conn_data.src.unwrap_or_default().into();
if let IpAddr::V4(dst_v4_ip) = dst_socket.ip() { match dst_socket.ip() {
let mut real_ip = dst_v4_ip; IpAddr::V4(dst_v4_ip) => {
if cidr_set.contains_v4(dst_v4_ip, &mut real_ip) { let mut real_ip = dst_v4_ip;
dst_socket.set_ip(real_ip.into()); if cidr_set.contains_v4(dst_v4_ip, &mut real_ip) {
dst_socket.set_ip(real_ip.into());
}
} }
_ => {}
}; };
let conn_id = kcp_stream.conn_id(); let conn_id = kcp_stream.conn_id();
@@ -481,36 +437,17 @@ impl KcpProxyDst {
proxy_entries.remove(&conn_id); proxy_entries.remove(&conn_id);
} }
let send_to_self = if Some(dst_socket.ip()) == global_ctx.get_ipv4().map(|ip| IpAddr::V4(ip.address()))
Some(dst_socket.ip()) == global_ctx.get_ipv4().map(|ip| IpAddr::V4(ip.address())); && global_ctx.no_tun()
{
if send_to_self && global_ctx.no_tun() {
dst_socket = format!("127.0.0.1:{}", dst_socket.port()).parse().unwrap(); dst_socket = format!("127.0.0.1:{}", dst_socket.port()).parse().unwrap();
} }
let acl_handler = ProxyAclHandler {
acl_filter: global_ctx.get_acl_filter().clone(),
packet_info: PacketInfo {
src_ip: src_socket.ip(),
dst_ip: dst_socket.ip(),
src_port: Some(src_socket.port()),
dst_port: Some(dst_socket.port()),
protocol: Protocol::Tcp,
packet_size: conn_data.len(),
},
chain_type: if send_to_self {
ChainType::Inbound
} else {
ChainType::Forward
},
};
acl_handler.handle_packet(&conn_data)?;
tracing::debug!("kcp connect to dst socket: {:?}", dst_socket); tracing::debug!("kcp connect to dst socket: {:?}", dst_socket);
let _g = global_ctx.net_ns.guard(); let _g = global_ctx.net_ns.guard();
let connector = NatDstTcpConnector {}; let connector = NatDstTcpConnector {};
let ret = connector let mut ret = connector
.connect("0.0.0.0:0".parse().unwrap(), dst_socket) .connect("0.0.0.0:0".parse().unwrap(), dst_socket)
.await?; .await?;
@@ -518,10 +455,7 @@ impl KcpProxyDst {
e.state = TcpProxyEntryState::Connected.into(); e.state = TcpProxyEntryState::Connected.into();
} }
acl_handler copy_bidirectional(&mut ret, &mut kcp_stream).await?;
.copy_bidirection_with_acl(kcp_stream, ret)
.await?;
Ok(()) Ok(())
} }
@@ -550,7 +484,7 @@ impl KcpProxyDst {
pub async fn start(&mut self) { pub async fn start(&mut self) {
self.run_accept_task().await; self.run_accept_task().await;
self.peer_manager self.peer_manager
.add_packet_process_pipeline(Box::new(KcpEndpointFilter { .add_packet_process_pipeline(Arc::new(KcpEndpointFilter {
kcp_endpoint: self.kcp_endpoint.clone(), kcp_endpoint: self.kcp_endpoint.clone(),
is_src: false, is_src: false,
})) }))
@@ -578,7 +512,7 @@ impl TcpProxyRpc for KcpProxyDstRpcService {
let mut reply = ListTcpProxyEntryResponse::default(); let mut reply = ListTcpProxyEntryResponse::default();
if let Some(tcp_proxy) = self.0.upgrade() { if let Some(tcp_proxy) = self.0.upgrade() {
for item in tcp_proxy.iter() { for item in tcp_proxy.iter() {
reply.entries.push(*item.value()); reply.entries.push(item.value().clone());
} }
} }
Ok(reply) Ok(reply)
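One side of this file also carries a ProxyAclHandler whose copy_bidirection_with_acl splits the source stream and wraps its read half in tokio_util's InspectReader, so every chunk is shown to the ACL check before being copied onward. A reduced, self-contained illustration of that wrapping pattern, with a byte counter standing in for the ACL verdict:

    // Sketch only: the closure observes each chunk as it is read, without changing
    // what the downstream copy sees; counting bytes stands in for an ACL check.
    use std::sync::atomic::{AtomicU64, Ordering};
    use tokio_util::io::InspectReader;

    async fn copy_with_tap(
        src: impl tokio::io::AsyncRead + Unpin,
        dst: &mut (impl tokio::io::AsyncWrite + Unpin),
    ) -> std::io::Result<u64> {
        let seen = AtomicU64::new(0);
        let mut tapped = InspectReader::new(src, |chunk| {
            seen.fetch_add(chunk.len() as u64, Ordering::Relaxed);
        });
        tokio::io::copy(&mut tapped, dst).await?;
        Ok(seen.load(Ordering::Relaxed))
    }

    #[tokio::main]
    async fn main() -> std::io::Result<()> {
        use tokio::io::AsyncWriteExt;
        let (mut tx, rx) = tokio::io::duplex(64);
        tx.write_all(b"hello").await?;
        drop(tx); // EOF for the read side
        let mut out = Vec::new();
        let n = copy_with_tap(rx, &mut out).await?;
        assert_eq!(n, 5);
        assert_eq!(out, b"hello".to_vec());
        Ok(())
    }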

View File

@@ -56,11 +56,11 @@ impl CidrSet {
cidr_set.lock().unwrap().clear(); cidr_set.lock().unwrap().clear();
for cidr in cidrs.iter() { for cidr in cidrs.iter() {
let real_cidr = cidr.cidr; let real_cidr = cidr.cidr;
let mapped = cidr.mapped_cidr.unwrap_or(real_cidr); let mapped = cidr.mapped_cidr.unwrap_or(real_cidr.clone());
cidr_set.lock().unwrap().push(mapped); cidr_set.lock().unwrap().push(mapped.clone());
if mapped != real_cidr { if mapped != real_cidr {
mapped_to_real.insert(mapped, real_cidr); mapped_to_real.insert(mapped.clone(), real_cidr.clone());
} }
} }
} }
@@ -70,11 +70,11 @@ impl CidrSet {
} }
pub fn contains_v4(&self, ipv4: std::net::Ipv4Addr, real_ip: &mut std::net::Ipv4Addr) -> bool { pub fn contains_v4(&self, ipv4: std::net::Ipv4Addr, real_ip: &mut std::net::Ipv4Addr) -> bool {
let ip = ipv4; let ip = ipv4.into();
let s = self.cidr_set.lock().unwrap(); let s = self.cidr_set.lock().unwrap();
for cidr in s.iter() { for cidr in s.iter() {
if cidr.contains(&ip) { if cidr.contains(&ip) {
if let Some(real_cidr) = self.mapped_to_real.get(cidr).map(|v| *v.value()) { if let Some(real_cidr) = self.mapped_to_real.get(&cidr).map(|v| v.value().clone()) {
let origin_network_bits = real_cidr.first().address().to_bits(); let origin_network_bits = real_cidr.first().address().to_bits();
let network_mask = cidr.mask().to_bits(); let network_mask = cidr.mask().to_bits();
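The cleanups in this file mostly drop redundant work on Copy values: cidr::Ipv4Inet as used here is Copy, so the .clone() calls, the extra .into(), and the needless borrows on one side change nothing. A trivial stand-alone illustration with a stand-in type:

    // Stand-in type to show the pattern; the real code uses cidr::Ipv4Inet, which is Copy.
    #[derive(Clone, Copy, PartialEq, Debug)]
    struct Cidr {
        network: u32,
        len: u8,
    }

    fn effective(mapped: Option<Cidr>, real: Cidr) -> Cidr {
        // No .clone() needed: Copy values are duplicated implicitly,
        // and `real` stays usable at the call site afterwards.
        mapped.unwrap_or(real)
    }

    fn main() {
        let real = Cidr { network: 0x0a00_0000, len: 24 };
        assert_eq!(effective(None, real), real);
    }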

View File

@@ -7,21 +7,19 @@ use dashmap::DashMap;
use pnet::packet::ipv4::Ipv4Packet; use pnet::packet::ipv4::Ipv4Packet;
use prost::Message as _; use prost::Message as _;
use quinn::{Endpoint, Incoming}; use quinn::{Endpoint, Incoming};
use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite}; use tokio::io::{copy_bidirectional, AsyncRead, AsyncReadExt, AsyncWrite};
use tokio::net::TcpStream; use tokio::net::TcpStream;
use tokio::task::JoinSet; use tokio::task::JoinSet;
use tokio::time::timeout; use tokio::time::timeout;
use crate::common::acl_processor::PacketInfo;
use crate::common::error::Result; use crate::common::error::Result;
use crate::common::global_ctx::{ArcGlobalCtx, GlobalCtx}; use crate::common::global_ctx::{ArcGlobalCtx, GlobalCtx};
use crate::common::join_joinset_background; use crate::common::join_joinset_background;
use crate::defer; use crate::defer;
use crate::gateway::kcp_proxy::{ProxyAclHandler, TcpProxyForKcpSrcTrait}; use crate::gateway::kcp_proxy::TcpProxyForKcpSrcTrait;
use crate::gateway::tcp_proxy::{NatDstConnector, NatDstTcpConnector, TcpProxy}; use crate::gateway::tcp_proxy::{NatDstConnector, NatDstTcpConnector, TcpProxy};
use crate::gateway::CidrSet; use crate::gateway::CidrSet;
use crate::peers::peer_manager::PeerManager; use crate::peers::peer_manager::PeerManager;
use crate::proto::acl::{ChainType, Protocol};
use crate::proto::cli::{ use crate::proto::cli::{
ListTcpProxyEntryRequest, ListTcpProxyEntryResponse, TcpProxyEntry, TcpProxyEntryState, ListTcpProxyEntryRequest, ListTcpProxyEntryResponse, TcpProxyEntry, TcpProxyEntryState,
TcpProxyEntryTransportType, TcpProxyRpc, TcpProxyEntryTransportType, TcpProxyRpc,
@@ -172,7 +170,7 @@ impl NatDstConnector for NatDstQUICConnector {
_ipv4: &Ipv4Packet, _ipv4: &Ipv4Packet,
_real_dst_ip: &mut Ipv4Addr, _real_dst_ip: &mut Ipv4Addr,
) -> bool { ) -> bool {
hdr.from_peer_id == hdr.to_peer_id && !hdr.is_kcp_src_modified() return hdr.from_peer_id == hdr.to_peer_id && !hdr.is_kcp_src_modified();
} }
fn transport_type(&self) -> TcpProxyEntryTransportType { fn transport_type(&self) -> TcpProxyEntryTransportType {
@@ -229,10 +227,10 @@ impl QUICProxySrc {
pub async fn start(&self) { pub async fn start(&self) {
self.peer_manager self.peer_manager
.add_nic_packet_process_pipeline(Box::new(self.tcp_proxy.clone())) .add_nic_packet_process_pipeline(Arc::new(self.tcp_proxy.clone()))
.await; .await;
self.peer_manager self.peer_manager
.add_packet_process_pipeline(Box::new(self.tcp_proxy.0.clone())) .add_packet_process_pipeline(Arc::new(self.tcp_proxy.0.clone()))
.await; .await;
self.tcp_proxy.0.start(false).await.unwrap(); self.tcp_proxy.0.start(false).await.unwrap();
} }
@@ -324,13 +322,12 @@ impl QUICProxyDst {
.await; .await;
match ret { match ret {
Ok(Ok((quic_stream, tcp_stream, acl))) => { Ok(Ok((mut quic_stream, mut tcp_stream))) => {
let remote_addr = quic_stream.connection.as_ref().map(|c| c.remote_address()); let ret = copy_bidirectional(&mut quic_stream, &mut tcp_stream).await;
let ret = acl.copy_bidirection_with_acl(quic_stream, tcp_stream).await;
tracing::info!( tracing::info!(
"QUIC connection handled, result: {:?}, remote addr: {:?}", "QUIC connection handled, result: {:?}, remote addr: {:?}",
ret, ret,
remote_addr, quic_stream.connection.as_ref().map(|c| c.remote_address())
); );
} }
Ok(Err(e)) => { Ok(Err(e)) => {
@@ -348,7 +345,7 @@ impl QUICProxyDst {
cidr_set: Arc<CidrSet>, cidr_set: Arc<CidrSet>,
proxy_entry_key: SocketAddr, proxy_entry_key: SocketAddr,
proxy_entries: Arc<DashMap<SocketAddr, TcpProxyEntry>>, proxy_entries: Arc<DashMap<SocketAddr, TcpProxyEntry>>,
) -> Result<(QUICStream, TcpStream, ProxyAclHandler)> { ) -> Result<(QUICStream, TcpStream)> {
let conn = incoming.await.with_context(|| "accept failed")?; let conn = incoming.await.with_context(|| "accept failed")?;
let addr = conn.remote_address(); let addr = conn.remote_address();
tracing::info!("Accepted QUIC connection from {}", addr); tracing::info!("Accepted QUIC connection from {}", addr);
@@ -379,8 +376,7 @@ impl QUICProxyDst {
dst_socket.set_ip(real_ip); dst_socket.set_ip(real_ip);
} }
let send_to_self = Some(*dst_socket.ip()) == ctx.get_ipv4().map(|ip| ip.address()); if Some(*dst_socket.ip()) == ctx.get_ipv4().map(|ip| ip.address()) && ctx.no_tun() {
if send_to_self && ctx.no_tun() {
dst_socket = format!("127.0.0.1:{}", dst_socket.port()).parse().unwrap(); dst_socket = format!("127.0.0.1:{}", dst_socket.port()).parse().unwrap();
} }
@@ -395,24 +391,6 @@ impl QUICProxyDst {
}, },
); );
let acl_handler = ProxyAclHandler {
acl_filter: ctx.get_acl_filter().clone(),
packet_info: PacketInfo {
src_ip: addr.ip(),
dst_ip: (*dst_socket.ip()).into(),
src_port: Some(addr.port()),
dst_port: Some(dst_socket.port()),
protocol: Protocol::Tcp,
packet_size: len as usize,
},
chain_type: if send_to_self {
ChainType::Inbound
} else {
ChainType::Forward
},
};
acl_handler.handle_packet(&buf)?;
let connector = NatDstTcpConnector {}; let connector = NatDstTcpConnector {};
let dst_stream = { let dst_stream = {
@@ -433,7 +411,7 @@ impl QUICProxyDst {
receiver: r, receiver: r,
}; };
Ok((quic_stream, dst_stream, acl_handler)) Ok((quic_stream, dst_stream))
} }
} }
@@ -457,7 +435,7 @@ impl TcpProxyRpc for QUICProxyDstRpcService {
let mut reply = ListTcpProxyEntryResponse::default(); let mut reply = ListTcpProxyEntryResponse::default();
if let Some(tcp_proxy) = self.0.upgrade() { if let Some(tcp_proxy) = self.0.upgrade() {
for item in tcp_proxy.iter() { for item in tcp_proxy.iter() {
reply.entries.push(*item.value()); reply.entries.push(item.value().clone());
} }
} }
Ok(reply) Ok(reply)
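The other side of these QUIC hunks relays the two streams directly with tokio::io::copy_bidirectional, which pumps bytes in both directions until each half reaches EOF and returns the per-direction byte counts. A minimal sketch of that call:

    // Minimal sketch: bridge two TCP streams until both directions hit EOF.
    use tokio::io::copy_bidirectional;
    use tokio::net::TcpStream;

    async fn bridge(mut a: TcpStream, mut b: TcpStream) -> std::io::Result<(u64, u64)> {
        // Returns (bytes copied a -> b, bytes copied b -> a).
        copy_bidirectional(&mut a, &mut b).await
    }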

Some files were not shown because too many files have changed in this diff.