Compare commits


3 Commits

Author     SHA1        Message                    Date
sijie.sun  ba3f36d22b  remove lock on pipelines   2025-07-25 10:46:06 +08:00
sijie.sun  78d8848ede  fix cond of rpc encrypt    2025-07-25 09:13:42 +08:00
sijie.sun  601a0bf719  remove lock of routes      2025-07-25 09:11:05 +08:00
178 changed files with 1892 additions and 6057 deletions

View File

@@ -19,10 +19,6 @@ SYSROOT = "/usr/local/ohos-sdk/linux/native/sysroot"
linker = "aarch64-unknown-linux-musl-gcc"
rustflags = ["-C", "target-feature=+crt-static"]
[target.riscv64gc-unknown-linux-musl]
linker = "riscv64-unknown-linux-musl-gcc"
rustflags = ["-C", "target-feature=+crt-static"]
[target.'cfg(all(windows, target_env = "msvc"))']
rustflags = ["-C", "target-feature=+crt-static"]

View File

@@ -10,24 +10,8 @@ RUN ARTIFACT_ARCH=""; \
ARTIFACT_ARCH="x86_64"; \
elif [ "$TARGETPLATFORM" = "linux/arm64" ]; then \
ARTIFACT_ARCH="aarch64"; \
elif [ "$TARGETPLATFORM" = "linux/riscv64" ]; then \
ARTIFACT_ARCH="riscv64"; \
elif [ "$TARGETPLATFORM" = "linux/mips" ]; then \
ARTIFACT_ARCH="mips"; \
elif [ "$TARGETPLATFORM" = "linux/mipsel" ]; then \
ARTIFACT_ARCH="mipsel"; \
elif [ "$TARGETPLATFORM" = "linux/arm/v7" ]; then \
ARTIFACT_ARCH="armv7hf"; \
elif [ "$TARGETPLATFORM" = "linux/arm/v6" ]; then \
ARTIFACT_ARCH="armhf"; \
elif [ "$TARGETPLATFORM" = "linux/arm/v5" ]; then \
ARTIFACT_ARCH="arm"; \
elif [ "$TARGETPLATFORM" = "linux/arm" ]; then \
ARTIFACT_ARCH="armv7"; \
elif [ "$TARGETPLATFORM" = "linux/loong64" ]; then \
ARTIFACT_ARCH="loongarch64"; \
else \
echo "Unsupported architecture: $TARGETPLATFORM"; \
echo "Unsupported architecture: $TARGETARCH"; \
exit 1; \
fi; \
cp /tmp/artifacts/easytier-linux-${ARTIFACT_ARCH}/* /tmp/output;

View File

@@ -83,9 +83,6 @@ jobs:
- TARGET: x86_64-unknown-linux-musl
OS: ubuntu-22.04
ARTIFACT_NAME: linux-x86_64
- TARGET: riscv64gc-unknown-linux-musl
OS: ubuntu-22.04
ARTIFACT_NAME: linux-riscv64
- TARGET: mips-unknown-linux-musl
OS: ubuntu-22.04
ARTIFACT_NAME: linux-mips
@@ -192,8 +189,6 @@ jobs:
if [[ $OS =~ ^windows.*$ ]]; then
SUFFIX=.exe
CORE_FEATURES="--features=mimalloc"
elif [[ $TARGET =~ ^riscv64.*$ ]]; then
CORE_FEATURES="--features=mimalloc"
else
CORE_FEATURES="--features=jemalloc"
fi
@@ -229,8 +224,8 @@ jobs:
rustup set auto-self-update disable
rustup install 1.89
rustup default 1.89
rustup install 1.87
rustup default 1.87
export CC=clang
export CXX=clang++
@@ -260,7 +255,7 @@ jobs:
TAG=$GITHUB_SHA
fi
if [[ $OS =~ ^ubuntu.*$ && ! $TARGET =~ ^.*freebsd$ && ! $TARGET =~ ^loongarch.*$ && ! $TARGET =~ ^riscv64.*$ ]]; then
if [[ $OS =~ ^ubuntu.*$ && ! $TARGET =~ ^.*freebsd$ && ! $TARGET =~ ^loongarch.*$ ]]; then
UPX_VERSION=4.2.4
curl -L https://github.com/upx/upx/releases/download/v${UPX_VERSION}/upx-${UPX_VERSION}-amd64_linux.tar.xz -s | tar xJvf -
cp upx-${UPX_VERSION}-amd64_linux/upx .

View File

@@ -11,18 +11,13 @@ on:
image_tag:
description: 'Tag for this image build'
type: string
default: 'v2.4.2'
default: 'v2.4.0'
required: true
mark_latest:
description: 'Mark this image as latest'
type: boolean
default: false
required: true
mark_unstable:
description: 'Mark this image as unstable'
type: boolean
default: false
required: true
jobs:
docker:
@@ -32,13 +27,6 @@ jobs:
-
name: Checkout
uses: actions/checkout@v4
-
name: Validate inputs
run: |
if [[ "${{ inputs.mark_latest }}" == "true" && "${{ inputs.mark_unstable }}" == "true" ]]; then
echo "Error: mark_latest and mark_unstable cannot both be true"
exit 1
fi
-
name: Set up QEMU
uses: docker/setup-qemu-action@v3
@@ -68,36 +56,14 @@ jobs:
- name: List files
run: |
ls -l -R .
- name: Prepare Docker tags
id: tags
run: |
# Base tags with version
DOCKERHUB_TAGS="easytier/easytier:${{ inputs.image_tag }}"
GHCR_TAGS="ghcr.io/easytier/easytier:${{ inputs.image_tag }}"
# Add latest tags if requested
if [[ "${{ inputs.mark_latest }}" == "true" ]]; then
DOCKERHUB_TAGS="${DOCKERHUB_TAGS},easytier/easytier:latest"
GHCR_TAGS="${GHCR_TAGS},ghcr.io/easytier/easytier:latest"
fi
# Add unstable tags if requested
if [[ "${{ inputs.mark_unstable }}" == "true" ]]; then
DOCKERHUB_TAGS="${DOCKERHUB_TAGS},easytier/easytier:unstable"
GHCR_TAGS="${GHCR_TAGS},ghcr.io/easytier/easytier:unstable"
fi
# Combine all tags
ALL_TAGS="${DOCKERHUB_TAGS},${GHCR_TAGS}"
echo "tags=${ALL_TAGS}" >> $GITHUB_OUTPUT
echo "Generated tags: ${ALL_TAGS}"
-
name: Build and push
uses: docker/build-push-action@v6
with:
context: ./docker_context
platforms: linux/amd64,linux/arm64,linux/riscv64,linux/mips,linux/mipsel,linux/arm/v7,linux/arm/v6,linux/arm/v5,linux/arm,linux/loong64
platforms: linux/amd64,linux/arm64
push: true
file: .github/workflows/Dockerfile
tags: ${{ steps.tags.outputs.tags }}
tags: |
easytier/easytier:${{ inputs.image_tag }}${{ inputs.mark_latest && ',easytier/easytier:latest' || '' }},
ghcr.io/easytier/easytier:${{ inputs.image_tag }}${{ inputs.mark_latest && ',easytier/easytier:latest' || '' }},

View File

@@ -29,7 +29,7 @@ jobs:
concurrent_skipping: 'same_content_newer'
skip_after_successful_duplicate: 'true'
cancel_others: 'true'
paths: '["Cargo.toml", "Cargo.lock", "easytier/**", "easytier-gui/**", ".github/workflows/gui.yml", ".github/workflows/install_rust.sh", ".github/workflows/install_gui_dep.sh"]'
paths: '["Cargo.toml", "Cargo.lock", "easytier/**", "easytier-gui/**", ".github/workflows/gui.yml", ".github/workflows/install_rust.sh"]'
build-gui:
strategy:
fail-fast: false
@@ -78,11 +78,20 @@ jobs:
needs: pre_job
if: needs.pre_job.outputs.should_skip != 'true'
steps:
- uses: actions/checkout@v3
- name: Install GUI dependencies (x86 only)
if: ${{ matrix.TARGET == 'x86_64-unknown-linux-musl' }}
run: bash ./.github/workflows/install_gui_dep.sh
run: |
sudo apt update
sudo apt install -qq libwebkit2gtk-4.1-dev \
build-essential \
curl \
wget \
file \
libgtk-3-dev \
librsvg2-dev \
libxdo-dev \
libssl-dev \
patchelf
- name: Install GUI cross compile (aarch64 only)
if: ${{ matrix.TARGET == 'aarch64-unknown-linux-musl' }}
@@ -119,6 +128,8 @@ jobs:
echo "PKG_CONFIG_SYSROOT_DIR=/usr/aarch64-linux-gnu/" >> "$GITHUB_ENV"
echo "PKG_CONFIG_PATH=/usr/lib/aarch64-linux-gnu/pkgconfig/" >> "$GITHUB_ENV"
- uses: actions/checkout@v3
- name: Set current ref as env variable
run: |
echo "GIT_DESC=$(git log -1 --format=%cd.%h --date=format:%Y-%m-%d_%H:%M:%S)" >> $GITHUB_ENV

View File

@@ -1,11 +0,0 @@
sudo apt update
sudo apt install -qq libwebkit2gtk-4.1-dev \
build-essential \
curl \
wget \
file \
libgtk-3-dev \
librsvg2-dev \
libxdo-dev \
libssl-dev \
patchelf

View File

@@ -15,8 +15,6 @@ if [[ $OS =~ ^ubuntu.*$ ]]; then
# if target is mips or mipsel, we should use soft-float version of musl
if [[ $TARGET =~ ^mips.*$ || $TARGET =~ ^mipsel.*$ ]]; then
MUSL_TARGET=${TARGET}sf
elif [[ $TARGET =~ ^riscv64gc-.*$ ]]; then
MUSL_TARGET=${TARGET/#riscv64gc-/riscv64-}
fi
if [[ $MUSL_TARGET =~ musl ]]; then
mkdir -p ./musl_gcc
@@ -31,8 +29,8 @@ fi
# see https://github.com/rust-lang/rustup/issues/3709
rustup set auto-self-update disable
rustup install 1.89
rustup default 1.89
rustup install 1.87
rustup default 1.87
# mips/mipsel cannot add target from rustup, need compile by ourselves
if [[ $OS =~ ^ubuntu.*$ && $TARGET =~ ^mips.*$ ]]; then

View File

@@ -21,7 +21,7 @@ on:
version:
description: 'Version for this release'
type: string
default: 'v2.4.2'
default: 'v2.4.0'
required: true
make_latest:
description: 'Mark this release as latest'

View File

@@ -28,7 +28,7 @@ jobs:
# All of these options are optional, so you can remove them if you are happy with the defaults
concurrent_skipping: 'never'
skip_after_successful_duplicate: 'true'
paths: '["Cargo.toml", "Cargo.lock", "easytier/**", ".github/workflows/test.yml", ".github/workflows/install_gui_dep.sh", ".github/workflows/install_rust.sh"]'
paths: '["Cargo.toml", "Cargo.lock", "easytier/**", ".github/workflows/test.yml"]'
test:
runs-on: ubuntu-22.04
needs: pre_job
@@ -89,24 +89,6 @@ jobs:
./target
key: ${{ runner.os }}-cargo-test-${{ hashFiles('**/Cargo.lock') }}
- name: Install GUI dependencies (Used by clippy)
run: |
bash ./.github/workflows/install_gui_dep.sh
bash ./.github/workflows/install_rust.sh
rustup component add rustfmt
rustup component add clippy
- name: Check formatting
if: ${{ !cancelled() }}
run: cargo fmt --all -- --check
- name: Check Clippy
if: ${{ !cancelled() }}
# NOTE: tauri need `dist` dir in build.rs
run: |
mkdir -p easytier-gui/dist
cargo clippy --all-targets --all-features --all -- -D warnings
- name: Run tests
run: |
sudo prlimit --pid $$ --nofile=1048576:1048576

.gitignore
View File

@@ -12,7 +12,6 @@ target-*/
.vscode
/.idea
/.direnv/
# perf & flamegraph
perf.data

View File

@@ -26,7 +26,7 @@ Thank you for your interest in contributing to EasyTier! This document provides
#### Required Tools
- Node.js v21 or higher
- pnpm v9 or higher
- Rust toolchain (version 1.89)
- Rust toolchain (version 1.87)
- LLVM and Clang
- Protoc (Protocol Buffers compiler)
@@ -37,6 +37,7 @@ Thank you for your interest in contributing to EasyTier! This document provides
# Core build dependencies
sudo apt-get update && sudo apt-get install -y \
musl-tools \
libappindicator3-dev \
llvm \
clang \
protobuf-compiler
@@ -52,7 +53,6 @@ sudo apt install -y \
librsvg2-dev \
libxdo-dev \
libssl-dev \
libappindicator3-dev \
patchelf
# Testing dependencies
@@ -79,8 +79,8 @@ sudo apt install -y bridge-utils
2. Install dependencies:
```bash
# Install Rust toolchain
rustup install 1.89
rustup default 1.89
rustup install 1.87
rustup default 1.87
# Install project dependencies
pnpm -r install

View File

@@ -34,7 +34,7 @@
#### 必需工具
- Node.js v21 或更高版本
- pnpm v9 或更高版本
- Rust 工具链（版本 1.89）
- Rust 工具链（版本 1.87）
- LLVM 和 Clang
- Protoc（Protocol Buffers 编译器）
@@ -45,6 +45,7 @@
# 核心构建依赖
sudo apt-get update && sudo apt-get install -y \
musl-tools \
libappindicator3-dev \
llvm \
clang \
protobuf-compiler
@@ -60,7 +61,6 @@ sudo apt install -y \
librsvg2-dev \
libxdo-dev \
libssl-dev \
libappindicator3-dev \
patchelf
# 测试依赖
@@ -87,8 +87,8 @@ sudo apt install -y bridge-utils
2. 安装依赖:
```bash
# 安装 Rust 工具链
rustup install 1.89
rustup default 1.89
rustup install 1.87
rustup default 1.87
# 安装项目依赖
pnpm -r install

Cargo.lock
View File

@@ -1876,7 +1876,7 @@ dependencies = [
"libc",
"option-ext",
"redox_users 0.5.0",
"windows-sys 0.60.2",
"windows-sys 0.59.0",
]
[[package]]
@@ -1979,7 +1979,7 @@ checksum = "0d6ef0072f8a535281e4876be788938b528e9a1d43900b82c2569af7da799125"
[[package]]
name = "easytier"
version = "2.4.2"
version = "2.4.0"
dependencies = [
"aes-gcm",
"anyhow",
@@ -2033,7 +2033,6 @@ dependencies = [
"network-interface",
"nix 0.29.0",
"once_cell",
"openssl",
"parking_lot",
"percent-encoding",
"petgraph 0.8.1",
@@ -2113,17 +2112,16 @@ dependencies = [
[[package]]
name = "easytier-gui"
version = "2.4.2"
version = "2.4.0"
dependencies = [
"anyhow",
"chrono",
"dashmap",
"dunce",
"easytier",
"elevated-command",
"gethostname 1.0.2",
"libc",
"once_cell",
"security-framework-sys",
"serde",
"serde_json",
"tauri",
@@ -2139,8 +2137,6 @@ dependencies = [
"thunk-rs",
"tokio",
"uuid",
"winapi",
"windows 0.52.0",
]
[[package]]
@@ -2163,7 +2159,7 @@ dependencies = [
[[package]]
name = "easytier-web"
version = "2.4.2"
version = "2.4.0"
dependencies = [
"anyhow",
"async-trait",
@@ -2211,6 +2207,20 @@ dependencies = [
"serde",
]
[[package]]
name = "elevated-command"
version = "1.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "54c410eccdcc5b759704fdb6a792afe6b01ab8a062e2c003ff2567e2697a94aa"
dependencies = [
"anyhow",
"base64 0.21.7",
"libc",
"log",
"winapi",
"windows 0.52.0",
]
[[package]]
name = "embed-resource"
version = "3.0.5"
@@ -5235,15 +5245,6 @@ version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf"
[[package]]
name = "openssl-src"
version = "300.5.2+3.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d270b79e2926f5150189d475bc7e9d2c69f9c4697b185fa917d5a32b792d21b4"
dependencies = [
"cc",
]
[[package]]
name = "openssl-sys"
version = "0.9.103"
@@ -5252,7 +5253,6 @@ checksum = "7f9e8deee91df40a943c71b917e5874b951d32a802526c85721ce3b776c929d6"
dependencies = [
"cc",
"libc",
"openssl-src",
"pkg-config",
"vcpkg",
]
@@ -7557,9 +7557,9 @@ dependencies = [
[[package]]
name = "socket2"
version = "0.5.10"
version = "0.5.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e22376abed350d73dd1cd119b57ffccad95b4e585a7cda43e286245ce23c0678"
checksum = "ce305eb0b4296696835b71df73eb912e0f1ffd2556a501fcede6e0c50349191c"
dependencies = [
"libc",
"windows-sys 0.52.0",

View File

@@ -44,7 +44,5 @@
"prettier.enable": false,
"editor.formatOnSave": true,
"editor.formatOnSaveMode": "modifications",
"editor.formatOnPaste": false,
"editor.formatOnType": true,
}
}

View File

@@ -105,9 +105,9 @@ After successful execution, you can check the network status using `easytier-cli
```text
| ipv4 | hostname | cost | lat_ms | loss_rate | rx_bytes | tx_bytes | tunnel_proto | nat_type | id | version |
| ------------ | -------------- | ----- | ------ | --------- | -------- | -------- | ------------ | -------- | ---------- | --------------- |
| 10.126.126.1 | abc-1 | Local | * | * | * | * | udp | FullCone | 439804259 | 2.4.2-70e69a38~ |
| 10.126.126.2 | abc-2 | p2p | 3.452 | 0 | 17.33 kB | 20.42 kB | udp | FullCone | 390879727 | 2.4.2-70e69a38~ |
| | PublicServer_a | p2p | 27.796 | 0.000 | 50.01 kB | 67.46 kB | tcp | Unknown | 3771642457 | 2.4.2-70e69a38~ |
| 10.126.126.1 | abc-1 | Local | * | * | * | * | udp | FullCone | 439804259 | 2.4.0-70e69a38~ |
| 10.126.126.2 | abc-2 | p2p | 3.452 | 0 | 17.33 kB | 20.42 kB | udp | FullCone | 390879727 | 2.4.0-70e69a38~ |
| | PublicServer_a | p2p | 27.796 | 0.000 | 50.01 kB | 67.46 kB | tcp | Unknown | 3771642457 | 2.4.0-70e69a38~ |
```
You can test connectivity between nodes:
@@ -302,18 +302,14 @@ CDN acceleration and security protection for this project are sponsored by Tence
</a>
</p>
Special thanks to [Langlang Cloud](https://langlangy.cn/?i26c5a5) and [RainCloud](https://www.rainyun.com/NjM0NzQ1_) for sponsoring our public servers.
Special thanks to [Langlang Cloud](https://langlang.cloud/) for sponsoring our public servers.
<p align="center">
<a href="https://langlangy.cn/?i26c5a5" target="_blank">
<img src="assets/langlang.png" width="200">
</a>
<a href="https://langlangy.cn/?i26c5a5" target="_blank">
<img src="assets/raincloud.png" width="200">
</a>
</p>
If you find EasyTier helpful, please consider sponsoring us. Software development and maintenance require a lot of time and effort, and your sponsorship will help us better maintain and improve EasyTier.
<p align="center">

View File

@@ -106,9 +106,9 @@ sudo easytier-core -d --network-name abc --network-secret abc -p tcp://public.ea
```text
| ipv4 | hostname | cost | lat_ms | loss_rate | rx_bytes | tx_bytes | tunnel_proto | nat_type | id | version |
| ------------ | -------------- | ----- | ------ | --------- | -------- | -------- | ------------ | -------- | ---------- | --------------- |
| 10.126.126.1 | abc-1 | Local | * | * | * | * | udp | FullCone | 439804259 | 2.4.2-70e69a38~ |
| 10.126.126.2 | abc-2 | p2p | 3.452 | 0 | 17.33 kB | 20.42 kB | udp | FullCone | 390879727 | 2.4.2-70e69a38~ |
| | PublicServer_a | p2p | 27.796 | 0.000 | 50.01 kB | 67.46 kB | tcp | Unknown | 3771642457 | 2.4.2-70e69a38~ |
| 10.126.126.1 | abc-1 | Local | * | * | * | * | udp | FullCone | 439804259 | 2.4.0-70e69a38~ |
| 10.126.126.2 | abc-2 | p2p | 3.452 | 0 | 17.33 kB | 20.42 kB | udp | FullCone | 390879727 | 2.4.0-70e69a38~ |
| | PublicServer_a | p2p | 27.796 | 0.000 | 50.01 kB | 67.46 kB | tcp | Unknown | 3771642457 | 2.4.0-70e69a38~ |
```
您可以测试节点之间的连通性:
@@ -303,15 +303,12 @@ EasyTier 在 [LGPL-3.0](https://github.com/EasyTier/EasyTier/blob/main/LICENSE)
</a>
</p>
特别感谢 [浪浪云](https://langlangy.cn/?i26c5a5) 和 [雨云](https://www.rainyun.com/NjM0NzQ1_) 赞助我们的公共服务器。
特别感谢 [浪浪云](https://langlang.cloud/) 赞助我们的公共服务器。
<p align="center">
<a href="https://langlangy.cn/?i26c5a5" target="_blank">
<img src="assets/langlang.png" width="200">
</a>
<a href="https://langlangy.cn/?i26c5a5" target="_blank">
<img src="assets/raincloud.png" width="200">
</a>
</p>
如果您觉得 EasyTier 有帮助,请考虑赞助我们。软件开发和维护需要大量的时间和精力,您的赞助将帮助我们更好地维护和改进 EasyTier。

Binary file not shown.


View File

@@ -29,10 +29,8 @@ fn set_error_msg(msg: &str) {
msg_buf[..len].copy_from_slice(bytes);
}
/// # Safety
/// Set the tun fd
#[no_mangle]
pub unsafe extern "C" fn set_tun_fd(
pub extern "C" fn set_tun_fd(
inst_name: *const std::ffi::c_char,
fd: std::ffi::c_int,
) -> std::ffi::c_int {
@@ -45,23 +43,18 @@ pub unsafe extern "C" fn set_tun_fd(
if !INSTANCE_NAME_ID_MAP.contains_key(&inst_name) {
return -1;
}
let inst_id = *INSTANCE_NAME_ID_MAP
.get(&inst_name)
.as_ref()
.unwrap()
.value();
match INSTANCE_MANAGER.set_tun_fd(&inst_id, fd) {
Ok(_) => 0,
Err(_) => -1,
match INSTANCE_MANAGER.set_tun_fd(&INSTANCE_NAME_ID_MAP.get(&inst_name).unwrap().value(), fd) {
Ok(_) => {
0
}
Err(_) => {
-1
}
}
}
/// # Safety
/// Get the last error message
#[no_mangle]
pub unsafe extern "C" fn get_error_msg(out: *mut *const std::ffi::c_char) {
pub extern "C" fn get_error_msg(out: *mut *const std::ffi::c_char) {
let msg_buf = ERROR_MSG.lock().unwrap();
if msg_buf.is_empty() {
unsafe {
@@ -85,10 +78,8 @@ pub extern "C" fn free_string(s: *const std::ffi::c_char) {
}
}
/// # Safety
/// Parse the config
#[no_mangle]
pub unsafe extern "C" fn parse_config(cfg_str: *const std::ffi::c_char) -> std::ffi::c_int {
pub extern "C" fn parse_config(cfg_str: *const std::ffi::c_char) -> std::ffi::c_int {
let cfg_str = unsafe {
assert!(!cfg_str.is_null());
std::ffi::CStr::from_ptr(cfg_str)
@@ -104,10 +95,8 @@ pub unsafe extern "C" fn parse_config(cfg_str: *const std::ffi::c_char) -> std::
0
}
/// # Safety
/// Run the network instance
#[no_mangle]
pub unsafe extern "C" fn run_network_instance(cfg_str: *const std::ffi::c_char) -> std::ffi::c_int {
pub extern "C" fn run_network_instance(cfg_str: *const std::ffi::c_char) -> std::ffi::c_int {
let cfg_str = unsafe {
assert!(!cfg_str.is_null());
std::ffi::CStr::from_ptr(cfg_str)
@@ -142,10 +131,8 @@ pub unsafe extern "C" fn run_network_instance(cfg_str: *const std::ffi::c_char)
0
}
/// # Safety
/// Retain the network instance
#[no_mangle]
pub unsafe extern "C" fn retain_network_instance(
pub extern "C" fn retain_network_instance(
inst_names: *const *const std::ffi::c_char,
length: usize,
) -> std::ffi::c_int {
@@ -181,15 +168,13 @@ pub unsafe extern "C" fn retain_network_instance(
return -1;
}
INSTANCE_NAME_ID_MAP.retain(|k, _| inst_names.contains(k));
let _ = INSTANCE_NAME_ID_MAP.retain(|k, _| inst_names.contains(k));
0
}
/// # Safety
/// Collect the network infos
#[no_mangle]
pub unsafe extern "C" fn collect_network_infos(
pub extern "C" fn collect_network_infos(
infos: *mut KeyValuePair,
max_length: usize,
) -> std::ffi::c_int {
@@ -248,10 +233,8 @@ mod tests {
network = "test_network"
"#;
let cstr = std::ffi::CString::new(cfg_str).unwrap();
unsafe {
assert_eq!(parse_config(cstr.as_ptr()), 0);
}
}
#[test]
fn test_run_network_instance() {
@@ -260,8 +243,6 @@ mod tests {
network = "test_network"
"#;
let cstr = std::ffi::CString::new(cfg_str).unwrap();
unsafe {
assert_eq!(run_network_instance(cstr.as_ptr()), 0);
}
}
}
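For context on the hunk above: one side exports these functions as `unsafe extern "C"` with documented `/// # Safety` contracts, the other as plain `extern "C"`. A minimal, self-contained sketch of the exported-FFI pattern involved (illustrative only; the `_sketch` name is hypothetical, and the real functions delegate to the crate's instance manager):

```rust
use std::ffi::{c_char, c_int, CStr};

/// # Safety
/// `cfg_str` must be a valid, NUL-terminated C string that stays alive for the call.
#[no_mangle]
pub unsafe extern "C" fn parse_config_sketch(cfg_str: *const c_char) -> c_int {
    if cfg_str.is_null() {
        return -1; // report failure instead of asserting across the FFI boundary
    }
    // SAFETY: the caller upholds the contract documented above.
    let cfg = unsafe { CStr::from_ptr(cfg_str) };
    match cfg.to_str() {
        Ok(_toml_text) => 0, // a real implementation would parse the TOML config here
        Err(_) => -1,        // non-UTF-8 input is rejected with the usual -1 convention
    }
}
```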

View File

@@ -1,6 +1,6 @@
id=easytier_magisk
name=EasyTier_Magisk
version=v2.4.2
version=v2.4.0
versionCode=1
author=EasyTier
description=easytier magisk module @EasyTier(https://github.com/EasyTier/EasyTier)

View File

@@ -1010,7 +1010,7 @@ dependencies = [
[[package]]
name = "easytier"
version = "2.4.2"
version = "2.4.0"
source = "git+https://github.com/EasyTier/EasyTier.git#a4bb555fac1046d0099c44676fa9d0d8cca55c99"
dependencies = [
"anyhow",

View File

@@ -1,7 +1,7 @@
{
"name": "easytier-gui",
"type": "module",
"version": "2.4.2",
"version": "2.4.0",
"private": true,
"packageManager": "pnpm@9.12.1+sha512.e5a7e52a4183a02d5931057f7a0dbff9d5e9ce3161e33fa68ae392125b79282a8a8a470a51dfc8a0ed86221442eb2fb57019b0990ed24fab519bf0e1bc5ccfc4",
"scripts": {

View File

@@ -1,6 +1,6 @@
[package]
name = "easytier-gui"
version = "2.4.2"
version = "2.4.0"
description = "EasyTier GUI"
authors = ["you"]
edition = "2021"
@@ -40,6 +40,7 @@ chrono = { version = "0.4.37", features = ["serde"] }
once_cell = "1.18.0"
dashmap = "6.0"
elevated-command = "1.1.2"
gethostname = "1.0.2"
dunce = "1.0.4"
@@ -53,15 +54,6 @@ tauri-plugin-os = "2.3.0"
tauri-plugin-autostart = "2.5.0"
uuid = "1.17.0"
[target.'cfg(target_os = "windows")'.dependencies]
windows = { version = "0.52", features = ["Win32_Foundation", "Win32_UI_Shell", "Win32_UI_WindowsAndMessaging"] }
winapi = { version = "0.3.9", features = ["securitybaseapi", "processthreadsapi"] }
[target.'cfg(target_family = "unix")'.dependencies]
libc = "0.2"
[target.'cfg(target_os = "macos")'.dependencies]
security-framework-sys = "2.9.0"
[features]
# This feature is used for production builds or when a dev server is not specified, DO NOT REMOVE!!

View File

@@ -1,10 +1,6 @@
<?xml version="1.0" encoding="utf-8"?>
<manifest xmlns:android="http://schemas.android.com/apk/res/android">
<uses-permission android:name="android.permission.INTERNET" />
<uses-permission android:name="android.permission.FOREGROUND_SERVICE" />
<uses-permission android:name="android.permission.POST_NOTIFICATIONS" />
<uses-permission android:name="android.permission.FOREGROUND_SERVICE_DATA_SYNC" />
<application
android:icon="@mipmap/ic_launcher"
android:label="@string/app_name"
@@ -22,12 +18,6 @@
<category android:name="android.intent.category.LAUNCHER" />
</intent-filter>
</activity>
<service
android:name=".MainForegroundService"
android:foregroundServiceType="dataSync"
android:enabled="true"
android:exported="false">
</service>
<provider
android:name="androidx.core.content.FileProvider"

View File

@@ -1,20 +1,3 @@
package com.kkrainbow.easytier
import android.content.Intent
import android.os.Build
import android.os.Bundle
class MainActivity : TauriActivity() {
override fun onCreate(savedInstanceState: Bundle?) {
super.onCreate(savedInstanceState)
initService()
}
private fun initService() {
val serviceIntent = Intent(this, MainForegroundService::class.java)
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.O) {
startForegroundService(serviceIntent)
} else {
startService(serviceIntent)
}
}
}
class MainActivity : TauriActivity()

View File

@@ -1,64 +0,0 @@
package com.kkrainbow.easytier
import android.app.Notification
import android.app.NotificationChannel
import android.app.NotificationManager
import android.app.Service
import android.content.Intent
import android.content.pm.ServiceInfo
import android.os.Build
import android.os.IBinder
import androidx.core.app.NotificationCompat
import android.util.Log
class MainForegroundService : Service() {
companion object {
const val CHANNEL_ID = "easytier_channel"
const val NOTIFICATION_ID = 1355
// You can add more constants if needed
}
override fun onCreate() {
super.onCreate()
}
override fun onStartCommand(intent: Intent?, flags: Int, startId: Int): Int {
createNotificationChannel()
val notification = NotificationCompat.Builder(this, CHANNEL_ID)
.setContentTitle("easytier Running")
.setContentText("easytier is available on localhost")
.setSmallIcon(android.R.drawable.ic_menu_manage)
.build()
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.Q) {
startForeground(
NOTIFICATION_ID,
notification,
ServiceInfo.FOREGROUND_SERVICE_TYPE_DATA_SYNC
)
} else {
startForeground(NOTIFICATION_ID, notification)
}
return START_STICKY
}
override fun onDestroy() {
super.onDestroy()
}
override fun onBind(intent: Intent?): IBinder? = null
private fun createNotificationChannel() {
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.O) {
try {
val channel = NotificationChannel(
CHANNEL_ID,
"easytier notice",
NotificationManager.IMPORTANCE_DEFAULT
)
val manager = getSystemService(NotificationManager::class.java)
manager?.createNotificationChannel(channel)
} catch (e: Exception) {
Log.e("MainForegroundService", "Failed to create notification channel", e)
}
}
}
}

View File

@@ -1,67 +0,0 @@
/*---------------------------------------------------------------------------------------------
* Copyright (c) Luis Liu. All rights reserved.
* Licensed under the MIT License. See License in the project root for license information.
*--------------------------------------------------------------------------------------------*/
use super::Command;
use anyhow::{anyhow, Result};
use std::env;
use std::ffi::OsStr;
use std::path::PathBuf;
use std::process::{Command as StdCommand, Output};
use std::str::FromStr;
/// The implementation of state check and elevated executing varies on each platform
impl Command {
/// Check the state the current program running
///
/// Return `true` if the program is running as root, otherwise false
pub fn is_elevated() -> bool {
let uid = unsafe { libc::getuid() };
uid == 0
}
/// Prompting the user with a graphical OS dialog for the root password,
/// excuting the command with escalated privileges, and return the output
pub fn output(&self) -> Result<Output> {
let pkexec = PathBuf::from_str("/bin/pkexec")?;
let mut command = StdCommand::new(pkexec);
let display = env::var("DISPLAY");
let xauthority = env::var("XAUTHORITY");
let home = env::var("HOME");
command.arg("--disable-internal-agent");
if display.is_ok() || xauthority.is_ok() || home.is_ok() {
command.arg("env");
if let Ok(display) = display {
command.arg(format!("DISPLAY={}", display));
}
if let Ok(xauthority) = xauthority {
command.arg(format!("XAUTHORITY={}", xauthority));
}
if let Ok(home) = home {
command.arg(format!("HOME={}", home));
}
} else if self.cmd.get_envs().any(|(_, v)| v.is_some()) {
command.arg("env");
}
for (k, v) in self.cmd.get_envs() {
if let Some(value) = v {
command.arg(format!(
"{}={}",
k.to_str().ok_or(anyhow!("invalid key"))?,
value.to_str().ok_or(anyhow!("invalid value"))?
));
}
}
command.arg(self.cmd.get_program());
let args: Vec<&OsStr> = self.cmd.get_args().collect();
if !args.is_empty() {
command.args(args);
}
let output = command.output()?;
Ok(output)
}
}

View File

@@ -1,182 +0,0 @@
/*---------------------------------------------------------------------------------------------
* Copyright (c) Luis Liu. All rights reserved.
* Licensed under the MIT License. See License in the project root for license information.
*--------------------------------------------------------------------------------------------*/
// Thanks to https://github.com/jorangreef/sudo-prompt/blob/master/index.js
// MIT License
//
// Copyright (c) 2015 Joran Dirk Greef
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// ...
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
use super::Command;
use anyhow::Result;
use std::env;
use std::path::PathBuf;
use std::process::{ExitStatus, Output};
use std::ffi::{CString, OsString};
use std::io;
use std::mem;
use std::os::unix::ffi::OsStrExt;
use std::path::Path;
use std::ptr;
use libc::{fcntl, fileno, waitpid, EINTR, F_GETOWN};
use security_framework_sys::authorization::{
errAuthorizationSuccess, kAuthorizationFlagDefaults, kAuthorizationFlagDestroyRights,
AuthorizationCreate, AuthorizationExecuteWithPrivileges, AuthorizationFree, AuthorizationRef,
};
const ENV_PATH: &str = "PATH";
fn get_exe_path<P: AsRef<Path>>(exe_name: P) -> Option<PathBuf> {
let exe_name = exe_name.as_ref();
if exe_name.has_root() {
return Some(exe_name.into());
}
if let Ok(abs_path) = exe_name.canonicalize() {
if abs_path.is_file() {
return Some(abs_path);
}
}
env::var_os(ENV_PATH).and_then(|paths| {
env::split_paths(&paths)
.filter_map(|dir| {
let full_path = dir.join(exe_name);
if full_path.is_file() {
Some(full_path)
} else {
None
}
})
.next()
})
}
macro_rules! make_cstring {
($s:expr) => {
match CString::new($s.as_bytes()) {
Ok(s) => s,
Err(_) => {
return Err(io::Error::new(io::ErrorKind::Other, "null byte in string"));
}
}
};
}
unsafe fn gui_runas(prog: *const i8, argv: *const *const i8) -> i32 {
let mut authref: AuthorizationRef = ptr::null_mut();
let mut pipe: *mut libc::FILE = ptr::null_mut();
if AuthorizationCreate(
ptr::null(),
ptr::null(),
kAuthorizationFlagDefaults,
&mut authref,
) != errAuthorizationSuccess
{
return -1;
}
if AuthorizationExecuteWithPrivileges(
authref,
prog,
kAuthorizationFlagDefaults,
argv as *const *mut _,
&mut pipe,
) != errAuthorizationSuccess
{
AuthorizationFree(authref, kAuthorizationFlagDestroyRights);
return -1;
}
let pid = fcntl(fileno(pipe), F_GETOWN, 0);
let mut status = 0;
loop {
let r = waitpid(pid, &mut status, 0);
if r == -1 && io::Error::last_os_error().raw_os_error() == Some(EINTR) {
continue;
} else {
break;
}
}
AuthorizationFree(authref, kAuthorizationFlagDestroyRights);
status
}
fn runas_root_gui(cmd: &Command) -> io::Result<ExitStatus> {
let exe: OsString = match get_exe_path(&cmd.cmd.get_program()) {
Some(exe) => exe.into(),
None => unsafe {
return Ok(mem::transmute(!0));
},
};
let prog = make_cstring!(exe);
let mut args = vec![];
for arg in cmd.cmd.get_args() {
args.push(make_cstring!(arg))
}
let mut argv: Vec<_> = args.iter().map(|x| x.as_ptr()).collect();
argv.push(ptr::null());
unsafe { Ok(mem::transmute(gui_runas(prog.as_ptr(), argv.as_ptr()))) }
}
/// The implementation of state check and elevated executing varies on each platform
impl Command {
/// Check the state the current program running
///
/// Return `true` if the program is running as root, otherwise false
///
/// # Examples
///
/// ```no_run
/// use elevated_command::Command;
///
/// fn main() {
/// let is_elevated = Command::is_elevated();
///
/// }
/// ```
pub fn is_elevated() -> bool {
let uid = unsafe { libc::getuid() };
let euid = unsafe { libc::geteuid() };
match (uid, euid) {
(0, 0) => true,
(_, 0) => true,
(_, _) => false,
}
}
/// Prompting the user with a graphical OS dialog for the root password,
/// excuting the command with escalated privileges, and return the output
///
/// # Examples
///
/// ```no_run
/// use elevated_command::Command;
/// use std::process::Command as StdCommand;
///
/// fn main() {
/// let mut cmd = StdCommand::new("path to the application");
/// let elevated_cmd = Command::new(cmd);
/// let output = elevated_cmd.output().unwrap();
/// }
/// ```
pub fn output(&self) -> Result<Output> {
let status = runas_root_gui(self)?;
Ok(Output {
status,
stdout: Vec::new(),
stderr: Vec::new(),
})
}
}

View File

@@ -1,101 +0,0 @@
#![allow(dead_code)]
/*---------------------------------------------------------------------------------------------
* Copyright (c) Luis Liu. All rights reserved.
* Licensed under the MIT License. See License in the project root for license information.
*--------------------------------------------------------------------------------------------*/
use std::convert::From;
use std::process::Command as StdCommand;
/// Wrap of std::process::command and escalate privileges while executing
pub struct Command {
cmd: StdCommand,
#[allow(dead_code)]
icon: Option<Vec<u8>>,
#[allow(dead_code)]
name: Option<String>,
}
/// Command initialization shares the same logic across all the platforms
impl Command {
/// Constructs a new `Command` from a std::process::Command
/// instance, it would read the following configuration from
/// the instance while executing:
///
/// * The instance's path to the program
/// * The instance's arguments
/// * The instance's environment variables
///
/// So far, the new `Command` would only take the environment variables explicitly
/// set by std::process::Command::env and std::process::Command::env,
/// without the ones inherited from the parent process
///
/// And the environment variables would only be taken on Linux and MacOS,
/// they would be ignored on Windows
///
/// Current working directory would be the following while executing the command:
/// - %SystemRoot%\System32 on Windows
/// - /root on Linux
/// - $TMPDIR/sudo_prompt_applet/applet.app/Contents/MacOS on MacOS
///
/// To pass environment variables on Windows,
/// to inherit environment variables from the parent process and
/// to change the working directory will be supported in later versions
pub fn new(cmd: StdCommand) -> Self {
Self {
cmd,
icon: None,
name: None,
}
}
/// Consumes the `Take`, returning the wrapped std::process::Command
///
/// # Examples
pub fn into_inner(self) -> StdCommand {
self.cmd
}
/// Gets a mutable reference to the underlying std::process::Command
pub fn get_ref(&self) -> &StdCommand {
&self.cmd
}
/// Gets a reference to the underlying std::process::Command
pub fn get_mut(&mut self) -> &mut StdCommand {
&mut self.cmd
}
/// Set the `icon` for the pop-up graphical OS dialog
pub fn icon(&mut self, icon: Vec<u8>) -> &mut Self {
self.icon = Some(icon);
self
}
/// Set the name for the pop-up graphical OS dialog
///
/// This method is only applicable on `MacOS`
pub fn name(&mut self, name: String) -> &mut Self {
self.name = Some(name);
self
}
}
impl From<StdCommand> for Command {
/// Converts from a std::process::Command
///
/// It is similiar with the construct method
fn from(cmd: StdCommand) -> Self {
Self {
cmd,
icon: None,
name: None,
}
}
}
#[cfg(target_os = "linux")]
mod linux;
#[cfg(target_os = "macos")]
mod macos;
#[cfg(target_os = "windows")]
mod windows;

View File

@@ -1,114 +0,0 @@
/*---------------------------------------------------------------------------------------------
* Copyright (c) Luis Liu. All rights reserved.
* Licensed under the MIT License. See License in the project root for license information.
*--------------------------------------------------------------------------------------------*/
use super::Command;
use anyhow::Result;
use std::mem;
use std::os::windows::process::ExitStatusExt;
use std::process::{ExitStatus, Output};
use winapi::shared::minwindef::{DWORD, LPVOID};
use winapi::um::processthreadsapi::{GetCurrentProcess, OpenProcessToken};
use winapi::um::securitybaseapi::GetTokenInformation;
use winapi::um::winnt::{TokenElevation, HANDLE, TOKEN_ELEVATION, TOKEN_QUERY};
use windows::core::{w, HSTRING, PCWSTR};
use windows::Win32::Foundation::HWND;
use windows::Win32::UI::Shell::ShellExecuteW;
use windows::Win32::UI::WindowsAndMessaging::SW_HIDE;
/// The implementation of state check and elevated executing varies on each platform
impl Command {
/// Check the state the current program running
///
/// Return `true` if the program is running as root, otherwise false
///
/// # Examples
///
/// ```no_run
/// use elevated_command::Command;
///
/// fn main() {
/// let is_elevated = Command::is_elevated();
///
/// }
/// ```
pub fn is_elevated() -> bool {
// Thanks to https://stackoverflow.com/a/8196291
unsafe {
let mut current_token_ptr: HANDLE = mem::zeroed();
let mut token_elevation: TOKEN_ELEVATION = mem::zeroed();
let token_elevation_type_ptr: *mut TOKEN_ELEVATION = &mut token_elevation;
let mut size: DWORD = 0;
let result = OpenProcessToken(GetCurrentProcess(), TOKEN_QUERY, &mut current_token_ptr);
if result != 0 {
let result = GetTokenInformation(
current_token_ptr,
TokenElevation,
token_elevation_type_ptr as LPVOID,
mem::size_of::<winapi::um::winnt::TOKEN_ELEVATION_TYPE>() as u32,
&mut size,
);
if result != 0 {
return token_elevation.TokenIsElevated != 0;
}
}
}
false
}
/// Prompting the user with a graphical OS dialog for the root password,
/// excuting the command with escalated privileges, and return the output
///
/// On Windows, according to https://learn.microsoft.com/en-us/windows/win32/api/shellapi/nf-shellapi-shellexecutew#return-value,
/// Output.status.code() shoudl be greater than 32 if the function succeeds,
/// otherwise the value indicates the cause of the failure
///
/// On Windows, Output.stdout and Output.stderr will always be empty as of now
///
/// # Examples
///
/// ```no_run
/// use elevated_command::Command;
/// use std::process::Command as StdCommand;
///
/// fn main() {
/// let mut cmd = StdCommand::new("path to the application");
/// let elevated_cmd = Command::new(cmd);
/// let output = elevated_cmd.output().unwrap();
/// }
/// ```
pub fn output(&self) -> Result<Output> {
let args = self
.cmd
.get_args()
.map(|c| c.to_str().unwrap().to_string())
.collect::<Vec<String>>();
let parameters = if args.is_empty() {
HSTRING::new()
} else {
let arg_str = args.join(" ");
HSTRING::from(arg_str)
};
// according to https://stackoverflow.com/a/38034535
// the cwd always point to %SystemRoot%\System32 and cannot be changed by settting lpdirectory param
let r = unsafe {
ShellExecuteW(
HWND(0),
w!("runas"),
&HSTRING::from(self.cmd.get_program()),
&HSTRING::from(parameters),
PCWSTR::null(),
SW_HIDE,
)
};
Ok(Output {
status: ExitStatus::from_raw(r.0 as u32),
stdout: Vec::<u8>::new(),
stderr: Vec::<u8>::new(),
})
}
}

View File

@@ -1,8 +1,6 @@
// Prevents additional console window on Windows in release, DO NOT REMOVE!!
#![cfg_attr(not(debug_assertions), windows_subsystem = "windows")]
mod elevate;
use std::collections::BTreeMap;
use easytier::{
@@ -130,7 +128,7 @@ fn toggle_window_visibility<R: tauri::Runtime>(app: &tauri::AppHandle<R>) {
#[cfg(not(target_os = "android"))]
fn check_sudo() -> bool {
let is_elevated = elevate::Command::is_elevated();
let is_elevated = elevated_command::Command::is_elevated();
if !is_elevated {
let exe_path = std::env::var("APPIMAGE")
.ok()
@@ -141,7 +139,7 @@ fn check_sudo() -> bool {
if args.contains(&AUTOSTART_ARG.to_owned()) {
stdcmd.arg(AUTOSTART_ARG);
}
elevate::Command::new(stdcmd)
elevated_command::Command::new(stdcmd)
.output()
.expect("Failed to run elevated command");
}

View File

@@ -17,7 +17,7 @@
"createUpdaterArtifacts": false
},
"productName": "easytier-gui",
"version": "2.4.2",
"version": "2.4.0",
"identifier": "com.kkrainbow.easytier",
"plugins": {},
"app": {

View File

@@ -115,11 +115,6 @@ function getRoutesForVpn(routes: Route[]): string[] {
async function onNetworkInstanceChange() {
console.error('vpn service watch network instance change ids', JSON.stringify(networkStore.networkInstanceIds))
const insts = networkStore.networkInstanceIds
const no_tun = networkStore.isNoTunEnabled(insts[0])
if (no_tun) {
await doStopVpn()
return
}
if (!insts) {
await doStopVpn()
return
@@ -137,6 +132,14 @@ async function onNetworkInstanceChange() {
return
}
// if use no tun mode, stop the vpn service
const no_tun = networkStore.isNoTunEnabled(insts[0])
if (no_tun) {
console.error('no tun mode, stop vpn service')
await doStopVpn()
return
}
let network_length = curNetworkInfo?.my_node_info?.virtual_ipv4.network_length
if (!network_length) {
network_length = 24
@@ -184,26 +187,12 @@ async function watchNetworkInstance() {
console.error('vpn service watch network instance')
}
function isNoTunEnabled(instanceId: string | undefined) {
if (!instanceId) {
return false
}
const no_tun = networkStore.isNoTunEnabled(instanceId)
if (no_tun) {
return true
}
return false
}
export async function initMobileVpnService() {
await registerVpnServiceListener()
await watchNetworkInstance()
}
export async function prepareVpnService(instanceId: string) {
if (isNoTunEnabled(instanceId)) {
return
}
export async function prepareVpnService() {
console.log('prepare vpn')
const prepare_ret = await prepare_vpn()
console.log('prepare vpn', JSON.stringify((prepare_ret)))

View File

@@ -102,7 +102,7 @@ networkStore.$subscribe(async () => {
async function runNetworkCb(cfg: NetworkTypes.NetworkConfig, cb: () => void) {
if (type() === 'android') {
await prepareVpnService(cfg.instance_id)
await prepareVpnService()
networkStore.clearNetworkInstances()
}
else {

View File

@@ -8,7 +8,7 @@ repository = "https://github.com/EasyTier/EasyTier"
authors = ["kkrainbow"]
keywords = ["vpn", "p2p", "network", "easytier"]
categories = ["network-programming", "command-line-utilities"]
rust-version = "1.89.0"
rust-version = "1.87.0"
license-file = "LICENSE"
readme = "README.md"

View File

@@ -14,11 +14,18 @@ const NAMESPACE: &str = "easytier::proto::rpc_types";
///
/// See the crate-level documentation for more info.
#[allow(missing_copy_implementations)]
#[derive(Clone, Debug, Default)]
#[derive(Clone, Debug)]
pub struct ServiceGenerator {
_private: (),
}
impl ServiceGenerator {
/// Create a new `ServiceGenerator` instance with the default options set.
pub fn new() -> ServiceGenerator {
ServiceGenerator { _private: () }
}
}
impl prost_build::ServiceGenerator for ServiceGenerator {
fn generate(&mut self, service: prost_build::Service, mut buf: &mut String) {
use std::fmt::Write;
@@ -71,7 +78,7 @@ impl prost_build::ServiceGenerator for ServiceGenerator {
enum_methods,
" {name} = {index},",
name = method.proto_name,
index = idx + 1
index = format!("{}", idx + 1)
)
.unwrap();
@@ -80,7 +87,7 @@ impl prost_build::ServiceGenerator for ServiceGenerator {
" {index} => Ok({service_name}MethodDescriptor::{name}),",
service_name = service.name,
name = method.proto_name,
index = idx + 1,
index = format!("{}", idx + 1),
)
.unwrap();
@@ -95,12 +102,12 @@ impl prost_build::ServiceGenerator for ServiceGenerator {
writeln!(
client_methods,
r#" async fn {name}(&self, ctrl: H::Controller, input: {input_type}) -> {namespace}::error::Result<{output_type}> {{
{client_name}Client::{name}_inner(self.0.clone(), ctrl, input).await
{client_name}::{name}_inner(self.0.clone(), ctrl, input).await
}}"#,
name = method.name,
input_type = method.input_type,
output_type = method.output_type,
client_name = service.name,
client_name = format!("{}Client", service.name),
namespace = NAMESPACE,
)
.unwrap();
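The hunk above also touches how the generated `writeln!` calls pass their arguments: named format arguments accept arbitrary expressions, so pre-rendering a value with an inner `format!("{}", ...)` is redundant. A generic sketch (the `"HelloWorld"` name is made up; the real code uses `method.proto_name`):

```rust
use std::fmt::Write;

fn main() {
    let mut enum_methods = String::new();
    let idx = 0;
    // The expression `idx + 1` can be bound directly to the named argument.
    writeln!(enum_methods, "    {name} = {index},", name = "HelloWorld", index = idx + 1).unwrap();
    assert_eq!(enum_methods, "    HelloWorld = 1,\n");
}
```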

View File

@@ -1,6 +1,6 @@
[package]
name = "easytier-web"
version = "2.4.2"
version = "2.4.0"
edition = "2021"
description = "Config server for easytier. easytier-core gets config from this and web frontend use it as restful api server."

View File

@@ -1,10 +1,7 @@
fn main() {
// enable thunk-rs when target os is windows and arch is x86_64 or i686
#[cfg(target_os = "windows")]
if !std::env::var("TARGET")
.unwrap_or_default()
.contains("aarch64")
{
if !std::env::var("TARGET").unwrap_or_default().contains("aarch64"){
thunk::thunk();
}
}

View File

@@ -2,13 +2,7 @@
import InputGroup from 'primevue/inputgroup'
import InputGroupAddon from 'primevue/inputgroupaddon'
import { SelectButton, Checkbox, InputText, InputNumber, AutoComplete, Panel, Divider, ToggleButton, Button, Password } from 'primevue'
import {
addRow,
DEFAULT_NETWORK_CONFIG,
NetworkConfig,
NetworkingMethod,
removeRow
} from '../types/network'
import { DEFAULT_NETWORK_CONFIG, NetworkConfig, NetworkingMethod } from '../types/network'
import { defineProps, defineEmits, ref, } from 'vue'
import { useI18n } from 'vue-i18n'
@@ -169,8 +163,6 @@ const bool_flags: BoolFlag[] = [
{ field: 'enable_private_mode', help: 'enable_private_mode_help' },
]
const portForwardProtocolOptions = ref(["tcp","udp"]);
</script>
<template>
@@ -424,73 +416,6 @@ const portForwardProtocolOptions = ref(["tcp","udp"]);
</div>
</Panel>
<Divider />
<Panel :header="t('port_forwards')" toggleable collapsed>
<div class="flex flex-col gap-y-2">
<div class="flex flex-row gap-x-9 flex-wrap w-full">
<div class="flex flex-col gap-2 grow p-fluid">
<div class="flex">
<label for="port_forwards">{{ t('port_forwards_help') }}</label>
</div>
<div v-for="(row, index) in curNetwork.port_forwards" class="form-row">
<div style="display: flex; gap: 0.5rem; align-items: flex-end;">
<SelectButton v-model="row.proto" :options="portForwardProtocolOptions" :allow-empty="false"/>
<div style="flex-grow: 4;">
<InputGroup>
<InputText
v-model="row.bind_ip"
:placeholder="t('port_forwards_bind_addr')"
/>
<InputGroupAddon>
<span style="font-weight: bold">:</span>
</InputGroupAddon>
<InputNumber v-model="row.bind_port" :format="false"
inputId="horizontal-buttons" :step="1" mode="decimal" :min="1"
:max="65535" fluid
class="max-w-20"/>
</InputGroup>
</div>
<div style="flex-grow: 4;">
<InputGroup>
<InputText
v-model="row.dst_ip"
:placeholder="t('port_forwards_dst_addr')"
/>
<InputGroupAddon>
<span style="font-weight: bold">:</span>
</InputGroupAddon>
<InputNumber v-model="row.dst_port" :format="false"
inputId="horizontal-buttons" :step="1" mode="decimal" :min="1"
:max="65535" fluid
class="max-w-20"/>
</InputGroup>
</div>
<div style="flex-grow: 1;">
<Button
v-if="curNetwork.port_forwards.length > 0"
icon="pi pi-trash"
severity="danger"
text
rounded
@click="removeRow(index,curNetwork.port_forwards)"
/>
</div>
</div>
</div>
<div class="flex justify-content-end mt-4">
<Button
icon="pi pi-plus"
:label="t('port_forwards_add_btn')"
severity="success"
@click="addRow(curNetwork.port_forwards)"
/>
</div>
</div>
</div>
</div>
</Panel>
<div class="flex pt-6 justify-center">
<Button :label="t('run_network')" icon="pi pi-arrow-right" icon-pos="right" :disabled="configInvalid"
@click="$emit('runNetwork', curNetwork)" />

View File

@@ -151,12 +151,6 @@ socks5_help: |
exit_nodes: 出口节点列表
exit_nodes_help: 转发所有流量的出口节点，虚拟IPv4地址，优先级由列表顺序决定
port_forwards: 端口转发
port_forwards_help: "将本地端口转发到虚拟网络中的远程端口。例如：udp://0.0.0.0:12345/10.126.126.1:23456，表示将本地UDP端口12345转发到虚拟网络中的10.126.126.1:23456。可以指定多个。"
port_forwards_bind_addr: "绑定地址0.0.0.0"
port_forwards_dst_addr: "目标地址10.126.126.1"
port_forwards_add_btn: "添加"
mtu: MTU
mtu_help: |
TUN设备的MTU，默认为非加密时为1380，加密时为1360。范围400-1380

View File

@@ -151,12 +151,6 @@ socks5_help: |
exit_nodes: Exit Nodes
exit_nodes_help: Exit nodes to forward all traffic to, a virtual ipv4 address, priority is determined by the order of the list
port_forwards: Port Forward
port_forwards_help: "forward local port to remote port in virtual network. e.g.: udp://0.0.0.0:12345/10.126.126.1:23456, means forward local udp port 12345 to 10.126.126.1:23456 in the virtual network. can specify multiple."
port_forwards_bind_addr: "Bind address, e.g.: 0.0.0.0"
port_forwards_dst_addr: "Destination address, e.g.: 10.126.126.1"
port_forwards_add_btn: "Add"
mtu: MTU
mtu_help: |
MTU of the TUN device, default is 1380 for non-encryption, 1360 for encryption. Range:400-1380

View File

@@ -70,8 +70,6 @@ export interface NetworkConfig {
enable_private_mode?: boolean
rpc_portal_whitelists: string[]
port_forwards: PortForwardConfig[]
}
export function DEFAULT_NETWORK_CONFIG(): NetworkConfig {
@@ -134,7 +132,6 @@ export function DEFAULT_NETWORK_CONFIG(): NetworkConfig {
enable_magic_dns: false,
enable_private_mode: false,
rpc_portal_whitelists: [],
port_forwards: [],
}
}
@@ -258,30 +255,6 @@ export interface PeerConnStats {
latency_us: number
}
export interface PortForwardConfig {
bind_ip: string,
bind_port: number,
dst_ip: string,
dst_port: number,
proto: string
}
// 添加新行
export const addRow = (rows: PortForwardConfig[]) => {
rows.push({
proto: 'tcp',
bind_ip: '',
bind_port: 65535,
dst_ip: '',
dst_port: 65535,
});
};
// 删除行
export const removeRow = (index: number, rows: PortForwardConfig[]) => {
rows.splice(index, 1);
};
export enum EventType {
TunDeviceReady = 'TunDeviceReady', // string
TunDeviceError = 'TunDeviceError', // string

View File

@@ -25,7 +25,7 @@ fn load_geoip_db(geoip_db: Option<String>) -> Option<maxminddb::Reader<Vec<u8>>>
match maxminddb::Reader::open_readfile(&path) {
Ok(reader) => {
tracing::info!("Successfully loaded GeoIP2 database from {}", path);
Some(reader)
return Some(reader);
}
Err(err) => {
tracing::debug!("Failed to load GeoIP2 database from {}: {}", path, err);
@@ -207,8 +207,10 @@ impl ClientManager {
let region = city.subdivisions.map(|r| {
r.iter()
.filter_map(|x| x.names.as_ref())
.filter_map(|x| x.get("zh-CN").or_else(|| x.get("en")))
.map(|x| x.names.as_ref())
.flatten()
.map(|x| x.get("zh-CN").or_else(|| x.get("en")))
.flatten()
.map(|x| x.to_string())
.collect::<Vec<_>>()
.join(",")
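The rewrite in this hunk replaces chained `map(...).flatten()` calls with `filter_map`, which is equivalent whenever the closure returns an `Option`. A generic illustration with made-up data (not the GeoIP types):

```rust
fn main() {
    // `filter_map(f)` == `map(f).flatten()` when `f` returns an Option.
    let names = vec![Some("zh-CN"), None, Some("en")];
    let joined = names
        .iter()
        .filter_map(|x| x.as_ref()) // drops the None entries
        .map(|x| x.to_string())
        .collect::<Vec<_>>()
        .join(",");
    assert_eq!(joined, "zh-CN,en");
}
```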

View File

@@ -94,7 +94,11 @@ impl SessionRpcService {
return Ok(HeartbeatResponse {});
};
let machine_id: uuid::Uuid = req.machine_id.map(Into::into).ok_or(anyhow::anyhow!(
let machine_id: uuid::Uuid =
req.machine_id
.clone()
.map(Into::into)
.ok_or(anyhow::anyhow!(
"Machine id is not set correctly, expect uuid but got: {:?}",
req.machine_id
))?;
@@ -117,7 +121,7 @@ impl SessionRpcService {
if data.req.replace(req.clone()).is_none() {
assert!(data.storage_token.is_none());
data.storage_token = Some(StorageToken {
token: req.user_token.clone(),
token: req.user_token.clone().into(),
client_url: data.client_url.clone(),
machine_id,
user_id,

View File

@@ -34,7 +34,7 @@ impl TryFrom<WeakRefStorage> for Storage {
type Error = ();
fn try_from(weak: Weak<StorageInner>) -> Result<Self, Self::Error> {
weak.upgrade().map(Storage).ok_or(())
weak.upgrade().map(|inner| Storage(inner)).ok_or(())
}
}
@@ -51,7 +51,9 @@ impl Storage {
machine_id: &uuid::Uuid,
client_url: &url::Url,
) {
map.remove_if(machine_id, |_, v| v.storage_token.client_url == *client_url);
map.remove_if(&machine_id, |_, v| {
v.storage_token.client_url == *client_url
});
}
fn update_mid_to_client_info_map(
@@ -72,7 +74,11 @@ impl Storage {
}
pub fn update_client(&self, stoken: StorageToken, report_time: i64) {
let inner = self.0.user_clients_map.entry(stoken.user_id).or_default();
let inner = self
.0
.user_clients_map
.entry(stoken.user_id)
.or_insert_with(DashMap::new);
let client_info = ClientInfo {
storage_token: stoken.clone(),
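Two small idioms from this file's hunks, shown generically (a sketch; `Wrapper` and the map contents are made-up names): a tuple-struct name is itself a function, so `.map(Storage)` equals `.map(|inner| Storage(inner))`, and an entry's `or_default()` is shorthand for `or_insert_with(Default::default)`.

```rust
use std::collections::HashMap;

struct Wrapper(u32);

fn main() {
    // A tuple-struct constructor can be passed directly to `map`.
    let value: Option<u32> = Some(7);
    let wrapped: Option<Wrapper> = value.map(Wrapper);
    assert_eq!(wrapped.map(|w| w.0), Some(7));

    // `or_default()` inserts the type's default value if the key is missing;
    // the hunk applies the same idea to a DashMap entry.
    let mut map: HashMap<&str, Vec<u32>> = HashMap::new();
    map.entry("clients").or_default().push(1);
    assert_eq!(map["clients"], vec![1]);
}
```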

View File

@@ -151,7 +151,7 @@ async fn get_dual_stack_listener(
} else {
None
};
let v4_listener = if local_ipv4().await.is_ok() {
let v4_listener = if let Ok(_) = local_ipv4().await {
get_listener_by_url(&format!("{}://0.0.0.0:{}", protocol, port).parse().unwrap()).ok()
} else {
None

View File

@@ -137,7 +137,7 @@ mod post {
mod get {
use crate::restful::{
captcha::{
builder::spec::SpecCaptcha,
captcha::spec::SpecCaptcha,
extension::{axum_tower_sessions::CaptchaAxumTowerSessionExt as _, CaptchaUtil},
NewCaptcha as _,
},

View File

@@ -46,22 +46,22 @@ pub(crate) struct Captcha {
/// 验证码文本类型 The character type of the captcha
pub enum CaptchaType {
/// 字母数字混合
Default = 1,
TypeDefault = 1,
/// 纯数字
OnlyNumber,
TypeOnlyNumber,
/// 纯字母
OnlyChar,
TypeOnlyChar,
/// 纯大写字母
OnlyUpper,
TypeOnlyUpper,
/// 纯小写字母
OnlyLower,
TypeOnlyLower,
/// 数字大写字母
NumAndUpper,
TypeNumAndUpper,
}
/// 内置字体 Fonts shipped with the library
@@ -92,29 +92,29 @@ impl Captcha {
/// 生成随机验证码
pub fn alphas(&mut self) -> Vec<char> {
let mut cs = vec!['\0'; self.len];
for cs_i in cs.iter_mut() {
for i in 0..self.len {
match self.char_type {
CaptchaType::Default => *cs_i = self.randoms.alpha(),
CaptchaType::OnlyNumber => {
*cs_i = self.randoms.alpha_under(self.randoms.num_max_index)
CaptchaType::TypeDefault => cs[i] = self.randoms.alpha(),
CaptchaType::TypeOnlyNumber => {
cs[i] = self.randoms.alpha_under(self.randoms.num_max_index)
}
CaptchaType::OnlyChar => {
*cs_i = self
CaptchaType::TypeOnlyChar => {
cs[i] = self
.randoms
.alpha_between(self.randoms.char_min_index, self.randoms.char_max_index)
}
CaptchaType::OnlyUpper => {
*cs_i = self
CaptchaType::TypeOnlyUpper => {
cs[i] = self
.randoms
.alpha_between(self.randoms.upper_min_index, self.randoms.upper_max_index)
}
CaptchaType::OnlyLower => {
*cs_i = self
CaptchaType::TypeOnlyLower => {
cs[i] = self
.randoms
.alpha_between(self.randoms.lower_min_index, self.randoms.lower_max_index)
}
CaptchaType::NumAndUpper => {
*cs_i = self.randoms.alpha_under(self.randoms.upper_max_index)
CaptchaType::TypeNumAndUpper => {
cs[i] = self.randoms.alpha_under(self.randoms.upper_max_index)
}
}
}
@@ -142,7 +142,7 @@ impl Captcha {
}
}
pub fn get_font(&'_ mut self) -> Arc<Font<'_>> {
pub fn get_font(&mut self) -> Arc<Font> {
if let Some(font) = font::get_font(&self.font_name) {
font
} else {
@@ -185,7 +185,6 @@ where
/// 特别地/In particular:
///
/// - 对算术验证码[ArithmeticCaptcha](crate::captcha::arithmetic::ArithmeticCaptcha)而言,这里的`len`是验证码中数字的数量。
///
/// For [ArithmeticCaptcha](crate::captcha::arithmetic::ArithmeticCaptcha), the `len` presents the count of the digits
/// in the Captcha.
fn with_size_and_len(width: i32, height: i32, len: usize) -> Self;
@@ -227,7 +226,7 @@ impl NewCaptcha for Captcha {
let len = 5;
let width = 130;
let height = 48;
let char_type = CaptchaType::Default;
let char_type = CaptchaType::TypeDefault;
let chars = None;
Self {

View File

@@ -1,4 +1,6 @@
use rand::random;
use rand::{random};
/// 随机数工具类
pub(crate) struct Randoms {

View File

@@ -10,7 +10,7 @@ use axum::response::Response;
use std::fmt::Debug;
use tower_sessions::Session;
const CAPTCHA_KEY: &str = "ez-captcha";
const CAPTCHA_KEY: &'static str = "ez-captcha";
/// Axum & Tower_Sessions
#[async_trait]
@@ -32,7 +32,7 @@ pub trait CaptchaAxumTowerSessionStaticExt {
/// Verify the Captcha code, and return whether user's code is correct.
async fn ver(code: &str, session: &Session) -> bool {
match session.get::<String>(CAPTCHA_KEY).await {
Ok(Some(ans)) => ans.eq_ignore_ascii_case(code),
Ok(Some(ans)) => ans.to_ascii_lowercase() == code.to_ascii_lowercase(),
_ => false,
}
}

View File

@@ -1,7 +1,7 @@
pub mod axum_tower_sessions;
use super::base::captcha::AbstractCaptcha;
use super::builder::spec::SpecCaptcha;
use super::captcha::spec::SpecCaptcha;
use super::{CaptchaFont, NewCaptcha};
/// 验证码工具类 - Captcha Utils

View File

@@ -117,7 +117,7 @@
#![allow(dead_code)]
pub(crate) mod base;
pub mod builder;
pub mod captcha;
pub mod extension;
mod utils;

View File

@@ -32,24 +32,21 @@ impl From<(u8, u8, u8)> for Color {
}
}
impl From<Color> for (u8, u8, u8, u8) {
fn from(val: Color) -> Self {
impl Into<(u8, u8, u8, u8)> for Color {
fn into(self) -> (u8, u8, u8, u8) {
(
(val.0 * 255.0) as u8,
(val.1 * 255.0) as u8,
(val.2 * 255.0) as u8,
(val.3 * 255.0) as u8,
(self.0 * 255.0) as u8,
(self.1 * 255.0) as u8,
(self.2 * 255.0) as u8,
(self.3 * 255.0) as u8,
)
}
}
impl From<Color> for u32 {
fn from(val: Color) -> Self {
let color: (u8, u8, u8, u8) = val.into();
(color.0 as u32)
<< (24 + (color.1 as u32))
<< (16 + (color.2 as u32))
<< (8 + (color.3 as u32))
impl Into<u32> for Color {
fn into(self) -> u32 {
let color: (u8, u8, u8, u8) = self.into();
(color.0 as u32) << 24 + (color.1 as u32) << 16 + (color.2 as u32) << 8 + (color.3 as u32)
}
}
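A note on the expression in this hunk: in Rust, `+` binds tighter than `<<`, so `a << 24 + b` parses as `a << (24 + b)`. For comparison, a conventional RGBA-to-u32 packing combines shifted channels with bitwise OR and explicit parentheses. This is a generic sketch, not the crate's code, and it assumes `Color` is a four-float tuple struct as the conversions above suggest:

```rust
struct Color(f32, f32, f32, f32);

fn pack_rgba(c: &Color) -> u32 {
    // Convert each 0.0..=1.0 channel to 0..=255, then pack as 0xRRGGBBAA.
    let (r, g, b, a) = (
        (c.0 * 255.0) as u32,
        (c.1 * 255.0) as u32,
        (c.2 * 255.0) as u32,
        (c.3 * 255.0) as u32,
    );
    (r << 24) | (g << 16) | (b << 8) | a
}

fn main() {
    assert_eq!(pack_rgba(&Color(1.0, 0.0, 0.0, 1.0)), 0xFF00_00FF);
}
```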

View File

@@ -11,7 +11,7 @@ struct FontAssets;
// pub(crate) static ref FONTS: RwLock<HashMap<String, Arc<Font>>> = Default::default();
// }
pub fn get_font(font_name: &'_ str) -> Option<Arc<Font<'_>>> {
pub fn get_font(font_name: &str) -> Option<Arc<Font>> {
// let fonts_cell = FONTS.get_or_init(|| Default::default());
// let guard = fonts_cell.read();
//
@@ -31,7 +31,7 @@ pub fn get_font(font_name: &'_ str) -> Option<Arc<Font<'_>>> {
// }
}
pub fn load_font(font_name: &'_ str) -> Result<Option<Font<'_>>, Box<dyn Error>> {
pub fn load_font(font_name: &str) -> Result<Option<Font>, Box<dyn Error>> {
match FontAssets::get(font_name) {
Some(assets) => {
let font = Font::try_from_vec(Vec::from(assets.data)).unwrap();

View File

@@ -143,7 +143,7 @@ impl RestfulServer {
return Err((StatusCode::UNAUTHORIZED, other_error("No such user").into()));
};
let machines = client_mgr.list_machine_by_user_id(user.id()).await;
let machines = client_mgr.list_machine_by_user_id(user.id().clone()).await;
Ok(GetSummaryJsonResp {
device_count: machines.len() as u32,

View File

@@ -8,7 +8,7 @@ use axum_login::AuthUser;
use easytier::launcher::NetworkConfig;
use easytier::proto::common::Void;
use easytier::proto::rpc_types::controller::BaseController;
use easytier::proto::{self, web::*};
use easytier::proto::web::*;
use crate::client_manager::session::{Location, Session};
use crate::client_manager::ClientManager;
@@ -85,7 +85,7 @@ impl NetworkApi {
let Some(user_id) = auth_session.user.as_ref().map(|x| x.id()) else {
return Err((
StatusCode::UNAUTHORIZED,
other_error("No user id found".to_string()).into(),
other_error(format!("No user id found")).into(),
));
};
Ok(user_id)
@@ -108,7 +108,7 @@ impl NetworkApi {
let Some(token) = result.get_token().await else {
return Err((
StatusCode::UNAUTHORIZED,
other_error("No token reported".to_string()).into(),
other_error(format!("No token reported")).into(),
));
};
@@ -120,7 +120,7 @@ impl NetworkApi {
{
return Err((
StatusCode::FORBIDDEN,
other_error("Token mismatch".to_string()).into(),
other_error(format!("Token mismatch")).into(),
));
}
@@ -177,7 +177,7 @@ impl NetworkApi {
.insert_or_update_user_network_config(
auth_session.user.as_ref().unwrap().id(),
machine_id,
resp.inst_id.unwrap_or_default().into(),
resp.inst_id.clone().unwrap_or_default().into(),
serde_json::to_string(&config).unwrap(),
)
.await
@@ -248,7 +248,7 @@ impl NetworkApi {
.await
.map_err(convert_rpc_error)?;
let running_inst_ids = ret.inst_ids.clone().into_iter().collect();
let running_inst_ids = ret.inst_ids.clone().into_iter().map(Into::into).collect();
// collect networks that are disabled
let disabled_inst_ids = client_mgr
@@ -261,7 +261,7 @@ impl NetworkApi {
.await
.map_err(convert_db_error)?
.iter()
.map(|x| Into::<proto::common::Uuid>::into(x.network_instance_id.clone()))
.filter_map(|x| x.network_instance_id.clone().try_into().ok())
.collect::<Vec<_>>();
Ok(ListNetworkInstanceIdsJsonResp {
@@ -330,8 +330,9 @@ impl NetworkApi {
// not implement disable all
return Err((
StatusCode::NOT_IMPLEMENTED,
other_error("Not implemented".to_string()).into(),
));
other_error(format!("Not implemented")).into(),
))
.into();
};
let sess = Self::get_session_by_machine_id(&auth_session, &client_mgr, &machine_id).await?;

View File

@@ -76,32 +76,32 @@ impl Backend {
pub async fn register_new_user(&self, new_user: &RegisterNewUser) -> anyhow::Result<()> {
let hashed_password = password_auth::generate_hash(new_user.credentials.password.as_str());
let txn = self.db.orm_db().begin().await?;
let mut txn = self.db.orm_db().begin().await?;
entity::users::ActiveModel {
username: Set(new_user.credentials.username.clone()),
password: Set(hashed_password.clone()),
..Default::default()
}
.save(&txn)
.save(&mut txn)
.await?;
entity::users_groups::ActiveModel {
user_id: Set(entity::users::Entity::find()
.filter(entity::users::Column::Username.eq(new_user.credentials.username.as_str()))
.one(&txn)
.one(&mut txn)
.await?
.unwrap()
.id),
group_id: Set(entity::groups::Entity::find()
.filter(entity::groups::Column::Name.eq("users"))
.one(&txn)
.one(&mut txn)
.await?
.unwrap()
.id),
..Default::default()
}
.save(&txn)
.save(&mut txn)
.await?;
txn.commit().await?;

View File

@@ -52,7 +52,9 @@ pub fn build_router(api_host: Option<url::Url>) -> Router {
router
};
router.fallback_service(service)
let router = router.fallback_service(service);
router
}
pub struct WebServer {

View File

@@ -3,12 +3,12 @@ name = "easytier"
description = "A full meshed p2p VPN, connecting all your devices in one network with one command."
homepage = "https://github.com/EasyTier/EasyTier"
repository = "https://github.com/EasyTier/EasyTier"
version = "2.4.2"
version = "2.4.0"
edition = "2021"
authors = ["kkrainbow"]
keywords = ["vpn", "p2p", "network", "easytier"]
categories = ["network-programming", "command-line-utilities"]
rust-version = "1.89.0"
rust-version = "1.87.0"
license-file = "LICENSE"
readme = "README.md"
@@ -115,7 +115,7 @@ byteorder = "1.5.0"
# for proxy
cidr = { version = "0.2.2", features = ["serde"] }
socket2 = { version = "0.5.10", features = ["all"] }
socket2 = "0.5.5"
# for hole punching
stun_codec = "0.3.4"
@@ -150,7 +150,6 @@ boringtun = { package = "boringtun-easytier", version = "0.6.1", optional = true
ring = { version = "0.17", optional = true }
bitflags = "2.5"
aes-gcm = { version = "0.10.3", optional = true }
openssl = { version = "0.10", optional = true, features = ["vendored"] }
# for cli
tabled = "0.16"
@@ -250,9 +249,7 @@ windows-sys = { version = "0.52", features = [
winapi = { version = "0.3.9", features = ["impl-default"] }
[target.'cfg(not(windows))'.dependencies]
jemallocator = { package = "tikv-jemallocator", version = "0.6.0", optional = true, features = [
"unprefixed_malloc_on_supported_platforms"
] }
jemallocator = { package = "tikv-jemallocator", version = "0.6.0", optional = true }
jemalloc-ctl = { package = "tikv-jemalloc-ctl", version = "0.6.0", optional = true, features = [
] }
jemalloc-sys = { package = "tikv-jemalloc-sys", version = "0.6.0", features = [
@@ -299,7 +296,6 @@ full = [
"websocket",
"wireguard",
"aes-gcm",
"openssl-crypto", # need openssl-dev libs
"smoltcp",
"tun",
"socks5",
@@ -308,7 +304,6 @@ wireguard = ["dep:boringtun", "dep:ring"]
quic = ["dep:quinn", "dep:rustls", "dep:rcgen"]
mimalloc = ["dep:mimalloc"]
aes-gcm = ["dep:aes-gcm"]
openssl-crypto = ["dep:openssl"]
tun = ["dep:tun"]
websocket = [
"dep:tokio-websockets",

View File

@@ -116,7 +116,7 @@ fn check_locale() {
if let Ok(globs) = globwalk::glob(locale_path) {
for entry in globs {
if let Err(e) = entry {
println!("cargo:i18n-error={e}");
println!("cargo:i18n-error={}", e);
continue;
}
@@ -151,7 +151,7 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
];
for proto_file in proto_files.iter().chain(proto_files_reflect.iter()) {
println!("cargo:rerun-if-changed={proto_file}");
println!("cargo:rerun-if-changed={}", proto_file);
}
let mut config = prost_build::Config::new();
@@ -173,7 +173,7 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
.field_attribute(".web.NetworkConfig", "#[serde(default)]")
.service_generator(Box::new(rpc_build::ServiceGenerator::new()))
.btree_map(["."])
.skip_debug([".common.Ipv4Addr", ".common.Ipv6Addr", ".common.UUID"]);
.skip_debug(&[".common.Ipv4Addr", ".common.Ipv6Addr", ".common.UUID"]);
config.compile_protos(&proto_files, &["src/proto/"])?;

View File

@@ -95,9 +95,6 @@ core_clap:
disable_encryption:
en: "disable encryption for peers communication, default is false, must be same with peers"
zh-CN: "禁用对等节点通信的加密默认为false必须与对等节点相同"
encryption_algorithm:
en: "encryption algorithm to use, supported: '', 'xor', 'chacha20', 'aes-gcm', 'aes-gcm-256', 'openssl-aes128-gcm', 'openssl-aes256-gcm', 'openssl-chacha20'. Empty string means default (aes-gcm)"
zh-CN: "要使用的加密算法,支持:''默认aes-gcm、'xor'、'chacha20'、'aes-gcm'、'aes-gcm-256'、'openssl-aes128-gcm'、'openssl-aes256-gcm'、'openssl-chacha20'"
multi_thread:
en: "use multi-thread runtime, default is single-thread"
zh-CN: "使用多线程运行时,默认为单线程"
@@ -193,18 +190,6 @@ core_clap:
foreign_relay_bps_limit:
en: "the maximum bps limit for foreign network relay, default is no limit. unit: BPS (bytes per second)"
zh-CN: "作为共享节点时,限制非本地网络的流量转发速率,默认无限制,单位 BPS (字节每秒)"
tcp_whitelist:
en: "tcp port whitelist. Supports single ports (80) and ranges (8000-9000)"
zh-CN: "TCP 端口白名单。支持单个端口80和范围8000-9000"
udp_whitelist:
en: "udp port whitelist. Supports single ports (53) and ranges (5000-6000)"
zh-CN: "UDP 端口白名单。支持单个端口53和范围5000-6000"
disable_relay_kcp:
en: "if true, disable relay kcp packets. avoid consuming too many bandwidth. default is false"
zh-CN: "如果为true则禁止节点转发 KCP 数据包防止过度消耗流量。默认值为false"
enable_relay_foreign_network_kcp:
en: "if true, allow relay kcp packets from foreign network. default is false (not forward foreign network kcp packets)"
zh-CN: "如果为true则作为共享节点时也可以转发其他网络的 KCP 数据包。默认值为false不转发"
core_app:
panic_backtrace_save:
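The tcp_whitelist / udp_whitelist help text above accepts single ports and dash-separated ranges. A minimal sketch of how such values reach the config layer, using the ConfigLoader setters that appear in the config.rs hunks later in this diff (the surrounding CLI parsing is assumed, not shown here):

use crate::common::config::{ConfigLoader, TomlConfigLoader};

// Sketch only: whitelist entries are plain strings, either "80" or "8000-9000".
fn apply_port_whitelists(cfg: &TomlConfigLoader) {
    cfg.set_tcp_whitelist(vec!["80".to_string(), "8000-9000".to_string()]);
    cfg.set_udp_whitelist(vec!["53".to_string(), "5000-6000".to_string()]);
    // AclRuleBuilder::build(&global_ctx) later turns these into an auto-generated
    // inbound whitelist chain whose default action is Drop (see the acl hunks below).
}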

View File

@@ -6,9 +6,8 @@ use std::{
time::{Duration, SystemTime, UNIX_EPOCH},
};
use crate::common::{config::ConfigLoader, global_ctx::ArcGlobalCtx, token_bucket::TokenBucket};
use crate::common::token_bucket::TokenBucket;
use crate::proto::acl::*;
use anyhow::Context as _;
use dashmap::DashMap;
use tokio::task::JoinSet;
@@ -178,12 +177,6 @@ impl AclLogContext {
}
}
pub type SharedState = (
Arc<DashMap<String, ConnTrackEntry>>,
Arc<DashMap<RateLimitKey, Arc<TokenBucket>>>,
Arc<DashMap<AclStatKey, u64>>,
);
// High-performance ACL processor - No more internal locks!
pub struct AclProcessor {
// Immutable rule vectors - no locks needed since they're never modified after creation
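The two comments above summarise the locking model: the rule vectors are built once and never mutated, while the mutable pieces (connection tracking, rate limiters, per-rule stats) live in shared DashMaps that get_shared_state() hands back during a hot reload. A hedged sketch of that hand-off, assuming the acl module's types are in scope; the constructor that re-uses the carried-over maps is elided here:

use std::sync::Arc;
use dashmap::DashMap;

// Sketch only: get_shared_state() clones three Arc handles, so carrying state is cheap.
fn carry_state_across_reload(
    old: &AclProcessor,
) -> (
    Arc<DashMap<String, ConnTrackEntry>>,
    Arc<DashMap<RateLimitKey, Arc<TokenBucket>>>,
    Arc<DashMap<AclStatKey, u64>>,
) {
    // Hand these to the freshly built processor so in-flight connections keep their entries.
    old.get_shared_state()
}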
@@ -327,7 +320,7 @@ impl AclProcessor {
.rules
.iter()
.filter(|rule| rule.enabled)
.map(Self::convert_to_fast_lookup_rule)
.map(|rule| Self::convert_to_fast_lookup_rule(rule))
.collect::<Vec<_>>();
// Sort by priority (higher priority first)
@@ -428,7 +421,7 @@ impl AclProcessor {
self.inc_cache_entry_stats(cache_entry, packet_info);
cache_entry.acl_result.clone().unwrap()
return cache_entry.acl_result.clone().unwrap();
}
fn inc_cache_entry_stats(&self, cache_entry: &AclCacheEntry, packet_info: &PacketInfo) {
@@ -477,7 +470,6 @@ impl AclProcessor {
let rules = match chain_type {
ChainType::Inbound => &self.inbound_rules,
ChainType::Outbound => &self.outbound_rules,
ChainType::Forward => &self.forward_rules,
_ => {
return AclResult {
action: Action::Drop,
@@ -545,7 +537,7 @@ impl AclProcessor {
cache_entry.rule_stats_vec.push(rule.rule_stats.clone());
cache_entry.matched_rule = RuleId::Priority(rule.priority);
cache_entry.acl_result = Some(AclResult {
action: rule.action,
action: rule.action.clone(),
matched_rule: Some(RuleId::Priority(rule.priority)),
should_log: false,
log_context: Some(AclLogContext::RuleMatch {
@@ -601,7 +593,13 @@ impl AclProcessor {
}
/// Get shared state for preserving across hot reloads
pub fn get_shared_state(&self) -> SharedState {
pub fn get_shared_state(
&self,
) -> (
Arc<DashMap<String, ConnTrackEntry>>,
Arc<DashMap<RateLimitKey, Arc<TokenBucket>>>,
Arc<DashMap<AclStatKey, u64>>,
) {
(
self.conn_track.clone(),
self.rate_limiters.clone(),
@@ -698,9 +696,9 @@ impl AclProcessor {
}
/// Check connection state for stateful rules
fn check_connection_state(&self, conn_track_key: &str, packet_info: &PacketInfo) {
fn check_connection_state(&self, conn_track_key: &String, packet_info: &PacketInfo) {
self.conn_track
.entry(conn_track_key.to_string())
.entry(conn_track_key.clone())
.and_modify(|x| {
x.last_seen = SystemTime::now()
.duration_since(UNIX_EPOCH)
@@ -764,13 +762,13 @@ impl AclProcessor {
let src_ip_ranges = rule
.source_ips
.iter()
.filter_map(|x| Self::convert_ip_inet_to_cidr(x.as_str()))
.filter_map(|ip_inet| Self::convert_ip_inet_to_cidr(ip_inet))
.collect();
let dst_ip_ranges = rule
.destination_ips
.iter()
.filter_map(|x| Self::convert_ip_inet_to_cidr(x.as_str()))
.filter_map(|ip_inet| Self::convert_ip_inet_to_cidr(ip_inet))
.collect();
let src_port_ranges = rule
@@ -820,8 +818,8 @@ impl AclProcessor {
}
/// Convert IpInet to CIDR for fast lookup
fn convert_ip_inet_to_cidr(input: &str) -> Option<cidr::IpCidr> {
cidr::IpCidr::from_str(input).ok()
fn convert_ip_inet_to_cidr(input: &String) -> Option<cidr::IpCidr> {
cidr::IpCidr::from_str(input.as_str()).ok()
}
/// Increment statistics counter
@@ -898,13 +896,17 @@ impl AclProcessor {
}
// Newly added helper functions
fn parse_port_start(port_strs: &[String]) -> Option<u16> {
fn parse_port_start(
port_strs: &::prost::alloc::vec::Vec<::prost::alloc::string::String>,
) -> Option<u16> {
port_strs
.iter()
.filter_map(|s| parse_port_range(s).map(|(start, _)| start))
.min()
}
fn parse_port_end(port_strs: &[String]) -> Option<u16> {
fn parse_port_end(
port_strs: &::prost::alloc::vec::Vec<::prost::alloc::string::String>,
) -> Option<u16> {
port_strs
.iter()
.filter_map(|s| parse_port_range(s).map(|(_, end)| end))
@@ -990,146 +992,6 @@ impl AclStatKey {
}
}
pub struct AclRuleBuilder {
pub acl: Option<Acl>,
pub tcp_whitelist: Vec<String>,
pub udp_whitelist: Vec<String>,
pub whitelist_priority: Option<u32>,
}
impl AclRuleBuilder {
fn parse_port_list(port_list: &[String]) -> anyhow::Result<Vec<String>> {
let mut ports = Vec::new();
for port_spec in port_list {
if port_spec.contains('-') {
// Handle port range like "8000-9000"
let parts: Vec<&str> = port_spec.split('-').collect();
if parts.len() != 2 {
return Err(anyhow::anyhow!("Invalid port range format: {}", port_spec));
}
let start: u16 = parts[0]
.parse()
.with_context(|| format!("Invalid start port in range: {}", port_spec))?;
let end: u16 = parts[1]
.parse()
.with_context(|| format!("Invalid end port in range: {}", port_spec))?;
if start > end {
return Err(anyhow::anyhow!(
"Start port must be <= end port in range: {}",
port_spec
));
}
// acl can handle port range
ports.push(port_spec.clone());
} else {
// Handle single port
let port: u16 = port_spec
.parse()
.with_context(|| format!("Invalid port number: {}", port_spec))?;
ports.push(port.to_string());
}
}
Ok(ports)
}
fn generate_acl_from_whitelists(&mut self) -> anyhow::Result<()> {
if self.tcp_whitelist.is_empty() && self.udp_whitelist.is_empty() {
return Ok(());
}
// Create inbound chain for whitelist rules
let mut inbound_chain = Chain {
name: "inbound_whitelist".to_string(),
chain_type: ChainType::Inbound as i32,
description: "Auto-generated inbound whitelist from CLI".to_string(),
enabled: true,
rules: vec![],
default_action: Action::Drop as i32, // Default deny
};
let mut rule_priority = self.whitelist_priority.unwrap_or(1000u32);
// Add TCP whitelist rules
if !self.tcp_whitelist.is_empty() {
let tcp_ports = Self::parse_port_list(&self.tcp_whitelist)?;
let tcp_rule = Rule {
name: "tcp_whitelist".to_string(),
description: "Auto-generated TCP whitelist rule".to_string(),
priority: rule_priority,
enabled: true,
protocol: Protocol::Tcp as i32,
ports: tcp_ports,
source_ips: vec![],
destination_ips: vec![],
source_ports: vec![],
action: Action::Allow as i32,
rate_limit: 0,
burst_limit: 0,
stateful: true,
};
inbound_chain.rules.push(tcp_rule);
rule_priority -= 1;
}
// Add UDP whitelist rules
if !self.udp_whitelist.is_empty() {
let udp_ports = Self::parse_port_list(&self.udp_whitelist)?;
let udp_rule = Rule {
name: "udp_whitelist".to_string(),
description: "Auto-generated UDP whitelist rule".to_string(),
priority: rule_priority,
enabled: true,
protocol: Protocol::Udp as i32,
ports: udp_ports,
source_ips: vec![],
destination_ips: vec![],
source_ports: vec![],
action: Action::Allow as i32,
rate_limit: 0,
burst_limit: 0,
stateful: false,
};
inbound_chain.rules.push(udp_rule);
}
if self.acl.is_none() {
self.acl = Some(Acl::default());
}
let acl = self.acl.as_mut().unwrap();
if let Some(ref mut acl_v1) = acl.acl_v1 {
acl_v1.chains.push(inbound_chain);
} else {
acl.acl_v1 = Some(AclV1 {
chains: vec![inbound_chain],
});
}
Ok(())
}
fn do_build(mut self) -> anyhow::Result<Option<Acl>> {
self.generate_acl_from_whitelists()?;
Ok(self.acl.clone())
}
pub fn build(global_ctx: &ArcGlobalCtx) -> anyhow::Result<Option<Acl>> {
let builder = AclRuleBuilder {
acl: global_ctx.config.get_acl(),
tcp_whitelist: global_ctx.config.get_tcp_whitelist(),
udp_whitelist: global_ctx.config.get_udp_whitelist(),
whitelist_priority: None,
};
builder.do_build()
}
}
#[derive(Debug, Clone, Copy)]
pub enum AclStatType {
Total,
@@ -1150,22 +1012,18 @@ mod tests {
let mut acl_v1 = AclV1::default();
// Create inbound chain
let mut chain = Chain {
name: "test_inbound".to_string(),
chain_type: ChainType::Inbound as i32,
enabled: true,
..Default::default()
};
let mut chain = Chain::default();
chain.name = "test_inbound".to_string();
chain.chain_type = ChainType::Inbound as i32;
chain.enabled = true;
// Allow all rule
let rule = Rule {
name: "allow_all".to_string(),
priority: 100,
enabled: true,
action: Action::Allow as i32,
protocol: Protocol::Any as i32,
..Default::default()
};
let mut rule = Rule::default();
rule.name = "allow_all".to_string();
rule.priority = 100;
rule.enabled = true;
rule.action = Action::Allow as i32;
rule.protocol = Protocol::Any as i32;
chain.rules.push(rule);
acl_v1.chains.push(chain);
@@ -1278,14 +1136,12 @@ mod tests {
// Create a new config (simulating a hot reload)
let mut new_config = create_test_acl_config();
if let Some(ref mut acl_v1) = new_config.acl_v1 {
let drop_rule = Rule {
name: "drop_all".to_string(),
priority: 200,
enabled: true,
action: Action::Drop as i32,
protocol: Protocol::Any as i32,
..Default::default()
};
let mut drop_rule = Rule::default();
drop_rule.name = "drop_all".to_string();
drop_rule.priority = 200;
drop_rule.enabled = true;
drop_rule.action = Action::Drop as i32;
drop_rule.protocol = Protocol::Any as i32;
acl_v1.chains[0].rules.push(drop_rule);
}
@@ -1323,48 +1179,40 @@ mod tests {
let mut acl_config = Acl::default();
let mut acl_v1 = AclV1::default();
let mut chain = Chain {
name: "performance_test".to_string(),
chain_type: ChainType::Inbound as i32,
enabled: true,
..Default::default()
};
let mut chain = Chain::default();
chain.name = "performance_test".to_string();
chain.chain_type = ChainType::Inbound as i32;
chain.enabled = true;
// 1. High-priority simple rule for UDP (can be cached efficiently)
let simple_rule = Rule {
name: "simple_udp".to_string(),
priority: 300,
enabled: true,
action: Action::Allow as i32,
protocol: Protocol::Udp as i32,
..Default::default()
};
let mut simple_rule = Rule::default();
simple_rule.name = "simple_udp".to_string();
simple_rule.priority = 300;
simple_rule.enabled = true;
simple_rule.action = Action::Allow as i32;
simple_rule.protocol = Protocol::Udp as i32;
// No stateful or rate limit - can benefit from full cache optimization
chain.rules.push(simple_rule);
// 2. Medium-priority stateful + rate-limited rule for TCP (security critical)
let security_rule = Rule {
name: "security_tcp".to_string(),
priority: 200,
enabled: true,
action: Action::Allow as i32,
protocol: Protocol::Tcp as i32,
stateful: true,
rate_limit: 100,
burst_limit: 200,
..Default::default()
};
let mut security_rule = Rule::default();
security_rule.name = "security_tcp".to_string();
security_rule.priority = 200;
security_rule.enabled = true;
security_rule.action = Action::Allow as i32;
security_rule.protocol = Protocol::Tcp as i32;
security_rule.stateful = true;
security_rule.rate_limit = 100; // 100 packets/sec
security_rule.burst_limit = 200;
chain.rules.push(security_rule);
// 3. Low-priority default allow rule for Any
let default_rule = Rule {
name: "default_allow".to_string(),
priority: 100,
enabled: true,
action: Action::Allow as i32,
protocol: Protocol::Any as i32,
..Default::default()
};
let mut default_rule = Rule::default();
default_rule.name = "default_allow".to_string();
default_rule.priority = 100;
default_rule.enabled = true;
default_rule.action = Action::Allow as i32;
default_rule.protocol = Protocol::Any as i32;
chain.rules.push(default_rule);
acl_v1.chains.push(chain);
@@ -1451,16 +1299,15 @@ mod tests {
// Create a very restrictive rate-limited rule
if let Some(ref mut acl_v1) = acl_config.acl_v1 {
let rule = Rule {
name: "strict_rate_limit".to_string(),
priority: 200,
enabled: true,
action: Action::Allow as i32,
protocol: Protocol::Any as i32,
rate_limit: 1, // Allow only 1 packet per second
burst_limit: 1, // Burst of 1 packet
..Default::default()
};
let mut rule = Rule::default();
rule.name = "strict_rate_limit".to_string();
rule.priority = 200;
rule.enabled = true;
rule.action = Action::Allow as i32;
rule.protocol = Protocol::Any as i32;
rule.rate_limit = 1; // Allow only 1 packet per second
rule.burst_limit = 1; // Burst of 1 packet
acl_v1.chains[0].rules.push(rule);
}

View File

@@ -21,12 +21,6 @@ pub trait Compressor {
pub struct DefaultCompressor {}
impl Default for DefaultCompressor {
fn default() -> Self {
Self::new()
}
}
impl DefaultCompressor {
pub fn new() -> Self {
DefaultCompressor {}
@@ -201,11 +195,11 @@ pub mod tests {
packet,
packet.payload_len()
);
assert!(packet.peer_manager_header().unwrap().is_compressed());
assert_eq!(packet.peer_manager_header().unwrap().is_compressed(), true);
compressor.decompress(&mut packet).await.unwrap();
assert_eq!(packet.payload(), text);
assert!(!packet.peer_manager_header().unwrap().is_compressed());
assert_eq!(packet.peer_manager_header().unwrap().is_compressed(), false);
}
#[tokio::test]
@@ -221,10 +215,10 @@ pub mod tests {
.compress(&mut packet, CompressorAlgo::ZstdDefault)
.await
.unwrap();
assert!(!packet.peer_manager_header().unwrap().is_compressed());
assert_eq!(packet.peer_manager_header().unwrap().is_compressed(), false);
compressor.decompress(&mut packet).await.unwrap();
assert_eq!(packet.payload(), text);
assert!(!packet.peer_manager_header().unwrap().is_compressed());
assert_eq!(packet.peer_manager_header().unwrap().is_compressed(), false);
}
}

View File

@@ -1,8 +1,8 @@
use std::{
hash::Hasher,
net::{IpAddr, SocketAddr},
net::{Ipv4Addr, SocketAddr},
path::PathBuf,
sync::{Arc, Mutex},
u64,
};
use anyhow::Context;
@@ -40,87 +40,16 @@ pub fn gen_default_flags() -> Flags {
bind_device: true,
enable_kcp_proxy: false,
disable_kcp_input: false,
disable_relay_kcp: false,
enable_relay_foreign_network_kcp: false,
disable_relay_kcp: true,
accept_dns: false,
private_mode: false,
enable_quic_proxy: false,
disable_quic_input: false,
foreign_relay_bps_limit: u64::MAX,
multi_thread_count: 2,
encryption_algorithm: "aes-gcm".to_string(),
}
}
pub enum EncryptionAlgorithm {
AesGcm,
Aes256Gcm,
Xor,
#[cfg(feature = "wireguard")]
ChaCha20,
#[cfg(feature = "openssl-crypto")]
OpensslAesGcm,
#[cfg(feature = "openssl-crypto")]
OpensslChacha20,
#[cfg(feature = "openssl-crypto")]
OpensslAes256Gcm,
}
impl std::fmt::Display for EncryptionAlgorithm {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Self::AesGcm => write!(f, "aes-gcm"),
Self::Aes256Gcm => write!(f, "aes-256-gcm"),
Self::Xor => write!(f, "xor"),
#[cfg(feature = "wireguard")]
Self::ChaCha20 => write!(f, "chacha20"),
#[cfg(feature = "openssl-crypto")]
Self::OpensslAesGcm => write!(f, "openssl-aes-gcm"),
#[cfg(feature = "openssl-crypto")]
Self::OpensslChacha20 => write!(f, "openssl-chacha20"),
#[cfg(feature = "openssl-crypto")]
Self::OpensslAes256Gcm => write!(f, "openssl-aes-256-gcm"),
}
}
}
impl TryFrom<&str> for EncryptionAlgorithm {
type Error = anyhow::Error;
fn try_from(value: &str) -> Result<Self, Self::Error> {
match value {
"aes-gcm" => Ok(Self::AesGcm),
"aes-256-gcm" => Ok(Self::Aes256Gcm),
"xor" => Ok(Self::Xor),
#[cfg(feature = "wireguard")]
"chacha20" => Ok(Self::ChaCha20),
#[cfg(feature = "openssl-crypto")]
"openssl-aes-gcm" => Ok(Self::OpensslAesGcm),
#[cfg(feature = "openssl-crypto")]
"openssl-chacha20" => Ok(Self::OpensslChacha20),
#[cfg(feature = "openssl-crypto")]
"openssl-aes-256-gcm" => Ok(Self::OpensslAes256Gcm),
_ => Err(anyhow::anyhow!("invalid encryption algorithm")),
}
}
}
pub fn get_avaliable_encrypt_methods() -> Vec<&'static str> {
let mut r = vec!["aes-gcm", "aes-256-gcm", "xor"];
if cfg!(feature = "wireguard") {
r.push("chacha20");
}
if cfg!(feature = "openssl-crypto") {
r.extend(vec![
"openssl-aes-gcm",
"openssl-chacha20",
"openssl-aes-256-gcm",
]);
}
r
}
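The helpers above give the CLI a way to validate an algorithm name before the tunnel is set up. A small hedged sketch of how TryFrom and get_avaliable_encrypt_methods compose; the fallback to "aes-gcm" mirrors the encryption_algorithm default in gen_default_flags above, and everything else here is an assumption rather than the crate's actual wiring:

// Sketch only: resolve a user-supplied algorithm string, treating "" as the default.
fn select_encryption_algorithm(name: &str) -> anyhow::Result<EncryptionAlgorithm> {
    let name = if name.is_empty() { "aes-gcm" } else { name };
    let algo = EncryptionAlgorithm::try_from(name)?; // errors with "invalid encryption algorithm"
    // The methods compiled into this build, useful for error messages or --help output.
    let _supported: Vec<&'static str> = get_avaliable_encrypt_methods();
    Ok(algo)
}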
#[auto_impl::auto_impl(Box, &)]
pub trait ConfigLoader: Send + Sync {
fn get_id(&self) -> uuid::Uuid;
@@ -178,8 +107,8 @@ pub trait ConfigLoader: Send + Sync {
fn get_flags(&self) -> Flags;
fn set_flags(&self, flags: Flags);
fn get_exit_nodes(&self) -> Vec<IpAddr>;
fn set_exit_nodes(&self, nodes: Vec<IpAddr>);
fn get_exit_nodes(&self) -> Vec<Ipv4Addr>;
fn set_exit_nodes(&self, nodes: Vec<Ipv4Addr>);
fn get_routes(&self) -> Option<Vec<cidr::Ipv4Cidr>>;
fn set_routes(&self, routes: Option<Vec<cidr::Ipv4Cidr>>);
@@ -193,12 +122,6 @@ pub trait ConfigLoader: Send + Sync {
fn get_acl(&self) -> Option<Acl>;
fn set_acl(&self, acl: Option<Acl>);
fn get_tcp_whitelist(&self) -> Vec<String>;
fn set_tcp_whitelist(&self, whitelist: Vec<String>);
fn get_udp_whitelist(&self) -> Vec<String>;
fn set_udp_whitelist(&self, whitelist: Vec<String>);
fn dump(&self) -> String;
}
@@ -210,7 +133,7 @@ pub trait LoggingConfigLoader {
pub type NetworkSecretDigest = [u8; 32];
#[derive(Debug, Clone, Deserialize, Serialize)]
#[derive(Debug, Clone, Deserialize, Serialize, Default, Eq, Hash)]
pub struct NetworkIdentity {
pub network_name: String,
pub network_secret: Option<String>,
@@ -218,53 +141,27 @@ pub struct NetworkIdentity {
pub network_secret_digest: Option<NetworkSecretDigest>,
}
#[derive(Eq, PartialEq, Hash)]
struct NetworkIdentityWithOnlyDigest {
network_name: String,
network_secret_digest: Option<NetworkSecretDigest>,
}
impl From<NetworkIdentity> for NetworkIdentityWithOnlyDigest {
fn from(identity: NetworkIdentity) -> Self {
if identity.network_secret_digest.is_some() {
Self {
network_name: identity.network_name,
network_secret_digest: identity.network_secret_digest,
}
} else if identity.network_secret.is_some() {
let mut network_secret_digest = [0u8; 32];
generate_digest_from_str(
&identity.network_name,
identity.network_secret.as_ref().unwrap(),
&mut network_secret_digest,
);
Self {
network_name: identity.network_name,
network_secret_digest: Some(network_secret_digest),
}
} else {
Self {
network_name: identity.network_name,
network_secret_digest: None,
}
}
}
}
impl PartialEq for NetworkIdentity {
fn eq(&self, other: &Self) -> bool {
let self_with_digest = NetworkIdentityWithOnlyDigest::from(self.clone());
let other_with_digest = NetworkIdentityWithOnlyDigest::from(other.clone());
self_with_digest == other_with_digest
if self.network_name != other.network_name {
return false;
}
}
impl Eq for NetworkIdentity {}
if self.network_secret.is_some()
&& other.network_secret.is_some()
&& self.network_secret != other.network_secret
{
return false;
}
impl std::hash::Hash for NetworkIdentity {
fn hash<H: Hasher>(&self, state: &mut H) {
let self_with_digest = NetworkIdentityWithOnlyDigest::from(self.clone());
self_with_digest.hash(state);
if self.network_secret_digest.is_some()
&& other.network_secret_digest.is_some()
&& self.network_secret_digest != other.network_secret_digest
{
return false;
}
return true;
}
}
@@ -279,10 +176,8 @@ impl NetworkIdentity {
network_secret_digest: Some(network_secret_digest),
}
}
}
impl Default for NetworkIdentity {
fn default() -> Self {
pub fn default() -> Self {
Self::new("default".to_string(), "".to_string())
}
}
@@ -335,7 +230,7 @@ pub struct VpnPortalConfig {
pub wireguard_listen: SocketAddr,
}
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq, Hash)]
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq)]
pub struct PortForwardConfig {
pub bind_addr: SocketAddr,
pub dst_addr: SocketAddr,
@@ -356,12 +251,12 @@ impl From<PortForwardConfigPb> for PortForwardConfig {
}
}
impl From<PortForwardConfig> for PortForwardConfigPb {
fn from(val: PortForwardConfig) -> Self {
impl Into<PortForwardConfigPb> for PortForwardConfig {
fn into(self) -> PortForwardConfigPb {
PortForwardConfigPb {
bind_addr: Some(val.bind_addr.into()),
dst_addr: Some(val.dst_addr.into()),
socket_type: match val.proto.to_lowercase().as_str() {
bind_addr: Some(self.bind_addr.into()),
dst_addr: Some(self.dst_addr.into()),
socket_type: match self.proto.to_lowercase().as_str() {
"tcp" => SocketType::Tcp as i32,
"udp" => SocketType::Udp as i32,
_ => SocketType::Tcp as i32,
@@ -382,7 +277,7 @@ struct Config {
network_identity: Option<NetworkIdentity>,
listeners: Option<Vec<url::Url>>,
mapped_listeners: Option<Vec<url::Url>>,
exit_nodes: Option<Vec<IpAddr>>,
exit_nodes: Option<Vec<Ipv4Addr>>,
peer: Option<Vec<PeerConfig>>,
proxy_network: Option<Vec<ProxyNetworkConfig>>,
@@ -404,9 +299,6 @@ struct Config {
flags_struct: Option<Flags>,
acl: Option<Acl>,
tcp_whitelist: Option<Vec<String>>,
udp_whitelist: Option<Vec<String>>,
}
#[derive(Debug, Clone)]
@@ -521,7 +413,8 @@ impl ConfigLoader for TomlConfigLoader {
locked_config
.ipv4
.as_ref()
.and_then(|s| s.parse().ok())
.map(|s| s.parse().ok())
.flatten()
.map(|c: cidr::Ipv4Inet| {
if c.network_length() == 32 {
cidr::Ipv4Inet::new(c.address(), 24).unwrap()
@@ -532,16 +425,28 @@ impl ConfigLoader for TomlConfigLoader {
}
fn set_ipv4(&self, addr: Option<cidr::Ipv4Inet>) {
self.config.lock().unwrap().ipv4 = addr.map(|addr| addr.to_string());
self.config.lock().unwrap().ipv4 = if let Some(addr) = addr {
Some(addr.to_string())
} else {
None
};
}
fn get_ipv6(&self) -> Option<cidr::Ipv6Inet> {
let locked_config = self.config.lock().unwrap();
locked_config.ipv6.as_ref().and_then(|s| s.parse().ok())
locked_config
.ipv6
.as_ref()
.map(|s| s.parse().ok())
.flatten()
}
fn set_ipv6(&self, addr: Option<cidr::Ipv6Inet>) {
self.config.lock().unwrap().ipv6 = addr.map(|addr| addr.to_string());
self.config.lock().unwrap().ipv6 = if let Some(addr) = addr {
Some(addr.to_string())
} else {
None
};
}
fn get_dhcp(&self) -> bool {
@@ -615,7 +520,7 @@ impl ConfigLoader for TomlConfigLoader {
locked_config.instance_id = Some(id);
id
} else {
*locked_config.instance_id.as_ref().unwrap()
locked_config.instance_id.as_ref().unwrap().clone()
}
}
@@ -629,7 +534,7 @@ impl ConfigLoader for TomlConfigLoader {
.unwrap()
.network_identity
.clone()
.unwrap_or_default()
.unwrap_or_else(NetworkIdentity::default)
}
fn set_network_identity(&self, identity: NetworkIdentity) {
@@ -710,7 +615,7 @@ impl ConfigLoader for TomlConfigLoader {
self.config.lock().unwrap().flags_struct = Some(flags);
}
fn get_exit_nodes(&self) -> Vec<IpAddr> {
fn get_exit_nodes(&self) -> Vec<Ipv4Addr> {
self.config
.lock()
.unwrap()
@@ -719,7 +624,7 @@ impl ConfigLoader for TomlConfigLoader {
.unwrap_or_default()
}
fn set_exit_nodes(&self, nodes: Vec<IpAddr>) {
fn set_exit_nodes(&self, nodes: Vec<Ipv4Addr>) {
self.config.lock().unwrap().exit_nodes = Some(nodes);
}
@@ -760,32 +665,6 @@ impl ConfigLoader for TomlConfigLoader {
self.config.lock().unwrap().acl = acl;
}
fn get_tcp_whitelist(&self) -> Vec<String> {
self.config
.lock()
.unwrap()
.tcp_whitelist
.clone()
.unwrap_or_default()
}
fn set_tcp_whitelist(&self, whitelist: Vec<String>) {
self.config.lock().unwrap().tcp_whitelist = Some(whitelist);
}
fn get_udp_whitelist(&self) -> Vec<String> {
self.config
.lock()
.unwrap()
.udp_whitelist
.clone()
.unwrap_or_default()
}
fn set_udp_whitelist(&self, whitelist: Vec<String>) {
self.config.lock().unwrap().udp_whitelist = Some(whitelist);
}
fn dump(&self) -> String {
let default_flags_json = serde_json::to_string(&gen_default_flags()).unwrap();
let default_flags_hashmap =

View File

@@ -8,14 +8,14 @@ macro_rules! define_global_var {
#[macro_export]
macro_rules! use_global_var {
($name:ident) => {
$crate::common::constants::$name.lock().unwrap().to_owned()
crate::common::constants::$name.lock().unwrap().to_owned()
};
}
#[macro_export]
macro_rules! set_global_var {
($name:ident, $val:expr) => {
*$crate::common::constants::$name.lock().unwrap() = $val
*crate::common::constants::$name.lock().unwrap() = $val
};
}

View File

@@ -12,9 +12,7 @@ impl<F: FnOnce()> Defer<F> {
impl<F: FnOnce()> Drop for Defer<F> {
fn drop(&mut self) {
if let Some(f) = self.func.take() {
f()
}
self.func.take().map(|f| f());
}
}

View File

@@ -48,15 +48,19 @@ pub static RESOLVER: Lazy<Arc<Resolver<GenericConnector<TokioRuntimeProvider>>>>
pub async fn resolve_txt_record(domain_name: &str) -> Result<String, Error> {
let r = RESOLVER.clone();
let response = r
.txt_lookup(domain_name)
.await
.with_context(|| format!("txt_lookup failed, domain_name: {}", domain_name))?;
let response = r.txt_lookup(domain_name).await.with_context(|| {
format!(
"txt_lookup failed, domain_name: {}",
domain_name.to_string()
)
})?;
let txt_record = response
.iter()
.next()
.with_context(|| format!("no txt record found, domain_name: {}", domain_name))?;
let txt_record = response.iter().next().with_context(|| {
format!(
"no txt record found, domain_name: {}",
domain_name.to_string()
)
})?;
let txt_data = String::from_utf8_lossy(&txt_record.txt_data()[0]);
tracing::info!(?txt_data, ?domain_name, "get txt record");

View File

@@ -5,7 +5,6 @@ use std::{
};
use crate::common::config::ProxyNetworkConfig;
use crate::common::stats_manager::StatsManager;
use crate::common::token_bucket::TokenBucketManager;
use crate::peers::acl_filter::AclFilter;
use crate::proto::cli::PeerConnInfo;
@@ -84,8 +83,6 @@ pub struct GlobalCtx {
token_bucket_manager: TokenBucketManager,
stats_manager: Arc<StatsManager>,
acl_filter: Arc<AclFilter>,
}
@@ -104,7 +101,7 @@ impl std::fmt::Debug for GlobalCtx {
pub type ArcGlobalCtx = std::sync::Arc<GlobalCtx>;
impl GlobalCtx {
pub fn new(config_fs: impl ConfigLoader + 'static) -> Self {
pub fn new(config_fs: impl ConfigLoader + 'static + Send + Sync) -> Self {
let id = config_fs.get_id();
let network = config_fs.get_network_identity();
let net_ns = NetNS::new(config_fs.get_netns());
@@ -118,11 +115,9 @@ impl GlobalCtx {
let proxy_forward_by_system = config_fs.get_flags().proxy_forward_by_system;
let no_tun = config_fs.get_flags().no_tun;
let feature_flags = PeerFeatureFlag {
kcp_input: !config_fs.get_flags().disable_kcp_input,
no_relay_kcp: config_fs.get_flags().disable_relay_kcp,
..Default::default()
};
let mut feature_flags = PeerFeatureFlag::default();
feature_flags.kcp_input = !config_fs.get_flags().disable_kcp_input;
feature_flags.no_relay_kcp = config_fs.get_flags().disable_relay_kcp;
GlobalCtx {
inst_name: config_fs.get_inst_name(),
@@ -156,8 +151,6 @@ impl GlobalCtx {
token_bucket_manager: TokenBucketManager::new(),
stats_manager: Arc::new(StatsManager::new()),
acl_filter: Arc::new(AclFilter::new()),
}
}
@@ -187,7 +180,7 @@ impl GlobalCtx {
{
Ok(())
} else {
Err(anyhow::anyhow!("network {} not in whitelist", network_name))
Err(anyhow::anyhow!("network {} not in whitelist", network_name).into())
}
}
@@ -196,8 +189,8 @@ impl GlobalCtx {
return Some(ret);
}
let addr = self.config.get_ipv4();
self.cached_ipv4.store(addr);
addr
self.cached_ipv4.store(addr.clone());
return addr;
}
pub fn set_ipv4(&self, addr: Option<cidr::Ipv4Inet>) {
@@ -210,8 +203,8 @@ impl GlobalCtx {
return Some(ret);
}
let addr = self.config.get_ipv6();
self.cached_ipv6.store(addr);
addr
self.cached_ipv6.store(addr.clone());
return addr;
}
pub fn set_ipv6(&self, addr: Option<cidr::Ipv6Inet>) {
@@ -298,29 +291,6 @@ impl GlobalCtx {
key
}
pub fn get_256_key(&self) -> [u8; 32] {
let mut key = [0u8; 32];
let secret = self
.config
.get_network_identity()
.network_secret
.unwrap_or_default();
// fill key according to network secret
let mut hasher = DefaultHasher::new();
hasher.write(secret.as_bytes());
hasher.write(b"easytier-256bit-key"); // 添加固定盐值以区分128位和256位密钥
// generate the 32-byte key
for i in 0..4 {
let chunk_start = i * 8;
let chunk_end = chunk_start + 8;
hasher.write(&key[0..chunk_start]);
hasher.write(&[i as u8]); // 添加索引以确保每个8字节块都不同
key[chunk_start..chunk_end].copy_from_slice(&hasher.finish().to_be_bytes());
}
key
}
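get_256_key() above stretches the network secret into a 32-byte key with a salted DefaultHasher. One plausible consumer is the aes-gcm dependency declared in Cargo.toml; the wiring below is only a hedged, crate-internal sketch, since the diff does not show how the tunnel layer actually builds its cipher.

use aes_gcm::{Aes256Gcm, KeyInit};

// Sketch only: a 32-byte key always satisfies the AES-256-GCM key size.
fn aes256_cipher_from_ctx(ctx: &GlobalCtx) -> Aes256Gcm {
    let key = ctx.get_256_key();
    Aes256Gcm::new_from_slice(&key).expect("32-byte key matches AES-256 key size")
}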
pub fn enable_exit_node(&self) -> bool {
self.enable_exit_node
}
@@ -353,10 +323,6 @@ impl GlobalCtx {
&self.token_bucket_manager
}
pub fn stats_manager(&self) -> &Arc<StatsManager> {
&self.stats_manager
}
pub fn get_acl_filter(&self) -> &Arc<AclFilter> {
&self.acl_filter
}
@@ -378,18 +344,18 @@ pub mod tests {
let mut subscriber = global_ctx.subscribe();
let peer_id = new_peer_id();
global_ctx.issue_event(GlobalCtxEvent::PeerAdded(peer_id));
global_ctx.issue_event(GlobalCtxEvent::PeerRemoved(peer_id));
global_ctx.issue_event(GlobalCtxEvent::PeerAdded(peer_id.clone()));
global_ctx.issue_event(GlobalCtxEvent::PeerRemoved(peer_id.clone()));
global_ctx.issue_event(GlobalCtxEvent::PeerConnAdded(PeerConnInfo::default()));
global_ctx.issue_event(GlobalCtxEvent::PeerConnRemoved(PeerConnInfo::default()));
assert_eq!(
subscriber.recv().await.unwrap(),
GlobalCtxEvent::PeerAdded(peer_id)
GlobalCtxEvent::PeerAdded(peer_id.clone())
);
assert_eq!(
subscriber.recv().await.unwrap(),
GlobalCtxEvent::PeerRemoved(peer_id)
GlobalCtxEvent::PeerRemoved(peer_id.clone())
);
assert_eq!(
subscriber.recv().await.unwrap(),
@@ -406,7 +372,7 @@ pub mod tests {
) -> ArcGlobalCtx {
let config_fs = TomlConfigLoader::default();
config_fs.set_inst_name(format!("test_{}", config_fs.get_id()));
config_fs.set_network_identity(network_identy.unwrap_or_default());
config_fs.set_network_identity(network_identy.unwrap_or(NetworkIdentity::default()));
let ctx = Arc::new(GlobalCtx::new(config_fs));
ctx.replace_stun_info_collector(Box::new(MockStunInfoCollector {

View File

@@ -1,6 +1,6 @@
#[cfg(any(target_os = "macos", target_os = "freebsd"))]
mod darwin;
#[cfg(target_os = "linux")]
#[cfg(any(target_os = "linux"))]
mod netlink;
#[cfg(target_os = "windows")]
mod win;
@@ -141,7 +141,7 @@ pub struct DummyIfConfiger {}
#[async_trait]
impl IfConfiguerTrait for DummyIfConfiger {}
#[cfg(target_os = "linux")]
#[cfg(any(target_os = "linux"))]
pub type IfConfiger = netlink::NetlinkIfConfiger;
#[cfg(any(target_os = "macos", target_os = "freebsd"))]

View File

@@ -85,14 +85,14 @@ fn send_netlink_req_and_wait_one_resp<T: NetlinkDeserializable + NetlinkSerializ
match ret.payload {
NetlinkPayload::Error(e) => {
if e.code == NonZero::new(0) {
Ok(())
return Ok(());
} else {
Err(e.to_io().into())
return Err(e.to_io().into());
}
}
p => {
tracing::error!("Unexpected netlink response: {:?}", p);
Err(anyhow::anyhow!("Unexpected netlink response").into())
return Err(anyhow::anyhow!("Unexpected netlink response").into());
}
}
}
@@ -263,8 +263,8 @@ impl NetlinkIfConfiger {
let (address, netmask) = match (address.family(), netmask.family()) {
(Some(Inet), Some(Inet)) => (
IpAddr::V4(address.as_sockaddr_in().unwrap().ip()),
IpAddr::V4(netmask.as_sockaddr_in().unwrap().ip()),
IpAddr::V4(address.as_sockaddr_in().unwrap().ip().into()),
IpAddr::V4(netmask.as_sockaddr_in().unwrap().ip().into()),
),
(Some(Inet6), Some(Inet6)) => (
IpAddr::V6(address.as_sockaddr_in6().unwrap().ip()),
@@ -333,7 +333,7 @@ impl NetlinkIfConfiger {
let mut resp = Vec::<u8>::new();
loop {
if resp.is_empty() {
if resp.len() == 0 {
let (new_resp, _) = s.recv_from_full()?;
resp = new_resp;
}

View File

@@ -1,3 +1,3 @@
pub mod luid;
pub mod netsh;
pub mod types;
pub mod luid;

View File

@@ -22,7 +22,6 @@ pub mod ifcfg;
pub mod netns;
pub mod network;
pub mod scoped_task;
pub mod stats_manager;
pub mod stun;
pub mod stun_codec_ext;
pub mod token_bucket;
@@ -140,8 +139,8 @@ pub fn get_machine_id() -> uuid::Uuid {
)))]
let gen_mid = None;
if let Some(mid) = gen_mid {
return mid;
if gen_mid.is_some() {
return gen_mid.unwrap();
}
let gen_mid = uuid::Uuid::new_v4();

View File

@@ -34,12 +34,13 @@ impl NetNSGuard {
return;
}
let ns_path: String;
let name = name.unwrap();
let ns_path: String = if name == ROOT_NETNS_NAME {
"/proc/1/ns/net".to_string()
if name == ROOT_NETNS_NAME {
ns_path = "/proc/1/ns/net".to_string();
} else {
format!("/var/run/netns/{}", name)
};
ns_path = format!("/var/run/netns/{}", name);
}
let ns = std::fs::File::open(ns_path).unwrap();
tracing::info!(

View File

@@ -211,7 +211,7 @@ impl IPCollector {
cached_ip_list.read().await.public_ipv6
);
let sleep_sec = if cached_ip_list.read().await.public_ipv4.is_some() {
let sleep_sec = if !cached_ip_list.read().await.public_ipv4.is_none() {
CACHED_IP_LIST_TIMEOUT_SEC
} else {
3
@@ -252,12 +252,15 @@ impl IPCollector {
for iface in ifaces {
for ip in iface.ips {
let ip: std::net::IpAddr = ip.ip();
if let std::net::IpAddr::V4(v4) = ip {
match ip {
std::net::IpAddr::V4(v4) => {
if ip.is_loopback() || ip.is_multicast() {
continue;
}
ret.interface_ipv4s.push(v4.into());
}
_ => {}
}
}
}
@@ -266,12 +269,15 @@ impl IPCollector {
for iface in ifaces {
for ip in iface.ips {
let ip: std::net::IpAddr = ip.ip();
if let std::net::IpAddr::V6(v6) = ip {
match ip {
std::net::IpAddr::V6(v6) => {
if v6.is_multicast() || v6.is_loopback() || v6.is_unicast_link_local() {
continue;
}
ret.interface_ipv6s.push(v6.into());
}
_ => {}
}
}
}

View File

@@ -1,886 +0,0 @@
use dashmap::DashMap;
use serde::{Deserialize, Serialize};
use std::cell::UnsafeCell;
use std::fmt;
use std::sync::Arc;
use std::time::{Duration, Instant};
use tokio::time::interval;
use crate::common::scoped_task::ScopedTask;
/// Predefined metric names for type safety
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub enum MetricName {
/// RPC calls sent to peers
PeerRpcClientTx,
/// RPC calls received from peers
PeerRpcClientRx,
/// RPC calls sent by the server side
PeerRpcServerTx,
/// RPC calls received by the server side
PeerRpcServerRx,
/// RPC call duration in milliseconds
PeerRpcDuration,
/// RPC errors
PeerRpcErrors,
/// Traffic bytes sent
TrafficBytesTx,
/// Traffic bytes received
TrafficBytesRx,
/// Traffic bytes forwarded
TrafficBytesForwarded,
/// Traffic bytes sent to self
TrafficBytesSelfTx,
/// Traffic bytes received from self
TrafficBytesSelfRx,
/// Traffic bytes forwarded for foreign network, rx to local
TrafficBytesForeignForwardRx,
/// Traffic bytes forwarded for foreign network, tx from local
TrafficBytesForeignForwardTx,
/// Traffic bytes forwarded for foreign network, forward
TrafficBytesForeignForwardForwarded,
/// Traffic packets sent
TrafficPacketsTx,
/// Traffic packets received
TrafficPacketsRx,
/// Traffic packets forwarded
TrafficPacketsForwarded,
/// Traffic packets sent to self
TrafficPacketsSelfTx,
/// Traffic packets received from self
TrafficPacketsSelfRx,
/// Traffic packets forwarded for foreign network, rx to local
TrafficPacketsForeignForwardRx,
/// Traffic packets forwarded for foreign network, tx from local
TrafficPacketsForeignForwardTx,
/// Traffic packets forwarded for foreign network, forward
TrafficPacketsForeignForwardForwarded,
/// Compression bytes before compression
CompressionBytesRxBefore,
/// Compression bytes after compression
CompressionBytesRxAfter,
/// Compression bytes before compression
CompressionBytesTxBefore,
/// Compression bytes after compression
CompressionBytesTxAfter,
}
impl fmt::Display for MetricName {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
MetricName::PeerRpcClientTx => write!(f, "peer_rpc_client_tx"),
MetricName::PeerRpcClientRx => write!(f, "peer_rpc_client_rx"),
MetricName::PeerRpcServerTx => write!(f, "peer_rpc_server_tx"),
MetricName::PeerRpcServerRx => write!(f, "peer_rpc_server_rx"),
MetricName::PeerRpcDuration => write!(f, "peer_rpc_duration_ms"),
MetricName::PeerRpcErrors => write!(f, "peer_rpc_errors"),
MetricName::TrafficBytesTx => write!(f, "traffic_bytes_tx"),
MetricName::TrafficBytesRx => write!(f, "traffic_bytes_rx"),
MetricName::TrafficBytesForwarded => write!(f, "traffic_bytes_forwarded"),
MetricName::TrafficBytesSelfTx => write!(f, "traffic_bytes_self_tx"),
MetricName::TrafficBytesSelfRx => write!(f, "traffic_bytes_self_rx"),
MetricName::TrafficBytesForeignForwardRx => {
write!(f, "traffic_bytes_foreign_forward_rx")
}
MetricName::TrafficBytesForeignForwardTx => {
write!(f, "traffic_bytes_foreign_forward_tx")
}
MetricName::TrafficBytesForeignForwardForwarded => {
write!(f, "traffic_bytes_foreign_forward_forwarded")
}
MetricName::TrafficPacketsTx => write!(f, "traffic_packets_tx"),
MetricName::TrafficPacketsRx => write!(f, "traffic_packets_rx"),
MetricName::TrafficPacketsForwarded => write!(f, "traffic_packets_forwarded"),
MetricName::TrafficPacketsSelfTx => write!(f, "traffic_packets_self_tx"),
MetricName::TrafficPacketsSelfRx => write!(f, "traffic_packets_self_rx"),
MetricName::TrafficPacketsForeignForwardRx => {
write!(f, "traffic_packets_foreign_forward_rx")
}
MetricName::TrafficPacketsForeignForwardTx => {
write!(f, "traffic_packets_foreign_forward_tx")
}
MetricName::TrafficPacketsForeignForwardForwarded => {
write!(f, "traffic_packets_foreign_forward_forwarded")
}
MetricName::CompressionBytesRxBefore => write!(f, "compression_bytes_rx_before"),
MetricName::CompressionBytesRxAfter => write!(f, "compression_bytes_rx_after"),
MetricName::CompressionBytesTxBefore => write!(f, "compression_bytes_tx_before"),
MetricName::CompressionBytesTxAfter => write!(f, "compression_bytes_tx_after"),
}
}
}
/// Predefined label types for type safety
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub enum LabelType {
/// Network Name
NetworkName(String),
/// Source peer ID
SrcPeerId(u32),
/// Destination peer ID
DstPeerId(u32),
/// Service name
ServiceName(String),
/// Method name
MethodName(String),
/// Protocol type
Protocol(String),
/// Direction (tx/rx)
Direction(String),
/// Compression algorithm
CompressionAlgo(String),
/// Error type
ErrorType(String),
/// Status
Status(String),
}
impl fmt::Display for LabelType {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
LabelType::NetworkName(name) => write!(f, "network_name={}", name),
LabelType::SrcPeerId(id) => write!(f, "src_peer_id={}", id),
LabelType::DstPeerId(id) => write!(f, "dst_peer_id={}", id),
LabelType::ServiceName(name) => write!(f, "service_name={}", name),
LabelType::MethodName(name) => write!(f, "method_name={}", name),
LabelType::Protocol(proto) => write!(f, "protocol={}", proto),
LabelType::Direction(dir) => write!(f, "direction={}", dir),
LabelType::CompressionAlgo(algo) => write!(f, "compression_algo={}", algo),
LabelType::ErrorType(err) => write!(f, "error_type={}", err),
LabelType::Status(status) => write!(f, "status={}", status),
}
}
}
impl LabelType {
pub fn key(&self) -> &'static str {
match self {
LabelType::NetworkName(_) => "network_name",
LabelType::SrcPeerId(_) => "src_peer_id",
LabelType::DstPeerId(_) => "dst_peer_id",
LabelType::ServiceName(_) => "service_name",
LabelType::MethodName(_) => "method_name",
LabelType::Protocol(_) => "protocol",
LabelType::Direction(_) => "direction",
LabelType::CompressionAlgo(_) => "compression_algo",
LabelType::ErrorType(_) => "error_type",
LabelType::Status(_) => "status",
}
}
pub fn value(&self) -> String {
match self {
LabelType::NetworkName(name) => name.clone(),
LabelType::SrcPeerId(id) => id.to_string(),
LabelType::DstPeerId(id) => id.to_string(),
LabelType::ServiceName(name) => name.clone(),
LabelType::MethodName(name) => name.clone(),
LabelType::Protocol(proto) => proto.clone(),
LabelType::Direction(dir) => dir.clone(),
LabelType::CompressionAlgo(algo) => algo.clone(),
LabelType::ErrorType(err) => err.clone(),
LabelType::Status(status) => status.clone(),
}
}
}
/// Label represents a key-value pair for metric identification
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct Label {
pub key: String,
pub value: String,
}
impl Label {
pub fn new(key: impl Into<String>, value: impl Into<String>) -> Self {
Self {
key: key.into(),
value: value.into(),
}
}
pub fn from_label_type(label_type: &LabelType) -> Self {
Self {
key: label_type.key().to_string(),
value: label_type.value(),
}
}
}
/// LabelSet represents a collection of labels for a metric
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct LabelSet {
labels: Vec<Label>,
}
impl LabelSet {
pub fn new() -> Self {
Self { labels: Vec::new() }
}
pub fn with_label(mut self, key: impl Into<String>, value: impl Into<String>) -> Self {
self.labels.push(Label::new(key, value));
self.labels.sort_by(|a, b| a.key.cmp(&b.key)); // Keep labels sorted for consistent hashing
self
}
/// Add a typed label to the set
pub fn with_label_type(mut self, label_type: LabelType) -> Self {
self.labels.push(Label::from_label_type(&label_type));
self.labels.sort_by(|a, b| a.key.cmp(&b.key)); // Keep labels sorted for consistent hashing
self
}
/// Create a LabelSet from multiple LabelTypes
pub fn from_label_types(label_types: &[LabelType]) -> Self {
let mut labels = Vec::new();
for label_type in label_types {
labels.push(Label::from_label_type(label_type));
}
labels.sort_by(|a, b| a.key.cmp(&b.key)); // Keep labels sorted for consistent hashing
Self { labels }
}
pub fn labels(&self) -> &[Label] {
&self.labels
}
/// Generate a string key for this label set
pub fn to_key(&self) -> String {
if self.labels.is_empty() {
return String::new();
}
let mut parts = Vec::with_capacity(self.labels.len());
for label in &self.labels {
parts.push(format!("{}={}", label.key, label.value));
}
parts.join(",")
}
}
impl Default for LabelSet {
fn default() -> Self {
Self::new()
}
}
/// UnsafeCounter provides a high-performance counter using UnsafeCell
#[derive(Debug)]
pub struct UnsafeCounter {
value: UnsafeCell<u64>,
}
impl Default for UnsafeCounter {
fn default() -> Self {
Self::new()
}
}
impl UnsafeCounter {
pub fn new() -> Self {
Self {
value: UnsafeCell::new(0),
}
}
pub fn new_with_value(initial: u64) -> Self {
Self {
value: UnsafeCell::new(initial),
}
}
/// Increment the counter by the given amount
/// # Safety
/// This method is unsafe because it uses UnsafeCell. The caller must ensure
/// that no other thread is accessing this counter simultaneously.
pub unsafe fn add(&self, delta: u64) {
let ptr = self.value.get();
*ptr = (*ptr).saturating_add(delta);
}
/// Increment the counter by 1
/// # Safety
/// This method is unsafe because it uses UnsafeCell. The caller must ensure
/// that no other thread is accessing this counter simultaneously.
pub unsafe fn inc(&self) {
self.add(1);
}
/// Get the current value of the counter
/// # Safety
/// This method is unsafe because it uses UnsafeCell. The caller must ensure
/// that no other thread is modifying this counter simultaneously.
pub unsafe fn get(&self) -> u64 {
let ptr = self.value.get();
*ptr
}
/// Reset the counter to zero
/// # Safety
/// This method is unsafe because it uses UnsafeCell. The caller must ensure
/// that no other thread is accessing this counter simultaneously.
pub unsafe fn reset(&self) {
let ptr = self.value.get();
*ptr = 0;
}
/// Set the counter to a specific value
/// # Safety
/// This method is unsafe because it uses UnsafeCell. The caller must ensure
/// that no other thread is accessing this counter simultaneously.
pub unsafe fn set(&self, value: u64) {
let ptr = self.value.get();
*ptr = value;
}
}
// UnsafeCounter is Send + Sync because the safety is guaranteed by the caller
unsafe impl Send for UnsafeCounter {}
unsafe impl Sync for UnsafeCounter {}
/// MetricData contains both the counter and last update timestamp
/// Uses UnsafeCell for lock-free access
#[derive(Debug)]
struct MetricData {
counter: UnsafeCounter,
last_updated: UnsafeCell<Instant>,
}
impl MetricData {
fn new() -> Self {
Self {
counter: UnsafeCounter::new(),
last_updated: UnsafeCell::new(Instant::now()),
}
}
fn new_with_value(initial: u64) -> Self {
Self {
counter: UnsafeCounter::new_with_value(initial),
last_updated: UnsafeCell::new(Instant::now()),
}
}
/// Update the last_updated timestamp
/// # Safety
/// This method is unsafe because it uses UnsafeCell. The caller must ensure
/// that no other thread is accessing this timestamp simultaneously.
unsafe fn touch(&self) {
let ptr = self.last_updated.get();
*ptr = Instant::now();
}
/// Get the last updated timestamp
/// # Safety
/// This method is unsafe because it uses UnsafeCell. The caller must ensure
/// that no other thread is modifying this timestamp simultaneously.
unsafe fn get_last_updated(&self) -> Instant {
let ptr = self.last_updated.get();
*ptr
}
}
// MetricData is Send + Sync because the safety is guaranteed by the caller
unsafe impl Send for MetricData {}
unsafe impl Sync for MetricData {}
/// MetricKey uniquely identifies a metric with its name and labels
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
struct MetricKey {
name: MetricName,
labels: LabelSet,
}
impl MetricKey {
fn new(name: MetricName, labels: LabelSet) -> Self {
Self { name, labels }
}
}
impl fmt::Display for MetricKey {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let label_str = self.labels.to_key();
if label_str.is_empty() {
f.write_str(self.name.to_string().as_str())
} else {
f.write_str(format!("{}[{}]", self.name, label_str).as_str())
}
}
}
/// CounterHandle provides a safe interface to a MetricData
/// It ensures thread-local access patterns for performance
#[derive(Clone)]
pub struct CounterHandle {
metric_data: Arc<MetricData>,
_key: MetricKey, // Keep key for debugging purposes
}
impl CounterHandle {
fn new(metric_data: Arc<MetricData>, key: MetricKey) -> Self {
Self {
metric_data,
_key: key,
}
}
/// Increment the counter by the given amount
pub fn add(&self, delta: u64) {
unsafe {
self.metric_data.counter.add(delta);
self.metric_data.touch();
}
}
/// Increment the counter by 1
pub fn inc(&self) {
unsafe {
self.metric_data.counter.inc();
self.metric_data.touch();
}
}
/// Get the current value of the counter
pub fn get(&self) -> u64 {
unsafe { self.metric_data.counter.get() }
}
/// Reset the counter to zero
pub fn reset(&self) {
unsafe {
self.metric_data.counter.reset();
self.metric_data.touch();
}
}
/// Set the counter to a specific value
pub fn set(&self, value: u64) {
unsafe {
self.metric_data.counter.set(value);
self.metric_data.touch();
}
}
}
/// MetricSnapshot represents a point-in-time view of a metric
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MetricSnapshot {
pub name: MetricName,
pub labels: LabelSet,
pub value: u64,
}
impl MetricSnapshot {
pub fn name_str(&self) -> String {
self.name.to_string()
}
}
/// StatsManager manages global statistics with high performance counters
pub struct StatsManager {
counters: Arc<DashMap<MetricKey, Arc<MetricData>>>,
cleanup_task: ScopedTask<()>,
}
impl StatsManager {
/// Create a new StatsManager
pub fn new() -> Self {
let counters = Arc::new(DashMap::new());
// Start cleanup task only if we're in a tokio runtime
let counters_clone = Arc::downgrade(&counters.clone());
let cleanup_task = tokio::spawn(async move {
let mut interval = interval(Duration::from_secs(60)); // Check every minute
loop {
interval.tick().await;
let cutoff_time = Instant::now() - Duration::from_secs(180); // 3 minutes
let Some(counters) = counters_clone.upgrade() else {
break;
};
// Remove entries that haven't been updated for 3 minutes
counters.retain(|_, metric_data: &mut Arc<MetricData>| unsafe {
metric_data.get_last_updated() > cutoff_time
});
}
});
Self {
counters,
cleanup_task: cleanup_task.into(),
}
}
/// Get or create a counter with the given name and labels
pub fn get_counter(&self, name: MetricName, labels: LabelSet) -> CounterHandle {
let key = MetricKey::new(name, labels);
let metric_data = self
.counters
.entry(key.clone())
.or_insert_with(|| Arc::new(MetricData::new()))
.clone();
CounterHandle::new(metric_data, key)
}
/// Get a counter with no labels
pub fn get_simple_counter(&self, name: MetricName) -> CounterHandle {
self.get_counter(name, LabelSet::new())
}
/// Get all metric snapshots
pub fn get_all_metrics(&self) -> Vec<MetricSnapshot> {
let mut metrics = Vec::new();
for entry in self.counters.iter() {
let key = entry.key();
let metric_data = entry.value();
let value = unsafe { metric_data.counter.get() };
metrics.push(MetricSnapshot {
name: key.name,
labels: key.labels.clone(),
value,
});
}
// Sort by metric name and then by labels for consistent output
metrics.sort_by(|a, b| {
a.name
.to_string()
.cmp(&b.name.to_string())
.then_with(|| a.labels.to_key().cmp(&b.labels.to_key()))
});
metrics
}
/// Get metrics filtered by name prefix
pub fn get_metrics_by_prefix(&self, prefix: &str) -> Vec<MetricSnapshot> {
self.get_all_metrics()
.into_iter()
.filter(|m| m.name.to_string().starts_with(prefix))
.collect()
}
/// Get a specific metric by name and labels
pub fn get_metric(&self, name: MetricName, labels: &LabelSet) -> Option<MetricSnapshot> {
let key = MetricKey::new(name, labels.clone());
if let Some(metric_data) = self.counters.get(&key) {
let value = unsafe { metric_data.counter.get() };
Some(MetricSnapshot {
name,
labels: labels.clone(),
value,
})
} else {
None
}
}
/// Clear all metrics
pub fn clear(&self) {
self.counters.clear();
}
/// Get the number of tracked metrics
pub fn metric_count(&self) -> usize {
self.counters.len()
}
/// Export metrics in Prometheus format
pub fn export_prometheus(&self) -> String {
let metrics = self.get_all_metrics();
let mut output = String::new();
let mut current_metric = String::new();
for metric in metrics {
let metric_name_str = metric.name.to_string();
if metric_name_str != current_metric {
if !current_metric.is_empty() {
output.push('\n');
}
output.push_str(&format!("# TYPE {} counter\n", metric_name_str));
current_metric = metric_name_str.clone();
}
if metric.labels.labels().is_empty() {
output.push_str(&format!("{} {}\n", metric_name_str, metric.value));
} else {
let label_str = metric
.labels
.labels()
.iter()
.map(|l| format!("{}=\"{}\"", l.key, l.value))
.collect::<Vec<_>>()
.join(",");
output.push_str(&format!(
"{}{{{}}} {}\n",
metric_name_str, label_str, metric.value
));
}
}
output
}
}
impl Default for StatsManager {
fn default() -> Self {
Self::new()
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::common::stats_manager::{LabelSet, LabelType, MetricName, StatsManager};
use crate::proto::cli::{
GetPrometheusStatsRequest, GetPrometheusStatsResponse, GetStatsRequest, GetStatsResponse,
};
use std::collections::BTreeMap;
#[tokio::test]
async fn test_label_set() {
let labels = LabelSet::new()
.with_label("peer_id", "peer1")
.with_label("method", "ping");
assert_eq!(labels.to_key(), "method=ping,peer_id=peer1");
}
#[tokio::test]
async fn test_unsafe_counter() {
let counter = UnsafeCounter::new();
unsafe {
assert_eq!(counter.get(), 0);
counter.inc();
assert_eq!(counter.get(), 1);
counter.add(5);
assert_eq!(counter.get(), 6);
counter.set(10);
assert_eq!(counter.get(), 10);
counter.reset();
assert_eq!(counter.get(), 0);
}
}
#[tokio::test]
async fn test_stats_manager() {
let stats = StatsManager::new();
// Test simple counter
let counter1 = stats.get_simple_counter(MetricName::PeerRpcClientTx);
counter1.inc();
counter1.add(5);
// Test counter with labels
let labels = LabelSet::new()
.with_label("peer_id", "peer1")
.with_label("method", "ping");
let counter2 = stats.get_counter(MetricName::PeerRpcClientTx, labels.clone());
counter2.add(3);
// Check metrics
let metrics = stats.get_all_metrics();
assert_eq!(metrics.len(), 2);
// Find the simple counter
let simple_metric = metrics
.iter()
.find(|m| m.labels.labels().is_empty())
.unwrap();
assert_eq!(simple_metric.name, MetricName::PeerRpcClientTx);
assert_eq!(simple_metric.value, 6);
// Find the labeled counter
let labeled_metric = metrics
.iter()
.find(|m| !m.labels.labels().is_empty())
.unwrap();
assert_eq!(labeled_metric.name, MetricName::PeerRpcClientTx);
assert_eq!(labeled_metric.value, 3);
assert_eq!(labeled_metric.labels, labels);
}
#[tokio::test]
async fn test_prometheus_export() {
let stats = StatsManager::new();
let counter1 = stats.get_simple_counter(MetricName::TrafficBytesTx);
counter1.set(100);
let labels = LabelSet::new().with_label("status", "success");
let counter2 = stats.get_counter(MetricName::PeerRpcClientTx, labels);
counter2.set(50);
let prometheus_output = stats.export_prometheus();
assert!(prometheus_output.contains("# TYPE peer_rpc_client_tx counter"));
assert!(prometheus_output.contains("peer_rpc_client_tx{status=\"success\"} 50"));
assert!(prometheus_output.contains("# TYPE traffic_bytes_tx counter"));
assert!(prometheus_output.contains("traffic_bytes_tx 100"));
}
#[tokio::test]
async fn test_get_metric() {
let stats = StatsManager::new();
let labels = LabelSet::new().with_label("peer", "test");
let counter = stats.get_counter(MetricName::PeerRpcClientTx, labels.clone());
counter.set(42);
let metric = stats
.get_metric(MetricName::PeerRpcClientTx, &labels)
.unwrap();
assert_eq!(metric.value, 42);
let non_existent = stats.get_metric(MetricName::PeerRpcErrors, &LabelSet::new());
assert!(non_existent.is_none());
}
#[tokio::test]
async fn test_metrics_by_prefix() {
let stats = StatsManager::new();
stats
.get_simple_counter(MetricName::PeerRpcClientTx)
.set(10);
stats.get_simple_counter(MetricName::PeerRpcErrors).set(2);
stats
.get_simple_counter(MetricName::TrafficBytesTx)
.set(100);
let rpc_metrics = stats.get_metrics_by_prefix("peer_rpc");
assert_eq!(rpc_metrics.len(), 2);
let traffic_metrics = stats.get_metrics_by_prefix("traffic_");
assert_eq!(traffic_metrics.len(), 1);
}
#[tokio::test]
async fn test_cleanup_mechanism() {
let stats = StatsManager::new();
// Create a few counters
let counter1 = stats.get_simple_counter(MetricName::PeerRpcClientTx);
counter1.set(10);
let labels = LabelSet::new().with_label("test", "value");
let counter2 = stats.get_counter(MetricName::TrafficBytesTx, labels);
counter2.set(20);
// Verify that both counters exist
assert_eq!(stats.metric_count(), 2);
// Note: a real cleanup test would have to wait 3 minutes, which is impractical in a unit test.
// Here we only verify that the basic structure of the cleanup mechanism is correct.
// The cleanup logic runs in a background task and automatically removes entries that have not been updated for more than 3 minutes.
// Verify that the counters still work as expected
counter1.inc();
assert_eq!(counter1.get(), 11);
counter2.add(5);
assert_eq!(counter2.get(), 25);
}
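// A slow end-to-end sketch of the cleanup path, ignored by default because it
// must sleep past the idle window. The 3-minute figure is taken from the
// comments above and is an assumption, not a verified constant; only the API
// exercised in this module is used.
#[tokio::test]
#[ignore = "sleeps past the ~3-minute cleanup window"]
async fn test_cleanup_mechanism_end_to_end() {
let stats = StatsManager::new();
stats.get_simple_counter(MetricName::PeerRpcClientTx).set(1);
assert_eq!(stats.metric_count(), 1);
// Leave the counter untouched long enough for the background cleanup
// task to consider it stale, then confirm the entry was dropped.
tokio::time::sleep(std::time::Duration::from_secs(4 * 60)).await;
assert_eq!(stats.metric_count(), 0);
}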
#[tokio::test]
async fn test_stats_rpc_data_structures() {
// Test GetStatsRequest
let request = GetStatsRequest {};
assert_eq!(request, GetStatsRequest {});
// Test GetStatsResponse
let response = GetStatsResponse { metrics: vec![] };
assert!(response.metrics.is_empty());
// Test GetPrometheusStatsRequest
let prometheus_request = GetPrometheusStatsRequest {};
assert_eq!(prometheus_request, GetPrometheusStatsRequest {});
// Test GetPrometheusStatsResponse
let prometheus_response = GetPrometheusStatsResponse {
prometheus_text: "# Test metrics\n".to_string(),
};
assert_eq!(prometheus_response.prometheus_text, "# Test metrics\n");
}
#[tokio::test]
async fn test_metric_snapshot_creation() {
let stats_manager = StatsManager::new();
// Create some test metrics
let counter1 = stats_manager.get_counter(
MetricName::PeerRpcClientTx,
LabelSet::new()
.with_label_type(LabelType::SrcPeerId(123))
.with_label_type(LabelType::ServiceName("test_service".to_string())),
);
counter1.add(100);
let counter2 = stats_manager.get_counter(
MetricName::TrafficBytesTx,
LabelSet::new().with_label_type(LabelType::Protocol("tcp".to_string())),
);
counter2.add(1024);
// Get all metrics
let metrics = stats_manager.get_all_metrics();
assert_eq!(metrics.len(), 2);
// Verify the metrics can be converted to the format expected by RPC
for metric in metrics {
let mut labels = BTreeMap::new();
for label in metric.labels.labels() {
labels.insert(label.key.clone(), label.value.clone());
}
// This simulates what the RPC service would do
let _metric_snapshot = crate::proto::cli::MetricSnapshot {
name: metric.name.to_string(),
value: metric.value,
labels,
};
}
}
#[tokio::test]
async fn test_prometheus_export_format() {
let stats_manager = StatsManager::new();
// Create test metrics
let counter = stats_manager.get_counter(
MetricName::PeerRpcClientTx,
LabelSet::new()
.with_label_type(LabelType::SrcPeerId(123))
.with_label_type(LabelType::ServiceName("test".to_string())),
);
counter.add(42);
// Export to Prometheus format
let prometheus_text = stats_manager.export_prometheus();
println!("{}", prometheus_text);
// Verify the format
assert!(prometheus_text.contains("peer_rpc_client_tx"));
assert!(prometheus_text.contains("42"));
assert!(prometheus_text.contains("src_peer_id=\"123\""));
assert!(prometheus_text.contains("service_name=\"test\""));
}
}

View File

@@ -282,7 +282,9 @@ impl StunClient {
.with_context(|| "encode stun message")?;
tids.push(tid);
tracing::trace!(?message, ?msg, tid, "send stun request");
self.socket.send_to(msg.as_slice(), &stun_host).await?;
self.socket
.send_to(msg.as_slice().into(), &stun_host)
.await?;
}
let now = Instant::now();
@@ -370,7 +372,7 @@ impl StunClientBuilder {
pub async fn stop(&mut self) {
self.task_set.abort_all();
while self.task_set.join_next().await.is_some() {}
while let Some(_) = self.task_set.join_next().await {}
}
}
@@ -415,7 +417,7 @@ impl UdpNatTypeDetectResult {
return true;
}
}
false
return false;
}
fn is_pat(&self) -> bool {
@@ -455,16 +457,16 @@ impl UdpNatTypeDetectResult {
if self.is_cone() {
if self.has_ip_changed_resp() {
if self.is_open_internet() {
NatType::OpenInternet
return NatType::OpenInternet;
} else if self.is_pat() {
NatType::NoPat
return NatType::NoPat;
} else {
NatType::FullCone
return NatType::FullCone;
}
} else if self.has_port_changed_resp() {
NatType::Restricted
return NatType::Restricted;
} else {
NatType::PortRestricted
return NatType::PortRestricted;
}
} else if !self.stun_resps.is_empty() {
if self.public_ips().len() != 1
@@ -478,7 +480,7 @@ impl UdpNatTypeDetectResult {
.mapped_socket_addr
.is_none()
{
NatType::Symmetric
return NatType::Symmetric;
} else {
let extra_bind_test = self.extra_bind_test.as_ref().unwrap();
let extra_port = extra_bind_test.mapped_socket_addr.unwrap().port();
@@ -486,15 +488,15 @@ impl UdpNatTypeDetectResult {
let max_port_diff = extra_port.saturating_sub(self.max_port());
let min_port_diff = self.min_port().saturating_sub(extra_port);
if max_port_diff != 0 && max_port_diff < 100 {
NatType::SymmetricEasyInc
return NatType::SymmetricEasyInc;
} else if min_port_diff != 0 && min_port_diff < 100 {
NatType::SymmetricEasyDec
return NatType::SymmetricEasyDec;
} else {
NatType::Symmetric
return NatType::Symmetric;
}
}
} else {
NatType::Unknown
return NatType::Unknown;
}
}
@@ -677,7 +679,7 @@ impl StunInfoCollectorTrait for StunInfoCollector {
.unwrap()
.clone()
.map(|x| x.collect_available_stun_server())
.unwrap_or_default();
.unwrap_or(vec![]);
if stun_servers.is_empty() {
let mut host_resolver =
@@ -738,7 +740,7 @@ impl StunInfoCollector {
pub fn get_default_servers() -> Vec<String> {
// NOTICE: we may need to choose the stun server based on geo location
// a cross-nation stun server may return an external ip address with high latency and loss rate
[
vec![
"txt:stun.easytier.cn",
"stun.miwifi.com",
"stun.chat.bilibili.com",
@@ -750,16 +752,16 @@ impl StunInfoCollector {
}
pub fn get_default_servers_v6() -> Vec<String> {
["txt:stun-v6.easytier.cn"]
vec!["txt:stun-v6.easytier.cn"]
.iter()
.map(|x| x.to_string())
.collect()
}
async fn get_public_ipv6(servers: &[String]) -> Option<Ipv6Addr> {
async fn get_public_ipv6(servers: &Vec<String>) -> Option<Ipv6Addr> {
let mut ips = HostResolverIter::new(servers.to_vec(), 10, true);
while let Some(ip) = ips.next().await {
let Ok(udp_socket) = UdpSocket::bind("[::]:0".to_string()).await else {
let Ok(udp_socket) = UdpSocket::bind(format!("[::]:0")).await else {
break;
};
let udp = Arc::new(udp_socket);
@@ -768,9 +770,12 @@ impl StunInfoCollector {
.bind_request(false, false)
.await;
tracing::debug!(?ret, "finish ipv6 udp nat type detect");
if let Ok(Some(IpAddr::V6(v6))) = ret.map(|x| x.mapped_socket_addr.map(|x| x.ip())) {
match ret.map(|x| x.mapped_socket_addr.map(|x| x.ip())) {
Ok(Some(IpAddr::V6(v6))) => {
return Some(v6);
}
_ => {}
}
}
None
}
@@ -849,9 +854,9 @@ impl StunInfoCollector {
self.tasks.lock().unwrap().spawn(async move {
loop {
let servers = stun_servers.read().unwrap().clone();
if let Some(x) = Self::get_public_ipv6(&servers).await {
stored_ipv6.store(Some(x))
}
Self::get_public_ipv6(&servers)
.await
.map(|x| stored_ipv6.store(Some(x)));
let sleep_sec = if stored_ipv6.load().is_none() {
60

View File

@@ -34,7 +34,7 @@ impl From<LimiterConfig> for BucketConfig {
.unwrap_or(Duration::from_millis(10));
BucketConfig {
capacity: burst_rate * fill_rate,
fill_rate,
fill_rate: fill_rate,
refill_interval,
}
}
@@ -162,12 +162,6 @@ pub struct TokenBucketManager {
retain_task: ScopedTask<()>,
}
impl Default for TokenBucketManager {
fn default() -> Self {
Self::new()
}
}
impl TokenBucketManager {
/// Creates a new TokenBucketManager
pub fn new() -> Self {
@@ -324,7 +318,7 @@ mod tests {
// Should have accumulated about 100 tokens (10,000 tokens/s * 0.001s)
let tokens = bucket.available_tokens.load(Ordering::Relaxed);
assert!(
(100..=200).contains(&tokens),
tokens >= 100 && tokens <= 200,
"Unexpected token count: {}",
tokens
);
@@ -361,7 +355,8 @@ mod tests {
.list_foreign_networks()
.await
.foreign_networks
.is_empty()
.len()
== 0
},
Duration::from_secs(5),
)
@@ -375,7 +370,8 @@ mod tests {
.get_global_ctx()
.token_bucket_manager()
.buckets
.is_empty()
.len()
== 0
},
Duration::from_secs(10),
)

View File

@@ -16,7 +16,6 @@ use crate::{
dns::socket_addrs, error::Error, global_ctx::ArcGlobalCtx, stun::StunInfoCollectorTrait,
PeerId,
},
connector::udp_hole_punch::handle_rpc_result,
peers::{
peer_conn::PeerConnId,
peer_manager::PeerManager,
@@ -92,7 +91,6 @@ struct DirectConnectorManagerData {
global_ctx: ArcGlobalCtx,
peer_manager: Arc<PeerManager>,
dst_listener_blacklist: timedmap::TimedMap<DstListenerUrlBlackListItem, ()>,
peer_black_list: timedmap::TimedMap<PeerId, ()>,
}
impl DirectConnectorManagerData {
@@ -101,7 +99,6 @@ impl DirectConnectorManagerData {
global_ctx,
peer_manager,
dst_listener_blacklist: timedmap::TimedMap::new(),
peer_black_list: timedmap::TimedMap::new(),
}
}
@@ -180,12 +177,15 @@ impl DirectConnectorManagerData {
// ask remote to send v6 hole punch packet
// and no matter what the result is, continue to connect
let _ = self
.remote_send_v6_hole_punch_packet(dst_peer_id, &local_socket, remote_url)
.remote_send_v6_hole_punch_packet(dst_peer_id, &local_socket, &remote_url)
.await;
let udp_connector = UdpTunnelConnector::new(remote_url.clone());
let remote_addr =
super::check_scheme_and_get_socket_addr::<SocketAddr>(remote_url, "udp", IpVersion::V6)
let remote_addr = super::check_scheme_and_get_socket_addr::<SocketAddr>(
&remote_url,
"udp",
IpVersion::V6,
)
.await?;
let ret = udp_connector
.try_connect_with_socket(local_socket, remote_addr)
@@ -230,8 +230,8 @@ impl DirectConnectorManagerData {
dst_peer_id: PeerId,
addr: String,
) -> Result<(), Error> {
let mut rand_gen = rand::rngs::OsRng;
let backoff_ms = [1000, 2000, 4000];
let mut rand_gen = rand::rngs::OsRng::default();
let backoff_ms = vec![1000, 2000, 4000];
let mut backoff_idx = 0;
tracing::debug!(?dst_peer_id, ?addr, "try_connect_to_ip start");
@@ -240,7 +240,10 @@ impl DirectConnectorManagerData {
if self
.dst_listener_blacklist
.contains(&DstListenerUrlBlackListItem(dst_peer_id, addr.clone()))
.contains(&DstListenerUrlBlackListItem(
dst_peer_id.clone(),
addr.clone(),
))
{
return Err(Error::UrlInBlacklist);
}
@@ -275,7 +278,7 @@ impl DirectConnectorManagerData {
continue;
} else {
self.dst_listener_blacklist.insert(
DstListenerUrlBlackListItem(dst_peer_id, addr),
DstListenerUrlBlackListItem(dst_peer_id.clone(), addr),
(),
std::time::Duration::from_secs(DIRECT_CONNECTOR_BLACKLIST_TIMEOUT_SEC),
);
@@ -309,7 +312,7 @@ impl DirectConnectorManagerData {
if addr.set_host(Some(ip.to_string().as_str())).is_ok() {
tasks.spawn(Self::try_connect_to_ip(
self.clone(),
dst_peer_id,
dst_peer_id.clone(),
addr.to_string(),
));
} else {
@@ -324,7 +327,7 @@ impl DirectConnectorManagerData {
} else if !s_addr.ip().is_loopback() || TESTING.load(Ordering::Relaxed) {
tasks.spawn(Self::try_connect_to_ip(
self.clone(),
dst_peer_id,
dst_peer_id.clone(),
listener.to_string(),
));
}
@@ -349,10 +352,13 @@ impl DirectConnectorManagerData {
.iter()
.for_each(|ip| {
let mut addr = (*listener).clone();
if addr.set_host(Some(format!("[{}]", ip).as_str())).is_ok() {
if addr
.set_host(Some(format!("[{}]", ip.to_string()).as_str()))
.is_ok()
{
tasks.spawn(Self::try_connect_to_ip(
self.clone(),
dst_peer_id,
dst_peer_id.clone(),
addr.to_string(),
));
} else {
@@ -367,7 +373,7 @@ impl DirectConnectorManagerData {
} else if !s_addr.ip().is_loopback() || TESTING.load(Ordering::Relaxed) {
tasks.spawn(Self::try_connect_to_ip(
self.clone(),
dst_peer_id,
dst_peer_id.clone(),
listener.to_string(),
));
}
@@ -427,7 +433,12 @@ impl DirectConnectorManagerData {
}
tracing::debug!("try direct connect to peer with listener: {}", listener);
self.spawn_direct_connect_task(dst_peer_id, &ip_list, listener, &mut tasks)
self.spawn_direct_connect_task(
dst_peer_id.clone(),
&ip_list,
&listener,
&mut tasks,
)
.await;
listener_list.push(listener.clone().to_string());
@@ -462,17 +473,7 @@ impl DirectConnectorManagerData {
) -> Result<(), Error> {
let mut backoff =
udp_hole_punch::BackOff::new(vec![1000, 2000, 2000, 5000, 5000, 10000, 30000, 60000]);
let mut attempt = 0;
loop {
if self.peer_black_list.contains(&dst_peer_id) {
return Err(anyhow::anyhow!("peer {} is blacklisted", dst_peer_id).into());
}
if attempt > 0 {
tokio::time::sleep(Duration::from_millis(backoff.next_backoff())).await;
}
attempt += 1;
let peer_manager = self.peer_manager.clone();
tracing::debug!("try direct connect to peer: {}", dst_peer_id);
@@ -485,11 +486,17 @@ impl DirectConnectorManagerData {
self.global_ctx.get_network_name(),
);
let ip_list = rpc_stub
let ip_list = match rpc_stub
.get_ip_list(BaseController::default(), GetIpListRequest {})
.await;
let ip_list = handle_rpc_result(ip_list, dst_peer_id, &self.peer_black_list)
.with_context(|| format!("get ip list from peer {}", dst_peer_id))?;
.await
.with_context(|| format!("get ip list from peer {}", dst_peer_id))
{
Ok(ip_list) => ip_list,
Err(e) => {
tracing::error!(?e, "failed to get ip list from peer");
continue;
}
};
tracing::info!(ip_list = ?ip_list, dst_peer_id = ?dst_peer_id, "got ip list");
@@ -505,6 +512,8 @@ impl DirectConnectorManagerData {
);
return Ok(());
}
tokio::time::sleep(Duration::from_millis(backoff.next_backoff())).await;
}
}
}
@@ -538,16 +547,13 @@ impl PeerTaskLauncher for DirectConnectorLauncher {
}
async fn collect_peers_need_task(&self, data: &Self::Data) -> Vec<Self::CollectPeerItem> {
data.peer_black_list.cleanup();
let my_peer_id = data.peer_manager.my_peer_id();
data.peer_manager
.list_peers()
.await
.into_iter()
.filter(|peer_id| {
*peer_id != my_peer_id
&& !data.peer_manager.has_directly_connected_conn(*peer_id)
&& !data.peer_black_list.contains(peer_id)
*peer_id != my_peer_id && !data.peer_manager.has_directly_connected_conn(*peer_id)
})
.collect()
}

View File

@@ -124,11 +124,11 @@ impl DNSTunnelConnector {
let responses = responses.clone();
async move {
let response = resolver.srv_lookup(srv_domain).await.with_context(|| {
format!("srv_lookup failed, srv_domain: {}", srv_domain)
format!("srv_lookup failed, srv_domain: {}", srv_domain.to_string())
})?;
tracing::info!(?response, ?srv_domain, "srv_lookup response");
for record in response.iter() {
let parsed_record = Self::handle_one_srv_record(record, protocol);
let parsed_record = Self::handle_one_srv_record(record, &protocol);
tracing::info!(?parsed_record, ?srv_domain, "parsed_record");
if parsed_record.is_err() {
eprintln!(
@@ -153,7 +153,8 @@ impl DNSTunnelConnector {
let url = weighted_choice(srv_records.as_slice()).with_context(|| {
format!(
"failed to choose a srv record, domain_name: {}, srv_records: {:?}",
domain_name, srv_records
domain_name.to_string(),
srv_records
)
})?;

View File

@@ -93,7 +93,7 @@ impl HttpTunnelConnector {
tracing::info!("try to create connector by url: {}", query[0]);
self.redirect_type = HttpRedirectType::RedirectToQuery;
return create_connector_by_url(
query[0].as_ref(),
&query[0].to_string(),
&self.global_ctx,
self.ip_version,
)
@@ -193,7 +193,7 @@ impl HttpTunnelConnector {
.ok_or_else(|| Error::InvalidUrl("no redirect address found".to_string()))?;
let new_url = url::Url::parse(redirect_url.as_str())
.with_context(|| format!("parsing redirect url failed. url: {}", redirect_url))?;
return self.handle_302_redirect(new_url, redirect_url).await;
return self.handle_302_redirect(new_url, &redirect_url).await;
} else if res.status_code().is_success() {
return self.handle_200_success(&body).await;
} else {

View File

@@ -4,11 +4,11 @@ use std::{
};
use anyhow::Context;
use dashmap::DashSet;
use dashmap::{DashMap, DashSet};
use tokio::{
sync::{
broadcast::{error::RecvError, Receiver},
mpsc,
mpsc, Mutex,
},
task::JoinSet,
time::timeout,
@@ -32,6 +32,7 @@ use crate::{
global_ctx::{ArcGlobalCtx, GlobalCtxEvent},
netns::NetNS,
},
connector::set_bind_addr_for_peer_connector,
peers::peer_manager::PeerManager,
proto::cli::{
Connector, ConnectorManageRpc, ConnectorStatus, ListConnectorRequest,
@@ -42,7 +43,8 @@ use crate::{
use super::create_connector_by_url;
type ConnectorMap = Arc<DashSet<String>>;
type MutexConnector = Arc<Mutex<Box<dyn TunnelConnector>>>;
type ConnectorMap = Arc<DashMap<String, MutexConnector>>;
#[derive(Debug, Clone)]
struct ReconnResult {
@@ -70,7 +72,7 @@ pub struct ManualConnectorManager {
impl ManualConnectorManager {
pub fn new(global_ctx: ArcGlobalCtx, peer_manager: Arc<PeerManager>) -> Self {
let connectors = Arc::new(DashSet::new());
let connectors = Arc::new(DashMap::new());
let tasks = JoinSet::new();
let event_subscriber = global_ctx.subscribe();
@@ -103,11 +105,14 @@ impl ManualConnectorManager {
T: TunnelConnector + 'static,
{
tracing::info!("add_connector: {}", connector.remote_url());
self.data.connectors.insert(connector.remote_url().into());
self.data.connectors.insert(
connector.remote_url().into(),
Arc::new(Mutex::new(Box::new(connector))),
);
}
pub async fn add_connector_by_url(&self, url: &str) -> Result<(), Error> {
self.data.connectors.insert(url.to_owned());
self.add_connector(create_connector_by_url(url, &self.global_ctx, IpVersion::Both).await?);
Ok(())
}
@@ -131,7 +136,7 @@ impl ManualConnectorManager {
.data
.connectors
.iter()
.map(|x| x.key().clone())
.map(|x| x.key().clone().into())
.collect();
let dead_urls: BTreeSet<String> = Self::collect_dead_conns(self.data.clone())
@@ -155,8 +160,12 @@ impl ManualConnectorManager {
);
}
let reconnecting_urls: BTreeSet<String> =
self.data.reconnecting.iter().map(|x| x.clone()).collect();
let reconnecting_urls: BTreeSet<String> = self
.data
.reconnecting
.iter()
.map(|x| x.clone().into())
.collect();
for conn_url in reconnecting_urls {
ret.insert(
@@ -227,16 +236,16 @@ impl ManualConnectorManager {
for dead_url in dead_urls {
let data_clone = data.clone();
let sender = reconn_result_send.clone();
data.connectors.remove(&dead_url).unwrap();
let (_, connector) = data.connectors.remove(&dead_url).unwrap();
let insert_succ = data.reconnecting.insert(dead_url.clone());
assert!(insert_succ);
tasks.lock().unwrap().spawn(async move {
let reconn_ret = Self::conn_reconnect(data_clone.clone(), dead_url.clone() ).await;
let reconn_ret = Self::conn_reconnect(data_clone.clone(), dead_url.clone(), connector.clone()).await;
let _ = sender.send(reconn_ret).await;
data_clone.reconnecting.remove(&dead_url).unwrap();
data_clone.connectors.insert(dead_url.clone());
data_clone.connectors.insert(dead_url.clone(), connector);
});
}
tracing::info!("reconn_interval tick, done");
@@ -278,7 +287,7 @@ impl ManualConnectorManager {
let remove_later = DashSet::new();
for it in data.removed_conn_urls.iter() {
let url = it.key();
if data.connectors.remove(url).is_some() {
if let Some(_) = data.connectors.remove(url) {
tracing::warn!("connector: {}, removed", url);
continue;
} else if data.reconnecting.contains(url) {
@@ -297,7 +306,11 @@ impl ManualConnectorManager {
async fn collect_dead_conns(data: Arc<ConnectorManagerData>) -> BTreeSet<String> {
Self::handle_remove_connector(data.clone());
let all_urls: BTreeSet<String> = data.connectors.iter().map(|x| x.key().clone()).collect();
let all_urls: BTreeSet<String> = data
.connectors
.iter()
.map(|x| x.key().clone().into())
.collect();
let mut ret = BTreeSet::new();
for url in all_urls.iter() {
if !data.alive_conn_urls.contains(url) {
@@ -310,13 +323,25 @@ impl ManualConnectorManager {
async fn conn_reconnect_with_ip_version(
data: Arc<ConnectorManagerData>,
dead_url: String,
connector: MutexConnector,
ip_version: IpVersion,
) -> Result<ReconnResult, Error> {
let connector =
create_connector_by_url(&dead_url, &data.global_ctx.clone(), ip_version).await?;
let ip_collector = data.global_ctx.get_ip_collector();
data.global_ctx
.issue_event(GlobalCtxEvent::Connecting(connector.remote_url().clone()));
connector.lock().await.set_ip_version(ip_version);
if data.global_ctx.config.get_flags().bind_device {
set_bind_addr_for_peer_connector(
connector.lock().await.as_mut(),
ip_version == IpVersion::V4,
&ip_collector,
)
.await;
}
data.global_ctx.issue_event(GlobalCtxEvent::Connecting(
connector.lock().await.remote_url().clone(),
));
tracing::info!("reconnect try connect... conn: {:?}", connector);
let Some(pm) = data.peer_manager.upgrade() else {
return Err(Error::AnyhowError(anyhow::anyhow!(
@@ -324,7 +349,9 @@ impl ManualConnectorManager {
)));
};
let (peer_id, conn_id) = pm.try_direct_connect(connector).await?;
let (peer_id, conn_id) = pm
.try_direct_connect(connector.lock().await.as_mut())
.await?;
tracing::info!("reconnect succ: {} {} {}", peer_id, conn_id, dead_url);
Ok(ReconnResult {
dead_url,
@@ -336,6 +363,7 @@ impl ManualConnectorManager {
async fn conn_reconnect(
data: Arc<ConnectorManagerData>,
dead_url: String,
connector: MutexConnector,
) -> Result<ReconnResult, Error> {
tracing::info!("reconnect: {}", dead_url);
@@ -387,34 +415,32 @@ impl ManualConnectorManager {
let ret = timeout(
// allow http connector to wait longer
std::time::Duration::from_secs(if use_long_timeout { 20 } else { 2 }),
Self::conn_reconnect_with_ip_version(data.clone(), dead_url.clone(), ip_version),
Self::conn_reconnect_with_ip_version(
data.clone(),
dead_url.clone(),
connector.clone(),
ip_version,
),
)
.await;
tracing::info!("reconnect: {} done, ret: {:?}", dead_url, ret);
match ret {
Ok(Ok(_)) => {
// both the outer and inner results succeeded: unwrap and break
if ret.is_ok() && ret.as_ref().unwrap().is_ok() {
reconn_ret = ret.unwrap();
break;
} else {
if ret.is_err() {
reconn_ret = Err(ret.unwrap_err().into());
} else if ret.as_ref().unwrap().is_err() {
reconn_ret = Err(ret.unwrap().unwrap_err());
}
Ok(Err(e)) => {
// outer succeeded, inner failed
reconn_ret = Err(e);
}
Err(e) => {
// outer failed
reconn_ret = Err(e.into());
}
}
// emit the event (only reached when we did not break)
data.global_ctx.issue_event(GlobalCtxEvent::ConnectError(
dead_url.clone(),
format!("{:?}", ip_version),
format!("{:?}", reconn_ret),
));
}
}
reconn_ret
}

View File

@@ -260,7 +260,7 @@ impl PunchBothEasySymHoleClient {
)
.await;
let remote_ret = handle_rpc_result(remote_ret, dst_peer_id, &self.blacklist)?;
let remote_ret = handle_rpc_result(remote_ret, dst_peer_id, self.blacklist.clone())?;
if remote_ret.is_busy {
*is_busy = true;
@@ -314,11 +314,7 @@ impl PunchBothEasySymHoleClient {
);
for _ in 0..2 {
match try_connect_with_socket(
global_ctx.clone(),
socket.socket.clone(),
remote_mapped_addr.into(),
)
match try_connect_with_socket(socket.socket.clone(), remote_mapped_addr.into())
.await
{
Ok(tunnel) => {
@@ -389,7 +385,7 @@ pub mod tests {
let udp1 = Arc::new(UdpSocket::bind("0.0.0.0:40164").await.unwrap());
// 144 - DST_PORT_OFFSET = 124
let udp2 = Arc::new(UdpSocket::bind("0.0.0.0:40124").await.unwrap());
let udps = [udp1, udp2];
let udps = vec![udp1, udp2];
let counter = Arc::new(AtomicU32::new(0));

View File

@@ -1,5 +1,5 @@
use std::{
net::{IpAddr, Ipv4Addr, SocketAddr, SocketAddrV4},
net::{Ipv4Addr, SocketAddr, SocketAddrV4},
sync::Arc,
time::Duration,
};
@@ -67,9 +67,9 @@ impl From<NatType> for UdpNatType {
}
}
impl From<UdpNatType> for NatType {
fn from(val: UdpNatType) -> Self {
match val {
impl Into<NatType> for UdpNatType {
fn into(self) -> NatType {
match self {
UdpNatType::Unknown => NatType::Unknown,
UdpNatType::Open(nat_type) => nat_type,
UdpNatType::Cone(nat_type) => nat_type,
@@ -249,7 +249,7 @@ impl UdpSocketArray {
tracing::info!(?addr, ?tid, "got hole punching packet with a tid of interest");
tid_to_socket
.entry(tid)
.or_default()
.or_insert_with(Vec::new)
.push(PunchedUdpSocket {
socket: socket.clone(),
tid,
@@ -556,7 +556,7 @@ impl PunchHoleServerCommon {
#[tracing::instrument(err, ret(level=Level::DEBUG), skip(ports))]
pub(crate) async fn send_symmetric_hole_punch_packet(
ports: &[u16],
ports: &Vec<u16>,
udp: Arc<UdpSocket>,
transaction_id: u32,
public_ips: &Vec<Ipv4Addr>,
@@ -582,33 +582,7 @@ pub(crate) async fn send_symmetric_hole_punch_packet(
Ok(cur_port_idx % ports.len())
}
async fn check_udp_socket_local_addr(
global_ctx: ArcGlobalCtx,
remote_mapped_addr: SocketAddr,
) -> Result<(), Error> {
let socket = UdpSocket::bind("0.0.0.0:0").await?;
socket.connect(remote_mapped_addr).await?;
if let Ok(local_addr) = socket.local_addr() {
// local_addr should not be equal to virtual ipv4 or virtual ipv6
match local_addr.ip() {
IpAddr::V4(ip) => {
if global_ctx.get_ipv4().map(|ip| ip.address()) == Some(ip) {
return Err(anyhow::anyhow!("local address is virtual ipv4").into());
}
}
IpAddr::V6(ip) => {
if global_ctx.get_ipv6().map(|ip| ip.address()) == Some(ip) {
return Err(anyhow::anyhow!("local address is virtual ipv6").into());
}
}
}
}
Ok(())
}
pub(crate) async fn try_connect_with_socket(
global_ctx: ArcGlobalCtx,
socket: Arc<UdpSocket>,
remote_mapped_addr: SocketAddr,
) -> Result<Box<dyn Tunnel>, Error> {
@@ -622,11 +596,8 @@ pub(crate) async fn try_connect_with_socket(
.parse()
.unwrap(),
);
check_udp_socket_local_addr(global_ctx, remote_mapped_addr).await?;
connector
.try_connect_with_socket(socket, remote_mapped_addr)
.await
.map_err(Error::from)
.map_err(|e| Error::from(e))
}

View File

@@ -154,7 +154,7 @@ impl PunchConeHoleClient {
)
.await;
let resp = handle_rpc_result(resp, dst_peer_id, &self.blacklist)?;
let resp = handle_rpc_result(resp, dst_peer_id, self.blacklist.clone())?;
let remote_mapped_addr = resp.listener_mapped_addr.ok_or(anyhow::anyhow!(
"select_punch_listener response missing listener_mapped_addr"
@@ -172,7 +172,7 @@ impl PunchConeHoleClient {
udp_array
.send_with_all(
&new_hole_punch_packet(tid, HOLE_PUNCH_PACKET_BODY_LEN).into_bytes(),
remote_mapped_addr.into(),
remote_mapped_addr.clone().into(),
)
.await
.with_context(|| "failed to send hole punch packet from local")
@@ -188,7 +188,7 @@ impl PunchConeHoleClient {
..Default::default()
},
SendPunchPacketConeRequest {
listener_mapped_addr: Some(remote_mapped_addr),
listener_mapped_addr: Some(remote_mapped_addr.into()),
dest_addr: Some(local_mapped_addr.into()),
transaction_id: tid,
packet_count_per_batch: 2,
@@ -223,11 +223,7 @@ impl PunchConeHoleClient {
tracing::debug!(?socket, ?tid, "punched socket found, try connect with it");
for _ in 0..2 {
match try_connect_with_socket(
global_ctx.clone(),
socket.socket.clone(),
remote_mapped_addr.into(),
)
match try_connect_with_socket(socket.socket.clone(), remote_mapped_addr.into())
.await
{
Ok(tunnel) => {

View File

@@ -39,7 +39,7 @@ pub(crate) mod cone;
pub(crate) mod sym_to_cone;
// sym punch should be serialized
static SYM_PUNCH_LOCK: Lazy<DashMap<PeerId, Arc<Mutex<()>>>> = Lazy::new(DashMap::new);
static SYM_PUNCH_LOCK: Lazy<DashMap<PeerId, Arc<Mutex<()>>>> = Lazy::new(|| DashMap::new());
pub static RUN_TESTING: Lazy<AtomicBool> = Lazy::new(|| AtomicBool::new(false));
// Blacklist timeout in seconds
@@ -183,7 +183,7 @@ impl BackOff {
pub fn handle_rpc_result<T>(
ret: Result<T, rpc_types::error::Error>,
dst_peer_id: PeerId,
blacklist: &timedmap::TimedMap<PeerId, ()>,
blacklist: Arc<timedmap::TimedMap<PeerId, ()>>,
) -> Result<T, rpc_types::error::Error> {
match ret {
Ok(ret) => Ok(ret),
@@ -223,7 +223,7 @@ impl UdpHoePunchConnectorData {
#[tracing::instrument(skip(self))]
async fn handle_punch_result(
&self,
self: &Self,
ret: Result<Option<Box<dyn Tunnel>>, Error>,
backoff: Option<&mut BackOff>,
round: Option<&mut u32>,
@@ -236,9 +236,11 @@ impl UdpHoePunchConnectorData {
if let Some(round) = round {
*round = round.saturating_sub(1);
}
} else if let Some(round) = round {
} else {
if let Some(round) = round {
*round += 1;
}
}
};
match ret {
@@ -462,7 +464,7 @@ impl PeerTaskLauncher for UdpHolePunchPeerTaskLauncher {
}
let conns = data.peer_mgr.list_peer_conns(peer_id).await;
if conns.is_some() && !conns.unwrap().is_empty() {
if conns.is_some() && conns.unwrap().len() > 0 {
continue;
}

View File

@@ -14,15 +14,11 @@ use tokio::{net::UdpSocket, sync::RwLock};
use tracing::Level;
use crate::{
common::{
global_ctx::ArcGlobalCtx, scoped_task::ScopedTask, stun::StunInfoCollectorTrait, PeerId,
},
connector::udp_hole_punch::{
common::{
common::{scoped_task::ScopedTask, stun::StunInfoCollectorTrait, PeerId},
connector::udp_hole_punch::common::{
send_symmetric_hole_punch_packet, try_connect_with_socket, HOLE_PUNCH_PACKET_BODY_LEN,
},
handle_rpc_result,
},
connector::udp_hole_punch::handle_rpc_result,
defer,
peers::peer_manager::PeerManager,
proto::{
@@ -80,9 +76,9 @@ impl PunchSymToConeHoleServer {
let public_ips = request
.public_ips
.into_iter()
.map(std::net::Ipv4Addr::from)
.map(|ip| std::net::Ipv4Addr::from(ip))
.collect::<Vec<_>>();
if public_ips.is_empty() {
if public_ips.len() == 0 {
tracing::warn!("send_punch_packet_easy_sym got zero len public ip");
return Err(
anyhow::anyhow!("send_punch_packet_easy_sym got zero len public ip").into(),
@@ -158,9 +154,9 @@ impl PunchSymToConeHoleServer {
let public_ips = request
.public_ips
.into_iter()
.map(std::net::Ipv4Addr::from)
.map(|ip| std::net::Ipv4Addr::from(ip))
.collect::<Vec<_>>();
if public_ips.is_empty() {
if public_ips.len() == 0 {
tracing::warn!("try_punch_symmetric got zero len public ip");
return Err(anyhow::anyhow!("try_punch_symmetric got zero len public ip").into());
}
@@ -281,7 +277,7 @@ impl PunchSymToConeHoleClient {
return;
};
let req = SendPunchPacketEasySymRequest {
listener_mapped_addr: remote_mapped_addr.into(),
listener_mapped_addr: remote_mapped_addr.clone().into(),
public_ips: public_ips.clone().into_iter().map(|x| x.into()).collect(),
transaction_id: tid,
base_port_num: base_port_for_easy_sym.unwrap() as u32,
@@ -313,7 +309,7 @@ impl PunchSymToConeHoleClient {
port_index: u32,
) -> Option<u32> {
let req = SendPunchPacketHardSymRequest {
listener_mapped_addr: remote_mapped_addr.into(),
listener_mapped_addr: remote_mapped_addr.clone().into(),
public_ips: public_ips.clone().into_iter().map(|x| x.into()).collect(),
transaction_id: tid,
round,
@@ -333,16 +329,16 @@ impl PunchSymToConeHoleClient {
{
Err(e) => {
tracing::error!(?e, "failed to send punch packet for hard sym");
None
return None;
}
Ok(resp) => Some(resp.next_port_index),
Ok(resp) => return Some(resp.next_port_index),
}
}
async fn get_rpc_stub(
&self,
dst_peer_id: PeerId,
) -> Box<dyn UdpHolePunchRpc<Controller = BaseController> + std::marker::Send + 'static> {
) -> Box<(dyn UdpHolePunchRpc<Controller = BaseController> + std::marker::Send + 'static)> {
self.peer_mgr
.get_peer_rpc_mgr()
.rpc_client()
@@ -354,7 +350,6 @@ impl PunchSymToConeHoleClient {
}
async fn check_hole_punch_result<T>(
global_ctx: ArcGlobalCtx,
udp_array: &Arc<UdpSocketArray>,
packet: &[u8],
tid: u32,
@@ -366,7 +361,7 @@ impl PunchSymToConeHoleClient {
let mut finish_time: Option<Instant> = None;
while finish_time.is_none() || finish_time.as_ref().unwrap().elapsed().as_millis() < 1000 {
udp_array
.send_with_all(packet, remote_mapped_addr.into())
.send_with_all(&packet, remote_mapped_addr.into())
.await?;
tokio::time::sleep(Duration::from_millis(200)).await;
@@ -381,13 +376,7 @@ impl PunchSymToConeHoleClient {
};
// if hole punched but tunnel creation failed, need to retry entire process.
match try_connect_with_socket(
global_ctx.clone(),
socket.socket.clone(),
remote_mapped_addr.into(),
)
.await
{
match try_connect_with_socket(socket.socket.clone(), remote_mapped_addr.into()).await {
Ok(tunnel) => {
ret_tunnel.replace(tunnel);
break;
@@ -437,7 +426,7 @@ impl PunchSymToConeHoleClient {
)
.await;
let resp = handle_rpc_result(resp, dst_peer_id, &self.blacklist)?;
let resp = handle_rpc_result(resp, dst_peer_id, self.blacklist.clone())?;
let remote_mapped_addr = resp.listener_mapped_addr.ok_or(anyhow::anyhow!(
"select_punch_listener response missing listener_mapped_addr"
@@ -446,7 +435,6 @@ impl PunchSymToConeHoleClient {
// try direct connect first
if self.try_direct_connect.load(Ordering::Relaxed) {
if let Ok(tunnel) = try_connect_with_socket(
global_ctx.clone(),
Arc::new(UdpSocket::bind("0.0.0.0:0").await?),
remote_mapped_addr.into(),
)
@@ -484,17 +472,16 @@ impl PunchSymToConeHoleClient {
rpc_stub,
base_port_for_easy_sym,
my_nat_info,
remote_mapped_addr,
remote_mapped_addr.clone(),
public_ips.clone(),
tid,
))
.into();
let ret_tunnel = Self::check_hole_punch_result(
global_ctx.clone(),
&udp_array,
&packet,
tid,
remote_mapped_addr,
remote_mapped_addr.clone(),
&scoped_punch_task,
)
.await?;
@@ -510,7 +497,7 @@ impl PunchSymToConeHoleClient {
let scoped_punch_task: ScopedTask<Option<u32>> =
tokio::spawn(Self::remote_send_hole_punch_packet_random(
rpc_stub,
remote_mapped_addr,
remote_mapped_addr.clone(),
public_ips.clone(),
tid,
round,
@@ -518,11 +505,10 @@ impl PunchSymToConeHoleClient {
))
.into();
let ret_tunnel = Self::check_hole_punch_result(
global_ctx,
&udp_array,
&packet,
tid,
remote_mapped_addr,
remote_mapped_addr.clone(),
&scoped_punch_task,
)
.await?;

View File

@@ -4,6 +4,7 @@ use std::{
net::{IpAddr, SocketAddr},
path::PathBuf,
str::FromStr,
sync::Mutex,
time::Duration,
vec,
};
@@ -21,26 +22,23 @@ use tokio::time::timeout;
use easytier::{
common::{
config::PortForwardConfig,
constants::EASYTIER_VERSION,
stun::{StunInfoCollector, StunInfoCollectorTrait},
},
proto::{
cli::{
list_peer_route_pair, AclManageRpc, AclManageRpcClientFactory, AddPortForwardRequest,
ConnectorManageRpc, ConnectorManageRpcClientFactory, DumpRouteRequest,
GetAclStatsRequest, GetPrometheusStatsRequest, GetStatsRequest,
GetVpnPortalInfoRequest, GetWhitelistRequest, ListConnectorRequest,
ListForeignNetworkRequest, ListGlobalForeignNetworkRequest, ListMappedListenerRequest,
ListPeerRequest, ListPeerResponse, ListPortForwardRequest, ListRouteRequest,
ListRouteResponse, ManageMappedListenerRequest, MappedListenerManageAction,
MappedListenerManageRpc, MappedListenerManageRpcClientFactory, NodeInfo, PeerManageRpc,
PeerManageRpcClientFactory, PortForwardManageRpc, PortForwardManageRpcClientFactory,
RemovePortForwardRequest, SetWhitelistRequest, ShowNodeInfoRequest, StatsRpc,
StatsRpcClientFactory, TcpProxyEntryState, TcpProxyEntryTransportType, TcpProxyRpc,
TcpProxyRpcClientFactory, VpnPortalRpc, VpnPortalRpcClientFactory,
list_peer_route_pair, AclManageRpc, AclManageRpcClientFactory, ConnectorManageRpc,
ConnectorManageRpcClientFactory, DumpRouteRequest, GetAclStatsRequest,
GetVpnPortalInfoRequest, ListConnectorRequest, ListForeignNetworkRequest,
ListGlobalForeignNetworkRequest, ListMappedListenerRequest, ListPeerRequest,
ListPeerResponse, ListRouteRequest, ListRouteResponse, ManageMappedListenerRequest,
MappedListenerManageAction, MappedListenerManageRpc,
MappedListenerManageRpcClientFactory, NodeInfo, PeerManageRpc,
PeerManageRpcClientFactory, ShowNodeInfoRequest, TcpProxyEntryState,
TcpProxyEntryTransportType, TcpProxyRpc, TcpProxyRpcClientFactory, VpnPortalRpc,
VpnPortalRpcClientFactory,
},
common::{NatType, SocketType},
common::NatType,
peer_rpc::{GetGlobalPeerMapRequest, PeerCenterRpc, PeerCenterRpcClientFactory},
rpc_impl::standalone::StandAloneClient,
rpc_types::controller::BaseController,
@@ -98,12 +96,6 @@ enum SubCommand {
Proxy,
#[command(about = "show ACL rules statistics")]
Acl(AclArgs),
#[command(about = "manage port forwarding")]
PortForward(PortForwardArgs),
#[command(about = "manage TCP/UDP whitelist")]
Whitelist(WhitelistArgs),
#[command(about = "show statistics information")]
Stats(StatsArgs),
#[command(about = t!("core_clap.generate_completions").to_string())]
GenAutocomplete { shell: Shell },
}
@@ -201,76 +193,6 @@ enum AclSubCommand {
Stats,
}
#[derive(Args, Debug)]
struct PortForwardArgs {
#[command(subcommand)]
sub_command: Option<PortForwardSubCommand>,
}
#[derive(Subcommand, Debug)]
enum PortForwardSubCommand {
/// Add port forward rule
Add {
#[arg(help = "Protocol (tcp/udp)")]
protocol: String,
#[arg(help = "Local bind address (e.g., 0.0.0.0:8080)")]
bind_addr: String,
#[arg(help = "Destination address (e.g., 10.1.1.1:80)")]
dst_addr: String,
},
/// Remove port forward rule
Remove {
#[arg(help = "Protocol (tcp/udp)")]
protocol: String,
#[arg(help = "Local bind address (e.g., 0.0.0.0:8080)")]
bind_addr: String,
#[arg(help = "Optional Destination address (e.g., 10.1.1.1:80)")]
dst_addr: Option<String>,
},
/// List port forward rules
List,
}
#[derive(Args, Debug)]
struct WhitelistArgs {
#[command(subcommand)]
sub_command: Option<WhitelistSubCommand>,
}
#[derive(Subcommand, Debug)]
enum WhitelistSubCommand {
/// Set TCP port whitelist
SetTcp {
#[arg(help = "TCP ports (e.g., 80,443,8000-9000)")]
ports: String,
},
/// Set UDP port whitelist
SetUdp {
#[arg(help = "UDP ports (e.g., 53,5000-6000)")]
ports: String,
},
/// Clear TCP whitelist
ClearTcp,
/// Clear UDP whitelist
ClearUdp,
/// Show current whitelist configuration
Show,
}
#[derive(Args, Debug)]
struct StatsArgs {
#[command(subcommand)]
sub_command: Option<StatsSubCommand>,
}
#[derive(Subcommand, Debug)]
enum StatsSubCommand {
/// Show general statistics
Show,
/// Show statistics in Prometheus format
Prometheus,
}
#[derive(Args, Debug)]
struct ServiceArgs {
#[arg(short, long, default_value = env!("CARGO_PKG_NAME"), help = "service name")]
@@ -325,7 +247,7 @@ struct InstallArgs {
type Error = anyhow::Error;
struct CommandHandler<'a> {
client: tokio::sync::Mutex<RpcClient>,
client: Mutex<RpcClient>,
verbose: bool,
output_format: &'a OutputFormat,
}
@@ -339,7 +261,7 @@ impl CommandHandler<'_> {
Ok(self
.client
.lock()
.await
.unwrap()
.scoped_client::<PeerManageRpcClientFactory<BaseController>>("".to_string())
.await
.with_context(|| "failed to get peer manager client")?)
@@ -351,7 +273,7 @@ impl CommandHandler<'_> {
Ok(self
.client
.lock()
.await
.unwrap()
.scoped_client::<ConnectorManageRpcClientFactory<BaseController>>("".to_string())
.await
.with_context(|| "failed to get connector manager client")?)
@@ -363,7 +285,7 @@ impl CommandHandler<'_> {
Ok(self
.client
.lock()
.await
.unwrap()
.scoped_client::<MappedListenerManageRpcClientFactory<BaseController>>("".to_string())
.await
.with_context(|| "failed to get mapped listener manager client")?)
@@ -375,7 +297,7 @@ impl CommandHandler<'_> {
Ok(self
.client
.lock()
.await
.unwrap()
.scoped_client::<PeerCenterRpcClientFactory<BaseController>>("".to_string())
.await
.with_context(|| "failed to get peer center client")?)
@@ -387,7 +309,7 @@ impl CommandHandler<'_> {
Ok(self
.client
.lock()
.await
.unwrap()
.scoped_client::<VpnPortalRpcClientFactory<BaseController>>("".to_string())
.await
.with_context(|| "failed to get vpn portal client")?)
@@ -399,7 +321,7 @@ impl CommandHandler<'_> {
Ok(self
.client
.lock()
.await
.unwrap()
.scoped_client::<AclManageRpcClientFactory<BaseController>>("".to_string())
.await
.with_context(|| "failed to get acl manager client")?)
@@ -412,36 +334,12 @@ impl CommandHandler<'_> {
Ok(self
.client
.lock()
.await
.unwrap()
.scoped_client::<TcpProxyRpcClientFactory<BaseController>>(transport_type.to_string())
.await
.with_context(|| "failed to get vpn portal client")?)
}
async fn get_port_forward_manager_client(
&self,
) -> Result<Box<dyn PortForwardManageRpc<Controller = BaseController>>, Error> {
Ok(self
.client
.lock()
.await
.scoped_client::<PortForwardManageRpcClientFactory<BaseController>>("".to_string())
.await
.with_context(|| "failed to get port forward manager client")?)
}
async fn get_stats_client(
&self,
) -> Result<Box<dyn StatsRpc<Controller = BaseController>>, Error> {
Ok(self
.client
.lock()
.await
.scoped_client::<StatsRpcClientFactory<BaseController>>("".to_string())
.await
.with_context(|| "failed to get stats client")?)
}
async fn list_peers(&self) -> Result<ListPeerResponse, Error> {
let client = self.get_peer_manager_client().await?;
let request = ListPeerRequest::default();
@@ -573,18 +471,6 @@ impl CommandHandler<'_> {
items.push(p.into());
}
// Sort items by ipv4 (using IpAddr for proper numeric comparison) first, then by hostname
items.sort_by(|a, b| {
use std::net::{IpAddr, Ipv4Addr};
use std::str::FromStr;
let a_ip = IpAddr::from_str(&a.ipv4).unwrap_or(IpAddr::V4(Ipv4Addr::UNSPECIFIED));
let b_ip = IpAddr::from_str(&b.ipv4).unwrap_or(IpAddr::V4(Ipv4Addr::UNSPECIFIED));
match a_ip.cmp(&b_ip) {
std::cmp::Ordering::Equal => a.hostname.cmp(&b.hostname),
other => other,
}
});
print_output(&items, self.output_format)?;
Ok(())
@@ -865,7 +751,7 @@ impl CommandHandler<'_> {
Ok(())
}
async fn handle_mapped_listener_add(&self, url: &str) -> Result<(), Error> {
async fn handle_mapped_listener_add(&self, url: &String) -> Result<(), Error> {
let url = Self::mapped_listener_validate_url(url)?;
let client = self.get_mapped_listener_manager_client().await?;
let request = ManageMappedListenerRequest {
@@ -878,7 +764,7 @@ impl CommandHandler<'_> {
Ok(())
}
async fn handle_mapped_listener_remove(&self, url: &str) -> Result<(), Error> {
async fn handle_mapped_listener_remove(&self, url: &String) -> Result<(), Error> {
let url = Self::mapped_listener_validate_url(url)?;
let client = self.get_mapped_listener_manager_client().await?;
let request = ManageMappedListenerRequest {
@@ -891,7 +777,7 @@ impl CommandHandler<'_> {
Ok(())
}
fn mapped_listener_validate_url(url: &str) -> Result<url::Url, Error> {
fn mapped_listener_validate_url(url: &String) -> Result<url::Url, Error> {
let url = url::Url::parse(url)?;
if url.scheme() != "tcp" && url.scheme() != "udp" {
return Err(anyhow::anyhow!(
@@ -902,264 +788,6 @@ impl CommandHandler<'_> {
}
Ok(url)
}
async fn handle_port_forward_add(
&self,
protocol: &str,
bind_addr: &str,
dst_addr: &str,
) -> Result<(), Error> {
let bind_addr: std::net::SocketAddr = bind_addr
.parse()
.with_context(|| format!("Invalid bind address: {}", bind_addr))?;
let dst_addr: std::net::SocketAddr = dst_addr
.parse()
.with_context(|| format!("Invalid destination address: {}", dst_addr))?;
if protocol != "tcp" && protocol != "udp" {
return Err(anyhow::anyhow!("Protocol must be 'tcp' or 'udp'"));
}
let client = self.get_port_forward_manager_client().await?;
let request = AddPortForwardRequest {
cfg: Some(
PortForwardConfig {
proto: protocol.to_string(),
bind_addr,
dst_addr,
}
.into(),
),
};
client
.add_port_forward(BaseController::default(), request)
.await?;
println!(
"Port forward rule added: {} {} -> {}",
protocol, bind_addr, dst_addr
);
Ok(())
}
async fn handle_port_forward_remove(
&self,
protocol: &str,
bind_addr: &str,
dst_addr: Option<&str>,
) -> Result<(), Error> {
let bind_addr: std::net::SocketAddr = bind_addr
.parse()
.with_context(|| format!("Invalid bind address: {}", bind_addr))?;
if protocol != "tcp" && protocol != "udp" {
return Err(anyhow::anyhow!("Protocol must be 'tcp' or 'udp'"));
}
let client = self.get_port_forward_manager_client().await?;
let request = RemovePortForwardRequest {
cfg: Some(
PortForwardConfig {
proto: protocol.to_string(),
bind_addr,
dst_addr: dst_addr
.map(|s| s.parse::<SocketAddr>().unwrap())
.unwrap_or("0.0.0.0:0".parse::<SocketAddr>().unwrap()),
}
.into(),
),
};
client
.remove_port_forward(BaseController::default(), request)
.await?;
println!("Port forward rule removed: {} {}", protocol, bind_addr);
Ok(())
}
async fn handle_port_forward_list(&self) -> Result<(), Error> {
let client = self.get_port_forward_manager_client().await?;
let request = ListPortForwardRequest::default();
let response = client
.list_port_forward(BaseController::default(), request)
.await?;
if self.verbose || *self.output_format == OutputFormat::Json {
println!("{}", serde_json::to_string_pretty(&response)?);
return Ok(());
}
#[derive(tabled::Tabled, serde::Serialize)]
struct PortForwardTableItem {
protocol: String,
bind_addr: String,
dst_addr: String,
}
let items: Vec<PortForwardTableItem> = response
.cfgs
.into_iter()
.map(|rule| PortForwardTableItem {
protocol: format!(
"{:?}",
SocketType::try_from(rule.socket_type).unwrap_or(SocketType::Tcp)
),
bind_addr: rule
.bind_addr
.map(|addr| addr.to_string())
.unwrap_or_default(),
dst_addr: rule
.dst_addr
.map(|addr| addr.to_string())
.unwrap_or_default(),
})
.collect();
print_output(&items, self.output_format)?;
Ok(())
}
async fn handle_whitelist_set_tcp(&self, ports: &str) -> Result<(), Error> {
let tcp_ports = Self::parse_port_list(ports)?;
let client = self.get_acl_manager_client().await?;
// Get current UDP ports to preserve them
let current = client
.get_whitelist(BaseController::default(), GetWhitelistRequest::default())
.await?;
let request = SetWhitelistRequest {
tcp_ports,
udp_ports: current.udp_ports,
};
client
.set_whitelist(BaseController::default(), request)
.await?;
println!("TCP whitelist updated: {}", ports);
Ok(())
}
async fn handle_whitelist_set_udp(&self, ports: &str) -> Result<(), Error> {
let udp_ports = Self::parse_port_list(ports)?;
let client = self.get_acl_manager_client().await?;
// Get current TCP ports to preserve them
let current = client
.get_whitelist(BaseController::default(), GetWhitelistRequest::default())
.await?;
let request = SetWhitelistRequest {
tcp_ports: current.tcp_ports,
udp_ports,
};
client
.set_whitelist(BaseController::default(), request)
.await?;
println!("UDP whitelist updated: {}", ports);
Ok(())
}
async fn handle_whitelist_clear_tcp(&self) -> Result<(), Error> {
let client = self.get_acl_manager_client().await?;
// Get current UDP ports to preserve them
let current = client
.get_whitelist(BaseController::default(), GetWhitelistRequest::default())
.await?;
let request = SetWhitelistRequest {
tcp_ports: vec![],
udp_ports: current.udp_ports,
};
client
.set_whitelist(BaseController::default(), request)
.await?;
println!("TCP whitelist cleared");
Ok(())
}
async fn handle_whitelist_clear_udp(&self) -> Result<(), Error> {
let client = self.get_acl_manager_client().await?;
// Get current TCP ports to preserve them
let current = client
.get_whitelist(BaseController::default(), GetWhitelistRequest::default())
.await?;
let request = SetWhitelistRequest {
tcp_ports: current.tcp_ports,
udp_ports: vec![],
};
client
.set_whitelist(BaseController::default(), request)
.await?;
println!("UDP whitelist cleared");
Ok(())
}
async fn handle_whitelist_show(&self) -> Result<(), Error> {
let client = self.get_acl_manager_client().await?;
let request = GetWhitelistRequest::default();
let response = client
.get_whitelist(BaseController::default(), request)
.await?;
if self.verbose || *self.output_format == OutputFormat::Json {
println!("{}", serde_json::to_string_pretty(&response)?);
return Ok(());
}
println!(
"TCP Whitelist: {}",
if response.tcp_ports.is_empty() {
"None".to_string()
} else {
response.tcp_ports.join(", ")
}
);
println!(
"UDP Whitelist: {}",
if response.udp_ports.is_empty() {
"None".to_string()
} else {
response.udp_ports.join(", ")
}
);
Ok(())
}
fn parse_port_list(ports_str: &str) -> Result<Vec<String>, Error> {
let mut ports = Vec::new();
for port_spec in ports_str.split(',') {
let port_spec = port_spec.trim();
if port_spec.contains('-') {
// Handle port range
let parts: Vec<&str> = port_spec.split('-').collect();
if parts.len() != 2 {
return Err(anyhow::anyhow!("Invalid port range: {}", port_spec));
}
let start: u16 = parts[0]
.parse()
.with_context(|| format!("Invalid start port: {}", parts[0]))?;
let end: u16 = parts[1]
.parse()
.with_context(|| format!("Invalid end port: {}", parts[1]))?;
if start > end {
return Err(anyhow::anyhow!("Invalid port range: start > end"));
}
ports.push(format!("{}-{}", start, end));
} else {
// Handle single port
let port: u16 = port_spec
.parse()
.with_context(|| format!("Invalid port number: {}", port_spec))?;
ports.push(port.to_string());
}
}
Ok(ports)
}
}
#[derive(Debug)]
@@ -1457,7 +1085,7 @@ async fn main() -> Result<(), Error> {
.unwrap(),
));
let handler = CommandHandler {
client: tokio::sync::Mutex::new(client),
client: Mutex::new(client),
verbose: cli.verbose,
output_format: &cli.output_format,
};
@@ -1715,10 +1343,16 @@ async fn main() -> Result<(), Error> {
format!("{:?}", stun_info.udp_nat_type()).as_str(),
]);
ip_list.interface_ipv4s.iter().for_each(|ip| {
builder.push_record(vec!["Interface IPv4", ip.to_string().as_str()]);
builder.push_record(vec![
"Interface IPv4",
format!("{}", ip.to_string()).as_str(),
]);
});
ip_list.interface_ipv6s.iter().for_each(|ip| {
builder.push_record(vec!["Interface IPv6", ip.to_string().as_str()]);
builder.push_record(vec![
"Interface IPv6",
format!("{}", ip.to_string()).as_str(),
]);
});
for (idx, l) in node_info.listeners.iter().enumerate() {
if l.starts_with("ring") {
@@ -1860,109 +1494,6 @@ async fn main() -> Result<(), Error> {
handler.handle_acl_stats().await?;
}
},
SubCommand::PortForward(port_forward_args) => match &port_forward_args.sub_command {
Some(PortForwardSubCommand::Add {
protocol,
bind_addr,
dst_addr,
}) => {
handler
.handle_port_forward_add(protocol, bind_addr, dst_addr)
.await?;
}
Some(PortForwardSubCommand::Remove {
protocol,
bind_addr,
dst_addr,
}) => {
handler
.handle_port_forward_remove(protocol, bind_addr, dst_addr.as_deref())
.await?;
}
Some(PortForwardSubCommand::List) | None => {
handler.handle_port_forward_list().await?;
}
},
SubCommand::Whitelist(whitelist_args) => match &whitelist_args.sub_command {
Some(WhitelistSubCommand::SetTcp { ports }) => {
handler.handle_whitelist_set_tcp(ports).await?;
}
Some(WhitelistSubCommand::SetUdp { ports }) => {
handler.handle_whitelist_set_udp(ports).await?;
}
Some(WhitelistSubCommand::ClearTcp) => {
handler.handle_whitelist_clear_tcp().await?;
}
Some(WhitelistSubCommand::ClearUdp) => {
handler.handle_whitelist_clear_udp().await?;
}
Some(WhitelistSubCommand::Show) | None => {
handler.handle_whitelist_show().await?;
}
},
SubCommand::Stats(stats_args) => match &stats_args.sub_command {
Some(StatsSubCommand::Show) | None => {
let client = handler.get_stats_client().await?;
let request = GetStatsRequest {};
let response = client.get_stats(BaseController::default(), request).await?;
if cli.output_format == OutputFormat::Json {
println!("{}", serde_json::to_string_pretty(&response.metrics)?);
} else {
#[derive(tabled::Tabled, serde::Serialize)]
struct StatsTableRow {
#[tabled(rename = "Metric Name")]
name: String,
#[tabled(rename = "Value")]
value: String,
#[tabled(rename = "Labels")]
labels: String,
}
let table_rows: Vec<StatsTableRow> = response
.metrics
.iter()
.map(|metric| {
let labels_str = if metric.labels.is_empty() {
"-".to_string()
} else {
metric
.labels
.iter()
.map(|(k, v)| format!("{}={}", k, v))
.collect::<Vec<_>>()
.join(", ")
};
let formatted_value = if metric.name.contains("bytes") {
format_size(metric.value, humansize::BINARY)
} else if metric.name.contains("duration") {
format!("{} ms", metric.value)
} else {
metric.value.to_string()
};
StatsTableRow {
name: metric.name.clone(),
value: formatted_value,
labels: labels_str,
}
})
.collect();
print_output(&table_rows, &cli.output_format)?
}
}
Some(StatsSubCommand::Prometheus) => {
let client = handler.get_stats_client().await?;
let request = GetPrometheusStatsRequest {};
let response = client
.get_prometheus_stats(BaseController::default(), request)
.await?;
println!("{}", response.prometheus_text);
}
},
SubCommand::GenAutocomplete { shell } => {
let mut cmd = Cli::command();
easytier::print_completions(shell, &mut cmd, "easytier-cli");

View File

@@ -4,7 +4,7 @@
extern crate rust_i18n;
use std::{
net::{IpAddr, SocketAddr},
net::{Ipv4Addr, SocketAddr},
path::PathBuf,
process::ExitCode,
sync::Arc,
@@ -18,9 +18,8 @@ use clap_complete::Shell;
use easytier::{
common::{
config::{
get_avaliable_encrypt_methods, ConfigLoader, ConsoleLoggerConfig, FileLoggerConfig,
LoggingConfigLoader, NetworkIdentity, PeerConfig, PortForwardConfig, TomlConfigLoader,
VpnPortalConfig,
ConfigLoader, ConsoleLoggerConfig, FileLoggerConfig, LoggingConfigLoader,
NetworkIdentity, PeerConfig, PortForwardConfig, TomlConfigLoader, VpnPortalConfig,
},
constants::EASYTIER_VERSION,
global_ctx::GlobalCtx,
@@ -30,7 +29,10 @@ use easytier::{
connector::create_connector_by_url,
instance_manager::NetworkInstanceManager,
launcher::{add_proxy_network_to_config, ConfigSource},
proto::common::{CompressionAlgoPb, NatType},
proto::{
acl::{Acl, AclV1, Action, Chain, ChainType, Protocol, Rule},
common::{CompressionAlgoPb, NatType},
},
tunnel::{IpVersion, PROTO_PORT_OFFSET},
utils::{init_logger, setup_panic_handler},
web_client,
@@ -53,15 +55,10 @@ use jemalloc_ctl::{epoch, stats, Access as _, AsName as _};
#[global_allocator]
static ALLOC: jemallocator::Jemalloc = jemallocator::Jemalloc;
#[cfg(feature = "jemalloc-prof")]
#[allow(non_upper_case_globals)]
#[export_name = "malloc_conf"]
pub static malloc_conf: &[u8] = b"prof:true,prof_active:true,lg_prof_sample:19\0";
fn set_prof_active(_active: bool) {
#[cfg(feature = "jemalloc-prof")]
{
const PROF_ACTIVE: &[u8] = b"prof.active\0";
const PROF_ACTIVE: &'static [u8] = b"prof.active\0";
let name = PROF_ACTIVE.name();
name.write(_active).expect("Should succeed to set prof");
}
@@ -70,7 +67,7 @@ fn set_prof_active(_active: bool) {
fn dump_profile(_cur_allocated: usize) {
#[cfg(feature = "jemalloc-prof")]
{
const PROF_DUMP: &[u8] = b"prof.dump\0";
const PROF_DUMP: &'static [u8] = b"prof.dump\0";
static mut PROF_DUMP_FILE_NAME: [u8; 128] = [0; 128];
let file_name_str = format!(
"profile-{}-{}.out",
@@ -284,15 +281,6 @@ struct NetworkOptions {
)]
disable_encryption: Option<bool>,
#[arg(
long,
env = "ET_ENCRYPTION_ALGORITHM",
help = t!("core_clap.encryption_algorithm").to_string(),
default_value = "aes-gcm",
value_parser = get_avaliable_encrypt_methods()
)]
encryption_algorithm: Option<String>,
#[arg(
long,
env = "ET_MULTI_THREAD",
@@ -348,7 +336,7 @@ struct NetworkOptions {
help = t!("core_clap.exit_nodes").to_string(),
num_args = 0..
)]
exit_nodes: Vec<IpAddr>,
exit_nodes: Vec<Ipv4Addr>,
#[arg(
long,
@@ -525,7 +513,7 @@ struct NetworkOptions {
#[arg(
long,
value_delimiter = ',',
help = t!("core_clap.tcp_whitelist").to_string(),
help = "TCP port whitelist. Supports single ports (80) and ranges (8000-9000)",
num_args = 0..
)]
tcp_whitelist: Vec<String>,
@@ -533,28 +521,10 @@ struct NetworkOptions {
#[arg(
long,
value_delimiter = ',',
help = t!("core_clap.udp_whitelist").to_string(),
help = "UDP port whitelist. Supports single ports (53) and ranges (5000-6000)",
num_args = 0..
)]
udp_whitelist: Vec<String>,
#[arg(
long,
env = "ET_DISABLE_RELAY_KCP",
help = t!("core_clap.disable_relay_kcp").to_string(),
num_args = 0..=1,
default_missing_value = "true"
)]
disable_relay_kcp: Option<bool>,
#[arg(
long,
env = "ET_ENABLE_RELAY_FOREIGN_NETWORK_KCP",
help = t!("core_clap.enable_relay_foreign_network_kcp").to_string(),
num_args = 0..=1,
default_missing_value = "true"
)]
enable_relay_foreign_network_kcp: Option<bool>,
}
#[derive(Parser, Debug)]
@@ -652,6 +622,117 @@ impl NetworkOptions {
false
}
fn parse_port_list(port_list: &[String]) -> anyhow::Result<Vec<String>> {
let mut ports = Vec::new();
for port_spec in port_list {
if port_spec.contains('-') {
// Handle port range like "8000-9000"
let parts: Vec<&str> = port_spec.split('-').collect();
if parts.len() != 2 {
return Err(anyhow::anyhow!("Invalid port range format: {}", port_spec));
}
let start: u16 = parts[0]
.parse()
.with_context(|| format!("Invalid start port in range: {}", port_spec))?;
let end: u16 = parts[1]
.parse()
.with_context(|| format!("Invalid end port in range: {}", port_spec))?;
if start > end {
return Err(anyhow::anyhow!(
"Start port must be <= end port in range: {}",
port_spec
));
}
// Add individual ports in the range
for port in start..=end {
ports.push(port.to_string());
}
} else {
// Handle single port
let port: u16 = port_spec
.parse()
.with_context(|| format!("Invalid port number: {}", port_spec))?;
ports.push(port.to_string());
}
}
Ok(ports)
}
fn generate_acl_from_whitelists(&self) -> anyhow::Result<Option<Acl>> {
if self.tcp_whitelist.is_empty() && self.udp_whitelist.is_empty() {
return Ok(None);
}
let mut acl = Acl {
acl_v1: Some(AclV1 { chains: vec![] }),
};
let acl_v1 = acl.acl_v1.as_mut().unwrap();
// Create inbound chain for whitelist rules
let mut inbound_chain = Chain {
name: "inbound_whitelist".to_string(),
chain_type: ChainType::Inbound as i32,
description: "Auto-generated inbound whitelist from CLI".to_string(),
enabled: true,
rules: vec![],
default_action: Action::Drop as i32, // Default deny
};
let mut rule_priority = 1000u32;
// Add TCP whitelist rules
if !self.tcp_whitelist.is_empty() {
let tcp_ports = Self::parse_port_list(&self.tcp_whitelist)?;
let tcp_rule = Rule {
name: "tcp_whitelist".to_string(),
description: "Auto-generated TCP whitelist rule".to_string(),
priority: rule_priority,
enabled: true,
protocol: Protocol::Tcp as i32,
ports: tcp_ports,
source_ips: vec![],
destination_ips: vec![],
source_ports: vec![],
action: Action::Allow as i32,
rate_limit: 0,
burst_limit: 0,
stateful: true,
};
inbound_chain.rules.push(tcp_rule);
rule_priority -= 1;
}
// Add UDP whitelist rules
if !self.udp_whitelist.is_empty() {
let udp_ports = Self::parse_port_list(&self.udp_whitelist)?;
let udp_rule = Rule {
name: "udp_whitelist".to_string(),
description: "Auto-generated UDP whitelist rule".to_string(),
priority: rule_priority,
enabled: true,
protocol: Protocol::Udp as i32,
ports: udp_ports,
source_ips: vec![],
destination_ips: vec![],
source_ports: vec![],
action: Action::Allow as i32,
rate_limit: 0,
burst_limit: 0,
stateful: false,
};
inbound_chain.rules.push(udp_rule);
}
acl_v1.chains.push(inbound_chain);
Ok(Some(acl))
}
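
The generated chain is default-deny: traffic either matches the TCP or UDP allow rule or falls through to Drop. The code above assigns descending priorities to the two rules; that the processor evaluates strictly by priority is an assumption here, so the following is only a toy model with stand-in types, not the crate's proto::acl types or its AclProcessor:

// Toy model of a default-deny inbound chain with per-protocol allow rules:
// rules are checked in (assumed) priority order, and the chain's default
// action applies when nothing matches.
#[derive(Clone, Copy, PartialEq, Debug)]
enum Action { Allow, Drop }

#[derive(Clone, Copy, PartialEq)]
enum Proto { Tcp, Udp }

struct Rule { proto: Proto, ports: Vec<u16>, action: Action, priority: u32 }

struct Chain { rules: Vec<Rule>, default_action: Action }

impl Chain {
    fn evaluate(&self, proto: Proto, dst_port: u16) -> Action {
        let mut rules: Vec<&Rule> = self.rules.iter().collect();
        rules.sort_by(|a, b| b.priority.cmp(&a.priority)); // higher priority first
        for r in rules {
            if r.proto == proto && r.ports.contains(&dst_port) {
                return r.action;
            }
        }
        self.default_action
    }
}

fn main() {
    let chain = Chain {
        rules: vec![
            Rule { proto: Proto::Tcp, ports: vec![80, 443], action: Action::Allow, priority: 1000 },
            Rule { proto: Proto::Udp, ports: vec![53], action: Action::Allow, priority: 999 },
        ],
        default_action: Action::Drop,
    };
    assert_eq!(chain.evaluate(Proto::Tcp, 443), Action::Allow);
    assert_eq!(chain.evaluate(Proto::Udp, 5353), Action::Drop); // not whitelisted
}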
fn merge_into(&self, cfg: &mut TomlConfigLoader) -> anyhow::Result<()> {
if self.hostname.is_some() {
cfg.set_hostname(self.hostname.clone());
@@ -701,7 +782,7 @@ impl NetworkOptions {
.map(|s| s.parse().unwrap())
.collect(),
);
} else if cfg.get_listeners().is_none() {
} else if cfg.get_listeners() == None {
cfg.set_listeners(
Cli::parse_listeners(false, vec!["11010".to_string()])?
.into_iter()
@@ -740,7 +821,7 @@ impl NetworkOptions {
}
for n in self.proxy_networks.iter() {
add_proxy_network_to_config(n, cfg)?;
add_proxy_network_to_config(n, &cfg)?;
}
let rpc_portal = if let Some(r) = &self.rpc_portal {
@@ -754,9 +835,9 @@ impl NetworkOptions {
cfg.set_rpc_portal(rpc_portal);
if let Some(rpc_portal_whitelist) = &self.rpc_portal_whitelist {
let mut whitelist = cfg.get_rpc_portal_whitelist().unwrap_or_default();
let mut whitelist = cfg.get_rpc_portal_whitelist().unwrap_or_else(|| Vec::new());
for cidr in rpc_portal_whitelist {
whitelist.push(*cidr);
whitelist.push((*cidr).clone());
}
cfg.set_rpc_portal_whitelist(Some(whitelist));
}
@@ -825,18 +906,18 @@ impl NetworkOptions {
port_forward.port().expect("local bind port is missing")
)
.parse()
.unwrap_or_else(|_| panic!("failed to parse local bind addr {}", example_str));
.expect(format!("failed to parse local bind addr {}", example_str).as_str());
let dst_addr = port_forward
let dst_addr = format!(
"{}",
port_forward
.path_segments()
.unwrap_or_else(|| panic!("remote destination addr is missing {}", example_str))
.expect(format!("remote destination addr is missing {}", example_str).as_str())
.next()
.unwrap_or_else(|| panic!("remote destination addr is missing {}", example_str))
.to_string()
.expect(format!("remote destination addr is missing {}", example_str).as_str())
)
.parse()
.unwrap_or_else(|_| {
panic!("failed to parse remote destination addr {}", example_str)
});
.expect(format!("failed to parse remote destination addr {}", example_str).as_str());
let port_forward_item = PortForwardConfig {
bind_addr,
@@ -856,9 +937,6 @@ impl NetworkOptions {
if let Some(v) = self.disable_encryption {
f.enable_encryption = !v;
}
if let Some(algorithm) = &self.encryption_algorithm {
f.encryption_algorithm = algorithm.clone();
}
if let Some(v) = self.disable_ipv6 {
f.enable_ipv6 = !v;
}
@@ -906,23 +984,16 @@ impl NetworkOptions {
.foreign_relay_bps_limit
.unwrap_or(f.foreign_relay_bps_limit);
f.multi_thread_count = self.multi_thread_count.unwrap_or(f.multi_thread_count);
f.disable_relay_kcp = self.disable_relay_kcp.unwrap_or(f.disable_relay_kcp);
f.enable_relay_foreign_network_kcp = self
.enable_relay_foreign_network_kcp
.unwrap_or(f.enable_relay_foreign_network_kcp);
cfg.set_flags(f);
if !self.exit_nodes.is_empty() {
cfg.set_exit_nodes(self.exit_nodes.clone());
}
let mut old_tcp_whitelist = cfg.get_tcp_whitelist();
old_tcp_whitelist.extend(self.tcp_whitelist.clone());
cfg.set_tcp_whitelist(old_tcp_whitelist);
let mut old_udp_whitelist = cfg.get_udp_whitelist();
old_udp_whitelist.extend(self.udp_whitelist.clone());
cfg.set_udp_whitelist(old_udp_whitelist);
// Handle port whitelists by generating ACL configuration
if let Some(acl) = self.generate_acl_from_whitelists()? {
cfg.set_acl(Some(acl));
}
Ok(())
}
@@ -1141,7 +1212,7 @@ async fn run_main(cli: Cli) -> anyhow::Result<()> {
let mut cfg = TomlConfigLoader::default();
cli.network_options
.merge_into(&mut cfg)
.with_context(|| "failed to create config from cli".to_string())?;
.with_context(|| format!("failed to create config from cli"))?;
println!("Starting easytier from cli with config:");
println!("############### TOML ###############\n");
println!("{}", cfg.dump());
@@ -1151,14 +1222,6 @@ async fn run_main(cli: Cli) -> anyhow::Result<()> {
tokio::select! {
_ = manager.wait() => {
let infos = manager.collect_network_infos()?;
let errs = infos
.into_values()
.filter_map(|info| info.error_msg)
.collect::<Vec<_>>();
if !errs.is_empty() {
return Err(anyhow::anyhow!("some instances stopped with errors"));
}
}
_ = tokio::signal::ctrl_c() => {
println!("ctrl-c received, exiting...");

View File

@@ -294,7 +294,7 @@ pub fn new_udp_header<T: ToTargetAddr>(target_addr: T) -> Result<Vec<u8>> {
}
/// Parse data from UDP client on raw buffer, return (frag, target_addr, payload).
pub async fn parse_udp_request(mut req: &[u8]) -> Result<(u8, TargetAddr, &[u8])> {
pub async fn parse_udp_request<'a>(mut req: &'a [u8]) -> Result<(u8, TargetAddr, &'a [u8])> {
let rsv = read_exact!(req, [0u8; 2]).context("Malformed request")?;
if !rsv.eq(&[0u8; 2]) {

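The parser above starts from the RFC 1928 UDP request header (RSV, FRAG, ATYP, DST.ADDR, DST.PORT, DATA); the crate's version also handles domain and IPv6 targets via TargetAddr. A minimal standalone parser for just the IPv4 case, independent of the fast_socks5 helpers, for orientation:

use std::net::{Ipv4Addr, SocketAddrV4};

// Layout per RFC 1928:
//   +-----+------+------+----------+----------+----------+
//   | RSV | FRAG | ATYP | DST.ADDR | DST.PORT |   DATA   |
//   +-----+------+------+----------+----------+----------+
fn parse_udp_header(buf: &[u8]) -> Option<(u8, SocketAddrV4, &[u8])> {
    if buf.len() < 10 || buf[0] != 0 || buf[1] != 0 {
        return None; // RSV must be zero
    }
    let frag = buf[2];
    if buf[3] != 0x01 {
        return None; // only ATYP = IPv4 handled in this sketch
    }
    let ip = Ipv4Addr::new(buf[4], buf[5], buf[6], buf[7]);
    let port = u16::from_be_bytes([buf[8], buf[9]]);
    Some((frag, SocketAddrV4::new(ip, port), &buf[10..]))
}

fn main() {
    // RSV=0, FRAG=0, ATYP=IPv4, 127.0.0.1:8080, payload "hi"
    let datagram = [0, 0, 0, 0x01, 127, 0, 0, 1, 0x1f, 0x90, b'h', b'i'];
    let (frag, dst, payload) = parse_udp_header(&datagram).unwrap();
    assert_eq!(frag, 0);
    assert_eq!(dst, SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 8080));
    assert_eq!(payload, b"hi");
}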
View File

@@ -455,16 +455,16 @@ impl<T: AsyncRead + AsyncWrite + Unpin, A: Authentication, C: AsyncTcpConnector>
info!("User logged successfully.");
Ok(credentials)
return Ok(credentials);
} else {
self.inner
.write_all(&[1, consts::SOCKS5_AUTH_METHOD_NOT_ACCEPTABLE])
.await
.context("Can't reply with auth method not acceptable.")?;
Err(SocksError::AuthenticationRejected(
"Authentication, rejected.".to_string(),
))
return Err(SocksError::AuthenticationRejected(format!(
"Authentication, rejected."
)));
}
}

View File

@@ -72,7 +72,10 @@ impl TargetAddr {
}
pub fn is_ip(&self) -> bool {
matches!(self, TargetAddr::Ip(_))
match self {
TargetAddr::Ip(_) => true,
_ => false,
}
}
pub fn is_domain(&self) -> bool {
@@ -101,7 +104,7 @@ impl TargetAddr {
}
TargetAddr::Domain(ref domain, port) => {
debug!("TargetAddr::Domain");
if domain.len() > u8::MAX as usize {
if domain.len() > u8::max_value() as usize {
return Err(SocksError::ExceededMaxDomainLen(domain.len()).into());
}
buf.extend_from_slice(&[consts::SOCKS5_ADDR_TYPE_DOMAIN_NAME, domain.len() as u8]);
@@ -122,7 +125,8 @@ impl std::net::ToSocketAddrs for TargetAddr {
fn to_socket_addrs(&self) -> io::Result<IntoIter<SocketAddr>> {
match *self {
TargetAddr::Ip(addr) => Ok(vec![addr].into_iter()),
TargetAddr::Domain(_, _) => Err(io::Error::other(
TargetAddr::Domain(_, _) => Err(io::Error::new(
io::ErrorKind::Other,
"Domain name has to be explicitly resolved, please use TargetAddr::resolve_dns().",
)),
}
@@ -145,7 +149,7 @@ pub trait ToTargetAddr {
fn to_target_addr(&self) -> io::Result<TargetAddr>;
}
impl ToTargetAddr for (&str, u16) {
impl<'a> ToTargetAddr for (&'a str, u16) {
fn to_target_addr(&self) -> io::Result<TargetAddr> {
// try to parse as an IP first
if let Ok(addr) = self.0.parse::<Ipv4Addr>() {

View File

@@ -23,7 +23,6 @@ use tracing::Instrument;
use crate::{
common::{error::Error, global_ctx::ArcGlobalCtx, PeerId},
gateway::ip_reassembler::ComposeIpv4PacketArgs,
peers::{peer_manager::PeerManager, PeerPacketFilter},
tunnel::packet_def::{PacketType, ZCPacket},
};
@@ -119,7 +118,7 @@ fn socket_recv_loop(
}
};
if len == 0 {
if len <= 0 {
tracing::error!("recv empty packet, len: {}", len);
return;
}
@@ -159,18 +158,20 @@ fn socket_recv_loop(
let payload_len = len - ipv4_packet.get_header_length() as usize * 4;
let id = ipv4_packet.get_identification();
let _ = compose_ipv4_packet(
ComposeIpv4PacketArgs {
buf: &mut buf[..],
src_v4: &v.mapped_dst_ip,
dst_v4: &dest_ip,
next_protocol: IpNextHeaderProtocols::Icmp,
&mut buf[..],
&v.mapped_dst_ip,
&dest_ip,
IpNextHeaderProtocols::Icmp,
payload_len,
payload_mtu: 1200,
ip_id: id,
},
1200,
id,
|buf| {
let mut p = ZCPacket::new_with_payload(buf);
p.fill_peer_manager_hdr(v.my_peer_id, v.src_peer_id, PacketType::Data as u8);
p.fill_peer_manager_hdr(
v.my_peer_id.into(),
v.src_peer_id.into(),
PacketType::Data as u8,
);
p.mut_peer_manager_header().unwrap().set_no_proxy(true);
if let Err(e) = sender.send(p) {
@@ -185,7 +186,7 @@ fn socket_recv_loop(
#[async_trait::async_trait]
impl PeerPacketFilter for IcmpProxy {
async fn try_process_packet_from_peer(&self, packet: ZCPacket) -> Option<ZCPacket> {
if self.try_handle_peer_packet(&packet).await.is_some() {
if let Some(_) = self.try_handle_peer_packet(&packet).await {
return None;
} else {
return Some(packet);
@@ -305,7 +306,7 @@ impl IcmpProxy {
return Err(anyhow::anyhow!("peer manager is gone").into());
};
pm.add_packet_process_pipeline(Box::new(self.clone())).await;
pm.add_packet_process_pipeline(self.clone()).await;
Ok(())
}
@@ -319,7 +320,10 @@ impl IcmpProxy {
.unwrap()
.as_ref()
.with_context(|| "icmp socket not created")?
.send_to(icmp_packet.packet(), &SocketAddrV4::new(dst_ip, 0).into())?;
.send_to(
icmp_packet.packet(),
&SocketAddrV4::new(dst_ip.into(), 0).into(),
)?;
Ok(())
}
@@ -345,15 +349,13 @@ impl IcmpProxy {
let len = buf.len() - 20;
let _ = compose_ipv4_packet(
ComposeIpv4PacketArgs {
buf: &mut buf[..],
src_v4: src_ip,
dst_v4: dst_ip,
next_protocol: IpNextHeaderProtocols::Icmp,
payload_len: len,
payload_mtu: 1200,
ip_id: rand::random(),
},
&mut buf[..],
src_ip,
dst_ip,
IpNextHeaderProtocols::Icmp,
len,
1200,
rand::random(),
|buf| {
let mut packet = ZCPacket::new_with_payload(buf);
packet.fill_peer_manager_hdr(src_peer_id, dst_peer_id, PacketType::Data as u8);
@@ -385,7 +387,7 @@ impl IcmpProxy {
return None;
};
let ipv4 = Ipv4Packet::new(packet.payload())?;
let ipv4 = Ipv4Packet::new(&packet.payload())?;
if ipv4.get_version() != 4 || ipv4.get_next_level_protocol() != IpNextHeaderProtocols::Icmp
{
@@ -394,17 +396,17 @@ impl IcmpProxy {
let mut real_dst_ip = ipv4.get_destination();
if !(self
if !self
.cidr_set
.contains_v4(ipv4.get_destination(), &mut real_dst_ip)
|| is_exit_node
|| (self.global_ctx.no_tun()
&& !is_exit_node
&& !(self.global_ctx.no_tun()
&& Some(ipv4.get_destination())
== self
.global_ctx
.get_ipv4()
.as_ref()
.map(cidr::Ipv4Inet::address)))
.map(cidr::Ipv4Inet::address))
{
return None;
}
@@ -414,10 +416,12 @@ impl IcmpProxy {
resembled_buf =
self.ip_resemmbler
.add_fragment(ipv4.get_source(), ipv4.get_destination(), &ipv4);
resembled_buf.as_ref()?;
if resembled_buf.is_none() {
return None;
};
icmp::echo_request::EchoRequestPacket::new(resembled_buf.as_ref().unwrap())?
} else {
icmp::echo_request::EchoRequestPacket::new(ipv4.payload())?
icmp::echo_request::EchoRequestPacket::new(&ipv4.payload())?
};
if icmp_packet.get_icmp_type() != IcmpTypes::EchoRequest {
@@ -480,9 +484,10 @@ impl Drop for IcmpProxy {
"dropping icmp proxy, {:?}",
self.socket.lock().unwrap().as_ref()
);
if let Some(s) = self.socket.lock().unwrap().as_ref() {
self.socket.lock().unwrap().as_ref().and_then(|s| {
tracing::info!("shutting down icmp socket");
let _ = s.shutdown(std::net::Shutdown::Both);
}
Some(())
});
}
}

View File

@@ -190,36 +190,33 @@ impl IpReassembler {
}
}
pub struct ComposeIpv4PacketArgs<'a> {
pub buf: &'a mut [u8],
pub src_v4: &'a Ipv4Addr,
pub dst_v4: &'a Ipv4Addr,
pub next_protocol: IpNextHeaderProtocol,
pub payload_len: usize,
pub payload_mtu: usize,
pub ip_id: u16,
}
// ip payload should be in buf[20..]
pub fn compose_ipv4_packet<F>(args: ComposeIpv4PacketArgs, cb: F) -> Result<(), Error>
pub fn compose_ipv4_packet<F>(
buf: &mut [u8],
src_v4: &Ipv4Addr,
dst_v4: &Ipv4Addr,
next_protocol: IpNextHeaderProtocol,
payload_len: usize,
payload_mtu: usize,
ip_id: u16,
cb: F,
) -> Result<(), Error>
where
F: Fn(&[u8]) -> Result<(), Error>,
{
let total_pieces = args.payload_len.div_ceil(args.payload_mtu);
let total_pieces = (payload_len + payload_mtu - 1) / payload_mtu;
let mut buf_offset = 0;
let mut fragment_offset = 0;
let mut cur_piece = 0;
while fragment_offset < args.payload_len {
let next_fragment_offset =
std::cmp::min(fragment_offset + args.payload_mtu, args.payload_len);
while fragment_offset < payload_len {
let next_fragment_offset = std::cmp::min(fragment_offset + payload_mtu, payload_len);
let fragment_len = next_fragment_offset - fragment_offset;
let mut ipv4_packet =
MutableIpv4Packet::new(&mut args.buf[buf_offset..buf_offset + fragment_len + 20])
.unwrap();
MutableIpv4Packet::new(&mut buf[buf_offset..buf_offset + fragment_len + 20]).unwrap();
ipv4_packet.set_version(4);
ipv4_packet.set_header_length(5);
ipv4_packet.set_total_length((fragment_len + 20) as u16);
ipv4_packet.set_identification(args.ip_id);
ipv4_packet.set_identification(ip_id);
if total_pieces > 1 {
if cur_piece != total_pieces - 1 {
ipv4_packet.set_flags(Ipv4Flags::MoreFragments);
@@ -235,9 +232,9 @@ where
ipv4_packet.set_ecn(0);
ipv4_packet.set_dscp(0);
ipv4_packet.set_ttl(32);
ipv4_packet.set_source(*args.src_v4);
ipv4_packet.set_destination(*args.dst_v4);
ipv4_packet.set_next_level_protocol(args.next_protocol);
ipv4_packet.set_source(src_v4.clone());
ipv4_packet.set_destination(dst_v4.clone());
ipv4_packet.set_next_level_protocol(next_protocol);
ipv4_packet.set_checksum(ipv4::checksum(&ipv4_packet.to_immutable()));
tracing::trace!(?ipv4_packet, "udp nat packet response send");
@@ -257,7 +254,7 @@ mod tests {
#[test]
fn resembler() {
let raw_packets = [
let raw_packets = vec![
// last packet
vec![
0x45, 0x00, 0x00, 0x1c, 0x1c, 0x46, 0x20, 0x01, 0x40, 0x06, 0xb1, 0xe6, 0xc0, 0xa8,
@@ -285,7 +282,7 @@ mod tests {
let resembler = IpReassembler::new(Duration::from_secs(1));
for (idx, raw_packet) in raw_packets.iter().enumerate() {
if let Some(packet) = Ipv4Packet::new(raw_packet) {
if let Some(packet) = Ipv4Packet::new(&raw_packet) {
let ret = resembler.add_fragment(source, destination, &packet);
if idx != 2 {
assert!(ret.is_none());

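The fragmentation loop splits the payload into ceil(payload_len / payload_mtu) pieces (payload_len.div_ceil(payload_mtu) is the same value as (payload_len + payload_mtu - 1) / payload_mtu) and writes each piece behind a fresh 20-byte IPv4 header. A small sketch of just the boundary arithmetic, with illustrative names:

fn fragment_bounds(payload_len: usize, payload_mtu: usize) -> Vec<(usize, usize)> {
    let total_pieces = payload_len.div_ceil(payload_mtu);
    let mut bounds = Vec::with_capacity(total_pieces);
    let mut offset = 0;
    while offset < payload_len {
        let end = usize::min(offset + payload_mtu, payload_len);
        bounds.push((offset, end));
        offset = end;
    }
    debug_assert_eq!(bounds.len(), total_pieces);
    bounds
}

fn main() {
    // A 2500-byte payload with 1200-byte fragments yields 3 pieces,
    // the last one carrying the 100-byte remainder.
    assert_eq!(
        fragment_bounds(2500, 1200),
        vec![(0, 1200), (1200, 2400), (2400, 2500)]
    );
}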
View File

@@ -20,12 +20,7 @@ use pnet::packet::{
Packet as _,
};
use prost::Message;
use tokio::{
io::{copy_bidirectional, AsyncRead, AsyncWrite},
select,
task::JoinSet,
};
use tokio_util::io::InspectReader;
use tokio::{io::copy_bidirectional, select, task::JoinSet};
use super::{
tcp_proxy::{NatDstConnector, NatDstTcpConnector, TcpProxy},
@@ -33,13 +28,11 @@ use super::{
};
use crate::{
common::{
acl_processor::PacketInfo,
error::Result,
global_ctx::{ArcGlobalCtx, GlobalCtx},
},
peers::{acl_filter::AclFilter, peer_manager::PeerManager, NicPacketFilter, PeerPacketFilter},
peers::{peer_manager::PeerManager, NicPacketFilter, PeerPacketFilter},
proto::{
acl::{Action, ChainType, Protocol},
cli::{
ListTcpProxyEntryRequest, ListTcpProxyEntryResponse, TcpProxyEntry, TcpProxyEntryState,
TcpProxyEntryTransportType, TcpProxyRpc,
@@ -70,9 +63,7 @@ impl PeerPacketFilter for KcpEndpointFilter {
async fn try_process_packet_from_peer(&self, packet: ZCPacket) -> Option<ZCPacket> {
let t = packet.peer_manager_header().unwrap().packet_type;
if t == PacketType::KcpSrc as u8 && !self.is_src {
// src packet, but we are dst
} else if t == PacketType::KcpDst as u8 && self.is_src {
// dst packet, but we are src
} else {
return Some(packet);
}
@@ -105,7 +96,7 @@ async fn handle_kcp_output(
PacketType::KcpDst as u8
};
let mut packet = ZCPacket::new_with_payload(&packet.inner().freeze());
packet.fill_peer_manager_hdr(peer_mgr.my_peer_id(), dst_peer_id, packet_type);
packet.fill_peer_manager_hdr(peer_mgr.my_peer_id(), dst_peer_id, packet_type as u8);
if let Err(e) = peer_mgr.send_msg(packet, dst_peer_id).await {
tracing::error!("failed to send kcp packet to peer: {:?}", e);
@@ -173,7 +164,7 @@ impl NatDstConnector for NatDstKcpConnector {
let kcp_endpoint = self.kcp_endpoint.clone();
let my_peer_id = peer_mgr.my_peer_id();
let conn_data_clone = conn_data;
let conn_data_clone = conn_data.clone();
connect_tasks.spawn(async move {
kcp_endpoint
@@ -184,7 +175,9 @@ impl NatDstConnector for NatDstKcpConnector {
Bytes::from(conn_data_clone.encode_to_vec()),
)
.await
.with_context(|| format!("failed to connect to nat dst: {}", nat_dst))
.with_context(|| {
format!("failed to connect to nat dst: {}", nat_dst.to_string())
})
});
}
@@ -203,7 +196,7 @@ impl NatDstConnector for NatDstKcpConnector {
_ipv4: &Ipv4Packet,
_real_dst_ip: &mut Ipv4Addr,
) -> bool {
hdr.from_peer_id == hdr.to_peer_id && hdr.is_kcp_src_modified()
return hdr.from_peer_id == hdr.to_peer_id && hdr.is_kcp_src_modified();
}
fn transport_type(&self) -> TcpProxyEntryTransportType {
@@ -230,10 +223,15 @@ impl TcpProxyForKcpSrcTrait for TcpProxyForKcpSrc {
}
async fn check_dst_allow_kcp_input(&self, dst_ip: &Ipv4Addr) -> bool {
self.0
.get_peer_manager()
.check_allow_kcp_to_dst(&IpAddr::V4(*dst_ip))
.await
let peer_map: Arc<crate::peers::peer_map::PeerMap> =
self.0.get_peer_manager().get_peer_map();
let Some(dst_peer_id) = peer_map.get_peer_id_by_ipv4(dst_ip).await else {
return false;
};
let Some(peer_info) = peer_map.get_route_peer_info(dst_peer_id).await else {
return false;
};
peer_info.feature_flag.map(|x| x.kcp_input).unwrap_or(false)
}
}
@@ -343,13 +341,13 @@ impl KcpProxySrc {
pub async fn start(&self) {
self.peer_manager
.add_nic_packet_process_pipeline(Box::new(self.tcp_proxy.clone()))
.add_nic_packet_process_pipeline(Arc::new(self.tcp_proxy.clone()))
.await;
self.peer_manager
.add_packet_process_pipeline(Box::new(self.tcp_proxy.0.clone()))
.add_packet_process_pipeline(Arc::new(self.tcp_proxy.0.clone()))
.await;
self.peer_manager
.add_packet_process_pipeline(Box::new(KcpEndpointFilter {
.add_packet_process_pipeline(Arc::new(KcpEndpointFilter {
kcp_endpoint: self.kcp_endpoint.clone(),
is_src: true,
}))
@@ -374,50 +372,6 @@ pub struct KcpProxyDst {
tasks: JoinSet<()>,
}
#[derive(Clone)]
pub struct ProxyAclHandler {
pub acl_filter: Arc<AclFilter>,
pub packet_info: PacketInfo,
pub chain_type: ChainType,
}
impl ProxyAclHandler {
pub fn handle_packet(&self, buf: &[u8]) -> Result<()> {
let mut packet_info = self.packet_info.clone();
packet_info.packet_size = buf.len();
let ret = self
.acl_filter
.get_processor()
.process_packet(&packet_info, self.chain_type);
self.acl_filter.handle_acl_result(
&ret,
&packet_info,
self.chain_type,
&self.acl_filter.get_processor(),
);
if !matches!(ret.action, Action::Allow) {
return Err(anyhow::anyhow!("acl denied").into());
}
Ok(())
}
pub async fn copy_bidirection_with_acl(
&self,
src: impl AsyncRead + AsyncWrite + Unpin,
mut dst: impl AsyncRead + AsyncWrite + Unpin,
) -> Result<()> {
let (src_reader, src_writer) = tokio::io::split(src);
let src_reader = InspectReader::new(src_reader, |buf| {
let _ = self.handle_packet(buf);
});
let mut src = tokio::io::join(src_reader, src_writer);
copy_bidirectional(&mut src, &mut dst).await?;
Ok(())
}
}
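
copy_bidirection_with_acl relies on tokio_util's InspectReader to run the ACL hook on every chunk flowing out of the proxied stream before copy_bidirectional relays it. A self-contained sketch of that pattern, using in-memory duplex pipes in place of the KCP/QUIC and TCP streams and a log line in place of the ACL check (tokio with the full feature set and tokio-util with the io feature are assumed, as the crate already uses both):

use tokio::io::{copy_bidirectional, duplex, AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
use tokio_util::io::InspectReader;

async fn proxy_with_inspect<A, B>(client: A, mut dst: B) -> std::io::Result<()>
where
    A: AsyncRead + AsyncWrite + Unpin,
    B: AsyncRead + AsyncWrite + Unpin,
{
    let (rd, wr) = tokio::io::split(client);
    // Every chunk read from the client passes through this hook before being relayed.
    let rd = InspectReader::new(rd, |chunk: &[u8]| {
        // A real ACL check would run here; the sketch only logs the chunk size.
        println!("client -> dst: {} bytes", chunk.len());
    });
    let mut client = tokio::io::join(rd, wr);
    copy_bidirectional(&mut client, &mut dst).await?;
    Ok(())
}

#[tokio::main]
async fn main() -> std::io::Result<()> {
    // In-memory pipes stand in for the proxied stream and the TCP destination.
    let (client_side, mut client_peer) = duplex(1024);
    let (dst_side, mut dst_peer) = duplex(1024);
    let proxy = tokio::spawn(proxy_with_inspect(client_side, dst_side));

    client_peer.write_all(b"hello").await?;
    client_peer.shutdown().await?;

    let mut out = Vec::new();
    dst_peer.read_to_end(&mut out).await?;
    assert_eq!(out, b"hello");

    drop(dst_peer); // let the reverse direction hit EOF so the proxy task can finish
    let _ = proxy.await;
    Ok(())
}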
impl KcpProxyDst {
pub async fn new(peer_manager: Arc<PeerManager>) -> Self {
let mut kcp_endpoint = create_kcp_endpoint();
@@ -442,7 +396,7 @@ impl KcpProxyDst {
#[tracing::instrument(ret)]
async fn handle_one_in_stream(
kcp_stream: KcpStream,
mut kcp_stream: KcpStream,
global_ctx: ArcGlobalCtx,
proxy_entries: Arc<DashMap<ConnId, TcpProxyEntry>>,
cidr_set: Arc<CidrSet>,
@@ -457,13 +411,15 @@ impl KcpProxyDst {
parsed_conn_data
))?
.into();
let src_socket: SocketAddr = parsed_conn_data.src.unwrap_or_default().into();
if let IpAddr::V4(dst_v4_ip) = dst_socket.ip() {
match dst_socket.ip() {
IpAddr::V4(dst_v4_ip) => {
let mut real_ip = dst_v4_ip;
if cidr_set.contains_v4(dst_v4_ip, &mut real_ip) {
dst_socket.set_ip(real_ip.into());
}
}
_ => {}
};
let conn_id = kcp_stream.conn_id();
@@ -481,36 +437,17 @@ impl KcpProxyDst {
proxy_entries.remove(&conn_id);
}
let send_to_self =
Some(dst_socket.ip()) == global_ctx.get_ipv4().map(|ip| IpAddr::V4(ip.address()));
if send_to_self && global_ctx.no_tun() {
if Some(dst_socket.ip()) == global_ctx.get_ipv4().map(|ip| IpAddr::V4(ip.address()))
&& global_ctx.no_tun()
{
dst_socket = format!("127.0.0.1:{}", dst_socket.port()).parse().unwrap();
}
let acl_handler = ProxyAclHandler {
acl_filter: global_ctx.get_acl_filter().clone(),
packet_info: PacketInfo {
src_ip: src_socket.ip(),
dst_ip: dst_socket.ip(),
src_port: Some(src_socket.port()),
dst_port: Some(dst_socket.port()),
protocol: Protocol::Tcp,
packet_size: conn_data.len(),
},
chain_type: if send_to_self {
ChainType::Inbound
} else {
ChainType::Forward
},
};
acl_handler.handle_packet(&conn_data)?;
tracing::debug!("kcp connect to dst socket: {:?}", dst_socket);
let _g = global_ctx.net_ns.guard();
let connector = NatDstTcpConnector {};
let ret = connector
let mut ret = connector
.connect("0.0.0.0:0".parse().unwrap(), dst_socket)
.await?;
@@ -518,10 +455,7 @@ impl KcpProxyDst {
e.state = TcpProxyEntryState::Connected.into();
}
acl_handler
.copy_bidirection_with_acl(kcp_stream, ret)
.await?;
copy_bidirectional(&mut ret, &mut kcp_stream).await?;
Ok(())
}
@@ -550,7 +484,7 @@ impl KcpProxyDst {
pub async fn start(&mut self) {
self.run_accept_task().await;
self.peer_manager
.add_packet_process_pipeline(Box::new(KcpEndpointFilter {
.add_packet_process_pipeline(Arc::new(KcpEndpointFilter {
kcp_endpoint: self.kcp_endpoint.clone(),
is_src: false,
}))
@@ -578,7 +512,7 @@ impl TcpProxyRpc for KcpProxyDstRpcService {
let mut reply = ListTcpProxyEntryResponse::default();
if let Some(tcp_proxy) = self.0.upgrade() {
for item in tcp_proxy.iter() {
reply.entries.push(*item.value());
reply.entries.push(item.value().clone());
}
}
Ok(reply)

View File

@@ -56,11 +56,11 @@ impl CidrSet {
cidr_set.lock().unwrap().clear();
for cidr in cidrs.iter() {
let real_cidr = cidr.cidr;
let mapped = cidr.mapped_cidr.unwrap_or(real_cidr);
cidr_set.lock().unwrap().push(mapped);
let mapped = cidr.mapped_cidr.unwrap_or(real_cidr.clone());
cidr_set.lock().unwrap().push(mapped.clone());
if mapped != real_cidr {
mapped_to_real.insert(mapped, real_cidr);
mapped_to_real.insert(mapped.clone(), real_cidr.clone());
}
}
}
@@ -70,11 +70,11 @@ impl CidrSet {
}
pub fn contains_v4(&self, ipv4: std::net::Ipv4Addr, real_ip: &mut std::net::Ipv4Addr) -> bool {
let ip = ipv4;
let ip = ipv4.into();
let s = self.cidr_set.lock().unwrap();
for cidr in s.iter() {
if cidr.contains(&ip) {
if let Some(real_cidr) = self.mapped_to_real.get(cidr).map(|v| *v.value()) {
if let Some(real_cidr) = self.mapped_to_real.get(&cidr).map(|v| v.value().clone()) {
let origin_network_bits = real_cidr.first().address().to_bits();
let network_mask = cidr.mask().to_bits();

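When a destination falls into a mapped CIDR, the real destination is recovered by keeping the host bits of the packet address and substituting the network bits of the real CIDR; this sketch assumes, as the mapping implies, that both prefixes have the same length:

use std::net::Ipv4Addr;

// Replace the network bits of `addr` with those of `real_network`,
// keeping the host bits untouched.
fn rewrite_network(addr: Ipv4Addr, real_network: Ipv4Addr, prefix_len: u8) -> Ipv4Addr {
    let mask = if prefix_len == 0 { 0 } else { u32::MAX << (32 - prefix_len) };
    Ipv4Addr::from((u32::from(real_network) & mask) | (u32::from(addr) & !mask))
}

fn main() {
    // 10.1.1.0/24 advertised as 192.168.10.0/24: a packet to 192.168.10.5
    // is rewritten to the real destination 10.1.1.5.
    let real = rewrite_network(
        Ipv4Addr::new(192, 168, 10, 5),
        Ipv4Addr::new(10, 1, 1, 0),
        24,
    );
    assert_eq!(real, Ipv4Addr::new(10, 1, 1, 5));
}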
View File

@@ -7,21 +7,19 @@ use dashmap::DashMap;
use pnet::packet::ipv4::Ipv4Packet;
use prost::Message as _;
use quinn::{Endpoint, Incoming};
use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite};
use tokio::io::{copy_bidirectional, AsyncRead, AsyncReadExt, AsyncWrite};
use tokio::net::TcpStream;
use tokio::task::JoinSet;
use tokio::time::timeout;
use crate::common::acl_processor::PacketInfo;
use crate::common::error::Result;
use crate::common::global_ctx::{ArcGlobalCtx, GlobalCtx};
use crate::common::join_joinset_background;
use crate::defer;
use crate::gateway::kcp_proxy::{ProxyAclHandler, TcpProxyForKcpSrcTrait};
use crate::gateway::kcp_proxy::TcpProxyForKcpSrcTrait;
use crate::gateway::tcp_proxy::{NatDstConnector, NatDstTcpConnector, TcpProxy};
use crate::gateway::CidrSet;
use crate::peers::peer_manager::PeerManager;
use crate::proto::acl::{ChainType, Protocol};
use crate::proto::cli::{
ListTcpProxyEntryRequest, ListTcpProxyEntryResponse, TcpProxyEntry, TcpProxyEntryState,
TcpProxyEntryTransportType, TcpProxyRpc,
@@ -172,7 +170,7 @@ impl NatDstConnector for NatDstQUICConnector {
_ipv4: &Ipv4Packet,
_real_dst_ip: &mut Ipv4Addr,
) -> bool {
hdr.from_peer_id == hdr.to_peer_id && !hdr.is_kcp_src_modified()
return hdr.from_peer_id == hdr.to_peer_id && !hdr.is_kcp_src_modified();
}
fn transport_type(&self) -> TcpProxyEntryTransportType {
@@ -229,10 +227,10 @@ impl QUICProxySrc {
pub async fn start(&self) {
self.peer_manager
.add_nic_packet_process_pipeline(Box::new(self.tcp_proxy.clone()))
.add_nic_packet_process_pipeline(Arc::new(self.tcp_proxy.clone()))
.await;
self.peer_manager
.add_packet_process_pipeline(Box::new(self.tcp_proxy.0.clone()))
.add_packet_process_pipeline(Arc::new(self.tcp_proxy.0.clone()))
.await;
self.tcp_proxy.0.start(false).await.unwrap();
}
@@ -324,13 +322,12 @@ impl QUICProxyDst {
.await;
match ret {
Ok(Ok((quic_stream, tcp_stream, acl))) => {
let remote_addr = quic_stream.connection.as_ref().map(|c| c.remote_address());
let ret = acl.copy_bidirection_with_acl(quic_stream, tcp_stream).await;
Ok(Ok((mut quic_stream, mut tcp_stream))) => {
let ret = copy_bidirectional(&mut quic_stream, &mut tcp_stream).await;
tracing::info!(
"QUIC connection handled, result: {:?}, remote addr: {:?}",
ret,
remote_addr,
quic_stream.connection.as_ref().map(|c| c.remote_address())
);
}
Ok(Err(e)) => {
@@ -348,7 +345,7 @@ impl QUICProxyDst {
cidr_set: Arc<CidrSet>,
proxy_entry_key: SocketAddr,
proxy_entries: Arc<DashMap<SocketAddr, TcpProxyEntry>>,
) -> Result<(QUICStream, TcpStream, ProxyAclHandler)> {
) -> Result<(QUICStream, TcpStream)> {
let conn = incoming.await.with_context(|| "accept failed")?;
let addr = conn.remote_address();
tracing::info!("Accepted QUIC connection from {}", addr);
@@ -379,8 +376,7 @@ impl QUICProxyDst {
dst_socket.set_ip(real_ip);
}
let send_to_self = Some(*dst_socket.ip()) == ctx.get_ipv4().map(|ip| ip.address());
if send_to_self && ctx.no_tun() {
if Some(*dst_socket.ip()) == ctx.get_ipv4().map(|ip| ip.address()) && ctx.no_tun() {
dst_socket = format!("127.0.0.1:{}", dst_socket.port()).parse().unwrap();
}
@@ -395,24 +391,6 @@ impl QUICProxyDst {
},
);
let acl_handler = ProxyAclHandler {
acl_filter: ctx.get_acl_filter().clone(),
packet_info: PacketInfo {
src_ip: addr.ip(),
dst_ip: (*dst_socket.ip()).into(),
src_port: Some(addr.port()),
dst_port: Some(dst_socket.port()),
protocol: Protocol::Tcp,
packet_size: len as usize,
},
chain_type: if send_to_self {
ChainType::Inbound
} else {
ChainType::Forward
},
};
acl_handler.handle_packet(&buf)?;
let connector = NatDstTcpConnector {};
let dst_stream = {
@@ -433,7 +411,7 @@ impl QUICProxyDst {
receiver: r,
};
Ok((quic_stream, dst_stream, acl_handler))
Ok((quic_stream, dst_stream))
}
}
@@ -457,7 +435,7 @@ impl TcpProxyRpc for QUICProxyDstRpcService {
let mut reply = ListTcpProxyEntryResponse::default();
if let Some(tcp_proxy) = self.0.upgrade() {
for item in tcp_proxy.iter() {
reply.entries.push(*item.value());
reply.entries.push(item.value().clone());
}
}
Ok(reply)

View File

@@ -6,7 +6,6 @@ use std::{
use crossbeam::atomic::AtomicCell;
use kcp_sys::{endpoint::KcpEndpoint, stream::KcpStream};
use tokio_util::sync::{CancellationToken, DropGuard};
use crate::{
common::{
@@ -72,9 +71,9 @@ impl SocksUdpSocket {
}
enum SocksTcpStream {
Tcp(tokio::net::TcpStream),
SmolTcp(super::tokio_smoltcp::TcpStream),
Kcp(KcpStream),
TcpStream(tokio::net::TcpStream),
SmolTcpStream(super::tokio_smoltcp::TcpStream),
KcpStream(KcpStream),
}
impl AsyncRead for SocksTcpStream {
@@ -84,11 +83,15 @@ impl AsyncRead for SocksTcpStream {
buf: &mut tokio::io::ReadBuf<'_>,
) -> std::task::Poll<std::io::Result<()>> {
match self.get_mut() {
SocksTcpStream::Tcp(ref mut stream) => std::pin::Pin::new(stream).poll_read(cx, buf),
SocksTcpStream::SmolTcp(ref mut stream) => {
SocksTcpStream::TcpStream(ref mut stream) => {
std::pin::Pin::new(stream).poll_read(cx, buf)
}
SocksTcpStream::SmolTcpStream(ref mut stream) => {
std::pin::Pin::new(stream).poll_read(cx, buf)
}
SocksTcpStream::KcpStream(ref mut stream) => {
std::pin::Pin::new(stream).poll_read(cx, buf)
}
SocksTcpStream::Kcp(ref mut stream) => std::pin::Pin::new(stream).poll_read(cx, buf),
}
}
}
@@ -100,11 +103,15 @@ impl AsyncWrite for SocksTcpStream {
buf: &[u8],
) -> std::task::Poll<Result<usize, std::io::Error>> {
match self.get_mut() {
SocksTcpStream::Tcp(ref mut stream) => std::pin::Pin::new(stream).poll_write(cx, buf),
SocksTcpStream::SmolTcp(ref mut stream) => {
SocksTcpStream::TcpStream(ref mut stream) => {
std::pin::Pin::new(stream).poll_write(cx, buf)
}
SocksTcpStream::SmolTcpStream(ref mut stream) => {
std::pin::Pin::new(stream).poll_write(cx, buf)
}
SocksTcpStream::KcpStream(ref mut stream) => {
std::pin::Pin::new(stream).poll_write(cx, buf)
}
SocksTcpStream::Kcp(ref mut stream) => std::pin::Pin::new(stream).poll_write(cx, buf),
}
}
@@ -113,9 +120,11 @@ impl AsyncWrite for SocksTcpStream {
cx: &mut std::task::Context<'_>,
) -> std::task::Poll<Result<(), std::io::Error>> {
match self.get_mut() {
SocksTcpStream::Tcp(ref mut stream) => std::pin::Pin::new(stream).poll_flush(cx),
SocksTcpStream::SmolTcp(ref mut stream) => std::pin::Pin::new(stream).poll_flush(cx),
SocksTcpStream::Kcp(ref mut stream) => std::pin::Pin::new(stream).poll_flush(cx),
SocksTcpStream::TcpStream(ref mut stream) => std::pin::Pin::new(stream).poll_flush(cx),
SocksTcpStream::SmolTcpStream(ref mut stream) => {
std::pin::Pin::new(stream).poll_flush(cx)
}
SocksTcpStream::KcpStream(ref mut stream) => std::pin::Pin::new(stream).poll_flush(cx),
}
}
@@ -124,9 +133,15 @@ impl AsyncWrite for SocksTcpStream {
cx: &mut std::task::Context<'_>,
) -> std::task::Poll<Result<(), std::io::Error>> {
match self.get_mut() {
SocksTcpStream::Tcp(ref mut stream) => std::pin::Pin::new(stream).poll_shutdown(cx),
SocksTcpStream::SmolTcp(ref mut stream) => std::pin::Pin::new(stream).poll_shutdown(cx),
SocksTcpStream::Kcp(ref mut stream) => std::pin::Pin::new(stream).poll_shutdown(cx),
SocksTcpStream::TcpStream(ref mut stream) => {
std::pin::Pin::new(stream).poll_shutdown(cx)
}
SocksTcpStream::SmolTcpStream(ref mut stream) => {
std::pin::Pin::new(stream).poll_shutdown(cx)
}
SocksTcpStream::KcpStream(ref mut stream) => {
std::pin::Pin::new(stream).poll_shutdown(cx)
}
}
}
}
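
The enum above forwards every poll_* call to its active variant by hand. One alternative, assuming tokio-util's either::Either (which forwards AsyncRead/AsyncWrite when both branches implement them), is to nest Either instead of writing the match arms; a minimal sketch:

use tokio::io::{duplex, AsyncReadExt, AsyncWriteExt};
use tokio_util::either::Either;

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let (a, mut a_peer) = duplex(64);
    let (b, _b_peer) = duplex(64);

    // Pick a branch at runtime; both sides are DuplexStream here, but in the
    // proxy they would be e.g. a kernel TcpStream vs. a smoltcp or KCP stream
    // (three alternatives nest as Either<A, Either<B, C>>).
    let mut stream = if true { Either::Left(a) } else { Either::Right(b) };

    a_peer.write_all(b"ping").await?;
    let mut buf = [0u8; 4];
    stream.read_exact(&mut buf).await?;
    assert_eq!(&buf, b"ping");
    Ok(())
}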
@@ -180,7 +195,7 @@ impl AsyncTcpConnector for SmolTcpConnector {
let modified_addr =
SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), addr.port());
Ok(SocksTcpStream::Tcp(
Ok(SocksTcpStream::TcpStream(
tcp_connect_with_timeout(modified_addr, timeout_s).await?,
))
} else {
@@ -191,9 +206,9 @@ impl AsyncTcpConnector for SmolTcpConnector {
.await
.with_context(|| "connect to remote timeout")?;
Ok(SocksTcpStream::SmolTcp(remote_socket.map_err(|e| {
super::fast_socks5::SocksError::Other(e.into())
})?))
Ok(SocksTcpStream::SmolTcpStream(remote_socket.map_err(
|e| super::fast_socks5::SocksError::Other(e.into()),
)?))
}
}
}
@@ -233,7 +248,7 @@ impl AsyncTcpConnector for Socks5KcpConnector {
.connect(self.src_addr, addr)
.await
.map_err(|e| super::fast_socks5::SocksError::Other(e.into()))?;
Ok(SocksTcpStream::Kcp(ret))
Ok(SocksTcpStream::KcpStream(ret))
}
}
@@ -417,8 +432,6 @@ pub struct Socks5Server {
udp_forward_task: Arc<DashMap<UdpClientKey, ScopedTask<()>>>,
kcp_endpoint: Mutex<Option<Weak<KcpEndpoint>>>,
cancel_tokens: DashMap<PortForwardConfig, DropGuard>,
}
#[async_trait::async_trait]
@@ -518,8 +531,6 @@ impl Socks5Server {
udp_forward_task: Arc::new(DashMap::new()),
kcp_endpoint: Mutex::new(None),
cancel_tokens: DashMap::new(),
})
}
@@ -544,16 +555,16 @@ impl Socks5Server {
tcp_forward_task.lock().unwrap().abort_all();
udp_client_map.clear();
if let Some(cur_ipv4) = cur_ipv4 {
if cur_ipv4.is_none() {
let _ = net.lock().await.take();
} else {
net.lock().await.replace(Socks5ServerNet::new(
cur_ipv4,
cur_ipv4.unwrap(),
None,
peer_manager.clone(),
packet_recv.clone(),
entries.clone(),
));
} else {
let _ = net.lock().await.take();
}
}
@@ -603,13 +614,14 @@ impl Socks5Server {
need_start = true;
};
let cfgs = self.global_ctx.config.get_port_forwards();
self.reload_port_forwards(&cfgs).await?;
need_start = need_start || !cfgs.is_empty();
for port_forward in self.global_ctx.config.get_port_forwards() {
self.add_port_forward(port_forward).await?;
need_start = true;
}
if need_start {
self.peer_manager
.add_packet_process_pipeline(Box::new(self.clone()))
.add_packet_process_pipeline(self.clone())
.await;
self.run_net_update_task().await;
@@ -618,26 +630,6 @@ impl Socks5Server {
Ok(())
}
pub async fn reload_port_forwards(&self, cfgs: &Vec<PortForwardConfig>) -> Result<(), Error> {
// remove entries not in new cfg
self.cancel_tokens.retain(|k, _| {
cfgs.iter().any(|cfg| {
if cfg.dst_addr.ip().is_unspecified() {
k.bind_addr == cfg.bind_addr && k.proto == cfg.proto
} else {
k == cfg
}
})
});
// add new ones
for cfg in cfgs {
if !self.cancel_tokens.contains_key(cfg) {
self.add_port_forward(cfg.clone()).await?;
}
}
Ok(())
}
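
reload_port_forwards applies a retain-then-add pattern: guards for forwards that disappeared from the config are dropped, cancelling their accept loops through the DropGuard, and only genuinely new forwards are started. A toy model of that lifecycle, ignoring the unspecified-destination special case (only tokio-util's CancellationToken/DropGuard are assumed, which the module already imports):

use std::collections::HashMap;
use tokio_util::sync::{CancellationToken, DropGuard};

#[derive(Clone, PartialEq, Eq, Hash)]
struct Forward { bind: String, dst: String, proto: String }

struct Forwarder { active: HashMap<Forward, DropGuard> }

impl Forwarder {
    fn reload(&mut self, cfgs: &[Forward]) {
        // Dropping a DropGuard cancels its token, which the accept loop selects on.
        self.active.retain(|k, _| cfgs.contains(k));
        for cfg in cfgs {
            if !self.active.contains_key(cfg) {
                let token = CancellationToken::new();
                // A real implementation would spawn the accept loop with `token.clone()` here.
                self.active.insert(cfg.clone(), token.drop_guard());
            }
        }
    }
}

fn main() {
    let mut fwd = Forwarder { active: HashMap::new() };
    let a = Forward { bind: "0.0.0.0:8080".into(), dst: "10.0.0.2:80".into(), proto: "tcp".into() };
    let b = Forward { bind: "0.0.0.0:5353".into(), dst: "10.0.0.3:53".into(), proto: "udp".into() };
    fwd.reload(&[a.clone(), b.clone()]);
    assert_eq!(fwd.active.len(), 2);
    fwd.reload(&[b]); // `a` is gone from the config: its guard is dropped and its task cancelled
    assert_eq!(fwd.active.len(), 1);
}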
async fn handle_port_forward_connection(
mut incoming_socket: tokio::net::TcpStream,
connector: Box<dyn AsyncTcpConnector<S = SocksTcpStream> + Send>,
@@ -668,10 +660,12 @@ impl Socks5Server {
pub async fn add_port_forward(&self, cfg: PortForwardConfig) -> Result<(), Error> {
match cfg.proto.to_lowercase().as_str() {
"tcp" => {
self.add_tcp_port_forward(&cfg).await?;
self.add_tcp_port_forward(cfg.bind_addr, cfg.dst_addr)
.await?;
}
"udp" => {
self.add_udp_port_forward(&cfg).await?;
self.add_udp_port_forward(cfg.bind_addr, cfg.dst_addr)
.await?;
}
_ => {
return Err(anyhow::anyhow!(
@@ -686,12 +680,11 @@ impl Socks5Server {
Ok(())
}
pub fn remove_port_forward(&self, cfg: PortForwardConfig) {
let _ = self.cancel_tokens.remove(&cfg);
}
pub async fn add_tcp_port_forward(&self, cfg: &PortForwardConfig) -> Result<(), Error> {
let (bind_addr, dst_addr) = (cfg.bind_addr, cfg.dst_addr);
pub async fn add_tcp_port_forward(
&self,
bind_addr: SocketAddr,
dst_addr: SocketAddr,
) -> Result<(), Error> {
let listener = bind_tcp_socket(bind_addr, self.global_ctx.net_ns.clone())?;
let net = self.net.clone();
@@ -700,27 +693,15 @@ impl Socks5Server {
let forward_tasks = tasks.clone();
let kcp_endpoint = self.kcp_endpoint.lock().await.clone();
let peer_mgr = Arc::downgrade(&self.peer_manager.clone());
let cancel_token = CancellationToken::new();
self.cancel_tokens
.insert(cfg.clone(), cancel_token.clone().drop_guard());
self.tasks.lock().unwrap().spawn(async move {
loop {
let (incoming_socket, addr) = select! {
biased;
_ = cancel_token.cancelled() => {
tracing::info!("port forward for {:?} cancelled", bind_addr);
break;
}
res = listener.accept() => {
match res {
let (incoming_socket, addr) = match listener.accept().await {
Ok(result) => result,
Err(err) => {
tracing::error!("port forward accept error = {:?}", err);
continue;
}
}
}
};
tracing::info!(
@@ -735,26 +716,20 @@ impl Socks5Server {
continue;
};
let Some(peer_mgr_arc) = peer_mgr.upgrade() else {
tracing::error!("peer manager is dropped");
continue;
};
let dst_allow_kcp = peer_mgr_arc.check_allow_kcp_to_dst(&dst_addr.ip()).await;
tracing::debug!("dst_allow_kcp: {:?}", dst_allow_kcp);
let connector: Box<dyn AsyncTcpConnector<S = SocksTcpStream> + Send> =
match (&kcp_endpoint, dst_allow_kcp) {
(Some(kcp_endpoint), true) => Box::new(Socks5KcpConnector {
kcp_endpoint: kcp_endpoint.clone(),
peer_mgr: peer_mgr.clone(),
src_addr: addr,
}),
(_, _) => Box::new(SmolTcpConnector {
if kcp_endpoint.is_none() {
Box::new(SmolTcpConnector {
net: net.smoltcp_net.clone(),
entries: entries.clone(),
current_entry: std::sync::Mutex::new(None),
}),
})
} else {
let kcp_endpoint = kcp_endpoint.as_ref().unwrap().clone();
Box::new(Socks5KcpConnector {
kcp_endpoint,
peer_mgr: peer_mgr.clone(),
src_addr: addr,
})
};
forward_tasks
@@ -772,8 +747,11 @@ impl Socks5Server {
}
#[tracing::instrument(name = "add_udp_port_forward", skip(self))]
pub async fn add_udp_port_forward(&self, cfg: &PortForwardConfig) -> Result<(), Error> {
let (bind_addr, dst_addr) = (cfg.bind_addr, cfg.dst_addr);
pub async fn add_udp_port_forward(
&self,
bind_addr: SocketAddr,
dst_addr: SocketAddr,
) -> Result<(), Error> {
let socket = Arc::new(bind_udp_socket(bind_addr, self.global_ctx.net_ns.clone())?);
let entries = self.entries.clone();
@@ -781,29 +759,17 @@ impl Socks5Server {
let net = self.net.clone();
let udp_client_map = self.udp_client_map.clone();
let udp_forward_task = self.udp_forward_task.clone();
let cancel_token = CancellationToken::new();
self.cancel_tokens
.insert(cfg.clone(), cancel_token.clone().drop_guard());
self.tasks.lock().unwrap().spawn(async move {
loop {
// we set the max buffer size of smoltcp to 8192, so we need to use a buffer size that is less than 8192 here.
let mut buf = vec![0u8; 8192];
let (len, addr) = select! {
biased;
_ = cancel_token.cancelled() => {
tracing::info!("udp port forward for {:?} cancelled", bind_addr);
break;
}
res = socket.recv_from(&mut buf) => {
match res {
let (len, addr) = match socket.recv_from(&mut buf).await {
Ok(result) => result,
Err(err) => {
tracing::error!("udp port forward recv error = {:?}", err);
continue;
}
}
}
};
tracing::trace!(
@@ -944,10 +910,10 @@ impl Socks5Server {
udp_client_map.retain(|_, client_info| {
now.duration_since(client_info.last_active.load()).as_secs() < 600
});
udp_forward_task.retain(|k, _| udp_client_map.contains_key(k));
udp_forward_task.retain(|k, _| udp_client_map.contains_key(&k));
entries.retain(|_, data| match data {
Socks5EntryData::Udp((_, udp_client_key)) => {
udp_client_map.contains_key(udp_client_key)
udp_client_map.contains_key(&udp_client_key)
}
_ => true,
});

View File

@@ -63,13 +63,7 @@ pub struct NatDstTcpConnector;
impl NatDstConnector for NatDstTcpConnector {
type DstStream = TcpStream;
async fn connect(&self, _src: SocketAddr, nat_dst: SocketAddr) -> Result<Self::DstStream> {
let socket = match TcpSocket::new_v4() {
Ok(s) => s,
Err(e) => {
eprintln!("create v4 socket failed: {:?}", e);
return Err(e.into());
}
};
let socket = TcpSocket::new_v4().unwrap();
if let Err(e) = socket.set_nodelay(true) {
tracing::warn!("set_nodelay failed, ignore it: {:?}", e);
}
@@ -109,9 +103,9 @@ impl NatDstConnector for NatDstTcpConnector {
) -> bool {
let is_exit_node = hdr.is_exit_node();
if !(cidr_set.contains_v4(ipv4.get_destination(), real_dst_ip)
|| is_exit_node
|| global_ctx.no_tun()
if !cidr_set.contains_v4(ipv4.get_destination(), real_dst_ip)
&& !is_exit_node
&& !(global_ctx.no_tun()
&& Some(ipv4.get_destination())
== global_ctx.get_ipv4().as_ref().map(Ipv4Inet::address))
{
@@ -154,10 +148,10 @@ impl NatDstEntry {
}
}
fn parse_as_pb(&self, transport_type: TcpProxyEntryTransportType) -> TcpProxyEntry {
fn into_pb(&self, transport_type: TcpProxyEntryTransportType) -> TcpProxyEntry {
TcpProxyEntry {
src: Some(self.src.into()),
dst: Some(self.real_dst.into()),
src: Some(self.src.clone().into()),
dst: Some(self.real_dst.clone().into()),
start_time: self.start_time_local.timestamp() as u64,
state: self.state.load().into(),
transport_type: transport_type.into(),
@@ -332,15 +326,17 @@ pub struct TcpProxy<C: NatDstConnector> {
#[async_trait::async_trait]
impl<C: NatDstConnector> PeerPacketFilter for TcpProxy<C> {
async fn try_process_packet_from_peer(&self, mut packet: ZCPacket) -> Option<ZCPacket> {
if self.try_handle_peer_packet(&mut packet).await.is_some() {
if let Some(_) = self.try_handle_peer_packet(&mut packet).await {
if self.is_smoltcp_enabled() {
let smoltcp_stack_sender = self.smoltcp_stack_sender.as_ref().unwrap();
if let Err(e) = smoltcp_stack_sender.try_send(packet) {
tracing::error!("send to smoltcp stack failed: {:?}", e);
}
} else if let Err(e) = self.peer_manager.get_nic_channel().send(packet).await {
} else {
if let Err(e) = self.peer_manager.get_nic_channel().send(packet).await {
tracing::error!("send to nic failed: {:?}", e);
}
}
return None;
} else {
Some(packet)
@@ -480,10 +476,10 @@ impl<C: NatDstConnector> TcpProxy<C> {
self.run_listener().await?;
if add_pipeline {
self.peer_manager
.add_packet_process_pipeline(Box::new(self.clone()))
.add_packet_process_pipeline(self.clone())
.await;
self.peer_manager
.add_nic_packet_process_pipeline(Box::new(self.clone()))
.add_nic_packet_process_pipeline(self.clone())
.await;
}
join_joinset_background(self.tasks.clone(), "TcpProxy".to_owned());
@@ -608,7 +604,7 @@ impl<C: NatDstConnector> TcpProxy<C> {
self.enable_smoltcp
.store(false, std::sync::atomic::Ordering::Relaxed);
Ok(ProxyTcpListener::KernelTcpListener(tcp_listener))
return Ok(ProxyTcpListener::KernelTcpListener(tcp_listener));
}
}
@@ -915,10 +911,10 @@ impl<C: NatDstConnector> TcpProxy<C> {
let mut entries: Vec<TcpProxyEntry> = Vec::new();
let transport_type = self.connector.transport_type();
for entry in self.syn_map.iter() {
entries.push(entry.value().as_ref().parse_as_pb(transport_type));
entries.push(entry.value().as_ref().into_pb(transport_type));
}
for entry in self.conn_map.iter() {
entries.push(entry.value().as_ref().parse_as_pb(transport_type));
entries.push(entry.value().as_ref().into_pb(transport_type));
}
entries
}

View File

@@ -17,17 +17,11 @@ pub struct ChannelDevice {
caps: DeviceCapabilities,
}
pub type ChannelDeviceNewRet = (
ChannelDevice,
Sender<io::Result<Vec<u8>>>,
Receiver<Vec<u8>>,
);
impl ChannelDevice {
/// Make a new `ChannelDevice` with the given `recv` and `send` channels.
///
/// The `caps` is used to determine the device capabilities. `DeviceCapabilities::max_transmission_unit` must be set.
pub fn new(caps: DeviceCapabilities) -> ChannelDeviceNewRet {
pub fn new(caps: DeviceCapabilities) -> (Self, Sender<io::Result<Vec<u8>>>, Receiver<Vec<u8>>) {
let (tx1, rx1) = channel(1000);
let (tx2, rx2) = channel(1000);
(
@@ -51,7 +45,7 @@ impl Stream for ChannelDevice {
}
fn map_err(e: PollSendError<Vec<u8>>) -> io::Error {
io::Error::other(e)
io::Error::new(io::ErrorKind::Other, e)
}
impl Sink<Vec<u8>> for ChannelDevice {

Some files were not shown because too many files have changed in this diff.