Mirror of https://github.com/EasyTier/EasyTier.git (synced 2025-10-31 20:12:53 +08:00)

Compare commits (82 commits)
| SHA1 |
|---|
| dde7a4dff1 |
| 40601bd05b |
| 72d5ed908e |
| 72673a9d52 |
| 327ccdcf38 |
| 8c2f96d1aa |
| 34ba0bc95b |
| ed162c2e66 |
| 40b5fe9a54 |
| 5a98fac395 |
| 0bab14cd72 |
| b407cfd9d4 |
| 25dcdc652a |
| 950cb04534 |
| c07d1286ef |
| 8ddd153022 |
| 870353c499 |
| ecebbecd3b |
| f39fbb2ce2 |
| ec56c0bc45 |
| 20a6025075 |
| 707963c0d9 |
| 3c7837692e |
| f890812577 |
| 47f3efe71b |
| 6d88b10b14 |
| d34a51739f |
| a6773aa549 |
| 0314c66635 |
| 3fb172b4d2 |
| 96fc19b803 |
| 9f7ba8ab8f |
| e592e9f29a |
| 4608bca998 |
| b5dfc7374c |
| b469f8197a |
| 0a38a8ef4a |
| e75be7801f |
| 6c49bb1865 |
| f9c24bc205 |
| d7c3179c6e |
| b0fd37949a |
| 29994b663a |
| fc397c35c5 |
| 0f2b214918 |
| fec885c427 |
| 5a2fd4465c |
| 83d1ecc4da |
| 7c6daf7c56 |
| 28fe6257be |
| 99430983bc |
| d758a4958f |
| 95b12dda5a |
| 2675cf2d00 |
| 72be46e8fa |
| c5580feb64 |
| 7e3819be86 |
| f0302f2be7 |
| b5f60f843d |
| 6bdfb8b01f |
| ef1d81a2a1 |
| 739b4ee106 |
| 6a038e8a88 |
| 72ea8a9f76 |
| 44d93648ee |
| 75f7865769 |
| 01e3ad99ca |
| 3c0d85c9db |
| b38991a14e |
| 465269566b |
| f103fc13d9 |
| e5917fad4e |
| de8c89eb03 |
| c142db301a |
| 8dc8c7d9e2 |
| 2b909e04ea |
| e130c3f2e4 |
| 3ad754879f |
| fd2b3768e1 |
| 67cff12c76 |
| c5ea7848b3 |
| 34365a096e |

| @@ -6,72 +6,84 @@ rustflags = ["-C", "linker-flavor=ld.lld"] | ||||
| linker = "aarch64-linux-gnu-gcc" | ||||
|  | ||||
| [target.aarch64-unknown-linux-musl] | ||||
| linker = "aarch64-linux-musl-gcc" | ||||
| linker = "aarch64-unknown-linux-musl-gcc" | ||||
| rustflags = ["-C", "target-feature=+crt-static"] | ||||
|  | ||||
| [target.'cfg(all(windows, target_env = "msvc"))'] | ||||
| rustflags = ["-C", "target-feature=+crt-static"] | ||||
|  | ||||
| [target.mipsel-unknown-linux-musl] | ||||
| linker = "mipsel-linux-muslsf-gcc" | ||||
| linker = "mipsel-unknown-linux-muslsf-gcc" | ||||
| rustflags = [ | ||||
|     "-C", | ||||
|     "target-feature=+crt-static", | ||||
|     "-L", | ||||
|     "./musl_gcc/mipsel-linux-muslsf-cross/mipsel-linux-muslsf/lib", | ||||
|     "./musl_gcc/mipsel-unknown-linux-muslsf/mipsel-unknown-linux-muslsf/lib", | ||||
|     "-L", | ||||
|     "./musl_gcc/mipsel-linux-muslsf-cross/lib/gcc/mipsel-linux-muslsf/11.2.1", | ||||
|     "./musl_gcc/mipsel-unknown-linux-muslsf/mipsel-unknown-linux-muslsf/sysroot/usr/lib", | ||||
|     "-L", | ||||
|     "./musl_gcc/mipsel-unknown-linux-muslsf/lib/gcc/mipsel-unknown-linux-muslsf/15.1.0", | ||||
|     "-l", | ||||
|     "atomic", | ||||
|     "-l", | ||||
|     "ctz", | ||||
|     "-l", | ||||
|     "gcc", | ||||
| ] | ||||
|  | ||||
| [target.mips-unknown-linux-musl] | ||||
| linker = "mips-linux-muslsf-gcc" | ||||
| linker = "mips-unknown-linux-muslsf-gcc" | ||||
| rustflags = [ | ||||
|     "-C", | ||||
|     "target-feature=+crt-static", | ||||
|     "-L", | ||||
|     "./musl_gcc/mips-linux-muslsf-cross/mips-linux-muslsf/lib", | ||||
|     "./musl_gcc/mips-unknown-linux-muslsf/mips-unknown-linux-muslsf/lib", | ||||
|     "-L", | ||||
|     "./musl_gcc/mips-linux-muslsf-cross/lib/gcc/mips-linux-muslsf/11.2.1", | ||||
|     "./musl_gcc/mips-unknown-linux-muslsf/mips-unknown-linux-muslsf/sysroot/usr/lib", | ||||
|     "-L", | ||||
|     "./musl_gcc/mips-unknown-linux-muslsf/lib/gcc/mips-unknown-linux-muslsf/15.1.0", | ||||
|     "-l", | ||||
|     "atomic", | ||||
|     "-l", | ||||
|     "ctz", | ||||
|     "-l", | ||||
|     "gcc", | ||||
| ] | ||||
|  | ||||
| [target.armv7-unknown-linux-musleabihf] | ||||
| linker = "armv7l-linux-musleabihf-gcc" | ||||
| linker = "armv7-unknown-linux-musleabihf-gcc" | ||||
| rustflags = ["-C", "target-feature=+crt-static"] | ||||
|  | ||||
| [target.armv7-unknown-linux-musleabi] | ||||
| linker = "armv7m-linux-musleabi-gcc" | ||||
| linker = "armv7-unknown-linux-musleabi-gcc" | ||||
| rustflags = ["-C", "target-feature=+crt-static"] | ||||
|  | ||||
| [target.arm-unknown-linux-musleabihf] | ||||
| linker = "arm-linux-musleabihf-gcc" | ||||
| linker = "arm-unknown-linux-musleabihf-gcc" | ||||
| rustflags = [ | ||||
|     "-C", | ||||
|     "target-feature=+crt-static", | ||||
|     "-L", | ||||
|     "./musl_gcc/arm-linux-musleabihf-cross/arm-linux-musleabihf/lib", | ||||
|     "./musl_gcc/arm-unknown-linux-musleabihf/arm-unknown-linux-musleabihf/lib", | ||||
|     "-L", | ||||
|     "./musl_gcc/arm-linux-musleabihf-cross/lib/gcc/arm-linux-musleabihf/11.2.1", | ||||
|     "./musl_gcc/arm-unknown-linux-musleabihf/lib/gcc/arm-unknown-linux-musleabihf/15.1.0", | ||||
|     "-l", | ||||
|     "atomic", | ||||
|     "-l", | ||||
|     "gcc", | ||||
| ] | ||||
|  | ||||
| [target.arm-unknown-linux-musleabi] | ||||
| linker = "arm-linux-musleabi-gcc" | ||||
| linker = "arm-unknown-linux-musleabi-gcc" | ||||
| rustflags = [ | ||||
|     "-C", | ||||
|     "target-feature=+crt-static", | ||||
|     "-L", | ||||
|     "./musl_gcc/arm-linux-musleabi-cross/arm-linux-musleabi/lib", | ||||
|     "./musl_gcc/arm-unknown-linux-musleabi/arm-unknown-linux-musleabi/lib", | ||||
|     "-L", | ||||
|     "./musl_gcc/arm-linux-musleabi-cross/lib/gcc/arm-linux-musleabi/11.2.1", | ||||
|     "./musl_gcc/arm-unknown-linux-musleabi/lib/gcc/arm-unknown-linux-musleabi/15.1.0", | ||||
|     "-l", | ||||
|     "atomic", | ||||
|     "-l", | ||||
|     "gcc", | ||||
| ] | ||||
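These linker renames match the `*-unknown-linux-*` toolchain layout installed by the updated install_rust.sh further down. As a hedged sketch of building one of the tier-3 mips targets against that layout, mirroring the CI invocation shown in core.yml below:

```sh
# Sketch only: mirrors the CI build for tier-3 mips targets (nightly + build-std).
# Assumption: the mips-unknown-linux-muslsf toolchain is already unpacked under ./musl_gcc/;
# the CI symlinks its gcc into /usr/bin instead of extending PATH as done here.
export PATH="$PWD/musl_gcc/mips-unknown-linux-muslsf/bin:$PATH"
cargo +nightly build -r --target mips-unknown-linux-musl -Z build-std=std,panic_abort --package=easytier
```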
.github/workflows/core.yml (159 changes, vendored)

| @@ -31,6 +31,47 @@ jobs: | ||||
|           skip_after_successful_duplicate: 'true' | ||||
|           cancel_others: 'true' | ||||
|           paths: '["Cargo.toml", "Cargo.lock", "easytier/**", ".github/workflows/core.yml", ".github/workflows/install_rust.sh"]' | ||||
|   build_web: | ||||
|     runs-on: ubuntu-latest | ||||
|     needs: pre_job | ||||
|     if: needs.pre_job.outputs.should_skip != 'true' | ||||
|     steps: | ||||
|       - uses: actions/checkout@v3 | ||||
|  | ||||
|       - uses: actions/setup-node@v4 | ||||
|         with: | ||||
|           node-version: 21 | ||||
|  | ||||
|       - name: Install pnpm | ||||
|         uses: pnpm/action-setup@v3 | ||||
|         with: | ||||
|           version: 9 | ||||
|           run_install: false | ||||
|  | ||||
|       - name: Get pnpm store directory | ||||
|         shell: bash | ||||
|         run: | | ||||
|           echo "STORE_PATH=$(pnpm store path --silent)" >> $GITHUB_ENV | ||||
|  | ||||
|       - name: Setup pnpm cache | ||||
|         uses: actions/cache@v4 | ||||
|         with: | ||||
|           path: ${{ env.STORE_PATH }} | ||||
|           key: ${{ runner.os }}-pnpm-store-${{ hashFiles('**/pnpm-lock.yaml') }} | ||||
|           restore-keys: | | ||||
|             ${{ runner.os }}-pnpm-store- | ||||
|  | ||||
|       - name: Install frontend dependencies | ||||
|         run: | | ||||
|           pnpm -r install | ||||
|           pnpm -r --filter "./easytier-web/*"  build | ||||
|  | ||||
|       - name: Archive artifact | ||||
|         uses: actions/upload-artifact@v4 | ||||
|         with: | ||||
|           name: easytier-web-dashboard | ||||
|           path: | | ||||
|             easytier-web/frontend/dist/* | ||||
|   build: | ||||
|     strategy: | ||||
|       fail-fast: false | ||||
| @@ -71,10 +112,12 @@ jobs: | ||||
|           - TARGET: x86_64-pc-windows-msvc | ||||
|             OS: windows-latest | ||||
|             ARTIFACT_NAME: windows-x86_64 | ||||
|  | ||||
|           - TARGET: aarch64-pc-windows-msvc | ||||
|             OS: windows-latest | ||||
|             ARTIFACT_NAME: windows-arm64 | ||||
|           - TARGET: i686-pc-windows-msvc | ||||
|             OS: windows-latest | ||||
|             ARTIFACT_NAME: windows-i686 | ||||
|  | ||||
|           - TARGET: x86_64-unknown-freebsd | ||||
|             OS: ubuntu-22.04 | ||||
| @@ -87,7 +130,9 @@ jobs: | ||||
|       TARGET: ${{ matrix.TARGET }} | ||||
|       OS: ${{ matrix.OS }} | ||||
|       OSS_BUCKET: ${{ secrets.ALIYUN_OSS_BUCKET }} | ||||
|     needs: pre_job | ||||
|     needs: | ||||
|       - pre_job | ||||
|       - build_web | ||||
|     if: needs.pre_job.outputs.should_skip != 'true' | ||||
|     steps: | ||||
|       - uses: actions/checkout@v3 | ||||
| @@ -96,6 +141,12 @@ jobs: | ||||
|         run: | | ||||
|           echo "GIT_DESC=$(git log -1 --format=%cd.%h --date=format:%Y-%m-%d_%H:%M:%S)" >> $GITHUB_ENV | ||||
|  | ||||
|       - name: Download web artifact | ||||
|         uses: actions/download-artifact@v4 | ||||
|         with: | ||||
|           name: easytier-web-dashboard | ||||
|           path: easytier-web/frontend/dist/ | ||||
|  | ||||
|       - name: Cargo cache | ||||
|         if: ${{ ! endsWith(matrix.TARGET, 'freebsd') }} | ||||
|         uses: actions/cache@v4 | ||||
| @@ -115,29 +166,38 @@ jobs: | ||||
|         if: ${{ ! endsWith(matrix.TARGET, 'freebsd') }} | ||||
|         run: | | ||||
|           bash ./.github/workflows/install_rust.sh | ||||
|  | ||||
|           # set the sysroot only when the sysroot dir exists | ||||
|           # this dir is a symlink generated by install_rust.sh | ||||
|           # kcp-sys needs it to generate FFI bindings; without it, clang may fail to find some libc headers such as bits/libc-header-start.h | ||||
|           export KCP_SYS_EXTRA_HEADER_PATH=/usr/include/musl-cross | ||||
|           if [[ -d "./musl_gcc/sysroot" ]]; then | ||||
|             export BINDGEN_EXTRA_CLANG_ARGS=--sysroot=$(readlink -f ./musl_gcc/sysroot) | ||||
|           fi | ||||
|  | ||||
|           if [[ $OS =~ ^ubuntu.*$ && $TARGET =~ ^mips.*$ ]]; then | ||||
|             cargo +nightly build -r --verbose --target $TARGET -Z build-std=std,panic_abort --no-default-features --features mips --package=easytier | ||||
|             cargo +nightly build -r --target $TARGET -Z build-std=std,panic_abort --package=easytier | ||||
|           else | ||||
|             cargo build --release --verbose --target $TARGET | ||||
|             if [[ $OS =~ ^windows.*$ ]]; then | ||||
|               SUFFIX=.exe | ||||
|             fi | ||||
|             cargo build --release --target $TARGET --package=easytier-web --features=embed | ||||
|             mv ./target/$TARGET/release/easytier-web"$SUFFIX" ./target/$TARGET/release/easytier-web-embed"$SUFFIX" | ||||
|             cargo build --release --target $TARGET | ||||
|           fi | ||||
|  | ||||
|       # Copied and slightly modified from @lmq8267 (https://github.com/lmq8267) | ||||
|       - name: Build Core & Cli (X86_64 FreeBSD) | ||||
|         uses: cross-platform-actions/action@v0.23.0 | ||||
|         uses: vmactions/freebsd-vm@v1 | ||||
|         if: ${{ endsWith(matrix.TARGET, 'freebsd') }} | ||||
|         env: | ||||
|           TARGET: ${{ matrix.TARGET }} | ||||
|         with: | ||||
|           operating_system: freebsd | ||||
|           environment_variables: TARGET | ||||
|           architecture: x86-64 | ||||
|           version: ${{ matrix.BSD_VERSION }} | ||||
|           shell: bash | ||||
|           memory: 5G | ||||
|           cpu_count: 4 | ||||
|           envs: TARGET | ||||
|           release: ${{ matrix.BSD_VERSION }} | ||||
|           arch: x86_64 | ||||
|           usesh: true | ||||
|           mem: 6144 | ||||
|           cpu: 4 | ||||
|           run: | | ||||
|             uname -a | ||||
|             echo $SHELL | ||||
| @@ -146,40 +206,36 @@ jobs: | ||||
|             whoami | ||||
|             env | sort | ||||
|  | ||||
|             sudo pkg install -y git protobuf llvm-devel | ||||
|             pkg install -y git protobuf llvm-devel sudo curl | ||||
|             curl --proto 'https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y | ||||
|             source $HOME/.cargo/env | ||||
|             . $HOME/.cargo/env | ||||
|  | ||||
|             rustup set auto-self-update disable | ||||
|  | ||||
|             rustup install 1.84 | ||||
|             rustup default 1.84 | ||||
|             rustup install 1.86 | ||||
|             rustup default 1.86 | ||||
|  | ||||
|             export CC=clang | ||||
|             export CXX=clang++ | ||||
|             export CARGO_TERM_COLOR=always | ||||
|  | ||||
|             cargo build --release --verbose --target $TARGET --package=easytier-web --features=embed | ||||
|             mv ./target/$TARGET/release/easytier-web ./target/$TARGET/release/easytier-web-embed | ||||
|             cargo build --release --verbose --target $TARGET | ||||
|  | ||||
|       - name: Install UPX | ||||
|         if: ${{ matrix.OS != 'macos-latest' }} | ||||
|         uses: crazy-max/ghaction-upx@v3 | ||||
|         with: | ||||
|           version: latest | ||||
|           install-only: true | ||||
|  | ||||
|       - name: Compress | ||||
|         run: | | ||||
|           mkdir -p ./artifacts/objects/ | ||||
|           # windows is the only OS using a different convention for executable file name | ||||
|           if [[ $OS =~ ^windows.*$ && $TARGET =~ ^x86_64.*$ ]]; then | ||||
|               SUFFIX=.exe | ||||
|               cp easytier/third_party/Packet.dll ./artifacts/objects/ | ||||
|               cp easytier/third_party/wintun.dll ./artifacts/objects/ | ||||
|               cp easytier/third_party/*.dll ./artifacts/objects/ | ||||
|           elif [[ $OS =~ ^windows.*$ && $TARGET =~ ^i686.*$ ]]; then | ||||
|               SUFFIX=.exe | ||||
|               cp easytier/third_party/i686/*.dll ./artifacts/objects/ | ||||
|           elif [[ $OS =~ ^windows.*$ && $TARGET =~ ^aarch64.*$ ]]; then | ||||
|               SUFFIX=.exe | ||||
|               cp easytier/third_party/arm64/Packet.dll ./artifacts/objects/ | ||||
|               cp easytier/third_party/arm64/wintun.dll ./artifacts/objects/ | ||||
|               cp easytier/third_party/arm64/*.dll ./artifacts/objects/ | ||||
|           fi | ||||
|           if [[ $GITHUB_REF_TYPE =~ ^tag$ ]]; then | ||||
|             TAG=$GITHUB_REF_NAME | ||||
| @@ -188,14 +244,18 @@ jobs: | ||||
|           fi | ||||
|  | ||||
|           if [[ $OS =~ ^ubuntu.*$ && ! $TARGET =~ ^.*freebsd$ ]]; then | ||||
|             upx --lzma --best ./target/$TARGET/release/easytier-core"$SUFFIX" | ||||
|             upx --lzma --best ./target/$TARGET/release/easytier-cli"$SUFFIX" | ||||
|             UPX_VERSION=4.2.4 | ||||
|             curl -L https://github.com/upx/upx/releases/download/v${UPX_VERSION}/upx-${UPX_VERSION}-amd64_linux.tar.xz -s | tar xJvf - | ||||
|             cp upx-${UPX_VERSION}-amd64_linux/upx . | ||||
|             ./upx --lzma --best ./target/$TARGET/release/easytier-core"$SUFFIX" | ||||
|             ./upx --lzma --best ./target/$TARGET/release/easytier-cli"$SUFFIX" | ||||
|           fi | ||||
|  | ||||
|           mv ./target/$TARGET/release/easytier-core"$SUFFIX" ./artifacts/objects/ | ||||
|           mv ./target/$TARGET/release/easytier-cli"$SUFFIX" ./artifacts/objects/ | ||||
|           if [[ ! $TARGET =~ ^mips.*$ ]]; then | ||||
|             mv ./target/$TARGET/release/easytier-web"$SUFFIX" ./artifacts/objects/ | ||||
|             mv ./target/$TARGET/release/easytier-web-embed"$SUFFIX" ./artifacts/objects/ | ||||
|           fi | ||||
|  | ||||
|           mv ./artifacts/objects/* ./artifacts/ | ||||
| @@ -213,8 +273,47 @@ jobs: | ||||
|     runs-on: ubuntu-latest | ||||
|     needs: | ||||
|       - pre_job | ||||
|       - build_web | ||||
|       - build | ||||
|     steps: | ||||
|       - name: Mark result as failed | ||||
|         if: needs.build.result != 'success' | ||||
|         run: exit 1 | ||||
|  | ||||
|   magisk_build: | ||||
|     needs:  | ||||
|       - pre_job | ||||
|       - build_web | ||||
|       - build | ||||
|     if: needs.pre_job.outputs.should_skip != 'true' && always() | ||||
|     runs-on: ubuntu-latest | ||||
|     steps: | ||||
|       - name: Checkout Code | ||||
|         uses: actions/checkout@v4  # code must be checked out first so the module config is available | ||||
|  | ||||
|       # Download the binaries into a dedicated directory | ||||
|       - name: Download Linux aarch64 binaries | ||||
|         uses: actions/download-artifact@v4 | ||||
|         with: | ||||
|           name: easytier-linux-aarch64 | ||||
|           path: ./downloaded-binaries/  # a dedicated directory avoids conflicts | ||||
|  | ||||
|       # Copy the binaries into the Magisk module directory | ||||
|       - name: Prepare binaries | ||||
|         run: | | ||||
|           mkdir -p ./easytier-contrib/easytier-magisk/ | ||||
|           cp ./downloaded-binaries/easytier-core ./easytier-contrib/easytier-magisk/ | ||||
|           cp ./downloaded-binaries/easytier-cli ./easytier-contrib/easytier-magisk/ | ||||
|           cp ./downloaded-binaries/easytier-web ./easytier-contrib/easytier-magisk/ | ||||
|  | ||||
|  | ||||
|       # Upload the generated module | ||||
|       - name: Upload Magisk Module | ||||
|         uses: actions/upload-artifact@v4 | ||||
|         with: | ||||
|           name: Easytier-Magisk | ||||
|           path: | | ||||
|             ./easytier-contrib/easytier-magisk | ||||
|             !./easytier-contrib/easytier-magisk/build.sh | ||||
|             !./easytier-contrib/easytier-magisk/magisk_update.json | ||||
|           if-no-files-found: error | ||||
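The new build_web job builds the web dashboard once and publishes it as an artifact; each build job then downloads it into easytier-web/frontend/dist/ and compiles easytier-web with the embed feature before the rest of the workspace. A rough local equivalent, with an example target triple:

```sh
# Rough local equivalent of build_web plus the embed build in the job above.
# TARGET is only an example; adjust to the matrix entry you care about.
TARGET=x86_64-unknown-linux-musl
pnpm -r install
pnpm -r --filter "./easytier-web/*" build
cargo build --release --target $TARGET --package=easytier-web --features=embed
mv ./target/$TARGET/release/easytier-web ./target/$TARGET/release/easytier-web-embed
cargo build --release --target $TARGET
```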
.github/workflows/docker.yml (2 changes, vendored)

| @@ -11,7 +11,7 @@ on: | ||||
|       image_tag: | ||||
|         description: 'Tag for this image build' | ||||
|         type: string | ||||
|         default: 'v2.2.4' | ||||
|         default: 'v2.3.2' | ||||
|         required: true | ||||
|       mark_latest: | ||||
|         description: 'Mark this image as latest' | ||||
.github/workflows/gui.yml (105 changes, vendored)

| @@ -63,6 +63,11 @@ jobs: | ||||
|             GUI_TARGET: aarch64-pc-windows-msvc | ||||
|             ARTIFACT_NAME: windows-arm64 | ||||
|  | ||||
|           - TARGET: i686-pc-windows-msvc | ||||
|             OS: windows-latest | ||||
|             GUI_TARGET: i686-pc-windows-msvc | ||||
|             ARTIFACT_NAME: windows-i686 | ||||
|  | ||||
|     runs-on: ${{ matrix.OS }} | ||||
|     env: | ||||
|       NAME: easytier | ||||
| @@ -73,6 +78,56 @@ jobs: | ||||
|     needs: pre_job | ||||
|     if: needs.pre_job.outputs.should_skip != 'true'     | ||||
|     steps: | ||||
|       - name: Install GUI dependencies (x86 only) | ||||
|         if: ${{ matrix.TARGET == 'x86_64-unknown-linux-musl' }} | ||||
|         run: | | ||||
|           sudo apt update | ||||
|           sudo apt install -qq libwebkit2gtk-4.1-dev \ | ||||
|               build-essential \ | ||||
|               curl \ | ||||
|               wget \ | ||||
|               file \ | ||||
|               libgtk-3-dev \ | ||||
|               librsvg2-dev \ | ||||
|               libxdo-dev \ | ||||
|               libssl-dev \ | ||||
|               patchelf | ||||
|  | ||||
|       - name: Install GUI cross compile (aarch64 only) | ||||
|         if: ${{ matrix.TARGET == 'aarch64-unknown-linux-musl' }} | ||||
|         run: | | ||||
|           # see https://tauri.app/v1/guides/building/linux/ | ||||
|           echo "deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ jammy main restricted" | sudo tee /etc/apt/sources.list | ||||
|           echo "deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ jammy-updates main restricted" | sudo tee -a /etc/apt/sources.list | ||||
|           echo "deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ jammy universe" | sudo tee -a /etc/apt/sources.list | ||||
|           echo "deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ jammy-updates universe" | sudo tee -a /etc/apt/sources.list | ||||
|           echo "deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ jammy multiverse" | sudo tee -a /etc/apt/sources.list | ||||
|           echo "deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ jammy-updates multiverse" | sudo tee -a /etc/apt/sources.list | ||||
|           echo "deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ jammy-backports main restricted universe multiverse" | sudo tee -a /etc/apt/sources.list | ||||
|           echo "deb [arch=amd64] http://security.ubuntu.com/ubuntu/ jammy-security main restricted" | sudo tee -a /etc/apt/sources.list | ||||
|           echo "deb [arch=amd64] http://security.ubuntu.com/ubuntu/ jammy-security universe" | sudo tee -a /etc/apt/sources.list | ||||
|           echo "deb [arch=amd64] http://security.ubuntu.com/ubuntu/ jammy-security multiverse" | sudo tee -a /etc/apt/sources.list | ||||
|  | ||||
|           echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy main restricted" | sudo tee -a /etc/apt/sources.list | ||||
|           echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy-updates main restricted" | sudo tee -a /etc/apt/sources.list | ||||
|           echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy universe" | sudo tee -a /etc/apt/sources.list | ||||
|           echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy-updates universe" | sudo tee -a /etc/apt/sources.list | ||||
|           echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy multiverse" | sudo tee -a /etc/apt/sources.list | ||||
|           echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy-updates multiverse" | sudo tee -a /etc/apt/sources.list | ||||
|           echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy-backports main restricted universe multiverse" | sudo tee -a /etc/apt/sources.list | ||||
|           echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy-security main restricted" | sudo tee -a /etc/apt/sources.list | ||||
|           echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy-security universe" | sudo tee -a /etc/apt/sources.list | ||||
|           echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy-security multiverse" | sudo tee -a /etc/apt/sources.list | ||||
|  | ||||
|           sudo dpkg --add-architecture arm64 | ||||
|           sudo apt update | ||||
|           sudo apt install aptitude | ||||
|           sudo aptitude install -y libgstreamer1.0-0:arm64 gstreamer1.0-plugins-base:arm64 gstreamer1.0-plugins-good:arm64 \ | ||||
|             libgstreamer-gl1.0-0:arm64 libgstreamer-plugins-base1.0-0:arm64 libgstreamer-plugins-good1.0-0:arm64 libwebkit2gtk-4.1-0:arm64 \ | ||||
|             libwebkit2gtk-4.1-dev:arm64 libssl-dev:arm64 gcc-aarch64-linux-gnu | ||||
|           echo "PKG_CONFIG_SYSROOT_DIR=/usr/aarch64-linux-gnu/" >> "$GITHUB_ENV" | ||||
|           echo "PKG_CONFIG_PATH=/usr/lib/aarch64-linux-gnu/pkgconfig/" >> "$GITHUB_ENV" | ||||
|  | ||||
|       - uses: actions/checkout@v3 | ||||
|  | ||||
|       - name: Set current ref as env variable | ||||
| @@ -124,59 +179,13 @@ jobs: | ||||
|           # GitHub repo token to use to avoid rate limiter | ||||
|           repo-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|  | ||||
|       - name: Install GUI dependencies (x86 only) | ||||
|         if: ${{ matrix.TARGET == 'x86_64-unknown-linux-musl' }} | ||||
|         run: | | ||||
|           sudo apt install -qq libwebkit2gtk-4.1-dev \ | ||||
|               build-essential \ | ||||
|               curl \ | ||||
|               wget \ | ||||
|               file \ | ||||
|               libgtk-3-dev \ | ||||
|               librsvg2-dev \ | ||||
|               libxdo-dev \ | ||||
|               libssl-dev \ | ||||
|               patchelf | ||||
|  | ||||
|       - name: Install GUI cross compile (aarch64 only) | ||||
|         if: ${{ matrix.TARGET == 'aarch64-unknown-linux-musl' }} | ||||
|         run: | | ||||
|           # see https://tauri.app/v1/guides/building/linux/ | ||||
|           echo "deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ jammy main restricted" | sudo tee /etc/apt/sources.list | ||||
|           echo "deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ jammy-updates main restricted" | sudo tee -a /etc/apt/sources.list | ||||
|           echo "deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ jammy universe" | sudo tee -a /etc/apt/sources.list | ||||
|           echo "deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ jammy-updates universe" | sudo tee -a /etc/apt/sources.list | ||||
|           echo "deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ jammy multiverse" | sudo tee -a /etc/apt/sources.list | ||||
|           echo "deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ jammy-updates multiverse" | sudo tee -a /etc/apt/sources.list | ||||
|           echo "deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ jammy-backports main restricted universe multiverse" | sudo tee -a /etc/apt/sources.list | ||||
|           echo "deb [arch=amd64] http://security.ubuntu.com/ubuntu/ jammy-security main restricted" | sudo tee -a /etc/apt/sources.list | ||||
|           echo "deb [arch=amd64] http://security.ubuntu.com/ubuntu/ jammy-security universe" | sudo tee -a /etc/apt/sources.list | ||||
|           echo "deb [arch=amd64] http://security.ubuntu.com/ubuntu/ jammy-security multiverse" | sudo tee -a /etc/apt/sources.list | ||||
|  | ||||
|           echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy main restricted" | sudo tee -a /etc/apt/sources.list | ||||
|           echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy-updates main restricted" | sudo tee -a /etc/apt/sources.list | ||||
|           echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy universe" | sudo tee -a /etc/apt/sources.list | ||||
|           echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy-updates universe" | sudo tee -a /etc/apt/sources.list | ||||
|           echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy multiverse" | sudo tee -a /etc/apt/sources.list | ||||
|           echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy-updates multiverse" | sudo tee -a /etc/apt/sources.list | ||||
|           echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy-backports main restricted universe multiverse" | sudo tee -a /etc/apt/sources.list | ||||
|           echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy-security main restricted" | sudo tee -a /etc/apt/sources.list | ||||
|           echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy-security universe" | sudo tee -a /etc/apt/sources.list | ||||
|           echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy-security multiverse" | sudo tee -a /etc/apt/sources.list | ||||
|  | ||||
|           sudo dpkg --add-architecture arm64 | ||||
|           sudo apt-get update | ||||
|           sudo apt-get install -y libgstreamer1.0-0:arm64 gstreamer1.0-plugins-base:arm64 gstreamer1.0-plugins-good:arm64  | ||||
|           sudo apt-get install -y libgstreamer-gl1.0-0:arm64 libgstreamer-plugins-base1.0-0:arm64 libgstreamer-plugins-good1.0-0:arm64 libwebkit2gtk-4.1-0:arm64 | ||||
|           sudo apt install -f -o Dpkg::Options::="--force-overwrite" libwebkit2gtk-4.1-dev:arm64 libssl-dev:arm64 gcc-aarch64-linux-gnu | ||||
|           echo "PKG_CONFIG_SYSROOT_DIR=/usr/aarch64-linux-gnu/" >> "$GITHUB_ENV" | ||||
|           echo "PKG_CONFIG_PATH=/usr/lib/aarch64-linux-gnu/pkgconfig/" >> "$GITHUB_ENV" | ||||
|  | ||||
|       - name: copy correct DLLs | ||||
|         if: ${{ matrix.OS == 'windows-latest' }} | ||||
|         run: | | ||||
|           if [[ $GUI_TARGET =~ ^aarch64.*$ ]]; then | ||||
|             cp ./easytier/third_party/arm64/*.dll ./easytier-gui/src-tauri/ | ||||
|           elif [[ $GUI_TARGET =~ ^i686.*$ ]]; then | ||||
|             cp ./easytier/third_party/i686/*.dll ./easytier-gui/src-tauri/ | ||||
|           else | ||||
|             cp ./easytier/third_party/*.dll ./easytier-gui/src-tauri/ | ||||
|           fi | ||||
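The aarch64 steps above only install the cross packages and export the pkg-config variables; the Tauri build itself happens later in the workflow. A minimal sketch of how those variables would be consumed, under stated assumptions:

```sh
# Sketch under assumptions: easytier-gui exposes a `tauri` script in package.json and
# GUI_TARGET is the aarch64 Linux triple chosen by the matrix (not visible in this hunk).
export PKG_CONFIG_SYSROOT_DIR=/usr/aarch64-linux-gnu/
export PKG_CONFIG_PATH=/usr/lib/aarch64-linux-gnu/pkgconfig/
cd easytier-gui && pnpm tauri build --target "$GUI_TARGET"
```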
.github/workflows/install_rust.sh (52 changes, vendored)

| @@ -8,49 +8,33 @@ | ||||
| # dependencies are only needed on ubuntu as that's the only place where | ||||
| # we make cross-compilation | ||||
| if [[ $OS =~ ^ubuntu.*$ ]]; then | ||||
|     sudo apt-get update && sudo apt-get install -qq crossbuild-essential-arm64 crossbuild-essential-armhf musl-tools libappindicator3-dev llvm clang | ||||
|     #  curl -s musl.cc | grep mipsel | ||||
|     case $TARGET in | ||||
|     mipsel-unknown-linux-musl) | ||||
|         MUSL_URI=mipsel-linux-muslsf | ||||
|         ;; | ||||
|     mips-unknown-linux-musl) | ||||
|         MUSL_URI=mips-linux-muslsf | ||||
|         ;; | ||||
|     aarch64-unknown-linux-musl) | ||||
|         MUSL_URI=aarch64-linux-musl | ||||
|         ;; | ||||
|     armv7-unknown-linux-musleabihf) | ||||
|         MUSL_URI=armv7l-linux-musleabihf | ||||
|         ;; | ||||
|     armv7-unknown-linux-musleabi) | ||||
|         MUSL_URI=armv7m-linux-musleabi | ||||
|         ;; | ||||
|     arm-unknown-linux-musleabihf) | ||||
|         MUSL_URI=arm-linux-musleabihf | ||||
|         ;; | ||||
|     arm-unknown-linux-musleabi) | ||||
|         MUSL_URI=arm-linux-musleabi | ||||
|         ;; | ||||
|     esac | ||||
|  | ||||
|     if [ -n "$MUSL_URI" ]; then | ||||
|     sudo apt-get update && sudo apt-get install -qq musl-tools libappindicator3-dev llvm clang | ||||
|     # https://github.com/cross-tools/musl-cross/releases | ||||
|     # if "musl" is a substring of TARGET, we assume that we are using musl | ||||
|     MUSL_TARGET=$TARGET | ||||
|     # if target is mips or mipsel, we should use soft-float version of musl | ||||
|     if [[ $TARGET =~ ^mips.*$ || $TARGET =~ ^mipsel.*$ ]]; then | ||||
|         MUSL_TARGET=${TARGET}sf | ||||
|     fi | ||||
|     if [[ $MUSL_TARGET =~ musl ]]; then | ||||
|         mkdir -p ./musl_gcc | ||||
|         wget --inet4-only -c https://musl.cc/${MUSL_URI}-cross.tgz -P ./musl_gcc/ | ||||
|         tar zxf ./musl_gcc/${MUSL_URI}-cross.tgz -C ./musl_gcc/ | ||||
|         sudo ln -s $(pwd)/musl_gcc/${MUSL_URI}-cross/bin/*gcc /usr/bin/ | ||||
|         sudo ln -s $(pwd)/musl_gcc/${MUSL_URI}-cross/${MUSL_URI}/include/ /usr/include/musl-cross | ||||
|         wget --inet4-only -c https://github.com/cross-tools/musl-cross/releases/download/20250520/${MUSL_TARGET}.tar.xz -P ./musl_gcc/ | ||||
|         tar xf ./musl_gcc/${MUSL_TARGET}.tar.xz -C ./musl_gcc/ | ||||
|         sudo ln -sf $(pwd)/musl_gcc/${MUSL_TARGET}/bin/*gcc /usr/bin/ | ||||
|         sudo ln -sf $(pwd)/musl_gcc/${MUSL_TARGET}/include/ /usr/include/musl-cross | ||||
|         sudo ln -sf $(pwd)/musl_gcc/${MUSL_TARGET}/${MUSL_TARGET}/sysroot/ ./musl_gcc/sysroot | ||||
|         sudo chmod -R a+rwx ./musl_gcc | ||||
|     fi | ||||
| fi | ||||
|  | ||||
| # see https://github.com/rust-lang/rustup/issues/3709 | ||||
| rustup set auto-self-update disable | ||||
| rustup install 1.84 | ||||
| rustup default 1.84 | ||||
| rustup install 1.86 | ||||
| rustup default 1.86 | ||||
|  | ||||
| # mips/mipsel cannot add target from rustup, need compile by ourselves | ||||
| if [[ $OS =~ ^ubuntu.*$ && $TARGET =~ ^mips.*$ ]]; then | ||||
|     cd "$PWD/musl_gcc/${MUSL_URI}-cross/lib/gcc/${MUSL_URI}/11.2.1" || exit 255 | ||||
|     cd "$PWD/musl_gcc/${MUSL_TARGET}/lib/gcc/${MUSL_TARGET}/15.1.0" || exit 255 | ||||
|     # for panic-abort | ||||
|     cp libgcc_eh.a libunwind.a | ||||
|  | ||||
.github/workflows/release.yml (13 changes, vendored)

| @@ -21,7 +21,7 @@ on: | ||||
|       version: | ||||
|         description: 'Version for this release' | ||||
|         type: string | ||||
|         default: 'v2.2.4' | ||||
|         default: 'v2.3.2' | ||||
|         required: true | ||||
|       make_latest: | ||||
|         description: 'Mark this release as latest' | ||||
| @@ -57,7 +57,7 @@ jobs: | ||||
|           repo: EasyTier/EasyTier | ||||
|           path: release_assets_nozip | ||||
|  | ||||
|       - name: Download GUI Artifact | ||||
|       - name: Download Mobile Artifact | ||||
|         uses: dawidd6/action-download-artifact@v6 | ||||
|         with: | ||||
|           github_token: ${{secrets.GITHUB_TOKEN}} | ||||
| @@ -78,7 +78,14 @@ jobs: | ||||
|           ls -l -R ./ | ||||
|           chmod -R 755 . | ||||
|           for x in `ls`; do | ||||
|             zip ../zipped_assets/$x-${VERSION}.zip $x/*; | ||||
|             if [ "$x" = "Easytier-Magisk" ]; then | ||||
|               # for Easytier-Magisk, make sure files are in the root of the zip | ||||
|               cd $x; | ||||
|               zip -r ../../zipped_assets/$x-${VERSION}.zip .; | ||||
|               cd ..; | ||||
|             else | ||||
|               zip -r ../zipped_assets/$x-${VERSION}.zip $x; | ||||
|             fi | ||||
|           done | ||||
|  | ||||
|       - name: Release | ||||
.github/workflows/test.yml (32 changes, vendored)

| @@ -47,11 +47,40 @@ jobs: | ||||
|  | ||||
|       - name: Setup system for test | ||||
|         run: | | ||||
|           sudo modprobe br_netfilter | ||||
|           sudo sysctl net.bridge.bridge-nf-call-iptables=0 | ||||
|           sudo sysctl net.bridge.bridge-nf-call-ip6tables=0 | ||||
|           sudo sysctl net.ipv6.conf.lo.disable_ipv6=0 | ||||
|           sudo ip addr add 2001:db8::2/64 dev lo | ||||
|  | ||||
|       - uses: actions/setup-node@v4 | ||||
|         with: | ||||
|           node-version: 21 | ||||
|  | ||||
|       - name: Install pnpm | ||||
|         uses: pnpm/action-setup@v3 | ||||
|         with: | ||||
|           version: 9 | ||||
|           run_install: false | ||||
|  | ||||
|       - name: Get pnpm store directory | ||||
|         shell: bash | ||||
|         run: | | ||||
|           echo "STORE_PATH=$(pnpm store path --silent)" >> $GITHUB_ENV | ||||
|  | ||||
|       - name: Setup pnpm cache | ||||
|         uses: actions/cache@v4 | ||||
|         with: | ||||
|           path: ${{ env.STORE_PATH }} | ||||
|           key: ${{ runner.os }}-pnpm-store-${{ hashFiles('**/pnpm-lock.yaml') }} | ||||
|           restore-keys: | | ||||
|             ${{ runner.os }}-pnpm-store- | ||||
|  | ||||
|       - name: Install frontend dependencies | ||||
|         run: | | ||||
|           pnpm -r install | ||||
|           pnpm -r --filter "./easytier-web/*"  build | ||||
|  | ||||
|       - name: Cargo cache | ||||
|         uses: actions/cache@v4 | ||||
|         with: | ||||
| @@ -62,6 +91,7 @@ jobs: | ||||
|  | ||||
|       - name: Run tests | ||||
|         run: | | ||||
|           sudo -E env "PATH=$PATH" cargo test --no-default-features --features=full --verbose -- --test-threads=1 --nocapture | ||||
|           sudo prlimit --pid $$ --nofile=1048576:1048576 | ||||
|           sudo -E env "PATH=$PATH" cargo test --no-default-features --features=full --verbose -- --test-threads=1 | ||||
|           sudo chown -R $USER:$USER ./target | ||||
|           sudo chown -R $USER:$USER ~/.cargo | ||||
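The test job now pins the bridge/IPv6 sysctls and raises the open-file limit before running the suite under sudo (presumably because the tests manipulate network interfaces). A sketch of reproducing that environment locally, assembled from the steps above:

```sh
# The CI test environment above, reproduced locally: bridge/IPv6 sysctls, a raised
# file-descriptor limit, and the suite run as root with a single test thread.
sudo modprobe br_netfilter
sudo sysctl net.bridge.bridge-nf-call-iptables=0
sudo sysctl net.bridge.bridge-nf-call-ip6tables=0
sudo sysctl net.ipv6.conf.lo.disable_ipv6=0
sudo ip addr add 2001:db8::2/64 dev lo
sudo prlimit --pid $$ --nofile=1048576:1048576
sudo -E env "PATH=$PATH" cargo test --no-default-features --features=full -- --test-threads=1
```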
Cargo.lock (1157 changes, generated): file diff suppressed because it is too large.

Cargo.toml (10 changes)

| @@ -1,6 +1,12 @@ | ||||
| [workspace] | ||||
| resolver = "2" | ||||
| members = ["easytier", "easytier-gui/src-tauri", "easytier-rpc-build", "easytier-web"] | ||||
| members = [ | ||||
|     "easytier", | ||||
|     "easytier-gui/src-tauri", | ||||
|     "easytier-rpc-build", | ||||
|     "easytier-web", | ||||
|     "easytier-contrib/easytier-ffi", | ||||
| ] | ||||
| default-members = ["easytier", "easytier-web"] | ||||
|  | ||||
| [profile.dev] | ||||
| @@ -10,3 +16,5 @@ panic = "unwind" | ||||
| panic = "abort" | ||||
| lto = true | ||||
| codegen-units = 1 | ||||
| opt-level = 3 | ||||
| strip = true | ||||
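With the two added keys, the release profile would read roughly as follows (a reconstruction from the hunk context, not a verbatim quote of Cargo.toml):

```toml
# Reconstructed release profile after this change; the [profile.release] header
# itself sits outside the visible hunk and is assumed from context.
[profile.release]
panic = "abort"
lto = true
codegen-units = 1
opt-level = 3   # added
strip = true    # added
```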
LICENSE (174 changes)

| @@ -1,73 +1,165 @@ | ||||
| Apache License | ||||
| Version 2.0, January 2004 | ||||
| http://www.apache.org/licenses/ | ||||
|                    GNU LESSER GENERAL PUBLIC LICENSE | ||||
|                        Version 3, 29 June 2007 | ||||
|  | ||||
| TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION | ||||
|  Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/> | ||||
|  Everyone is permitted to copy and distribute verbatim copies | ||||
|  of this license document, but changing it is not allowed. | ||||
|  | ||||
| 1. Definitions. | ||||
|  | ||||
| "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. | ||||
|   This version of the GNU Lesser General Public License incorporates | ||||
| the terms and conditions of version 3 of the GNU General Public | ||||
| License, supplemented by the additional permissions listed below. | ||||
|  | ||||
| "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. | ||||
|   0. Additional Definitions. | ||||
|  | ||||
| "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. | ||||
|   As used herein, "this License" refers to version 3 of the GNU Lesser | ||||
| General Public License, and the "GNU GPL" refers to version 3 of the GNU | ||||
| General Public License. | ||||
|  | ||||
| "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. | ||||
|   "The Library" refers to a covered work governed by this License, | ||||
| other than an Application or a Combined Work as defined below. | ||||
|  | ||||
| "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. | ||||
|   An "Application" is any work that makes use of an interface provided | ||||
| by the Library, but which is not otherwise based on the Library. | ||||
| Defining a subclass of a class defined by the Library is deemed a mode | ||||
| of using an interface provided by the Library. | ||||
|  | ||||
| "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. | ||||
|   A "Combined Work" is a work produced by combining or linking an | ||||
| Application with the Library.  The particular version of the Library | ||||
| with which the Combined Work was made is also called the "Linked | ||||
| Version". | ||||
|  | ||||
| "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). | ||||
|   The "Minimal Corresponding Source" for a Combined Work means the | ||||
| Corresponding Source for the Combined Work, excluding any source code | ||||
| for portions of the Combined Work that, considered in isolation, are | ||||
| based on the Application, and not on the Linked Version. | ||||
|  | ||||
| "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. | ||||
|   The "Corresponding Application Code" for a Combined Work means the | ||||
| object code and/or source code for the Application, including any data | ||||
| and utility programs needed for reproducing the Combined Work from the | ||||
| Application, but excluding the System Libraries of the Combined Work. | ||||
|  | ||||
| "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." | ||||
|   1. Exception to Section 3 of the GNU GPL. | ||||
|  | ||||
| "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. | ||||
|   You may convey a covered work under sections 3 and 4 of this License | ||||
| without being bound by section 3 of the GNU GPL. | ||||
|  | ||||
| 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. | ||||
|   2. Conveying Modified Versions. | ||||
|  | ||||
| 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. | ||||
|   If you modify a copy of the Library, and, in your modifications, a | ||||
| facility refers to a function or data to be supplied by an Application | ||||
| that uses the facility (other than as an argument passed when the | ||||
| facility is invoked), then you may convey a copy of the modified | ||||
| version: | ||||
|  | ||||
| 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: | ||||
|    a) under this License, provided that you make a good faith effort to | ||||
|    ensure that, in the event an Application does not supply the | ||||
|    function or data, the facility still operates, and performs | ||||
|    whatever part of its purpose remains meaningful, or | ||||
|  | ||||
|      (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and | ||||
|    b) under the GNU GPL, with none of the additional permissions of | ||||
|    this License applicable to that copy. | ||||
|  | ||||
|      (b) You must cause any modified files to carry prominent notices stating that You changed the files; and | ||||
|   3. Object Code Incorporating Material from Library Header Files. | ||||
|  | ||||
|      (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and | ||||
|   The object code form of an Application may incorporate material from | ||||
| a header file that is part of the Library.  You may convey such object | ||||
| code under terms of your choice, provided that, if the incorporated | ||||
| material is not limited to numerical parameters, data structure | ||||
| layouts and accessors, or small macros, inline functions and templates | ||||
| (ten or fewer lines in length), you do both of the following: | ||||
|  | ||||
|      (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. | ||||
|    a) Give prominent notice with each copy of the object code that the | ||||
|    Library is used in it and that the Library and its use are | ||||
|    covered by this License. | ||||
|  | ||||
|      You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. | ||||
|    b) Accompany the object code with a copy of the GNU GPL and this license | ||||
|    document. | ||||
|  | ||||
| 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. | ||||
|   4. Combined Works. | ||||
|  | ||||
| 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. | ||||
|   You may convey a Combined Work under terms of your choice that, | ||||
| taken together, effectively do not restrict modification of the | ||||
| portions of the Library contained in the Combined Work and reverse | ||||
| engineering for debugging such modifications, if you also do each of | ||||
| the following: | ||||
|  | ||||
| 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. | ||||
|    a) Give prominent notice with each copy of the Combined Work that | ||||
|    the Library is used in it and that the Library and its use are | ||||
|    covered by this License. | ||||
|  | ||||
| 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. | ||||
|    b) Accompany the Combined Work with a copy of the GNU GPL and this license | ||||
|    document. | ||||
|  | ||||
| 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. | ||||
|  | ||||
| END OF TERMS AND CONDITIONS | ||||
|  | ||||
| APPENDIX: How to apply the Apache License to your work. | ||||
|  | ||||
| To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!)  The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. | ||||
|  | ||||
| Copyright 2023 sunsijie | ||||
|  | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
|  | ||||
| http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
|  | ||||
|    c) For a Combined Work that displays copyright notices during | ||||
|    execution, include the copyright notice for the Library among | ||||
|    these notices, as well as a reference directing the user to the | ||||
|    copies of the GNU GPL and this license document. | ||||
|  | ||||
|    d) Do one of the following: | ||||
|  | ||||
|        0) Convey the Minimal Corresponding Source under the terms of this | ||||
|        License, and the Corresponding Application Code in a form | ||||
|        suitable for, and under terms that permit, the user to | ||||
|        recombine or relink the Application with a modified version of | ||||
|        the Linked Version to produce a modified Combined Work, in the | ||||
|        manner specified by section 6 of the GNU GPL for conveying | ||||
|        Corresponding Source. | ||||
|  | ||||
|        1) Use a suitable shared library mechanism for linking with the | ||||
|        Library.  A suitable mechanism is one that (a) uses at run time | ||||
|        a copy of the Library already present on the user's computer | ||||
|        system, and (b) will operate properly with a modified version | ||||
|        of the Library that is interface-compatible with the Linked | ||||
|        Version. | ||||
|  | ||||
|    e) Provide Installation Information, but only if you would otherwise | ||||
|    be required to provide such information under section 6 of the | ||||
|    GNU GPL, and only to the extent that such information is | ||||
|    necessary to install and execute a modified version of the | ||||
|    Combined Work produced by recombining or relinking the | ||||
|    Application with a modified version of the Linked Version. (If | ||||
|    you use option 4d0, the Installation Information must accompany | ||||
|    the Minimal Corresponding Source and Corresponding Application | ||||
|    Code. If you use option 4d1, you must provide the Installation | ||||
|    Information in the manner specified by section 6 of the GNU GPL | ||||
|    for conveying Corresponding Source.) | ||||
|  | ||||
|   5. Combined Libraries. | ||||
|  | ||||
|   You may place library facilities that are a work based on the | ||||
| Library side by side in a single library together with other library | ||||
| facilities that are not Applications and are not covered by this | ||||
| License, and convey such a combined library under terms of your | ||||
| choice, if you do both of the following: | ||||
|  | ||||
|    a) Accompany the combined library with a copy of the same work based | ||||
|    on the Library, uncombined with any other library facilities, | ||||
|    conveyed under the terms of this License. | ||||
|  | ||||
|    b) Give prominent notice with the combined library that part of it | ||||
|    is a work based on the Library, and explaining where to find the | ||||
|    accompanying uncombined form of the same work. | ||||
|  | ||||
|   6. Revised Versions of the GNU Lesser General Public License. | ||||
|  | ||||
|   The Free Software Foundation may publish revised and/or new versions | ||||
| of the GNU Lesser General Public License from time to time. Such new | ||||
| versions will be similar in spirit to the present version, but may | ||||
| differ in detail to address new problems or concerns. | ||||
|  | ||||
|   Each version is given a distinguishing version number. If the | ||||
| Library as you received it specifies that a certain numbered version | ||||
| of the GNU Lesser General Public License "or any later version" | ||||
| applies to it, you have the option of following the terms and | ||||
| conditions either of that published version or of any later version | ||||
| published by the Free Software Foundation. If the Library as you | ||||
| received it does not specify a version number of the GNU Lesser | ||||
| General Public License, you may choose any version of the GNU Lesser | ||||
| General Public License ever published by the Free Software Foundation. | ||||
|  | ||||
|   If the Library as you received it specifies that a proxy can decide | ||||
| whether future versions of the GNU Lesser General Public License shall | ||||
| apply, that proxy's public statement of acceptance of any version is | ||||
| permanent authorization for you to choose that version for the | ||||
| Library. | ||||
|   | ||||
README.md (35 changed lines)
							| @@ -1,11 +1,13 @@ | ||||
| # EasyTier | ||||
|  | ||||
| [](https://github.com/EasyTier/EasyTier/releases) | ||||
| [](https://github.com/EasyTier/EasyTier/blob/main/LICENSE) | ||||
| [](https://github.com/EasyTier/EasyTier/commits/main) | ||||
| [](https://github.com/EasyTier/EasyTier/issues) | ||||
| [](https://github.com/EasyTier/EasyTier/actions/workflows/core.yml) | ||||
| [](https://github.com/EasyTier/EasyTier/actions/workflows/gui.yml) | ||||
| [](https://github.com/EasyTier/EasyTier/actions/workflows/test.yml) | ||||
| [](https://deepwiki.com/EasyTier/EasyTier) | ||||
|  | ||||
| [简体中文](/README_CN.md) | [English](/README.md) | ||||
|  | ||||
| @@ -62,13 +64,42 @@ EasyTier is a simple, safe and decentralized VPN networking solution implemented | ||||
|     wget -O /tmp/easytier.sh "https://raw.githubusercontent.com/EasyTier/EasyTier/main/script/install.sh" && bash /tmp/easytier.sh install | ||||
|     ``` | ||||
|  | ||||
|     You can also uninstall/update Easytier by the command "uninstall" or "update" of this script | ||||
|     The script supports the following commands and options: | ||||
|  | ||||
|     Commands: | ||||
|     - `install`: Install EasyTier | ||||
|     - `uninstall`: Uninstall EasyTier | ||||
|     - `update`: Update EasyTier to the latest version | ||||
|     - `help`: Show help message | ||||
|  | ||||
|     Options: | ||||
|     - `--skip-folder-verify`: Skip folder verification during installation | ||||
|     - `--skip-folder-fix`: Skip automatic folder path fixing | ||||
|     - `--no-gh-proxy`: Disable GitHub proxy | ||||
|     - `--gh-proxy`: Set custom GitHub proxy URL (default: https://ghfast.top/) | ||||
|  | ||||
|     Examples: | ||||
|     ```sh | ||||
|     # Show help | ||||
|     bash /tmp/easytier.sh help | ||||
|  | ||||
|     # Install with options | ||||
|     bash /tmp/easytier.sh install --skip-folder-verify | ||||
|     bash /tmp/easytier.sh install --no-gh-proxy | ||||
|     bash /tmp/easytier.sh install --gh-proxy https://your-proxy.com/ | ||||
|  | ||||
|     # Update EasyTier | ||||
|     bash /tmp/easytier.sh update | ||||
|  | ||||
|     # Uninstall EasyTier | ||||
|     bash /tmp/easytier.sh uninstall | ||||
|     ``` | ||||
|  | ||||
| 6. **Install by Homebrew (For MacOS Only)** | ||||
|  | ||||
|     ```sh | ||||
|     brew tap brewforge/chinese | ||||
|     brew install --cask easytier | ||||
|     brew install --cask easytier-gui | ||||
|     ``` | ||||
|  | ||||
| ## Quick Start | ||||
|   | ||||
README_CN.md (33 changed lines)
							| @@ -61,13 +61,42 @@ | ||||
|     wget -O /tmp/easytier.sh "https://raw.githubusercontent.com/EasyTier/EasyTier/main/script/install.sh" && bash /tmp/easytier.sh install | ||||
|     ``` | ||||
|  | ||||
|     使用本脚本安装的 Easytier 可以使用脚本的 uninstall/update 对其卸载/升级 | ||||
|     脚本支持以下命令和选项: | ||||
|  | ||||
|     命令: | ||||
|     - `install`: 安装 EasyTier | ||||
|     - `uninstall`: 卸载 EasyTier | ||||
|     - `update`: 更新 EasyTier 到最新版本 | ||||
|     - `help`: 显示帮助信息 | ||||
|  | ||||
|     选项: | ||||
|     - `--skip-folder-verify`: 跳过安装过程中的文件夹验证 | ||||
|     - `--skip-folder-fix`: 跳过自动修复文件夹路径 | ||||
|     - `--no-gh-proxy`: 禁用 GitHub 代理 | ||||
|     - `--gh-proxy`: 设置自定义 GitHub 代理 URL (默认值: https://ghfast.top/) | ||||
|  | ||||
|     示例: | ||||
|     ```sh | ||||
|     # 查看帮助 | ||||
|     bash /tmp/easytier.sh help | ||||
|  | ||||
|     # 安装(带选项) | ||||
|     bash /tmp/easytier.sh install --skip-folder-verify | ||||
|     bash /tmp/easytier.sh install --no-gh-proxy | ||||
|     bash /tmp/easytier.sh install --gh-proxy https://your-proxy.com/ | ||||
|  | ||||
|     # 更新 EasyTier | ||||
|     bash /tmp/easytier.sh update | ||||
|  | ||||
|     # 卸载 EasyTier | ||||
|     bash /tmp/easytier.sh uninstall | ||||
|     ``` | ||||
|  | ||||
| 6. **使用 Homebrew 安装 (仅适用于 MacOS)** | ||||
|  | ||||
|     ```sh | ||||
|     brew tap brewforge/chinese | ||||
|     brew install --cask easytier | ||||
|     brew install --cask easytier-gui | ||||
|     ``` | ||||
|  | ||||
| ## 快速开始 | ||||
|   | ||||
easytier-contrib/easytier-ffi/Cargo.toml (new file, 17 lines)
							| @@ -0,0 +1,17 @@ | ||||
| [package] | ||||
| name = "easytier-ffi" | ||||
| version = "0.1.0" | ||||
| edition = "2021" | ||||
|  | ||||
| [lib] | ||||
| crate-type = ["cdylib"] | ||||
|  | ||||
| [dependencies] | ||||
| easytier = { path = "../../easytier" } | ||||
|  | ||||
| once_cell = "1.18.0" | ||||
| dashmap = "6.0" | ||||
|  | ||||
| serde = { version = "1.0", features = ["derive"] } | ||||
| serde_json = "1" | ||||
| uuid = "1.17.0" | ||||
easytier-contrib/easytier-ffi/examples/csharp.cs (new file, 159 lines)
							| @@ -0,0 +1,159 @@ | ||||
| using System; | ||||
| using System.Collections.Generic; | ||||
| using System.Runtime.InteropServices; | ||||
|  | ||||
| public class EasyTierFFI | ||||
| { | ||||
|     // P/Invoke declarations for the easytier-ffi native library | ||||
|     private const string DllName = "easytier_ffi.dll"; | ||||
|  | ||||
|     [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] | ||||
|     private static extern int parse_config([MarshalAs(UnmanagedType.LPStr)] string cfgStr); | ||||
|  | ||||
|     [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] | ||||
|     private static extern int run_network_instance([MarshalAs(UnmanagedType.LPStr)] string cfgStr); | ||||
|  | ||||
|     [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] | ||||
|     private static extern int retain_network_instance(IntPtr instNames, int length); | ||||
|  | ||||
|     [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] | ||||
|     private static extern int collect_network_infos(IntPtr infos, int maxLength); | ||||
|  | ||||
|     [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] | ||||
|     private static extern void get_error_msg(out IntPtr errorMsg); | ||||
|  | ||||
|     [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)] | ||||
|     private static extern void free_string(IntPtr str); | ||||
|  | ||||
|     // KeyValuePair struct matching the native FFI layout | ||||
|     [StructLayout(LayoutKind.Sequential)] | ||||
|     public struct KeyValuePair | ||||
|     { | ||||
|         public IntPtr Key; | ||||
|         public IntPtr Value; | ||||
|     } | ||||
|  | ||||
|     // Parse and validate a configuration string | ||||
|     public static void ParseConfig(string config) | ||||
|     { | ||||
|         if (string.IsNullOrEmpty(config)) | ||||
|         { | ||||
|             throw new ArgumentException("Configuration string cannot be null or empty."); | ||||
|         } | ||||
|  | ||||
|         int result = parse_config(config); | ||||
|         if (result < 0) | ||||
|         { | ||||
|             throw new Exception(GetErrorMessage()); | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     // Start a network instance from a config | ||||
|     public static void RunNetworkInstance(string config) | ||||
|     { | ||||
|         if (string.IsNullOrEmpty(config)) | ||||
|         { | ||||
|             throw new ArgumentException("Configuration string cannot be null or empty."); | ||||
|         } | ||||
|  | ||||
|         int result = run_network_instance(config); | ||||
|         if (result < 0) | ||||
|         { | ||||
|             throw new Exception(GetErrorMessage()); | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     // Retain only the given network instances (all others are stopped) | ||||
|     public static void RetainNetworkInstances(string[] instanceNames) | ||||
|     { | ||||
|         IntPtr[] namePointers = null; | ||||
|         IntPtr namesPtr = IntPtr.Zero; | ||||
|  | ||||
|         try | ||||
|         { | ||||
|             if (instanceNames != null && instanceNames.Length > 0) | ||||
|             { | ||||
|                 namePointers = new IntPtr[instanceNames.Length]; | ||||
|                 for (int i = 0; i < instanceNames.Length; i++) | ||||
|                 { | ||||
|                     if (string.IsNullOrEmpty(instanceNames[i])) | ||||
|                     { | ||||
|                         throw new ArgumentException("Instance name cannot be null or empty."); | ||||
|                     } | ||||
|                     namePointers[i] = Marshal.StringToHGlobalAnsi(instanceNames[i]); | ||||
|                 } | ||||
|  | ||||
|                 namesPtr = Marshal.AllocHGlobal(Marshal.SizeOf<IntPtr>() * namePointers.Length); | ||||
|                 Marshal.Copy(namePointers, 0, namesPtr, namePointers.Length); | ||||
|             } | ||||
|  | ||||
|             int result = retain_network_instance(namesPtr, instanceNames?.Length ?? 0); | ||||
|             if (result < 0) | ||||
|             { | ||||
|                 throw new Exception(GetErrorMessage()); | ||||
|             } | ||||
|         } | ||||
|         finally | ||||
|         { | ||||
|             if (namePointers != null) | ||||
|             { | ||||
|                 foreach (var ptr in namePointers) | ||||
|                 { | ||||
|                     if (ptr != IntPtr.Zero) | ||||
|                     { | ||||
|                         Marshal.FreeHGlobal(ptr); | ||||
|                     } | ||||
|                 } | ||||
|             } | ||||
|  | ||||
|             if (namesPtr != IntPtr.Zero) | ||||
|             { | ||||
|                 Marshal.FreeHGlobal(namesPtr); | ||||
|             } | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     // Collect running network info as (instance name, JSON) pairs | ||||
|     public static KeyValuePair<string, string>[] CollectNetworkInfos(int maxLength) | ||||
|     { | ||||
|         IntPtr buffer = Marshal.AllocHGlobal(Marshal.SizeOf<KeyValuePair>() * maxLength); | ||||
|         try | ||||
|         { | ||||
|             int count = collect_network_infos(buffer, maxLength); | ||||
|             if (count < 0) | ||||
|             { | ||||
|                 throw new Exception(GetErrorMessage()); | ||||
|             } | ||||
|  | ||||
|             var result = new KeyValuePair<string, string>[count]; | ||||
|             for (int i = 0; i < count; i++) | ||||
|             { | ||||
|                 var kv = Marshal.PtrToStructure<KeyValuePair>(buffer + i * Marshal.SizeOf<KeyValuePair>()); | ||||
|                 string key = Marshal.PtrToStringAnsi(kv.Key); | ||||
|                 string value = Marshal.PtrToStringAnsi(kv.Value); | ||||
|  | ||||
|                 // Free the strings allocated by the FFI side | ||||
|                 free_string(kv.Key); | ||||
|                 free_string(kv.Value); | ||||
|  | ||||
|                 result[i] = new KeyValuePair<string, string>(key, value); | ||||
|             } | ||||
|  | ||||
|             return result; | ||||
|         } | ||||
|         finally | ||||
|         { | ||||
|             Marshal.FreeHGlobal(buffer); | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     // Fetch the last error message | ||||
|     private static string GetErrorMessage() | ||||
|     { | ||||
|         get_error_msg(out IntPtr errorMsgPtr); | ||||
|         if (errorMsgPtr == IntPtr.Zero) | ||||
|         { | ||||
|             return "Unknown error"; | ||||
|         } | ||||
|  | ||||
|         string errorMsg = Marshal.PtrToStringAnsi(errorMsgPtr); | ||||
|         free_string(errorMsgPtr); // free the error message string | ||||
|         return errorMsg; | ||||
|     } | ||||
| } | ||||
easytier-contrib/easytier-ffi/src/lib.rs (new file, 224 lines)
							| @@ -0,0 +1,224 @@ | ||||
| use std::sync::Mutex; | ||||
|  | ||||
| use dashmap::DashMap; | ||||
| use easytier::{ | ||||
|     common::config::{ConfigLoader as _, TomlConfigLoader}, | ||||
|     instance_manager::NetworkInstanceManager, | ||||
|     launcher::ConfigSource, | ||||
| }; | ||||
|  | ||||
| static INSTANCE_NAME_ID_MAP: once_cell::sync::Lazy<DashMap<String, uuid::Uuid>> = | ||||
|     once_cell::sync::Lazy::new(DashMap::new); | ||||
| static INSTANCE_MANAGER: once_cell::sync::Lazy<NetworkInstanceManager> = | ||||
|     once_cell::sync::Lazy::new(NetworkInstanceManager::new); | ||||
|  | ||||
| static ERROR_MSG: once_cell::sync::Lazy<Mutex<Vec<u8>>> = | ||||
|     once_cell::sync::Lazy::new(|| Mutex::new(Vec::new())); | ||||
|  | ||||
| #[repr(C)] | ||||
| pub struct KeyValuePair { | ||||
|     pub key: *const std::ffi::c_char, | ||||
|     pub value: *const std::ffi::c_char, | ||||
| } | ||||
|  | ||||
| fn set_error_msg(msg: &str) { | ||||
|     let bytes = msg.as_bytes(); | ||||
|     let mut msg_buf = ERROR_MSG.lock().unwrap(); | ||||
|     let len = bytes.len(); | ||||
|     msg_buf.resize(len, 0); | ||||
|     msg_buf[..len].copy_from_slice(bytes); | ||||
| } | ||||
|  | ||||
| #[no_mangle] | ||||
| pub extern "C" fn get_error_msg(out: *mut *const std::ffi::c_char) { | ||||
|     let msg_buf = ERROR_MSG.lock().unwrap(); | ||||
|     if msg_buf.is_empty() { | ||||
|         unsafe { | ||||
|             *out = std::ptr::null(); | ||||
|         } | ||||
|         return; | ||||
|     } | ||||
|     let cstr = std::ffi::CString::new(&msg_buf[..]).unwrap(); | ||||
|     unsafe { | ||||
|         *out = cstr.into_raw(); | ||||
|     } | ||||
| } | ||||
|  | ||||
| #[no_mangle] | ||||
| pub extern "C" fn free_string(s: *const std::ffi::c_char) { | ||||
|     if s.is_null() { | ||||
|         return; | ||||
|     } | ||||
|     unsafe { | ||||
|         let _ = std::ffi::CString::from_raw(s as *mut std::ffi::c_char); | ||||
|     } | ||||
| } | ||||
|  | ||||
| #[no_mangle] | ||||
| pub extern "C" fn parse_config(cfg_str: *const std::ffi::c_char) -> std::ffi::c_int { | ||||
|     let cfg_str = unsafe { | ||||
|         assert!(!cfg_str.is_null()); | ||||
|         std::ffi::CStr::from_ptr(cfg_str) | ||||
|             .to_string_lossy() | ||||
|             .into_owned() | ||||
|     }; | ||||
|  | ||||
|     if let Err(e) = TomlConfigLoader::new_from_str(&cfg_str) { | ||||
|         set_error_msg(&format!("failed to parse config: {:?}", e)); | ||||
|         return -1; | ||||
|     } | ||||
|  | ||||
|     0 | ||||
| } | ||||
|  | ||||
| #[no_mangle] | ||||
| pub extern "C" fn run_network_instance(cfg_str: *const std::ffi::c_char) -> std::ffi::c_int { | ||||
|     let cfg_str = unsafe { | ||||
|         assert!(!cfg_str.is_null()); | ||||
|         std::ffi::CStr::from_ptr(cfg_str) | ||||
|             .to_string_lossy() | ||||
|             .into_owned() | ||||
|     }; | ||||
|     let cfg = match TomlConfigLoader::new_from_str(&cfg_str) { | ||||
|         Ok(cfg) => cfg, | ||||
|         Err(e) => { | ||||
|             set_error_msg(&format!("failed to parse config: {}", e)); | ||||
|             return -1; | ||||
|         } | ||||
|     }; | ||||
|  | ||||
|     let inst_name = cfg.get_inst_name(); | ||||
|  | ||||
|     if INSTANCE_NAME_ID_MAP.contains_key(&inst_name) { | ||||
|         set_error_msg("instance already exists"); | ||||
|         return -1; | ||||
|     } | ||||
|  | ||||
|     let instance_id = match INSTANCE_MANAGER.run_network_instance(cfg, ConfigSource::FFI) { | ||||
|         Ok(id) => id, | ||||
|         Err(e) => { | ||||
|             set_error_msg(&format!("failed to start instance: {}", e)); | ||||
|             return -1; | ||||
|         } | ||||
|     }; | ||||
|  | ||||
|     INSTANCE_NAME_ID_MAP.insert(inst_name, instance_id); | ||||
|  | ||||
|     0 | ||||
| } | ||||
|  | ||||
| #[no_mangle] | ||||
| pub extern "C" fn retain_network_instance( | ||||
|     inst_names: *const *const std::ffi::c_char, | ||||
|     length: usize, | ||||
| ) -> std::ffi::c_int { | ||||
|     if length == 0 { | ||||
|         if let Err(e) = INSTANCE_MANAGER.retain_network_instance(Vec::new()) { | ||||
|             set_error_msg(&format!("failed to retain instances: {}", e)); | ||||
|             return -1; | ||||
|         } | ||||
|         INSTANCE_NAME_ID_MAP.clear(); | ||||
|         return 0; | ||||
|     } | ||||
|  | ||||
|     let inst_names = unsafe { | ||||
|         assert!(!inst_names.is_null()); | ||||
|         std::slice::from_raw_parts(inst_names, length) | ||||
|             .iter() | ||||
|             .map(|&name| { | ||||
|                 assert!(!name.is_null()); | ||||
|                 std::ffi::CStr::from_ptr(name) | ||||
|                     .to_string_lossy() | ||||
|                     .into_owned() | ||||
|             }) | ||||
|             .collect::<Vec<_>>() | ||||
|     }; | ||||
|  | ||||
|     let inst_ids: Vec<uuid::Uuid> = inst_names | ||||
|         .iter() | ||||
|         .filter_map(|name| INSTANCE_NAME_ID_MAP.get(name).map(|id| *id)) | ||||
|         .collect(); | ||||
|  | ||||
|     if let Err(e) = INSTANCE_MANAGER.retain_network_instance(inst_ids) { | ||||
|         set_error_msg(&format!("failed to retain instances: {}", e)); | ||||
|         return -1; | ||||
|     } | ||||
|  | ||||
|     let _ = INSTANCE_NAME_ID_MAP.retain(|k, _| inst_names.contains(k)); | ||||
|  | ||||
|     0 | ||||
| } | ||||
|  | ||||
| #[no_mangle] | ||||
| pub extern "C" fn collect_network_infos( | ||||
|     infos: *mut KeyValuePair, | ||||
|     max_length: usize, | ||||
| ) -> std::ffi::c_int { | ||||
|     if max_length == 0 { | ||||
|         return 0; | ||||
|     } | ||||
|  | ||||
|     let infos = unsafe { | ||||
|         assert!(!infos.is_null()); | ||||
|         std::slice::from_raw_parts_mut(infos, max_length) | ||||
|     }; | ||||
|  | ||||
|     let collected_infos = match INSTANCE_MANAGER.collect_network_infos() { | ||||
|         Ok(infos) => infos, | ||||
|         Err(e) => { | ||||
|             set_error_msg(&format!("failed to collect network infos: {}", e)); | ||||
|             return -1; | ||||
|         } | ||||
|     }; | ||||
|  | ||||
|     let mut index = 0; | ||||
|     for (instance_id, value) in collected_infos.iter() { | ||||
|         if index >= max_length { | ||||
|             break; | ||||
|         } | ||||
|         let Some(key) = INSTANCE_MANAGER.get_network_instance_name(instance_id) else { | ||||
|             continue; | ||||
|         }; | ||||
|         // convert value to json string | ||||
|         let value = match serde_json::to_string(&value) { | ||||
|             Ok(value) => value, | ||||
|             Err(e) => { | ||||
|                 set_error_msg(&format!("failed to serialize instance info: {}", e)); | ||||
|                 return -1; | ||||
|             } | ||||
|         }; | ||||
|  | ||||
|         infos[index] = KeyValuePair { | ||||
|             key: std::ffi::CString::new(key.clone()).unwrap().into_raw(), | ||||
|             value: std::ffi::CString::new(value).unwrap().into_raw(), | ||||
|         }; | ||||
|         index += 1; | ||||
|     } | ||||
|  | ||||
|     index as std::ffi::c_int | ||||
| } | ||||
|  | ||||
| #[cfg(test)] | ||||
| mod tests { | ||||
|     use super::*; | ||||
|  | ||||
|     #[test] | ||||
|     fn test_parse_config() { | ||||
|         let cfg_str = r#" | ||||
|             inst_name = "test" | ||||
|             network = "test_network" | ||||
|         "#; | ||||
|         let cstr = std::ffi::CString::new(cfg_str).unwrap(); | ||||
|         assert_eq!(parse_config(cstr.as_ptr()), 0); | ||||
|     } | ||||
|  | ||||
|     #[test] | ||||
|     fn test_run_network_instance() { | ||||
|         let cfg_str = r#" | ||||
|             inst_name = "test" | ||||
|             network = "test_network" | ||||
|         "#; | ||||
|         let cstr = std::ffi::CString::new(cfg_str).unwrap(); | ||||
|         assert_eq!(run_network_instance(cstr.as_ptr()), 0); | ||||
|     } | ||||
| } | ||||
| @@ -0,0 +1,33 @@ | ||||
| #!/sbin/sh | ||||
|  | ||||
| ################# | ||||
| # Initialization | ||||
| ################# | ||||
|  | ||||
| umask 022 | ||||
|  | ||||
| # echo before loading util_functions | ||||
| ui_print() { echo "$1"; } | ||||
|  | ||||
| require_new_magisk() { | ||||
|   ui_print "********************************" | ||||
|   ui_print " Please install Magisk v20.4+! " | ||||
|   ui_print "********************************" | ||||
|   exit 1 | ||||
| } | ||||
|  | ||||
| ######################### | ||||
| # Load util_functions.sh | ||||
| ######################### | ||||
|  | ||||
| OUTFD=$2 | ||||
| ZIPFILE=$3 | ||||
|  | ||||
| mount /data 2>/dev/null | ||||
|  | ||||
| [ -f /data/adb/magisk/util_functions.sh ] || require_new_magisk | ||||
| . /data/adb/magisk/util_functions.sh | ||||
| [ $MAGISK_VER_CODE -lt 20400 ] && require_new_magisk | ||||
|  | ||||
| install_module | ||||
| exit 0 | ||||
| @@ -0,0 +1 @@ | ||||
| #MAGISK | ||||
easytier-contrib/easytier-magisk/README.md (new file, 6 lines)
							| @@ -0,0 +1,6 @@ | ||||
| # EasyTier Magisk module | ||||
| Reboot after installing the module in Magisk. | ||||
|  | ||||
| Module directory: /data/adb/modules/easytier_magisk | ||||
| Config file: /data/adb/modules/easytier_magisk/config/config.toml | ||||
| Edit config.toml; after changing it, toggle the module off and on in the Magisk app for the change to take effect. | ||||
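
A quick sketch of the workflow described above, assuming a rooted shell and that a text editor such as `vi` is available (the paths are the ones listed in this README; the restart can also be done from the Magisk app UI):

```sh
# Edit the module configuration (requires root)
su -c 'vi /data/adb/modules/easytier_magisk/config/config.toml'

# Apply the change: toggle the module off and on in the Magisk app,
# or simply stop easytier-core -- the module's watchdog loop restarts
# it with the new configuration within a few seconds.
su -c 'pkill easytier-core'
```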
easytier-contrib/easytier-magisk/action.sh (new file, 14 lines)
							| @@ -0,0 +1,14 @@ | ||||
| #!/data/adb/magisk/busybox sh | ||||
| MODDIR=${0%/*} | ||||
|  | ||||
| # Find the PID of the easytier-core process | ||||
| PID=$(pgrep easytier-core) | ||||
|  | ||||
| # Check whether the process was found | ||||
| if [ -z "$PID" ]; then | ||||
|     echo "easytier-core 进程未找到" | ||||
| else | ||||
|     # Kill the process | ||||
|     kill $PID | ||||
|     echo "已结束 easytier-core 进程 (PID: $PID)" | ||||
| fi | ||||
easytier-contrib/easytier-magisk/build.sh (new file, 25 lines)
							| @@ -0,0 +1,25 @@ | ||||
| #!/bin/sh | ||||
|  | ||||
| # Version from module.prop (kept for reference; immediately overridden below) | ||||
| version=$(cat module.prop | grep 'version=' | awk -F '=' '{print $2}' | sed 's/ (.*//') | ||||
|  | ||||
| # Use the release version from the workspace's easytier/Cargo.toml | ||||
| version='v'$(grep '^version =' ../../easytier/Cargo.toml | cut -d '"' -f 2) | ||||
|  | ||||
| if [ -z "$version" ]; then | ||||
|     echo "Error: version not found." | ||||
|     exit 1 | ||||
| fi | ||||
|  | ||||
| filename="easytier_magisk_${version}.zip" | ||||
| echo "$version" | ||||
|  | ||||
|  | ||||
| if [ -f "./easytier-core" ] && [ -f "./easytier-cli" ] && [ -f "./easytier-web" ]; then | ||||
|     zip -r -o -X "$filename" ./ -x '.git/*' -x '.github/*' -x 'folder/*' -x 'build.sh' -x 'magisk_update.json' | ||||
| else | ||||
|     wget -O "easytier_last.zip" https://github.com/EasyTier/EasyTier/releases/download/"$version"/easytier-linux-aarch64-"$version".zip | ||||
|     unzip -o easytier_last.zip -d ./ | ||||
|     mv ./easytier-linux-aarch64/* ./ | ||||
|     rm -rf ./easytier_last.zip | ||||
|     rm -rf ./easytier-linux-aarch64 | ||||
|     zip -r -o -X "$filename" ./ -x '.git/*' -x '.github/*' -x 'folder/*' -x 'build.sh' -x 'magisk_update.json' | ||||
| fi | ||||
easytier-contrib/easytier-magisk/config/config.toml (new file, 37 lines)
							| @@ -0,0 +1,37 @@ | ||||
| instance_name = "default" | ||||
| dhcp = false | ||||
| #ipv4 = "IP address of this node" | ||||
| listeners = [ | ||||
|     "tcp://0.0.0.0:11010", | ||||
|     "udp://0.0.0.0:11010", | ||||
|     "wg://0.0.0.0:11011", | ||||
|     "ws://0.0.0.0:11011/", | ||||
|     "wss://0.0.0.0:11012/", | ||||
| ] | ||||
| mapped_listeners = [] | ||||
| exit_nodes = [] | ||||
| rpc_portal = "0.0.0.0:15888" | ||||
|  | ||||
| [network_identity] | ||||
| network_name = "default" | ||||
| network_secret = "" | ||||
|  | ||||
| [[peer]] | ||||
| #uri = "protocol://relay-server-ip:port" | ||||
|  | ||||
| [flags] | ||||
| default_protocol = "tcp" | ||||
| dev_name = "" | ||||
| enable_encryption = true | ||||
| enable_ipv6 = true | ||||
| mtu = 1380 | ||||
| latency_first = false | ||||
| enable_exit_node = false | ||||
| no_tun = false | ||||
| use_smoltcp = false | ||||
| foreign_network_whitelist = "*" | ||||
| disable_p2p = false | ||||
| relay_all_peer_rpc = false | ||||
| disable_udp_hole_punching = false | ||||
|  | ||||
|  | ||||
easytier-contrib/easytier-magisk/customize.sh (new file, 7 lines)
							| @@ -0,0 +1,7 @@ | ||||
| ui_print '安装完成' | ||||
| ui_print "当前架构为: $ARCH" | ||||
| ui_print "当前系统版本为: $API" | ||||
| ui_print '安装目录为: /data/adb/modules/easytier_magisk' | ||||
| ui_print '配置文件位置: /data/adb/modules/easytier_magisk/config/config.toml' | ||||
| ui_print '修改配置文件后在 Magisk app 点击操作按钮即可生效' | ||||
| ui_print '记得重启' | ||||
easytier-contrib/easytier-magisk/easytier_core.sh (new file, 48 lines)
							| @@ -0,0 +1,48 @@ | ||||
| #!/system/bin/sh | ||||
|  | ||||
| MODDIR=${0%/*} | ||||
| CONFIG_FILE="${MODDIR}/config/config.toml" | ||||
| LOG_FILE="${MODDIR}/log.log" | ||||
| MODULE_PROP="${MODDIR}/module.prop" | ||||
| EASYTIER="${MODDIR}/easytier-core" | ||||
|  | ||||
| # Update the description field in module.prop | ||||
| update_module_description() { | ||||
|     local status_message=$1 | ||||
|     sed -i "/^description=/c\description=[状态]${status_message}" ${MODULE_PROP} | ||||
| } | ||||
|  | ||||
| if [ ! -e /dev/net/tun ]; then | ||||
|     if [ ! -d /dev/net ]; then | ||||
|         mkdir -p /dev/net | ||||
|     fi | ||||
|  | ||||
|     ln -s /dev/tun /dev/net/tun | ||||
| fi | ||||
|  | ||||
| while true; do | ||||
|     if ls $MODDIR | grep -q "disable"; then | ||||
|         update_module_description "关闭中" | ||||
|         if pgrep -f 'easytier-core' >/dev/null; then | ||||
|             echo "开关控制$(date "+%Y-%m-%d %H:%M:%S") 进程已存在,正在关闭 ..." | ||||
|             pkill easytier-core # stop the process | ||||
|         fi | ||||
|     else | ||||
|         if ! pgrep -f 'easytier-core' >/dev/null; then | ||||
|             if [ ! -f "$CONFIG_FILE" ]; then | ||||
|                 update_module_description "config.toml不存在" | ||||
|                 sleep 3s | ||||
|                 continue | ||||
|             fi | ||||
|  | ||||
|             TZ=Asia/Shanghai ${EASYTIER} -c ${CONFIG_FILE} > ${LOG_FILE} & | ||||
|             sleep 5s # wait for easytier-core to finish starting | ||||
|             update_module_description "已开启(不一定运行成功)" | ||||
|             ip rule add from all lookup main | ||||
|         else | ||||
|             echo "开关控制$(date "+%Y-%m-%d %H:%M:%S") 进程已存在" | ||||
|         fi | ||||
|     fi | ||||
|      | ||||
|     sleep 3s # poll again after 3 seconds | ||||
| done | ||||
easytier-contrib/easytier-magisk/magisk_update.json (new file, 6 lines)
							| @@ -0,0 +1,6 @@ | ||||
| { | ||||
|     "version": "v1.0", | ||||
|     "versionCode": 1, | ||||
|     "zipUrl": "", | ||||
|     "changelog": "" | ||||
| } | ||||
easytier-contrib/easytier-magisk/module.prop (new file, 7 lines)
							| @@ -0,0 +1,7 @@ | ||||
| id=easytier_magisk | ||||
| name=EasyTier_Magisk | ||||
| version=v2.3.2 | ||||
| versionCode=1 | ||||
| author=EasyTier | ||||
| description=easytier magisk module @EasyTier(https://github.com/EasyTier/EasyTier) | ||||
| updateJson=https://raw.githubusercontent.com/EasyTier/EasyTier/refs/heads/main/easytier-contrib/easytier-magisk/magisk_update.json | ||||
easytier-contrib/easytier-magisk/service.sh (new file, 27 lines)
							| @@ -0,0 +1,27 @@ | ||||
| #!/data/adb/magisk/busybox sh | ||||
| MODDIR=${0%/*} | ||||
| # MODDIR="$(dirname $(readlink -f "$0"))" | ||||
| chmod 755 ${MODDIR}/* | ||||
|  | ||||
| # Wait for the system to finish booting | ||||
| while [ "$(getprop sys.boot_completed)" != "1" ]; do | ||||
|   sleep 5s | ||||
| done | ||||
|  | ||||
| # Prevent the system from suspending | ||||
| echo "PowerManagerService.noSuspend" > /sys/power/wake_lock | ||||
|  | ||||
| # Set the module description to the "stopped" state | ||||
| sed -i 's/^description=.*/description=[状态]关闭中/' "$MODDIR/module.prop" | ||||
|  | ||||
| # Wait 3 seconds | ||||
| sleep 3s | ||||
|  | ||||
| "${MODDIR}/easytier_core.sh" & | ||||
|  | ||||
| # Block until the module is disabled, then stop easytier-core | ||||
| while [ ! -f ${MODDIR}/disable ]; do  | ||||
|     sleep 2 | ||||
| done | ||||
|  | ||||
| pkill easytier-core | ||||
easytier-contrib/easytier-magisk/system/etc/resolv.conf (new file, 2 lines)
							| @@ -0,0 +1,2 @@ | ||||
| nameserver 114.114.114.114 | ||||
| nameserver 223.5.5.5 | ||||
easytier-contrib/easytier-magisk/uninstall.sh (new file, 3 lines)
							| @@ -0,0 +1,3 @@ | ||||
| MODDIR=${0%/*} | ||||
| pkill easytier-core # stop the easytier-core process | ||||
| rm -rf $MODDIR/* | ||||
| @@ -18,7 +18,11 @@ cd ../tauri-plugin-vpnservice | ||||
| pnpm install | ||||
| pnpm build | ||||
|  | ||||
| cd ../easytier-gui | ||||
| cd ../easytier-web/frontend-lib | ||||
| pnpm install | ||||
| pnpm build | ||||
|  | ||||
| cd ../../easytier-gui | ||||
| pnpm install | ||||
| pnpm tauri build | ||||
| ``` | ||||
|   | ||||
| @@ -50,7 +50,11 @@ dev_name_placeholder: 注意:当多个网络同时使用相同的TUN接口名 | ||||
| off_text: 点击关闭 | ||||
| on_text: 点击开启 | ||||
| show_config: 显示配置 | ||||
| edit_config: 编辑配置文件 | ||||
| close: 关闭 | ||||
| save: 保存 | ||||
| config_saved: 配置已保存 | ||||
|  | ||||
|  | ||||
| use_latency_first: 延迟优先模式 | ||||
| my_node_info: 当前节点信息 | ||||
| @@ -113,3 +117,4 @@ event: | ||||
|   VpnPortalClientDisconnected: VPN门户客户端已断开连接 | ||||
|   DhcpIpv4Changed: DHCP IPv4地址更改 | ||||
|   DhcpIpv4Conflicted: DHCP IPv4地址冲突 | ||||
|   PortForwardAdded: 端口转发添加 | ||||
|   | ||||
| @@ -51,7 +51,10 @@ dev_name_placeholder: 'Note: When multiple networks use the same TUN interface n | ||||
| off_text: Press to disable | ||||
| on_text: Press to enable | ||||
| show_config: Show Config | ||||
| edit_config: Edit Config File | ||||
| close: Close | ||||
| save: Save | ||||
| config_saved: Configuration saved | ||||
| my_node_info: My Node Info | ||||
| peer_count: Connected | ||||
| upload: Upload | ||||
| @@ -112,3 +115,4 @@ event: | ||||
|   VpnPortalClientDisconnected: VpnPortalClientDisconnected | ||||
|   DhcpIpv4Changed: DhcpIpv4Changed | ||||
|   DhcpIpv4Conflicted: DhcpIpv4Conflicted | ||||
|   PortForwardAdded: PortForwardAdded | ||||
|   | ||||
| @@ -1,7 +1,7 @@ | ||||
| { | ||||
|   "name": "easytier-gui", | ||||
|   "type": "module", | ||||
|   "version": "2.2.4", | ||||
|   "version": "2.3.2", | ||||
|   "private": true, | ||||
|   "packageManager": "pnpm@9.12.1+sha512.e5a7e52a4183a02d5931057f7a0dbff9d5e9ce3161e33fa68ae392125b79282a8a8a470a51dfc8a0ed86221442eb2fb57019b0990ed24fab519bf0e1bc5ccfc4", | ||||
|   "scripts": { | ||||
| @@ -13,7 +13,7 @@ | ||||
|     "lint:fix": "eslint . --ignore-pattern src-tauri --fix" | ||||
|   }, | ||||
|   "dependencies": { | ||||
|     "@primevue/themes": "^4.2.1", | ||||
|     "@primevue/themes": "4.3.3", | ||||
|     "@tauri-apps/plugin-autostart": "2.0.0", | ||||
|     "@tauri-apps/plugin-clipboard-manager": "2.0.0", | ||||
|     "@tauri-apps/plugin-os": "2.0.0", | ||||
| @@ -24,7 +24,7 @@ | ||||
|     "easytier-frontend-lib": "workspace:*", | ||||
|     "ip-num": "1.5.1", | ||||
|     "pinia": "^2.2.4", | ||||
|     "primevue": "^4.2.1", | ||||
|     "primevue": "4.3.3", | ||||
|     "tauri-plugin-vpnservice-api": "workspace:*", | ||||
|     "vue": "^3.5.12", | ||||
|     "vue-router": "^4.4.5" | ||||
| @@ -32,7 +32,7 @@ | ||||
|   "devDependencies": { | ||||
|     "@antfu/eslint-config": "^3.7.3", | ||||
|     "@intlify/unplugin-vue-i18n": "^5.2.0", | ||||
|     "@primevue/auto-import-resolver": "^4.1.0", | ||||
|     "@primevue/auto-import-resolver": "4.3.3", | ||||
|     "@tauri-apps/api": "2.1.0", | ||||
|     "@tauri-apps/cli": "2.1.0", | ||||
|     "@types/default-gateway": "^7.2.2", | ||||
|   | ||||
| @@ -1,6 +1,6 @@ | ||||
| [package] | ||||
| name = "easytier-gui" | ||||
| version = "2.2.4" | ||||
| version = "2.3.2" | ||||
| description = "EasyTier GUI" | ||||
| authors = ["you"] | ||||
| edition = "2021" | ||||
| @@ -14,6 +14,13 @@ crate-type = ["staticlib", "cdylib", "rlib"] | ||||
| [build-dependencies] | ||||
| tauri-build = { version = "2.0.0-rc", features = [] } | ||||
|  | ||||
| # enable thunk-rs when compiling for x86_64 or i686 windows | ||||
| [target.x86_64-pc-windows-msvc.build-dependencies] | ||||
| thunk-rs = { git = "https://github.com/easytier/thunk.git", default-features = false, features = ["win7"] } | ||||
|  | ||||
| [target.i686-pc-windows-msvc.build-dependencies] | ||||
| thunk-rs = { git = "https://github.com/easytier/thunk.git", default-features = false, features = ["win7"] } | ||||
|  | ||||
| [dependencies] | ||||
| # wry 0.47 may crash on android, see https://github.com/EasyTier/EasyTier/issues/527 | ||||
| tauri = { version = "=2.0.6", features = [ | ||||
| @@ -46,6 +53,7 @@ tauri-plugin-positioner = { version = "2.0", features = ["tray-icon"] } | ||||
| tauri-plugin-vpnservice = { path = "../../tauri-plugin-vpnservice" } | ||||
| tauri-plugin-os = "2.0" | ||||
| tauri-plugin-autostart = "2.0" | ||||
| uuid = "1.17.0" | ||||
|  | ||||
|  | ||||
| [features] | ||||
| @@ -53,4 +61,4 @@ tauri-plugin-autostart = "2.0" | ||||
| custom-protocol = ["tauri/custom-protocol"] | ||||
|  | ||||
| [target.'cfg(not(any(target_os = "android", target_os = "ios")))'.dependencies] | ||||
| tauri-plugin-single-instance = "2.0.0-rc.0" | ||||
| tauri-plugin-single-instance = "2.2.3" | ||||
|   | ||||
| @@ -1,3 +1,12 @@ | ||||
| fn main() { | ||||
|     // enable thunk-rs when target os is windows and arch is x86_64 or i686 | ||||
|     #[cfg(target_os = "windows")] | ||||
|     if !std::env::var("TARGET") | ||||
|         .unwrap_or_default() | ||||
|         .contains("aarch64") | ||||
|     { | ||||
|         thunk::thunk(); | ||||
|     } | ||||
|  | ||||
|     tauri_build::build(); | ||||
| } | ||||
|   | ||||
| @@ -3,10 +3,10 @@ | ||||
|  | ||||
| use std::collections::BTreeMap; | ||||
|  | ||||
| use dashmap::DashMap; | ||||
| use easytier::{ | ||||
|     common::config::{ConfigLoader, FileLoggerConfig, TomlConfigLoader}, | ||||
|     launcher::{NetworkConfig, NetworkInstance, NetworkInstanceRunningInfo}, | ||||
|     common::config::{ConfigLoader, FileLoggerConfig, LoggingConfigBuilder, TomlConfigLoader}, | ||||
|     instance_manager::NetworkInstanceManager, | ||||
|     launcher::{ConfigSource, NetworkConfig, NetworkInstanceRunningInfo}, | ||||
|     utils::{self, NewFilterSender}, | ||||
| }; | ||||
|  | ||||
| @@ -17,8 +17,8 @@ pub const AUTOSTART_ARG: &str = "--autostart"; | ||||
| #[cfg(not(target_os = "android"))] | ||||
| use tauri::tray::{MouseButton, MouseButtonState, TrayIconBuilder, TrayIconEvent}; | ||||
|  | ||||
| static INSTANCE_MAP: once_cell::sync::Lazy<DashMap<String, NetworkInstance>> = | ||||
|     once_cell::sync::Lazy::new(DashMap::new); | ||||
| static INSTANCE_MANAGER: once_cell::sync::Lazy<NetworkInstanceManager> = | ||||
|     once_cell::sync::Lazy::new(NetworkInstanceManager::new); | ||||
|  | ||||
| static mut LOGGER_LEVEL_SENDER: once_cell::sync::Lazy<Option<NewFilterSender>> = | ||||
|     once_cell::sync::Lazy::new(Default::default); | ||||
| @@ -42,43 +42,48 @@ fn parse_network_config(cfg: NetworkConfig) -> Result<String, String> { | ||||
|     Ok(toml.dump()) | ||||
| } | ||||
|  | ||||
| #[tauri::command] | ||||
| fn generate_network_config(toml_config: String) -> Result<NetworkConfig, String> { | ||||
|     let config = TomlConfigLoader::new_from_str(&toml_config).map_err(|e| e.to_string())?; | ||||
|     let cfg = NetworkConfig::new_from_config(&config).map_err(|e| e.to_string())?; | ||||
|     Ok(cfg) | ||||
| } | ||||
|  | ||||
| #[tauri::command] | ||||
| fn run_network_instance(cfg: NetworkConfig) -> Result<(), String> { | ||||
|     if INSTANCE_MAP.contains_key(cfg.instance_id()) { | ||||
|         return Err("instance already exists".to_string()); | ||||
|     } | ||||
|     let instance_id = cfg.instance_id().to_string(); | ||||
|  | ||||
|     let cfg = cfg.gen_config().map_err(|e| e.to_string())?; | ||||
|     let mut instance = NetworkInstance::new(cfg); | ||||
|     instance.start().map_err(|e| e.to_string())?; | ||||
|  | ||||
|     INSTANCE_MANAGER | ||||
|         .run_network_instance(cfg, ConfigSource::GUI) | ||||
|         .map_err(|e| e.to_string())?; | ||||
|     println!("instance {} started", instance_id); | ||||
|     INSTANCE_MAP.insert(instance_id, instance); | ||||
|     Ok(()) | ||||
| } | ||||
|  | ||||
| #[tauri::command] | ||||
| fn retain_network_instance(instance_ids: Vec<String>) -> Result<(), String> { | ||||
|     let _ = INSTANCE_MAP.retain(|k, _| instance_ids.contains(k)); | ||||
|     println!( | ||||
|         "instance {:?} retained", | ||||
|         INSTANCE_MAP | ||||
|             .iter() | ||||
|             .map(|item| item.key().clone()) | ||||
|             .collect::<Vec<_>>() | ||||
|     ); | ||||
|     let instance_ids = instance_ids | ||||
|         .into_iter() | ||||
|         .filter_map(|id| uuid::Uuid::parse_str(&id).ok()) | ||||
|         .collect(); | ||||
|     let retained = INSTANCE_MANAGER | ||||
|         .retain_network_instance(instance_ids) | ||||
|         .map_err(|e| e.to_string())?; | ||||
|     println!("instance {:?} retained", retained); | ||||
|     Ok(()) | ||||
| } | ||||
|  | ||||
| #[tauri::command] | ||||
| fn collect_network_infos() -> Result<BTreeMap<String, NetworkInstanceRunningInfo>, String> { | ||||
|     let infos = INSTANCE_MANAGER | ||||
|         .collect_network_infos() | ||||
|         .map_err(|e| e.to_string())?; | ||||
|  | ||||
|     let mut ret = BTreeMap::new(); | ||||
|     for instance in INSTANCE_MAP.iter() { | ||||
|         if let Some(info) = instance.get_running_info() { | ||||
|             ret.insert(instance.key().clone(), info); | ||||
|         } | ||||
|     for (uuid, info) in infos { | ||||
|         ret.insert(uuid.to_string(), info); | ||||
|     } | ||||
|  | ||||
|     Ok(ret) | ||||
| } | ||||
|  | ||||
| @@ -97,10 +102,10 @@ fn set_logging_level(level: String) -> Result<(), String> { | ||||
|  | ||||
| #[tauri::command] | ||||
| fn set_tun_fd(instance_id: String, fd: i32) -> Result<(), String> { | ||||
|     let mut instance = INSTANCE_MAP | ||||
|         .get_mut(&instance_id) | ||||
|         .ok_or("instance not found")?; | ||||
|     instance.set_tun_fd(fd); | ||||
|     let uuid = uuid::Uuid::parse_str(&instance_id).map_err(|e| e.to_string())?; | ||||
|     INSTANCE_MANAGER | ||||
|         .set_tun_fd(&uuid, fd) | ||||
|         .map_err(|e| e.to_string())?; | ||||
|     Ok(()) | ||||
| } | ||||
|  | ||||
| @@ -108,7 +113,12 @@ fn set_tun_fd(instance_id: String, fd: i32) -> Result<(), String> { | ||||
| fn toggle_window_visibility<R: tauri::Runtime>(app: &tauri::AppHandle<R>) { | ||||
|     if let Some(window) = app.get_webview_window("main") { | ||||
|         if window.is_visible().unwrap_or_default() { | ||||
|             let _ = window.hide(); | ||||
|             if window.is_minimized().unwrap_or_default() { | ||||
|                 let _ = window.unminimize(); | ||||
|                 let _ = window.set_focus(); | ||||
|             } else { | ||||
|                 let _ = window.hide(); | ||||
|             } | ||||
|         } else { | ||||
|             let _ = window.show(); | ||||
|             let _ = window.set_focus(); | ||||
| @@ -180,13 +190,15 @@ pub fn run() { | ||||
|             let Ok(log_dir) = app.path().app_log_dir() else { | ||||
|                 return Ok(()); | ||||
|             }; | ||||
|             let config = TomlConfigLoader::default(); | ||||
|             config.set_file_logger_config(FileLoggerConfig { | ||||
|                 dir: Some(log_dir.to_string_lossy().to_string()), | ||||
|                 level: None, | ||||
|                 file: None, | ||||
|             }); | ||||
|             let Ok(Some(logger_reinit)) = utils::init_logger(config, true) else { | ||||
|             let config = LoggingConfigBuilder::default() | ||||
|                 .file_logger(FileLoggerConfig { | ||||
|                     dir: Some(log_dir.to_string_lossy().to_string()), | ||||
|                     level: None, | ||||
|                     file: None, | ||||
|                 }) | ||||
|                 .build() | ||||
|                 .map_err(|e| e.to_string())?; | ||||
|             let Ok(Some(logger_reinit)) = utils::init_logger(&config, true) else { | ||||
|                 return Ok(()); | ||||
|             }; | ||||
|             #[allow(static_mut_refs)] | ||||
| @@ -219,6 +231,7 @@ pub fn run() { | ||||
|         }) | ||||
|         .invoke_handler(tauri::generate_handler![ | ||||
|             parse_network_config, | ||||
|             generate_network_config, | ||||
|             run_network_instance, | ||||
|             retain_network_instance, | ||||
|             collect_network_infos, | ||||
|   | ||||
| @@ -17,7 +17,7 @@ | ||||
|     "createUpdaterArtifacts": false | ||||
|   }, | ||||
|   "productName": "easytier-gui", | ||||
|   "version": "2.2.4", | ||||
|   "version": "2.3.2", | ||||
|   "identifier": "com.kkrainbow.easytier", | ||||
|   "plugins": {}, | ||||
|   "app": { | ||||
|   | ||||
| @@ -8,5 +8,6 @@ onBeforeMount(async () => { | ||||
| </script> | ||||
|  | ||||
| <template> | ||||
|   <Toast position="bottom-right" /> | ||||
|   <RouterView /> | ||||
| </template> | ||||
|   | ||||
easytier-gui/src/auto-imports.d.ts (vendored, 2 changed lines)
							| @@ -23,6 +23,7 @@ declare global { | ||||
|   const effectScope: typeof import('vue')['effectScope'] | ||||
|   const event2human: typeof import('./composables/utils')['event2human'] | ||||
|   const generateMenuItem: typeof import('./composables/tray')['generateMenuItem'] | ||||
|   const generateNetworkConfig: typeof import('./composables/network')['generateNetworkConfig'] | ||||
|   const getActivePinia: typeof import('pinia')['getActivePinia'] | ||||
|   const getCurrentInstance: typeof import('vue')['getCurrentInstance'] | ||||
|   const getCurrentScope: typeof import('vue')['getCurrentScope'] | ||||
| @@ -134,6 +135,7 @@ declare module 'vue' { | ||||
|     readonly defineStore: UnwrapRef<typeof import('pinia')['defineStore']> | ||||
|     readonly effectScope: UnwrapRef<typeof import('vue')['effectScope']> | ||||
|     readonly generateMenuItem: UnwrapRef<typeof import('./composables/tray')['generateMenuItem']> | ||||
|     readonly generateNetworkConfig: UnwrapRef<typeof import('./composables/network')['generateNetworkConfig']> | ||||
|     readonly getActivePinia: UnwrapRef<typeof import('pinia')['getActivePinia']> | ||||
|     readonly getCurrentInstance: UnwrapRef<typeof import('vue')['getCurrentInstance']> | ||||
|     readonly getCurrentScope: UnwrapRef<typeof import('vue')['getCurrentScope']> | ||||
|   | ||||
| @@ -132,6 +132,14 @@ async function onNetworkInstanceChange() { | ||||
|     return | ||||
|   } | ||||
|  | ||||
|   // if use no tun mode, stop the vpn service | ||||
|   const no_tun = networkStore.isNoTunEnabled(insts[0]) | ||||
|   if (no_tun) { | ||||
|     console.error('no tun mode, stop vpn service') | ||||
|     await doStopVpn() | ||||
|     return | ||||
|   } | ||||
|  | ||||
|   let network_length = curNetworkInfo?.my_node_info?.virtual_ipv4.network_length | ||||
|   if (!network_length) { | ||||
|     network_length = 24 | ||||
|   | ||||
| @@ -8,6 +8,10 @@ export async function parseNetworkConfig(cfg: NetworkConfig) { | ||||
|   return invoke<string>('parse_network_config', { cfg }) | ||||
| } | ||||
|  | ||||
| export async function generateNetworkConfig(tomlConfig: string) { | ||||
|   return invoke<NetworkConfig>('generate_network_config', { tomlConfig }) | ||||
| } | ||||
|  | ||||
| export async function runNetworkInstance(cfg: NetworkConfig) { | ||||
|   return invoke('run_network_instance', { cfg }) | ||||
| } | ||||
|   | ||||
| @@ -8,7 +8,7 @@ import { exit } from '@tauri-apps/plugin-process' | ||||
| import { open } from '@tauri-apps/plugin-shell' | ||||
| import TieredMenu from 'primevue/tieredmenu' | ||||
| import { useToast } from 'primevue/usetoast' | ||||
| import { NetworkTypes, Config, Status, Utils, I18nUtils } from 'easytier-frontend-lib' | ||||
| import { NetworkTypes, Config, Status, Utils, I18nUtils, ConfigEditDialog } from 'easytier-frontend-lib' | ||||
|  | ||||
| import { isAutostart, setLoggingLevel } from '~/composables/network' | ||||
| import { useTray } from '~/composables/tray' | ||||
| @@ -23,7 +23,7 @@ useTray(true) | ||||
|  | ||||
| const items = ref([ | ||||
|   { | ||||
|     label: () => t('show_config'), | ||||
|     label: () => activeStep.value == "2" ? t('show_config') : t('edit_config'), | ||||
|     icon: 'pi pi-file-edit', | ||||
|     command: async () => { | ||||
|       try { | ||||
| @@ -262,6 +262,13 @@ onMounted(async () => { | ||||
| function isRunning(id: string) { | ||||
|   return networkStore.networkInstanceIds.includes(id) | ||||
| } | ||||
|  | ||||
| async function saveTomlConfig(tomlConfig: string) { | ||||
|   const config = await generateNetworkConfig(tomlConfig) | ||||
|   networkStore.replaceCurNetwork(config); | ||||
|   toast.add({ severity: 'success', detail: t('config_saved'), life: 3000 }) | ||||
|   visible.value = false | ||||
| } | ||||
| </script> | ||||
|  | ||||
| <script lang="ts"> | ||||
| @@ -269,17 +276,8 @@ function isRunning(id: string) { | ||||
|  | ||||
| <template> | ||||
|   <div id="root" class="flex flex-col"> | ||||
|     <Dialog v-model:visible="visible" modal header="Config File" :style="{ width: '70%' }"> | ||||
|       <Panel> | ||||
|         <ScrollPanel style="width: 100%; height: 300px"> | ||||
|           <pre>{{ tomlConfig }}</pre> | ||||
|         </ScrollPanel> | ||||
|       </Panel> | ||||
|       <Divider /> | ||||
|       <div class="flex gap-2 justify-end"> | ||||
|         <Button type="button" :label="t('close')" @click="visible = false" /> | ||||
|       </div> | ||||
|     </Dialog> | ||||
|     <ConfigEditDialog v-model:visible="visible" :cur-network="curNetworkConfig" :readonly="activeStep !== '1'" | ||||
|       :save-config="saveTomlConfig" :generate-config="parseNetworkConfig" /> | ||||
|  | ||||
|     <Dialog v-model:visible="aboutVisible" modal :header="t('about.title')" :style="{ width: '70%' }"> | ||||
|       <About /> | ||||
|   | ||||
| @@ -48,6 +48,12 @@ export const useNetworkStore = defineStore('networkStore', { | ||||
|       this.curNetwork = this.networkList[nextCurNetworkIdx] | ||||
|     }, | ||||
|  | ||||
|     replaceCurNetwork(cfg: NetworkTypes.NetworkConfig) { | ||||
|       const curNetworkIdx = this.networkList.indexOf(this.curNetwork) | ||||
|       this.networkList[curNetworkIdx] = cfg | ||||
|       this.curNetwork = cfg | ||||
|     }, | ||||
|  | ||||
|     removeNetworkInstance(instanceId: string) { | ||||
|       delete this.instances[instanceId] | ||||
|     }, | ||||
| @@ -128,6 +134,13 @@ export const useNetworkStore = defineStore('networkStore', { | ||||
|       } | ||||
|       this.saveAutoStartInstIdsToLocalStorage() | ||||
|     }, | ||||
|  | ||||
|     isNoTunEnabled(instanceId: string): boolean { | ||||
|       const cfg = this.networkList.find((cfg) => cfg.instance_id === instanceId) | ||||
|       if (!cfg) | ||||
|         return false | ||||
|       return cfg.no_tun ?? false | ||||
|     }, | ||||
|   }, | ||||
| }) | ||||
|  | ||||
|   | ||||
| @@ -45,3 +45,11 @@ | ||||
|   border-radius: 4px; | ||||
|   background-color: #0000005d; | ||||
| } | ||||
|  | ||||
| .p-password { | ||||
|   width: 100%; | ||||
| } | ||||
|  | ||||
| .p-password>input { | ||||
|   width: 100%; | ||||
| } | ||||
| @@ -1,6 +1,6 @@ | ||||
| [package] | ||||
| name = "easytier-web" | ||||
| version = "2.2.4" | ||||
| version = "2.3.2" | ||||
| edition = "2021" | ||||
| description = "Config server for easytier. easytier-core gets config from this and web frontend use it as restful api server." | ||||
|  | ||||
| @@ -18,6 +18,7 @@ axum = { version = "0.7", features = ["macros"] } | ||||
| axum-login = { version = "0.16" } | ||||
| password-auth = { version = "1.0.0" } | ||||
| axum-messages = "0.7.0" | ||||
| axum-embed = { version = "0.1.0", optional = true } | ||||
| tower-sessions-sqlx-store = { version = "0.14.1", features = ["sqlite"] } | ||||
| tower-sessions = { version = "0.13.0", default-features = false, features = [ | ||||
|     "signed", | ||||
| @@ -59,3 +60,14 @@ uuid = { version = "1.5.0", features = [ | ||||
| ] } | ||||
|  | ||||
| chrono = { version = "0.4.37", features = ["serde"] } | ||||
|  | ||||
| [features] | ||||
| default = [] | ||||
| embed = ["dep:axum-embed"] | ||||
|  | ||||
| # enable thunk-rs when compiling for x86_64 or i686 windows | ||||
| [target.x86_64-pc-windows-msvc.build-dependencies] | ||||
| thunk-rs = { git = "https://github.com/easytier/thunk.git", default-features = false, features = ["win7"] } | ||||
|  | ||||
| [target.i686-pc-windows-msvc.build-dependencies] | ||||
| thunk-rs = { git = "https://github.com/easytier/thunk.git", default-features = false, features = ["win7"] } | ||||
|   | ||||
easytier-web/build.rs (new file, 7 lines)
							| @@ -0,0 +1,7 @@ | ||||
| fn main() { | ||||
|     // enable thunk-rs when target os is windows and arch is x86_64 or i686 | ||||
|     #[cfg(target_os = "windows")] | ||||
|     if !std::env::var("TARGET").unwrap_or_default().contains("aarch64") { | ||||
|         thunk::thunk(); | ||||
|     } | ||||
| } | ||||
| @@ -18,14 +18,14 @@ | ||||
|     "preview": "vite preview" | ||||
|   }, | ||||
|   "dependencies": { | ||||
|     "@primevue/themes": "^4.2.1", | ||||
|     "@primevue/themes": "4.3.3", | ||||
|     "@vueuse/core": "^11.1.0", | ||||
|     "aura": "link:@primevue\\themes\\aura", | ||||
|     "axios": "^1.7.7", | ||||
|     "floating-vue": "^5.2", | ||||
|     "ip-num": "1.5.1", | ||||
|     "primeicons": "^7.0.0", | ||||
|     "primevue": "^4.2.1", | ||||
|     "primevue": "4.3.3", | ||||
|     "tailwindcss-primeui": "^0.3.4", | ||||
|     "ts-md5": "^1.3.1", | ||||
|     "uuid": "^11.0.2", | ||||
|   | ||||
| @@ -1,7 +1,7 @@ | ||||
| <script setup lang="ts"> | ||||
| import InputGroup from 'primevue/inputgroup' | ||||
| import InputGroupAddon from 'primevue/inputgroupaddon' | ||||
| import { SelectButton, Checkbox, InputText, InputNumber, AutoComplete, Panel, Divider, ToggleButton, Button } from 'primevue' | ||||
| import { SelectButton, Checkbox, InputText, InputNumber, AutoComplete, Panel, Divider, ToggleButton, Button, Password } from 'primevue' | ||||
| import { DEFAULT_NETWORK_CONFIG, NetworkConfig, NetworkingMethod } from '../types/network' | ||||
| import { defineProps, defineEmits, ref, } from 'vue' | ||||
| import { useI18n } from 'vue-i18n' | ||||
| @@ -147,6 +147,8 @@ const bool_flags: BoolFlag[] = [ | ||||
|   { field: 'use_smoltcp', help: 'use_smoltcp_help' }, | ||||
|   { field: 'enable_kcp_proxy', help: 'enable_kcp_proxy_help' }, | ||||
|   { field: 'disable_kcp_input', help: 'disable_kcp_input_help' }, | ||||
|   { field: 'enable_quic_proxy', help: 'enable_quic_proxy_help' }, | ||||
|   { field: 'disable_quic_input', help: 'disable_quic_input_help' }, | ||||
|   { field: 'disable_p2p', help: 'disable_p2p_help' }, | ||||
|   { field: 'bind_device', help: 'bind_device_help' }, | ||||
|   { field: 'no_tun', help: 'no_tun_help' }, | ||||
| @@ -155,6 +157,9 @@ const bool_flags: BoolFlag[] = [ | ||||
|   { field: 'multi_thread', help: 'multi_thread_help' }, | ||||
|   { field: 'proxy_forward_by_system', help: 'proxy_forward_by_system_help' }, | ||||
|   { field: 'disable_encryption', help: 'disable_encryption_help' }, | ||||
|   { field: 'disable_udp_hole_punching', help: 'disable_udp_hole_punching_help' }, | ||||
|   { field: 'enable_magic_dns', help: 'enable_magic_dns_help' }, | ||||
|   { field: 'enable_private_mode', help: 'enable_private_mode_help' }, | ||||
| ] | ||||
|  | ||||
| </script> | ||||
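The five switches added above are plain booleans: each entry's field names a boolean key of NetworkConfig and help names the i18n key for its tooltip. The sketch below shows the assumed shape; the real BoolFlag interface and the rendering loop live elsewhere in Config.vue and are not part of this hunk.

import type { NetworkConfig } from '../types/network'  // same path imported at the top of Config.vue

// Assumed shape of the entries in bool_flags.
interface BoolFlag {
  field: keyof NetworkConfig  // e.g. 'enable_quic_proxy'
  help: string                // i18n key, e.g. 'enable_quic_proxy_help'
}

// The template is assumed to iterate over bool_flags roughly like this,
// binding each switch to curNetwork[flag.field] and showing t(flag.help)
// as a question-mark tooltip:
//
//   <div v-for="flag in bool_flags" :key="flag.field">
//     <Checkbox v-model="curNetwork[flag.field]" binary :input-id="flag.field" />
//     <label :for="flag.field">{{ t(flag.field) }}</label>
//     <span class="pi pi-question-circle" v-tooltip="t(flag.help)" />
//   </div>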
| @@ -196,8 +201,8 @@ const bool_flags: BoolFlag[] = [ | ||||
|                 </div> | ||||
|                 <div class="flex flex-col gap-2 basis-5/12 grow"> | ||||
|                   <label for="network_secret">{{ t('network_secret') }}</label> | ||||
|                   <InputText id="network_secret" v-model="curNetwork.network_secret" | ||||
|                     aria-describedby="network_secret-help" /> | ||||
|                   <Password id="network_secret" v-model="curNetwork.network_secret" | ||||
|                     aria-describedby="network_secret-help" toggleMask :feedback="false" /> | ||||
|                 </div> | ||||
|               </div> | ||||
|  | ||||
| @@ -268,7 +273,7 @@ const bool_flags: BoolFlag[] = [ | ||||
|                       <div class="flex flex-col gap-2 basis-8/12 grow"> | ||||
|                         <InputGroup> | ||||
|                           <InputText v-model="curNetwork.vpn_portal_client_network_addr" | ||||
|                                      :placeholder="t('vpn_portal_client_network')" /> | ||||
|                             :placeholder="t('vpn_portal_client_network')" /> | ||||
|                           <InputGroupAddon> | ||||
|                             <span>/{{ curNetwork.vpn_portal_client_network_len }}</span> | ||||
|                           </InputGroupAddon> | ||||
| @@ -276,7 +281,7 @@ const bool_flags: BoolFlag[] = [ | ||||
|                       </div> | ||||
|                       <div class="flex flex-col gap-2 basis-3/12 grow"> | ||||
|                         <InputNumber v-model="curNetwork.vpn_portal_listen_port" :allow-empty="false" :format="false" | ||||
|                                      :min="0" :max="65535" fluid /> | ||||
|                           :min="0" :max="65535" fluid /> | ||||
|                       </div> | ||||
|                     </div> | ||||
|                   </div> | ||||
| @@ -301,6 +306,15 @@ const bool_flags: BoolFlag[] = [ | ||||
|                 </div> | ||||
|               </div> | ||||
|  | ||||
|               <div class="flex flex-row gap-x-9 flex-wrap w-full"> | ||||
|                 <div class="flex flex-col gap-2 grow p-fluid"> | ||||
|                   <label for="">{{ t('rpc_portal_whitelists') }}</label> | ||||
|                   <AutoComplete id="rpc_portal_whitelists" v-model="curNetwork.rpc_portal_whitelists" | ||||
|                     :placeholder="t('chips_placeholder', ['127.0.0.0/8'])" class="w-full" multiple fluid | ||||
|                     :suggestions="inetSuggestions" @complete="searchInetSuggestions" /> | ||||
|                 </div> | ||||
|               </div> | ||||
|  | ||||
|               <div class="flex flex-row gap-x-9 flex-wrap"> | ||||
|                 <div class="flex flex-col gap-2 basis-5/12 grow"> | ||||
|                   <label for="dev_name">{{ t('dev_name') }}</label> | ||||
| @@ -309,20 +323,31 @@ const bool_flags: BoolFlag[] = [ | ||||
|                 </div> | ||||
|               </div> | ||||
|  | ||||
|               <div class="flex flex-row gap-x-9 flex-wrap"> | ||||
|                 <div class="flex flex-col gap-2 basis-5/12 grow"> | ||||
|                   <div class="flex"> | ||||
|                     <label for="mtu">{{ t('mtu') }}</label> | ||||
|                     <span class="pi pi-question-circle ml-2 self-center" v-tooltip="t('mtu_help')"></span> | ||||
|                   </div> | ||||
|                   <InputNumber id="mtu" v-model="curNetwork.mtu" aria-describedby="mtu-help" :format="false" | ||||
|                     :placeholder="t('mtu_placeholder')" :min="400" :max="1380" fluid /> | ||||
|                 </div> | ||||
|               </div> | ||||
|  | ||||
|               <div class="flex flex-row gap-x-9 flex-wrap"> | ||||
|                 <div class="flex flex-col gap-2 basis-5/12 grow"> | ||||
|                   <div class="flex"> | ||||
|                     <label for="relay_network_whitelist">{{ t('relay_network_whitelist') }}</label> | ||||
|                     <span class="pi pi-question-circle ml-2 self-center" | ||||
|                           v-tooltip="t('relay_network_whitelist_help')"></span> | ||||
|                       v-tooltip="t('relay_network_whitelist_help')"></span> | ||||
|                   </div> | ||||
|                   <ToggleButton v-model="curNetwork.enable_relay_network_whitelist" on-icon="pi pi-check" off-icon="pi pi-times" | ||||
|                                 :on-label="t('off_text')" :off-label="t('on_text')" class="w-48" /> | ||||
|                   <ToggleButton v-model="curNetwork.enable_relay_network_whitelist" on-icon="pi pi-check" | ||||
|                     off-icon="pi pi-times" :on-label="t('off_text')" :off-label="t('on_text')" class="w-48" /> | ||||
|                   <div v-if="curNetwork.enable_relay_network_whitelist" class="items-center flex flex-row gap-x-4"> | ||||
|                     <div class="min-w-64 w-full"> | ||||
|                       <AutoComplete id="relay_network_whitelist" v-model="curNetwork.relay_network_whitelist" | ||||
|                                     :placeholder="t('relay_network_whitelist')" class="w-full" multiple fluid | ||||
|                                     :suggestions="whitelistSuggestions" @complete="searchWhitelistSuggestions" /> | ||||
|                         :placeholder="t('relay_network_whitelist')" class="w-full" multiple fluid | ||||
|                         :suggestions="whitelistSuggestions" @complete="searchWhitelistSuggestions" /> | ||||
|                     </div> | ||||
|                   </div> | ||||
|                 </div> | ||||
| @@ -335,12 +360,12 @@ const bool_flags: BoolFlag[] = [ | ||||
|                     <span class="pi pi-question-circle ml-2 self-center" v-tooltip="t('manual_routes_help')"></span> | ||||
|                   </div> | ||||
|                   <ToggleButton v-model="curNetwork.enable_manual_routes" on-icon="pi pi-check" off-icon="pi pi-times" | ||||
|                                 :on-label="t('off_text')" :off-label="t('on_text')" class="w-48" /> | ||||
|                     :on-label="t('off_text')" :off-label="t('on_text')" class="w-48" /> | ||||
|                   <div v-if="curNetwork.enable_manual_routes" class="items-center flex flex-row gap-x-4"> | ||||
|                     <div class="min-w-64 w-full"> | ||||
|                       <AutoComplete id="routes" v-model="curNetwork.routes" | ||||
|                                     :placeholder="t('chips_placeholder', ['192.168.0.0/16'])" class="w-full" multiple fluid | ||||
|                                     :suggestions="inetSuggestions" @complete="searchInetSuggestions" /> | ||||
|                         :placeholder="t('chips_placeholder', ['192.168.0.0/16'])" class="w-full" multiple fluid | ||||
|                         :suggestions="inetSuggestions" @complete="searchInetSuggestions" /> | ||||
|                     </div> | ||||
|                   </div> | ||||
|                 </div> | ||||
| @@ -353,11 +378,11 @@ const bool_flags: BoolFlag[] = [ | ||||
|                     <span class="pi pi-question-circle ml-2 self-center" v-tooltip="t('socks5_help')"></span> | ||||
|                   </div> | ||||
|                   <ToggleButton v-model="curNetwork.enable_socks5" on-icon="pi pi-check" off-icon="pi pi-times" | ||||
|                                 :on-label="t('off_text')" :off-label="t('on_text')" class="w-48" /> | ||||
|                     :on-label="t('off_text')" :off-label="t('on_text')" class="w-48" /> | ||||
|                   <div v-if="curNetwork.enable_socks5" class="items-center flex flex-row gap-x-4"> | ||||
|                     <div class="min-w-64 w-full"> | ||||
|                       <InputNumber id="socks5_port" v-model="curNetwork.socks5_port" aria-describedby="rpc_port-help" | ||||
|                                    :format="false" :allow-empty="false" :min="0" :max="65535" class="w-full"/> | ||||
|                         :format="false" :allow-empty="false" :min="0" :max="65535" class="w-full" /> | ||||
|                     </div> | ||||
|                   </div> | ||||
|                 </div> | ||||
| @@ -370,8 +395,20 @@ const bool_flags: BoolFlag[] = [ | ||||
|                     <span class="pi pi-question-circle ml-2 self-center" v-tooltip="t('exit_nodes_help')"></span> | ||||
|                   </div> | ||||
|                   <AutoComplete id="exit_nodes" v-model="curNetwork.exit_nodes" | ||||
|                                 :placeholder="t('chips_placeholder', ['192.168.8.8'])" class="w-full" multiple fluid | ||||
|                                 :suggestions="exitNodesSuggestions" @complete="searchExitNodesSuggestions" /> | ||||
|                     :placeholder="t('chips_placeholder', ['192.168.8.8'])" class="w-full" multiple fluid | ||||
|                     :suggestions="exitNodesSuggestions" @complete="searchExitNodesSuggestions" /> | ||||
|                 </div> | ||||
|               </div> | ||||
|  | ||||
|               <div class="flex flex-row gap-x-9 flex-wrap w-full"> | ||||
|                 <div class="flex flex-col gap-2 grow p-fluid"> | ||||
|                   <div class="flex"> | ||||
|                     <label for="mapped_listeners">{{ t('mapped_listeners') }}</label> | ||||
|                     <span class="pi pi-question-circle ml-2 self-center" v-tooltip="t('mapped_listeners_help')"></span> | ||||
|                   </div> | ||||
|                   <AutoComplete id="mapped_listeners" v-model="curNetwork.mapped_listeners" | ||||
|                     :placeholder="t('chips_placeholder', ['tcp://123.123.123.123:11223'])" class="w-full" multiple fluid | ||||
|                     :suggestions="peerSuggestions" @complete="searchPeerSuggestions" /> | ||||
|                 </div> | ||||
|               </div> | ||||
|  | ||||
|   | ||||
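The mapped_listeners input added above expects full listener URLs such as tcp://123.123.123.123:11223 (see the mapped_listeners_help strings later in this diff). Below is a rough client-side sanity check using only the standard URL API; the scheme list is an assumption for illustration, not the authoritative set EasyTier accepts.

// Rough sanity check for a mapped-listener entry such as
// "tcp://123.123.123.123:11223". The scheme whitelist is an assumption.
const ASSUMED_SCHEMES = ['tcp:', 'udp:', 'ws:', 'wss:', 'quic:', 'wg:']

export function looksLikeMappedListener(entry: string): boolean {
  let url: URL
  try {
    url = new URL(entry)
  } catch {
    return false
  }
  // For this rough check we require an explicit host and port, matching the
  // placeholder shown in the form.
  return ASSUMED_SCHEMES.includes(url.protocol) && url.hostname !== '' && url.port !== ''
}

// looksLikeMappedListener('tcp://123.123.123.123:11223') // true
// looksLikeMappedListener('not a listener url')          // false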
							
								
								
									
easytier-web/frontend-lib/src/components/ConfigEditDialog.vue (new file, 103 lines)
							| @@ -0,0 +1,103 @@ | ||||
| <script setup lang="ts"> | ||||
| import { onMounted, ref, watch } from 'vue'; | ||||
| import { NetworkConfig } from '../types/network'; | ||||
| import { Divider, Button, Dialog, Textarea } from 'primevue' | ||||
| import { useI18n } from 'vue-i18n' | ||||
|  | ||||
| const { t } = useI18n() | ||||
|  | ||||
| const props = defineProps({ | ||||
|     readonly: { | ||||
|         type: Boolean, | ||||
|         default: false, | ||||
|     }, | ||||
|     generateConfig: { | ||||
|         type: Object as () => (config: NetworkConfig) => Promise<string>, | ||||
|         required: true, | ||||
|     }, | ||||
|     saveConfig: { | ||||
|         type: Object as () => (config: string) => Promise<void>, | ||||
|         required: true, | ||||
|     }, | ||||
| }) | ||||
|  | ||||
| const curNetwork = defineModel('curNetwork', { | ||||
|     type: Object as () => NetworkConfig | undefined, | ||||
|     required: true, | ||||
| }) | ||||
|  | ||||
| const visible = defineModel('visible', { | ||||
|     type: Boolean, | ||||
|     default: false, | ||||
| }) | ||||
| watch([visible, curNetwork], async ([newVisible, newCurNetwork]) => { | ||||
|     if (!newVisible) { | ||||
|         tomlConfig.value = ''; | ||||
|         return; | ||||
|     } | ||||
|     if (!newCurNetwork) { | ||||
|         tomlConfig.value = ''; | ||||
|         return; | ||||
|     } | ||||
|     const config = newCurNetwork; | ||||
|     try { | ||||
|         errorMessage.value = ''; | ||||
|         tomlConfig.value = await props.generateConfig(config); | ||||
|     } catch (e) { | ||||
|         errorMessage.value = 'Failed to generate config: ' + (e instanceof Error ? e.message : String(e)); | ||||
|         tomlConfig.value = ''; | ||||
|     } | ||||
| }) | ||||
| onMounted(async () => { | ||||
|     if (!visible.value) { | ||||
|         return; | ||||
|     } | ||||
|     if (!curNetwork.value) { | ||||
|         tomlConfig.value = ''; | ||||
|         return; | ||||
|     } | ||||
|     const config = curNetwork.value; | ||||
|     try { | ||||
|         tomlConfig.value = await props.generateConfig(config); | ||||
|         errorMessage.value = ''; | ||||
|     } catch (e) { | ||||
|         errorMessage.value = 'Failed to generate config: ' + (e instanceof Error ? e.message : String(e)); | ||||
|         tomlConfig.value = ''; | ||||
|     } | ||||
| }); | ||||
|  | ||||
| const handleConfigSave = async () => { | ||||
|     if (props.readonly) return; | ||||
|     try { | ||||
|         await props.saveConfig(tomlConfig.value); | ||||
|         visible.value = false; | ||||
|     } catch (e) { | ||||
|         errorMessage.value = 'Failed to save config: ' + (e instanceof Error ? e.message : String(e)); | ||||
|     } | ||||
| }; | ||||
|  | ||||
| const tomlConfig = ref<string>('') | ||||
| const tomlConfigRows = ref<number>(1); | ||||
| const errorMessage = ref<string>(''); | ||||
|  | ||||
| watch(tomlConfig, (newValue) => { | ||||
|     tomlConfigRows.value = newValue.split('\n').length; | ||||
|     errorMessage.value = ''; | ||||
| }); | ||||
|  | ||||
| </script> | ||||
| <template> | ||||
|     <Dialog v-model:visible="visible" modal :header="t('config_file')" :style="{ width: '70%' }"> | ||||
|         <pre v-if="errorMessage" | ||||
|             class="mb-2 p-2 rounded text-sm overflow-auto bg-red-100 text-red-700 max-h-40">{{ errorMessage }}</pre> | ||||
|         <div class="flex w-full" style="max-height: 60vh; overflow-y: auto;"> | ||||
|             <Textarea v-model="tomlConfig" class="w-full h-full font-mono flex flex-col resize-none" :rows="tomlConfigRows" | ||||
|                 spellcheck="false" :readonly="props.readonly"></Textarea> | ||||
|         </div> | ||||
|         <Divider /> | ||||
|         <div class="flex gap-2 justify-end"> | ||||
|             <Button v-if="!props.readonly" type="button" :label="t('save')" @click="handleConfigSave" /> | ||||
|             <Button type="button" :label="t('close')" @click="visible = false" /> | ||||
|         </div> | ||||
|     </Dialog> | ||||
| </template> | ||||
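A parent is expected to hand this dialog two async callbacks: generate-config renders a NetworkConfig to the TOML text shown in the editor, and save-config consumes the edited TOML on save. A minimal wiring sketch follows; the api calls mirror the ApiClient methods visible elsewhere in this diff, while the server URL and the surrounding setup are illustrative.

// Minimal parent-side wiring for ConfigEditDialog (illustrative).
import { ref } from 'vue'
import { Api, NetworkTypes } from 'easytier-frontend-lib'

const api = new Api.ApiClient('https://config-server.easytier.cn')
const visible = ref(false)
const curNetwork = ref<NetworkTypes.NetworkConfig>(NetworkTypes.DEFAULT_NETWORK_CONFIG())

// Passed as :generate-config, must return the TOML text to show in the editor.
async function generateConfig(config: NetworkTypes.NetworkConfig): Promise<string> {
  const res = await api.generate_config({ config })
  if (res.error) throw new Error(res.error)
  return res.toml_config ?? ''
}

// Passed as :save-config, parses the edited TOML back into a NetworkConfig.
async function saveConfig(toml: string): Promise<void> {
  const res = await api.parse_config({ toml_config: toml })
  if (res.error) throw new Error(res.error)
  if (res.config) curNetwork.value = res.config
}

// Template usage, as in the device management view later in this diff:
//   <ConfigEditDialog v-model:visible="visible" :cur-network="curNetwork"
//     :generate-config="generateConfig" :save-config="saveConfig" />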
| @@ -5,7 +5,7 @@ import { NetworkInstance, type NodeInfo, type PeerRoutePair } from '../types/net | ||||
| import { useI18n } from 'vue-i18n'; | ||||
| import { computed, onMounted, onUnmounted, ref } from 'vue'; | ||||
| import { ipv4InetToString, ipv4ToString, ipv6ToString } from '../modules/utils'; | ||||
| import { DataTable, Column, Tag, Chip, Button, Dialog, ScrollPanel, Timeline, Divider, Panel, } from 'primevue'; | ||||
| import { DataTable, Column, Tag, Chip, Button, Dialog, ScrollPanel, Timeline, Divider, Card, } from 'primevue'; | ||||
|  | ||||
| const props = defineProps<{ | ||||
|   curNetworkInst: NetworkInstance | null, | ||||
| @@ -106,6 +106,10 @@ function ipFormat(info: PeerRoutePair) { | ||||
|   return ip ? `${IPv4.fromNumber(ip.address.addr)}/${ip.network_length}` : '' | ||||
| } | ||||
|  | ||||
| function tunnelProto(info: PeerRoutePair) { | ||||
|   return [...new Set(info.peer?.conns.map(c => c.tunnel?.tunnel_type))].join(',') | ||||
| } | ||||
|  | ||||
| const myNodeInfo = computed(() => { | ||||
|   if (!props.curNetworkInst) | ||||
|     return {} as NodeInfo | ||||
| @@ -319,101 +323,108 @@ function showEventLogs() { | ||||
|       </Timeline> | ||||
|     </Dialog> | ||||
|  | ||||
|     <Panel v-if="curNetworkInst?.error_msg"> | ||||
|       <template #header> | ||||
|     <Card v-if="curNetworkInst?.error_msg"> | ||||
|       <template #title> | ||||
|         Run Network Error | ||||
|       </template> | ||||
|       <div class="flex flex-col gap-y-5"> | ||||
|         <div class="text-red-500"> | ||||
|           {{ curNetworkInst.error_msg }} | ||||
|       <template #content> | ||||
|         <div class="flex flex-col gap-y-5"> | ||||
|           <div class="text-red-500"> | ||||
|             {{ curNetworkInst.error_msg }} | ||||
|           </div> | ||||
|         </div> | ||||
|       </div> | ||||
|     </Panel> | ||||
|       </template> | ||||
|     </Card> | ||||
|  | ||||
|     <template v-else> | ||||
|       <Panel> | ||||
|         <template #header> | ||||
|       <Card> | ||||
|         <template #title> | ||||
|           {{ t('my_node_info') }} | ||||
|         </template> | ||||
|         <div class="flex w-full flex-col gap-y-5"> | ||||
|           <div class="m-0 flex flex-row justify-center gap-x-5"> | ||||
|             <div class="rounded-full w-32 h-32 flex flex-col items-center pt-6" style="border: 1px solid green"> | ||||
|               <div class="font-bold"> | ||||
|                 {{ t('peer_count') }} | ||||
|         <template #content> | ||||
|           <div class="flex w-full flex-col gap-y-5"> | ||||
|             <div class="m-0 flex flex-row justify-center gap-x-5"> | ||||
|               <div class="rounded-full w-32 h-32 flex flex-col items-center pt-6" style="border: 1px solid green"> | ||||
|                 <div class="font-bold"> | ||||
|                   {{ t('peer_count') }} | ||||
|                 </div> | ||||
|                 <div class="text-5xl mt-1"> | ||||
|                   {{ peerCount }} | ||||
|                 </div> | ||||
|               </div> | ||||
|               <div class="text-5xl mt-1"> | ||||
|                 {{ peerCount }} | ||||
|  | ||||
|               <div class="rounded-full w-32 h-32 flex flex-col items-center pt-6" style="border: 1px solid purple"> | ||||
|                 <div class="font-bold"> | ||||
|                   {{ t('upload') }} | ||||
|                 </div> | ||||
|                 <div class="text-xl mt-2"> | ||||
|                   {{ txRate }}/s | ||||
|                 </div> | ||||
|               </div> | ||||
|  | ||||
|               <div class="rounded-full w-32 h-32 flex flex-col items-center pt-6" style="border: 1px solid fuchsia"> | ||||
|                 <div class="font-bold"> | ||||
|                   {{ t('download') }} | ||||
|                 </div> | ||||
|                 <div class="text-xl mt-2"> | ||||
|                   {{ rxRate }}/s | ||||
|                 </div> | ||||
|               </div> | ||||
|             </div> | ||||
|  | ||||
|             <div class="rounded-full w-32 h-32 flex flex-col items-center pt-6" style="border: 1px solid purple"> | ||||
|               <div class="font-bold"> | ||||
|                 {{ t('upload') }} | ||||
|               </div> | ||||
|               <div class="text-xl mt-2"> | ||||
|                 {{ txRate }}/s | ||||
|               </div> | ||||
|             <div class="flex flex-row items-center flex-wrap w-full max-h-40 overflow-scroll"> | ||||
|               <Chip v-for="(chip, i) in myNodeInfoChips" :key="i" :label="chip.label" :icon="chip.icon" | ||||
|                 class="mr-2 mt-2 text-sm" /> | ||||
|             </div> | ||||
|  | ||||
|             <div class="rounded-full w-32 h-32 flex flex-col items-center pt-6" style="border: 1px solid fuchsia"> | ||||
|               <div class="font-bold"> | ||||
|                 {{ t('download') }} | ||||
|               </div> | ||||
|               <div class="text-xl mt-2"> | ||||
|                 {{ rxRate }}/s | ||||
|               </div> | ||||
|             <div v-if="myNodeInfo" class="m-0 flex flex-row justify-center gap-x-5 text-sm"> | ||||
|               <Button severity="info" :label="t('show_vpn_portal_config')" @click="showVpnPortalConfig" /> | ||||
|               <Button severity="info" :label="t('show_event_log')" @click="showEventLogs" /> | ||||
|             </div> | ||||
|           </div> | ||||
|  | ||||
|           <div class="flex flex-row items-center flex-wrap w-full max-h-40 overflow-scroll"> | ||||
|             <Chip v-for="(chip, i) in myNodeInfoChips" :key="i" :label="chip.label" :icon="chip.icon" | ||||
|               class="mr-2 mt-2 text-sm" /> | ||||
|           </div> | ||||
|  | ||||
|           <div v-if="myNodeInfo" class="m-0 flex flex-row justify-center gap-x-5 text-sm"> | ||||
|             <Button severity="info" :label="t('show_vpn_portal_config')" @click="showVpnPortalConfig" /> | ||||
|             <Button severity="info" :label="t('show_event_log')" @click="showEventLogs" /> | ||||
|           </div> | ||||
|         </div> | ||||
|       </Panel> | ||||
|         </template> | ||||
|       </Card> | ||||
|  | ||||
|       <Divider /> | ||||
|  | ||||
|       <Panel> | ||||
|         <template #header> | ||||
|       <Card> | ||||
|         <template #title> | ||||
|           {{ t('peer_info') }} | ||||
|         </template> | ||||
|         <DataTable :value="peerRouteInfos" column-resize-mode="fit" table-class="w-full"> | ||||
|           <Column :field="ipFormat" :header="t('virtual_ipv4')" /> | ||||
|           <Column :header="t('hostname')"> | ||||
|             <template #body="slotProps"> | ||||
|               <div v-if="!slotProps.data.route.cost || !slotProps.data.route.feature_flag.is_public_server" | ||||
|                 v-tooltip="slotProps.data.route.hostname"> | ||||
|                 {{ | ||||
|                   slotProps.data.route.hostname }} | ||||
|               </div> | ||||
|               <div v-else v-tooltip="slotProps.data.route.hostname" class="space-x-1"> | ||||
|                 <Tag v-if="slotProps.data.route.feature_flag.is_public_server" severity="info" value="Info"> | ||||
|                   {{ t('status.server') }} | ||||
|                 </Tag> | ||||
|                 <Tag v-if="slotProps.data.route.feature_flag.avoid_relay_data" severity="warn" value="Warn"> | ||||
|                   {{ t('status.relay') }} | ||||
|                 </Tag> | ||||
|               </div> | ||||
|             </template> | ||||
|           </Column> | ||||
|           <Column :field="routeCost" :header="t('route_cost')" /> | ||||
|           <Column :field="latencyMs" :header="t('latency')" /> | ||||
|           <Column :field="txBytes" :header="t('upload_bytes')" /> | ||||
|           <Column :field="rxBytes" :header="t('download_bytes')" /> | ||||
|           <Column :field="lossRate" :header="t('loss_rate')" /> | ||||
|           <Column :header="t('status.version')"> | ||||
|             <template #body="slotProps"> | ||||
|               <span>{{ version(slotProps.data) }}</span> | ||||
|             </template> | ||||
|           </Column> | ||||
|         </DataTable> | ||||
|       </Panel> | ||||
|         <template #content> | ||||
|           <DataTable :value="peerRouteInfos" column-resize-mode="fit" table-class="w-full"> | ||||
|             <Column :field="ipFormat" :header="t('virtual_ipv4')" /> | ||||
|             <Column :header="t('hostname')"> | ||||
|               <template #body="slotProps"> | ||||
|                 <div v-if="!slotProps.data.route.cost || !slotProps.data.route.feature_flag.is_public_server" | ||||
|                   v-tooltip="slotProps.data.route.hostname"> | ||||
|                   {{ | ||||
|                     slotProps.data.route.hostname }} | ||||
|                 </div> | ||||
|                 <div v-else v-tooltip="slotProps.data.route.hostname" class="space-x-1"> | ||||
|                   <Tag v-if="slotProps.data.route.feature_flag.is_public_server" severity="info" value="Info"> | ||||
|                     {{ t('status.server') }} | ||||
|                   </Tag> | ||||
|                   <Tag v-if="slotProps.data.route.feature_flag.avoid_relay_data" severity="warn" value="Warn"> | ||||
|                     {{ t('status.relay') }} | ||||
|                   </Tag> | ||||
|                 </div> | ||||
|               </template> | ||||
|             </Column> | ||||
|             <Column :field="routeCost" :header="t('route_cost')" /> | ||||
|             <Column :field="tunnelProto" :header="t('tunnel_proto')" /> | ||||
|             <Column :field="latencyMs" :header="t('latency')" /> | ||||
|             <Column :field="txBytes" :header="t('upload_bytes')" /> | ||||
|             <Column :field="rxBytes" :header="t('download_bytes')" /> | ||||
|             <Column :field="lossRate" :header="t('loss_rate')" /> | ||||
|             <Column :header="t('status.version')"> | ||||
|               <template #body="slotProps"> | ||||
|                 <span>{{ version(slotProps.data) }}</span> | ||||
|               </template> | ||||
|             </Column> | ||||
|           </DataTable> | ||||
|         </template> | ||||
|       </Card> | ||||
|     </template> | ||||
|   </div> | ||||
| </template> | ||||
|   | ||||
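The tunnelProto helper added above collapses the tunnel types of all connections to a peer into one comma-separated, de-duplicated string for the new Protocol column. A standalone illustration of the same Set-based dedup, with a conns shape simplified from the real PeerRoutePair type:

type Conn = { tunnel?: { tunnel_type?: string } }

function dedupTunnelTypes(conns: Conn[]): string {
  // Set removes duplicates while preserving first-seen order.
  return [...new Set(conns.map(c => c.tunnel?.tunnel_type))].join(',')
}

// A peer reachable over two tcp connections and one udp connection
// renders as "tcp,udp":
dedupTunnelTypes([
  { tunnel: { tunnel_type: 'tcp' } },
  { tunnel: { tunnel_type: 'tcp' } },
  { tunnel: { tunnel_type: 'udp' } },
]) // => 'tcp,udp'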
| @@ -1,2 +1,3 @@ | ||||
| export { default as Config } from './Config.vue'; | ||||
| export { default as Status } from './Status.vue'; | ||||
| export { default as ConfigEditDialog } from './ConfigEditDialog.vue'; | ||||
|   | ||||
| @@ -1,7 +1,7 @@ | ||||
| import './style.css' | ||||
|  | ||||
| import type { App } from 'vue'; | ||||
| import { Config, Status } from "./components"; | ||||
| import { Config, Status, ConfigEditDialog } from "./components"; | ||||
| import Aura from '@primevue/themes/aura' | ||||
| import PrimeVue from 'primevue/config' | ||||
|  | ||||
| @@ -41,10 +41,11 @@ export default { | ||||
|         }); | ||||
|  | ||||
|         app.component('Config', Config); | ||||
|         app.component('ConfigEditDialog', ConfigEditDialog); | ||||
|         app.component('Status', Status); | ||||
|         app.component('HumanEvent', HumanEvent); | ||||
|         app.directive('tooltip', vTooltip as any); | ||||
|     } | ||||
| }; | ||||
|  | ||||
| export { Config, Status, I18nUtils, NetworkTypes, Api, Utils }; | ||||
| export { Config, ConfigEditDialog, Status, I18nUtils, NetworkTypes, Api, Utils }; | ||||
|   | ||||
| @@ -18,6 +18,7 @@ advanced_settings: 高级设置 | ||||
| basic_settings: 基础设置 | ||||
| listener_urls: 监听地址 | ||||
| rpc_port: RPC端口 | ||||
| rpc_portal_whitelists: RPC白名单 | ||||
| config_network: 配置网络 | ||||
| running: 运行中 | ||||
| error_msg: 错误信息 | ||||
| @@ -50,7 +51,11 @@ dev_name_placeholder: 注意:当多个网络同时使用相同的TUN接口名 | ||||
| off_text: 点击关闭 | ||||
| on_text: 点击开启 | ||||
| show_config: 显示配置 | ||||
| edit_config: 编辑配置文件 | ||||
| config_file: 配置文件 | ||||
| close: 关闭 | ||||
| save: 保存 | ||||
| config_saved: 配置已保存 | ||||
|  | ||||
| use_latency_first: 延迟优先模式 | ||||
| my_node_info: 当前节点信息 | ||||
| @@ -64,6 +69,7 @@ event_log: 事件日志 | ||||
| peer_info: 节点信息 | ||||
| hostname: 主机名 | ||||
| route_cost: 路由 | ||||
| tunnel_proto: 协议 | ||||
| latency: 延迟 | ||||
| upload_bytes: 上传 | ||||
| download_bytes: 下载 | ||||
| @@ -83,6 +89,12 @@ enable_kcp_proxy_help: 将 TCP 流量转为 KCP 流量,降低传输延迟, | ||||
| disable_kcp_input: 禁用 KCP 输入 | ||||
| disable_kcp_input_help: 禁用 KCP 入站流量,其他开启 KCP 代理的节点仍然使用 TCP 连接到本节点。 | ||||
|  | ||||
| enable_quic_proxy: 启用 QUIC 代理 | ||||
| enable_quic_proxy_help: 将 TCP 流量转为 QUIC 流量,降低传输延迟,提升传输速度。 | ||||
|  | ||||
| disable_quic_input: 禁用 QUIC 输入 | ||||
| disable_quic_input_help: 禁用 QUIC 入站流量,其他开启 QUIC 代理的节点仍然使用 TCP 连接到本节点。 | ||||
|  | ||||
| disable_p2p: 禁用 P2P | ||||
| disable_p2p_help: 禁用 P2P 模式,所有流量通过手动指定的服务器中转。 | ||||
|  | ||||
| @@ -109,6 +121,17 @@ proxy_forward_by_system_help: 通过系统内核转发子网代理数据包, | ||||
| disable_encryption: 禁用加密 | ||||
| disable_encryption_help: 禁用对等节点通信的加密,默认为false,必须与对等节点相同 | ||||
|  | ||||
| disable_udp_hole_punching: 禁用UDP打洞 | ||||
| disable_udp_hole_punching_help: 禁用UDP打洞功能 | ||||
|  | ||||
| enable_magic_dns: 启用魔法DNS | ||||
| enable_magic_dns_help: | | ||||
|   启用魔法DNS,允许通过EasyTier的DNS服务器访问其他节点的虚拟IPv4地址, 如 node1.et.net。 | ||||
|  | ||||
| enable_private_mode: 启用私有模式 | ||||
| enable_private_mode_help: | | ||||
|   启用私有模式,则不允许使用了与本网络不相同的网络名称和密码的节点通过本节点进行握手或中转。 | ||||
|  | ||||
| relay_network_whitelist: 网络白名单 | ||||
| relay_network_whitelist_help: | | ||||
|   仅转发白名单网络的流量,支持通配符字符串。多个网络名称间可以使用英文空格间隔。 | ||||
| @@ -125,6 +148,16 @@ socks5_help: | | ||||
| exit_nodes: 出口节点列表 | ||||
| exit_nodes_help: 转发所有流量的出口节点,虚拟IPv4地址,优先级由列表顺序决定 | ||||
|  | ||||
| mtu: MTU | ||||
| mtu_help: | | ||||
|   TUN设备的MTU,默认为非加密时为1380,加密时为1360。范围:400-1380 | ||||
| mtu_placeholder: 留空为默认值1380 | ||||
|  | ||||
| mapped_listeners: 监听映射 | ||||
| mapped_listeners_help: | | ||||
|   手动指定监听器的公网地址,其他节点可以使用该地址连接到本节点。 | ||||
|   例如:tcp://123.123.123.123:11223,可以指定多个。 | ||||
|  | ||||
| status: | ||||
|   version: 内核版本 | ||||
|   local: 本机 | ||||
| @@ -169,4 +202,4 @@ event: | ||||
|   VpnPortalClientDisconnected: VPN门户客户端已断开连接 | ||||
|   DhcpIpv4Changed: DHCP IPv4地址更改 | ||||
|   DhcpIpv4Conflicted: DHCP IPv4地址冲突 | ||||
|  | ||||
|   PortForwardAdded: 端口转发添加 | ||||
|   | ||||
| @@ -18,6 +18,7 @@ advanced_settings: Advanced Settings | ||||
| basic_settings: Basic Settings | ||||
| listener_urls: Listener URLs | ||||
| rpc_port: RPC Port | ||||
| rpc_portal_whitelists: RPC Whitelist | ||||
| config_network: Config Network | ||||
| running: Running | ||||
| error_msg: Error Message | ||||
| @@ -51,7 +52,11 @@ dev_name_placeholder: 'Note: When multiple networks use the same TUN interface n | ||||
| off_text: Press to disable | ||||
| on_text: Press to enable | ||||
| show_config: Show Config | ||||
| edit_config: Edit Config File | ||||
| config_file: Config File | ||||
| close: Close | ||||
| save: Save | ||||
| config_saved: Configuration saved | ||||
| my_node_info: My Node Info | ||||
| peer_count: Connected | ||||
| upload: Upload | ||||
| @@ -62,6 +67,7 @@ show_event_log: Show Event Log | ||||
| event_log: Event Log | ||||
| peer_info: Peer Info | ||||
| route_cost: Route Cost | ||||
| tunnel_proto: Protocol | ||||
| hostname: Hostname | ||||
| latency: Latency | ||||
| upload_bytes: Upload | ||||
| @@ -82,6 +88,12 @@ enable_kcp_proxy_help: Convert TCP traffic to KCP traffic to reduce latency and | ||||
| disable_kcp_input: Disable KCP Input | ||||
| disable_kcp_input_help: Disable inbound KCP traffic, while nodes with KCP proxy enabled continue to connect using TCP. | ||||
|  | ||||
| enable_quic_proxy: Enable QUIC Proxy | ||||
| enable_quic_proxy_help: Convert TCP traffic to QUIC traffic to reduce latency and boost transmission speed. | ||||
|  | ||||
| disable_quic_input: Disable QUIC Input | ||||
| disable_quic_input_help: Disable inbound QUIC traffic, while nodes with QUIC proxy enabled continue to connect using TCP. | ||||
|  | ||||
| disable_p2p: Disable P2P | ||||
| disable_p2p_help: Disable P2P mode; route all traffic through a manually specified relay server. | ||||
|  | ||||
| @@ -108,6 +120,17 @@ proxy_forward_by_system_help: Forward packet to proxy networks via system kernel | ||||
| disable_encryption: Disable Encryption | ||||
| disable_encryption_help: Disable encryption for peers communication, default is false, must be same with peers | ||||
|  | ||||
| disable_udp_hole_punching: Disable UDP Hole Punching | ||||
| disable_udp_hole_punching_help: Disable UDP hole punching | ||||
|  | ||||
| enable_magic_dns: Enable Magic DNS | ||||
| enable_magic_dns_help: | | ||||
|   Enable magic DNS; all nodes in the network can access each other by domain name, e.g. node1.et.net. | ||||
|  | ||||
| enable_private_mode: Enable Private Mode | ||||
| enable_private_mode_help: | | ||||
|   Enable private mode; nodes whose network name or password differs from this network are not allowed to handshake with or relay through this node. | ||||
|  | ||||
| relay_network_whitelist: Network Whitelist | ||||
| relay_network_whitelist_help: | | ||||
|   Only forward traffic from the whitelist networks, supporting wildcard strings, multiple network names can be separated by spaces. | ||||
| @@ -125,6 +148,16 @@ socks5_help: | | ||||
| exit_nodes: Exit Nodes | ||||
| exit_nodes_help: Exit nodes to forward all traffic to, a virtual ipv4 address, priority is determined by the order of the list | ||||
|  | ||||
| mtu: MTU | ||||
| mtu_help: | | ||||
|   MTU of the TUN device; defaults to 1380 without encryption and 1360 with encryption. Range: 400-1380 | ||||
| mtu_placeholder: Leave blank to use the default value (1380) | ||||
|  | ||||
| mapped_listeners: Map Listeners | ||||
| mapped_listeners_help: | | ||||
|   Manually specify the public addresses of the listeners; other nodes can use these addresses to connect to this node. | ||||
|   e.g. tcp://123.123.123.123:11223; multiple entries are allowed. | ||||
|  | ||||
| status: | ||||
|   version: Version | ||||
|   local: Local | ||||
| @@ -169,3 +202,4 @@ event: | ||||
|   VpnPortalClientDisconnected: VpnPortalClientDisconnected | ||||
|   DhcpIpv4Changed: DhcpIpv4Changed | ||||
|   DhcpIpv4Conflicted: DhcpIpv4Conflicted | ||||
|   PortForwardAdded: PortForwardAdded | ||||
|   | ||||
| @@ -47,6 +47,15 @@ export interface GenerateConfigResponse { | ||||
|     error?: string; | ||||
| } | ||||
|  | ||||
| export interface ParseConfigRequest { | ||||
|     toml_config: string; | ||||
| } | ||||
|  | ||||
| export interface ParseConfigResponse { | ||||
|     config?: NetworkConfig; | ||||
|     error?: string; | ||||
| } | ||||
|  | ||||
| export class ApiClient { | ||||
|     private client: AxiosInstance; | ||||
|     private authFailedCb: Function | undefined; | ||||
| @@ -215,6 +224,18 @@ export class ApiClient { | ||||
|             return { error: 'Unknown error: ' + error }; | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     public async parse_config(config: ParseConfigRequest): Promise<ParseConfigResponse> { | ||||
|         try { | ||||
|             const response = await this.client.post<any, ParseConfigResponse>('/parse-config', config); | ||||
|             return response; | ||||
|         } catch (error) { | ||||
|             if (error instanceof AxiosError) { | ||||
|                 return { error: error.response?.data }; | ||||
|             } | ||||
|             return { error: 'Unknown error: ' + error }; | ||||
|         } | ||||
|     } | ||||
| } | ||||
|  | ||||
| export default ApiClient; | ||||
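The new parse_config endpoint is the inverse of generate_config, which makes a config-to-TOML-and-back round trip possible entirely through the ApiClient. A sketch, assuming a reachable config server at the default URL used elsewhere in the frontend:

// Round-trip sketch: config -> TOML -> config. Uses only the ApiClient
// methods visible in this diff; the server URL is assumed reachable.
import { Api, NetworkTypes } from 'easytier-frontend-lib'

async function roundTrip(): Promise<void> {
  const api = new Api.ApiClient('https://config-server.easytier.cn')
  const config = NetworkTypes.DEFAULT_NETWORK_CONFIG()

  const gen = await api.generate_config({ config })
  if (gen.error || !gen.toml_config) throw new Error(gen.error ?? 'empty toml_config')

  const parsed = await api.parse_config({ toml_config: gen.toml_config })
  if (parsed.error || !parsed.config) throw new Error(parsed.error ?? 'empty config')

  // Field order and generated defaults may differ from the input, so compare
  // semantically rather than by strict object equality.
  console.log('round-trip ok:', parsed.config)
}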
| @@ -39,6 +39,8 @@ export interface NetworkConfig { | ||||
|   use_smoltcp?: boolean | ||||
|   enable_kcp_proxy?: boolean | ||||
|   disable_kcp_input?: boolean | ||||
|   enable_quic_proxy?: boolean | ||||
|   disable_quic_input?: boolean | ||||
|   disable_p2p?: boolean | ||||
|   bind_device?: boolean | ||||
|   no_tun?: boolean | ||||
| @@ -47,6 +49,7 @@ export interface NetworkConfig { | ||||
|   multi_thread?: boolean | ||||
|   proxy_forward_by_system?: boolean | ||||
|   disable_encryption?: boolean | ||||
|   disable_udp_hole_punching?: boolean | ||||
|  | ||||
|   enable_relay_network_whitelist?: boolean | ||||
|   relay_network_whitelist: string[] | ||||
| @@ -58,6 +61,14 @@ export interface NetworkConfig { | ||||
|  | ||||
|   enable_socks5?: boolean | ||||
|   socks5_port: number | ||||
|  | ||||
|   mtu: number | null | ||||
|   mapped_listeners: string[] | ||||
|  | ||||
|   enable_magic_dns?: boolean | ||||
|   enable_private_mode?: boolean | ||||
|  | ||||
|   rpc_portal_whitelists: string[] | ||||
| } | ||||
|  | ||||
| export function DEFAULT_NETWORK_CONFIG(): NetworkConfig { | ||||
| @@ -96,6 +107,8 @@ export function DEFAULT_NETWORK_CONFIG(): NetworkConfig { | ||||
|     use_smoltcp: false, | ||||
|     enable_kcp_proxy: false, | ||||
|     disable_kcp_input: false, | ||||
|     enable_quic_proxy: false, | ||||
|     disable_quic_input: false, | ||||
|     disable_p2p: false, | ||||
|     bind_device: true, | ||||
|     no_tun: false, | ||||
| @@ -104,6 +117,7 @@ export function DEFAULT_NETWORK_CONFIG(): NetworkConfig { | ||||
|     multi_thread: true, | ||||
|     proxy_forward_by_system: false, | ||||
|     disable_encryption: false, | ||||
|     disable_udp_hole_punching: false, | ||||
|     enable_relay_network_whitelist: false, | ||||
|     relay_network_whitelist: [], | ||||
|     enable_manual_routes: false, | ||||
| @@ -111,6 +125,11 @@ export function DEFAULT_NETWORK_CONFIG(): NetworkConfig { | ||||
|     exit_nodes: [], | ||||
|     enable_socks5: false, | ||||
|     socks5_port: 1080, | ||||
|     mtu: null, | ||||
|     mapped_listeners: [], | ||||
|     enable_magic_dns: false, | ||||
|     enable_private_mode: false, | ||||
|     rpc_portal_whitelists: [], | ||||
|   } | ||||
| } | ||||
|  | ||||
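The defaults above leave every new knob switched off. Below is a sketch of a config that exercises them, built by spreading DEFAULT_NETWORK_CONFIG; the concrete values are illustrative only, and the import assumes the types are consumed through easytier-frontend-lib as in the rest of this diff.

import { NetworkTypes } from 'easytier-frontend-lib'

const cfg: NetworkTypes.NetworkConfig = {
  ...NetworkTypes.DEFAULT_NETWORK_CONFIG(),
  enable_quic_proxy: true,
  disable_udp_hole_punching: true,
  enable_magic_dns: true,
  enable_private_mode: true,
  mtu: 1360,                                        // null keeps the built-in default
  mapped_listeners: ['tcp://123.123.123.123:11223'],
  rpc_portal_whitelists: ['127.0.0.0/8', '192.168.0.0/16'],
}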
| @@ -257,4 +276,6 @@ export enum EventType { | ||||
|  | ||||
|   DhcpIpv4Changed = 'DhcpIpv4Changed', // ipv4 | null, ipv4 | null | ||||
|   DhcpIpv4Conflicted = 'DhcpIpv4Conflicted', // ipv4 | null | ||||
|  | ||||
|   PortForwardAdded = 'PortForwardAdded', // PortForwardConfigPb | ||||
| } | ||||
|   | ||||
| @@ -5,6 +5,7 @@ | ||||
|     <link rel="icon" type="image/png" href="/easytier.png" /> | ||||
|     <meta name="viewport" content="width=device-width, initial-scale=1.0" /> | ||||
|     <title>EasyTier Dashboard</title> | ||||
|     <script src="/api_meta.js"></script> | ||||
|   </head> | ||||
|   <body> | ||||
|     <div id="app"></div> | ||||
|   | ||||
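The new /api_meta.js script is read by the api-host module added later in this diff, which looks for (window as any).apiMeta and falls back to the page's own origin and path when it is absent. Here is a sketch of what such a file could contain so a self-hosted dashboard points at its own config server; the shape matches api-host.ts, and the host value is a placeholder.

// Possible contents of /api_meta.js (served statically next to index.html).
// Shape matches the `apiMeta` object read in frontend/src/modules/api-host.ts;
// the host below is a placeholder, not a real endpoint.
(window as any).apiMeta = {
  api_host: 'https://my-config-server.example.invalid',
}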
| @@ -9,11 +9,11 @@ | ||||
|     "preview": "vite preview" | ||||
|   }, | ||||
|   "dependencies": { | ||||
|     "@primevue/themes": "^4.2.1", | ||||
|     "@primevue/themes": "4.3.3", | ||||
|     "aura": "link:@primevue/themes/aura", | ||||
|     "axios": "^1.7.7", | ||||
|     "easytier-frontend-lib": "workspace:*", | ||||
|     "primevue": "^4.2.1", | ||||
|     "primevue": "4.3.3", | ||||
|     "tailwindcss-primeui": "^0.3.4", | ||||
|     "vue": "^3.5.12", | ||||
|     "vue-router": "4" | ||||
|   | ||||
| @@ -1,38 +1,93 @@ | ||||
| <script setup lang="ts"> | ||||
| import { NetworkTypes } from 'easytier-frontend-lib'; | ||||
| import { ref } from 'vue'; | ||||
| import {computed, ref} from 'vue'; | ||||
| import { Api } from 'easytier-frontend-lib' | ||||
| import {AutoComplete, Divider, Button, Textarea} from "primevue"; | ||||
| import {getInitialApiHost, cleanAndLoadApiHosts, saveApiHost} from "../modules/api-host" | ||||
|  | ||||
| const defaultApiHost = 'https://config-server.easytier.cn' | ||||
| const api = new Api.ApiClient(defaultApiHost); | ||||
| const api = computed<Api.ApiClient>(() => new Api.ApiClient(apiHost.value)); | ||||
|  | ||||
| const apiHost = ref<string>(getInitialApiHost()) | ||||
| const apiHostSuggestions = ref<Array<string>>([]) | ||||
| const apiHostSearch = async (event: { query: string }) => { | ||||
|   apiHostSuggestions.value = []; | ||||
|   let hosts = cleanAndLoadApiHosts(); | ||||
|   if (event.query) { | ||||
|     apiHostSuggestions.value.push(event.query); | ||||
|   } | ||||
|   hosts.forEach((host) => { | ||||
|     apiHostSuggestions.value.push(host.value); | ||||
|   }); | ||||
| } | ||||
|  | ||||
| const newNetworkConfig = ref<NetworkTypes.NetworkConfig>(NetworkTypes.DEFAULT_NETWORK_CONFIG()); | ||||
| const toml_config = ref<string>("Press 'Run Network' to generate TOML configuration"); | ||||
| const toml_config = ref<string>(""); | ||||
| const errorMessage = ref<string>(""); | ||||
|  | ||||
| const generateConfig = (config: NetworkTypes.NetworkConfig) => { | ||||
|     api.generate_config({ | ||||
|   saveApiHost(apiHost.value) | ||||
|   errorMessage.value = ""; | ||||
|   api.value?.generate_config({ | ||||
|         config: config | ||||
|     }).then((res) => { | ||||
|         if (res.error) { | ||||
|             toml_config.value = res.error; | ||||
|             errorMessage.value = "Generation failed: " + res.error; | ||||
|         } else if (res.toml_config) { | ||||
|             toml_config.value = res.toml_config; | ||||
|         } else { | ||||
|             toml_config.value = "Api server returned an unexpected response"; | ||||
|             errorMessage.value = "Api server returned an unexpected response"; | ||||
|         } | ||||
|     }).catch(err => { | ||||
|         errorMessage.value = "Generate request failed: " + (err instanceof Error ? err.message : String(err)); | ||||
|     }); | ||||
| }; | ||||
|  | ||||
| const parseConfig = async () => { | ||||
|   try { | ||||
|     errorMessage.value = ""; | ||||
|     const res = await api.value?.parse_config({ | ||||
|       toml_config: toml_config.value | ||||
|     }); | ||||
|      | ||||
|     if (res.error) { | ||||
|       errorMessage.value = "Parse failed: " + res.error; | ||||
|     } else if (res.config) { | ||||
|       newNetworkConfig.value = res.config; | ||||
|     } else { | ||||
|       errorMessage.value = "API returned an unexpected response"; | ||||
|     } | ||||
|   } catch (e) { | ||||
|     errorMessage.value = "Parse request failed: " + (e instanceof Error ? e.message : String(e)); | ||||
|   } | ||||
| }; | ||||
|  | ||||
| </script> | ||||
|  | ||||
| <template> | ||||
|     <div class="flex items-center justify-center m-5"> | ||||
|         <div class="sm:block md:flex w-full"> | ||||
|             <div class="sm:w-full md:w-1/2 p-4"> | ||||
|                 <div class="flex flex-col"> | ||||
|                   <div class="w-11/12 self-center "> | ||||
|                     <label>ApiHost</label> | ||||
|                     <AutoComplete id="api-host" v-model="apiHost" dropdown :suggestions="apiHostSuggestions" | ||||
|                                   @complete="apiHostSearch" class="w-full" /> | ||||
|                     <Divider /> | ||||
|                   </div> | ||||
|                 </div> | ||||
|                 <Config :cur-network="newNetworkConfig" @run-network="generateConfig" /> | ||||
|             </div> | ||||
|             <div class="sm:w-full md:w-1/2 p-4 bg-gray-100"> | ||||
|                 <pre class="whitespace-pre-wrap">{{ toml_config }}</pre> | ||||
|             <div class="sm:w-full md:w-1/2 p-4 flex flex-col h-[calc(100vh-80px)]"> | ||||
|                 <pre v-if="errorMessage" class="mb-2 p-2 rounded text-sm overflow-auto bg-red-100 text-red-700 max-h-40">{{ errorMessage }}</pre> | ||||
|                 <Textarea  | ||||
|                     v-model="toml_config"  | ||||
|                     spellcheck="false" | ||||
|                     class="w-full flex-grow p-2 bg-gray-100 whitespace-pre-wrap font-mono border-none focus:outline-none resize-none"  | ||||
|                     placeholder="Press 'Run Network' to generate TOML configuration, or paste your TOML configuration here to parse it" | ||||
|                 ></Textarea> | ||||
|                 <div class="mt-3 flex justify-center"> | ||||
|                   <Button label="Parse Config" icon="pi pi-arrow-left" icon-pos="left" @click="parseConfig" /> | ||||
|                 </div> | ||||
|             </div> | ||||
|         </div> | ||||
|     </div> | ||||
|   | ||||
| @@ -1,6 +1,6 @@ | ||||
| <script setup lang="ts"> | ||||
| import {Toolbar, IftaLabel, Select, Button, ConfirmPopup, Dialog, useConfirm, useToast, Divider} from 'primevue'; | ||||
| import { NetworkTypes, Status, Utils, Api, } from 'easytier-frontend-lib'; | ||||
| import { Toolbar, IftaLabel, Select, Button, ConfirmPopup, Dialog, useConfirm, useToast, Divider } from 'primevue'; | ||||
| import { NetworkTypes, Status, Utils, Api, ConfigEditDialog } from 'easytier-frontend-lib'; | ||||
| import { watch, computed, onMounted, onUnmounted, ref } from 'vue'; | ||||
| import { useRoute, useRouter } from 'vue-router'; | ||||
|  | ||||
| @@ -33,6 +33,7 @@ const curNetworkInfo = ref<NetworkTypes.NetworkInstance | null>(null); | ||||
|  | ||||
| const isEditing = ref(false); | ||||
| const showCreateNetworkDialog = ref(false); | ||||
| const showConfigEditDialog = ref(false); | ||||
| const newNetworkConfig = ref<NetworkTypes.NetworkConfig>(NetworkTypes.DEFAULT_NETWORK_CONFIG()); | ||||
|  | ||||
| const listInstanceIdResponse = ref<Api.ListNetworkInstanceIdResponse | undefined>(undefined); | ||||
| @@ -103,7 +104,12 @@ const updateNetworkState = async (disabled: boolean) => { | ||||
|         return; | ||||
|     } | ||||
|  | ||||
|     await props.api?.update_device_instance_state(deviceId.value, selectedInstanceId.value.uuid, disabled); | ||||
|     if (disabled || !disabledNetworkConfig.value) { | ||||
|         await props.api?.update_device_instance_state(deviceId.value, selectedInstanceId.value.uuid, disabled); | ||||
|     } else if (disabledNetworkConfig.value) { | ||||
|         await props.api?.delete_network(deviceId.value, disabledNetworkConfig.value.instance_id); | ||||
|         await props.api?.run_network(deviceId.value, disabledNetworkConfig.value); | ||||
|     } | ||||
|     await loadNetworkInstanceIds(); | ||||
| } | ||||
|  | ||||
| @@ -160,6 +166,7 @@ const createNewNetwork = async () => { | ||||
|  | ||||
| const newNetwork = () => { | ||||
|     newNetworkConfig.value = NetworkTypes.DEFAULT_NETWORK_CONFIG(); | ||||
|     newNetworkConfig.value.hostname = deviceInfo.value?.hostname; | ||||
|     isEditing.value = false; | ||||
|     showCreateNetworkDialog.value = true; | ||||
| } | ||||
| @@ -210,62 +217,97 @@ const loadDeviceInfo = async () => { | ||||
| } | ||||
|  | ||||
| const exportConfig = async () => { | ||||
|   if (!deviceId.value || !instanceId.value) { | ||||
|     toast.add({ severity: 'error', summary: 'Error', detail: 'No network instance selected', life: 2000 }); | ||||
|     return; | ||||
|   } | ||||
|     if (!deviceId.value || !instanceId.value) { | ||||
|         toast.add({ severity: 'error', summary: 'Error', detail: 'No network instance selected', life: 2000 }); | ||||
|         return; | ||||
|     } | ||||
|  | ||||
|   try { | ||||
|     let ret = await props.api?.get_network_config(deviceId.value, instanceId.value); | ||||
|     delete ret.instance_id; | ||||
|     exportJsonFile(JSON.stringify(ret, null, 2),instanceId.value +'.json'); | ||||
|   } catch (e: any) { | ||||
|     console.error(e); | ||||
|     toast.add({ severity: 'error', summary: 'Error', detail: 'Failed to export network config, error: ' + JSON.stringify(e.response.data), life: 2000 }); | ||||
|     return; | ||||
|   } | ||||
|     try { | ||||
|         let networkConfig = await props.api?.get_network_config(deviceId.value, instanceId.value); | ||||
|         delete networkConfig.instance_id; | ||||
|         let { toml_config: tomlConfig, error } = await props.api?.generate_config({ | ||||
|             config: networkConfig | ||||
|         }); | ||||
|         if (error) { | ||||
|             throw { response: { data: error } }; | ||||
|         } | ||||
|         exportTomlFile(tomlConfig ?? '', instanceId.value + '.toml'); | ||||
|     } catch (e: any) { | ||||
|         console.error(e); | ||||
|         toast.add({ severity: 'error', summary: 'Error', detail: 'Failed to export network config, error: ' + JSON.stringify(e.response.data), life: 2000 }); | ||||
|         return; | ||||
|     } | ||||
| } | ||||
|  | ||||
| const importConfig = () => { | ||||
|   configFile.value.click(); | ||||
|     configFile.value.click(); | ||||
| } | ||||
|  | ||||
| const handleFileUpload = (event: Event) => { | ||||
|   const files = (event.target as HTMLInputElement).files; | ||||
|   const file = files ? files[0] : null; | ||||
|   if (file) { | ||||
|     const files = (event.target as HTMLInputElement).files; | ||||
|     const file = files ? files[0] : null; | ||||
|     if (!file) return; | ||||
|     const reader = new FileReader(); | ||||
|     reader.onload = (e) => { | ||||
|       try { | ||||
|         let str = e.target?.result?.toString(); | ||||
|         if(str){ | ||||
|           const config = JSON.parse(str); | ||||
|           if(config === null || typeof config !== "object"){ | ||||
|             throw new Error(); | ||||
|           } | ||||
|           Object.assign(newNetworkConfig.value, config); | ||||
|           toast.add({ severity: 'success', summary: 'Import Success', detail: "Config file import success", life: 2000 }); | ||||
|     reader.onload = async (e) => { | ||||
|         try { | ||||
|             let tomlConfig = e.target?.result?.toString(); | ||||
|             if (!tomlConfig) return; | ||||
|             const resp = await props.api?.parse_config({ toml_config: tomlConfig }); | ||||
|             if (resp.error) { | ||||
|                 throw resp.error; | ||||
|             } | ||||
|  | ||||
|             const config = resp.config; | ||||
|             if (!config) return; | ||||
|  | ||||
|             config.instance_id = newNetworkConfig.value?.instance_id ?? config?.instance_id; | ||||
|  | ||||
|             Object.assign(newNetworkConfig.value, resp.config); | ||||
|             toast.add({ severity: 'success', summary: 'Import Success', detail: "Config file import success", life: 2000 }); | ||||
|         } catch (error) { | ||||
|             toast.add({ severity: 'error', summary: 'Error', detail: 'Config file parse error: ' + error, life: 2000 }); | ||||
|         } | ||||
|       } catch (error) { | ||||
|         toast.add({ severity: 'error', summary: 'Error', detail: 'Config file parse error.', life: 2000 }); | ||||
|       } | ||||
|       configFile.value.value = null; | ||||
|         configFile.value.value = null; | ||||
|     } | ||||
|     reader.readAsText(file); | ||||
|   } | ||||
| } | ||||
|  | ||||
| const exportJsonFile = (context: string, name: string) => { | ||||
|   let url = window.URL.createObjectURL(new Blob([context], { type: 'application/json' })); | ||||
|   let link = document.createElement('a'); | ||||
|   link.style.display = 'none'; | ||||
|   link.href = url; | ||||
|   link.setAttribute('download', name); | ||||
|   document.body.appendChild(link); | ||||
|   link.click(); | ||||
| const exportTomlFile = (context: string, name: string) => { | ||||
|     let url = window.URL.createObjectURL(new Blob([context], { type: 'application/toml' })); | ||||
|     let link = document.createElement('a'); | ||||
|     link.style.display = 'none'; | ||||
|     link.href = url; | ||||
|     link.setAttribute('download', name); | ||||
|     document.body.appendChild(link); | ||||
|     link.click(); | ||||
|  | ||||
|   document.body.removeChild(link); | ||||
|   window.URL.revokeObjectURL(url); | ||||
|     document.body.removeChild(link); | ||||
|     window.URL.revokeObjectURL(url); | ||||
| } | ||||
|  | ||||
| const generateConfig = async (config: NetworkTypes.NetworkConfig): Promise<string> => { | ||||
|     let { toml_config: tomlConfig, error } = await props.api?.generate_config({ config }); | ||||
|     if (error) { | ||||
|         throw error; | ||||
|     } | ||||
|     return tomlConfig ?? ''; | ||||
| } | ||||
|  | ||||
| const saveConfig = async (tomlConfig: string): Promise<void> => { | ||||
|     let resp = await props.api?.parse_config({ toml_config: tomlConfig }); | ||||
|     if (resp.error) { | ||||
|         throw resp.error; | ||||
|     }; | ||||
|     const config = resp.config; | ||||
|     if (!config) { | ||||
|         throw new Error("Parsed config is empty"); | ||||
|     } | ||||
|     config.instance_id = disabledNetworkConfig.value?.instance_id ?? config?.instance_id; | ||||
|     if (networkIsDisabled.value) { | ||||
|         disabledNetworkConfig.value = config; | ||||
|     } else { | ||||
|         newNetworkConfig.value = config; | ||||
|     } | ||||
| } | ||||
|  | ||||
| let periodFunc = new Utils.PeriodicTask(async () => { | ||||
| @@ -287,18 +329,23 @@ onUnmounted(() => { | ||||
| </script> | ||||
|  | ||||
| <template> | ||||
|     <input type="file" @change="handleFileUpload" class="hidden" accept="application/json" ref="configFile"/> | ||||
|     <input type="file" @change="handleFileUpload" class="hidden" accept="application/toml" ref="configFile" /> | ||||
|     <ConfirmPopup></ConfirmPopup> | ||||
|     <Dialog v-model:visible="showCreateNetworkDialog" modal :header="!isEditing ? 'Create New Network' : 'Edit Network'" | ||||
|         :style="{ width: '55rem' }"> | ||||
|     <Dialog v-if="!networkIsDisabled" v-model:visible="showCreateNetworkDialog" modal | ||||
|         :header="!isEditing ? 'Create New Network' : 'Edit Network'" :style="{ width: '55rem' }"> | ||||
|         <div class="flex flex-col"> | ||||
|           <div class="w-11/12 self-center "> | ||||
|             <Button @click="importConfig" icon="pi pi-file-import" label="Import" iconPos="right" /> | ||||
|             <Divider /> | ||||
|           </div> | ||||
|             <div class="w-11/12 self-center space-x-2"> | ||||
|                 <Button @click="showConfigEditDialog = true" icon="pi pi-pen-to-square" label="Edit File" iconPos="right" /> | ||||
|                 <Button @click="importConfig" icon="pi pi-file-import" label="Import" iconPos="right" /> | ||||
|             </div> | ||||
|         </div> | ||||
|         <Divider /> | ||||
|         <Config :cur-network="newNetworkConfig" @run-network="createNewNetwork"></Config> | ||||
|     </Dialog> | ||||
|     <ConfigEditDialog v-if="networkIsDisabled" v-model:visible="showCreateNetworkDialog" | ||||
|         :cur-network="disabledNetworkConfig" :generate-config="generateConfig" :save-config="saveConfig" /> | ||||
|     <ConfigEditDialog v-else v-model:visible="showConfigEditDialog" :cur-network="newNetworkConfig" | ||||
|         :generate-config="generateConfig" :save-config="saveConfig" /> | ||||
|  | ||||
|     <Toolbar> | ||||
|         <template #start> | ||||
| @@ -328,7 +375,7 @@ onUnmounted(() => { | ||||
|         </Status> | ||||
|         <Divider /> | ||||
|         <div class="text-center"> | ||||
|           <Button @click="updateNetworkState(true)" label="Disable Network" severity="warn" /> | ||||
|             <Button @click="updateNetworkState(true)" label="Disable Network" severity="warn" /> | ||||
|         </div> | ||||
|     </div> | ||||
|  | ||||
|   | ||||
| @@ -4,6 +4,7 @@ import { Card, InputText, Password, Button, AutoComplete } from 'primevue'; | ||||
| import { useRouter } from 'vue-router'; | ||||
| import { useToast } from 'primevue/usetoast'; | ||||
| import { Api } from 'easytier-frontend-lib'; | ||||
| import {getInitialApiHost, cleanAndLoadApiHosts, saveApiHost} from "../modules/api-host" | ||||
|  | ||||
| defineProps<{ | ||||
|     isRegistering: boolean; | ||||
| @@ -20,56 +21,6 @@ const registerPassword = ref(''); | ||||
| const captcha = ref(''); | ||||
| const captchaSrc = computed(() => api.value.captcha_url()); | ||||
|  | ||||
| interface ApiHost { | ||||
|     value: string; | ||||
|     usedAt: number; | ||||
| } | ||||
|  | ||||
| const isValidHttpUrl = (s: string): boolean => { | ||||
|     let url; | ||||
|  | ||||
|     try { | ||||
|         url = new URL(s); | ||||
|     } catch (_) { | ||||
|         return false; | ||||
|     } | ||||
|  | ||||
|     return url.protocol === "http:" || url.protocol === "https:"; | ||||
| } | ||||
|  | ||||
| const cleanAndLoadApiHosts = (): Array<ApiHost> => { | ||||
|     const maxHosts = 10; | ||||
|     const apiHosts = localStorage.getItem('apiHosts'); | ||||
|     if (apiHosts) { | ||||
|         const hosts: Array<ApiHost> = JSON.parse(apiHosts); | ||||
|         // sort by usedAt | ||||
|         hosts.sort((a, b) => b.usedAt - a.usedAt); | ||||
|  | ||||
|         // only keep the first 10 | ||||
|         if (hosts.length > maxHosts) { | ||||
|             hosts.splice(maxHosts); | ||||
|         } | ||||
|  | ||||
|         localStorage.setItem('apiHosts', JSON.stringify(hosts)); | ||||
|         return hosts; | ||||
|     } else { | ||||
|         return []; | ||||
|     } | ||||
| }; | ||||
|  | ||||
| const saveApiHost = (host: string) => { | ||||
|     console.log('Save API Host:', host); | ||||
|     if (!isValidHttpUrl(host)) { | ||||
|         console.error('Invalid API Host:', host); | ||||
|         return; | ||||
|     } | ||||
|  | ||||
|     let hosts = cleanAndLoadApiHosts(); | ||||
|     const newHost: ApiHost = { value: host, usedAt: Date.now() }; | ||||
|     hosts = hosts.filter((h) => h.value !== host); | ||||
|     hosts.push(newHost); | ||||
|     localStorage.setItem('apiHosts', JSON.stringify(hosts)); | ||||
| }; | ||||
|  | ||||
| const onSubmit = async () => { | ||||
|     // Add your login logic here | ||||
| @@ -100,16 +51,6 @@ const onRegister = async () => { | ||||
|     } | ||||
| }; | ||||
|  | ||||
| const getInitialApiHost = (): string => { | ||||
|     const hosts = cleanAndLoadApiHosts(); | ||||
|     if (hosts.length > 0) { | ||||
|         return hosts[0].value; | ||||
|     } else { | ||||
|         return defaultApiHost; | ||||
|     } | ||||
| }; | ||||
|  | ||||
| const defaultApiHost = 'https://config-server.easytier.cn' | ||||
| const apiHost = ref<string>(getInitialApiHost()) | ||||
| const apiHostSuggestions = ref<Array<string>>([]) | ||||
| const apiHostSearch = async (event: { query: string }) => { | ||||
| @@ -124,10 +65,7 @@ const apiHostSearch = async (event: { query: string }) => { | ||||
| } | ||||
|  | ||||
| onMounted(() => { | ||||
|     let hosts = cleanAndLoadApiHosts(); | ||||
|     if (hosts.length === 0) { | ||||
|         saveApiHost(defaultApiHost); | ||||
|     } | ||||
|  | ||||
| }); | ||||
|  | ||||
| </script> | ||||
|   | ||||
							
								
								
									
easytier-web/frontend/src/modules/api-host.ts (new file, 71 lines)
							| @@ -0,0 +1,71 @@ | ||||
| interface ApiHost { | ||||
|     value: string; | ||||
|     usedAt: number; | ||||
| } | ||||
|  | ||||
| let apiMeta: { | ||||
|     api_host: string; | ||||
| } | undefined = (window as any).apiMeta; | ||||
|  | ||||
| // remove trailing slashes from the URL | ||||
| const cleanUrl = (url: string) => url.replace(/\/+$/, ''); | ||||
|  | ||||
| const defaultApiHost = cleanUrl(apiMeta?.api_host ?? `${location.origin}${location.pathname}`); | ||||
|  | ||||
| const isValidHttpUrl = (s: string): boolean => { | ||||
|     let url; | ||||
|  | ||||
|     try { | ||||
|         url = new URL(s); | ||||
|     } catch (_) { | ||||
|         return false; | ||||
|     } | ||||
|  | ||||
|     return url.protocol === "http:" || url.protocol === "https:"; | ||||
| }; | ||||
|  | ||||
| const cleanAndLoadApiHosts = (): Array<ApiHost> => { | ||||
|     const maxHosts = 10; | ||||
|     const apiHosts = localStorage.getItem('apiHosts'); | ||||
|     if (apiHosts) { | ||||
|         const hosts: Array<ApiHost> = JSON.parse(apiHosts); | ||||
|         // sort by usedAt | ||||
|         hosts.sort((a, b) => b.usedAt - a.usedAt); | ||||
|  | ||||
|         // only keep the first 10 | ||||
|         if (hosts.length > maxHosts) { | ||||
|             hosts.splice(maxHosts); | ||||
|         } | ||||
|  | ||||
|         localStorage.setItem('apiHosts', JSON.stringify(hosts)); | ||||
|         return hosts; | ||||
|     } else { | ||||
|         return []; | ||||
|     } | ||||
| }; | ||||
|  | ||||
| const saveApiHost = (host: string) => { | ||||
|     console.log('Save API Host:', host); | ||||
|     if (!isValidHttpUrl(host)) { | ||||
|         console.error('Invalid API Host:', host); | ||||
|         return; | ||||
|     } | ||||
|  | ||||
|     let hosts = cleanAndLoadApiHosts(); | ||||
|     const newHost: ApiHost = { value: host, usedAt: Date.now() }; | ||||
|     hosts = hosts.filter((h) => h.value !== host); | ||||
|     hosts.push(newHost); | ||||
|     localStorage.setItem('apiHosts', JSON.stringify(hosts)); | ||||
| }; | ||||
|  | ||||
| const getInitialApiHost = (): string => { | ||||
|     const hosts = cleanAndLoadApiHosts(); | ||||
|     if (hosts.length > 0) { | ||||
|         return hosts[0].value; | ||||
|     } else { | ||||
|         saveApiHost(defaultApiHost) | ||||
|         return defaultApiHost; | ||||
|     } | ||||
| }; | ||||
|  | ||||
| export { getInitialApiHost, cleanAndLoadApiHosts, saveApiHost } | ||||
| @@ -1,9 +1,22 @@ | ||||
| import { defineConfig } from 'vite' | ||||
| import vue from '@vitejs/plugin-vue' | ||||
| import { viteSingleFile } from "vite-plugin-singlefile" | ||||
| // import { viteSingleFile } from "vite-plugin-singlefile" | ||||
|  | ||||
| const WEB_BASE_URL = process.env.WEB_BASE_URL || ''; | ||||
| const API_BASE_URL = process.env.API_BASE_URL || 'http://localhost:11211'; | ||||
|  | ||||
| // https://vite.dev/config/ | ||||
| export default defineConfig({ | ||||
|   base: '', | ||||
|   plugins: [vue(), viteSingleFile()], | ||||
|   base: WEB_BASE_URL, | ||||
|   plugins: [vue(),/* viteSingleFile() */], | ||||
|   server: { | ||||
|     proxy: { | ||||
|       "/api": { | ||||
|         target: API_BASE_URL, | ||||
|       }, | ||||
|       "/api_meta.js": { | ||||
|         target: API_BASE_URL, | ||||
|       }, | ||||
|     } | ||||
|   } | ||||
| }) | ||||
|   | ||||
| @@ -22,3 +22,12 @@ cli: | ||||
|   api_server_port: | ||||
|     en: "The port to listen for the restful server, acting as ApiHost and used by the web frontend" | ||||
|     zh-CN: "restful 服务器的监听端口,作为 ApiHost 并被 web 前端使用" | ||||
|   web_server_port: | ||||
|     en: "The port to listen for the web dashboard server, default is same as the api server port" | ||||
|     zh-CN: "web dashboard 服务器的监听端口, 默认为与 api 服务器端口相同" | ||||
|   no_web: | ||||
|     en: "Do not run the web dashboard server" | ||||
|     zh-CN: "不运行 web dashboard 服务器" | ||||
|   api_host: | ||||
|     en: "The URL of the API server, used by the web frontend to connect to" | ||||
|     zh-CN: "API 服务器的 URL,用于 web 前端连接" | ||||
| @@ -1,21 +1,24 @@ | ||||
| pub mod session; | ||||
| pub mod storage; | ||||
|  | ||||
| use std::sync::Arc; | ||||
| use std::sync::{ | ||||
|     atomic::{AtomicU32, Ordering}, | ||||
|     Arc, | ||||
| }; | ||||
|  | ||||
| use dashmap::DashMap; | ||||
| use easytier::{ | ||||
|     common::scoped_task::ScopedTask, proto::web::HeartbeatRequest, tunnel::TunnelListener, | ||||
| }; | ||||
| use easytier::{proto::web::HeartbeatRequest, tunnel::TunnelListener}; | ||||
| use session::Session; | ||||
| use storage::{Storage, StorageToken}; | ||||
| use tokio::task::JoinSet; | ||||
|  | ||||
| use crate::db::Db; | ||||
| use crate::db::{Db, UserIdInDb}; | ||||
|  | ||||
| #[derive(Debug)] | ||||
| pub struct ClientManager { | ||||
|     accept_task: Option<ScopedTask<()>>, | ||||
|     clear_task: Option<ScopedTask<()>>, | ||||
|     tasks: JoinSet<()>, | ||||
|  | ||||
|     listeners_cnt: Arc<AtomicU32>, | ||||
|  | ||||
|     client_sessions: Arc<DashMap<url::Url, Arc<Session>>>, | ||||
|     storage: Storage, | ||||
| @@ -23,24 +26,35 @@ pub struct ClientManager { | ||||
|  | ||||
| impl ClientManager { | ||||
|     pub fn new(db: Db) -> Self { | ||||
|         let client_sessions = Arc::new(DashMap::new()); | ||||
|         let sessions: Arc<DashMap<url::Url, Arc<Session>>> = client_sessions.clone(); | ||||
|         let mut tasks = JoinSet::new(); | ||||
|         tasks.spawn(async move { | ||||
|             loop { | ||||
|                 tokio::time::sleep(std::time::Duration::from_secs(15)).await; | ||||
|                 sessions.retain(|_, session| session.is_running()); | ||||
|             } | ||||
|         }); | ||||
|         ClientManager { | ||||
|             accept_task: None, | ||||
|             clear_task: None, | ||||
|             tasks, | ||||
|  | ||||
|             client_sessions: Arc::new(DashMap::new()), | ||||
|             listeners_cnt: Arc::new(AtomicU32::new(0)), | ||||
|  | ||||
|             client_sessions, | ||||
|             storage: Storage::new(db), | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     pub async fn serve<L: TunnelListener + 'static>( | ||||
|     pub async fn add_listener<L: TunnelListener + 'static>( | ||||
|         &mut self, | ||||
|         mut listener: L, | ||||
|     ) -> Result<(), anyhow::Error> { | ||||
|         listener.listen().await?; | ||||
|  | ||||
|         self.listeners_cnt.fetch_add(1, Ordering::Relaxed); | ||||
|         let sessions = self.client_sessions.clone(); | ||||
|         let storage = self.storage.weak_ref(); | ||||
|         let task = tokio::spawn(async move { | ||||
|         let listeners_cnt = self.listeners_cnt.clone(); | ||||
|         self.tasks.spawn(async move { | ||||
|             while let Ok(tunnel) = listener.accept().await { | ||||
|                 let info = tunnel.info().unwrap(); | ||||
|                 let client_url: url::Url = info.remote_addr.unwrap().into(); | ||||
| @@ -49,24 +63,14 @@ impl ClientManager { | ||||
|                 session.serve(tunnel).await; | ||||
|                 sessions.insert(client_url, Arc::new(session)); | ||||
|             } | ||||
|             listeners_cnt.fetch_sub(1, Ordering::Relaxed); | ||||
|         }); | ||||
|  | ||||
|         self.accept_task = Some(ScopedTask::from(task)); | ||||
|  | ||||
|         let sessions = self.client_sessions.clone(); | ||||
|         let task = tokio::spawn(async move { | ||||
|             loop { | ||||
|                 tokio::time::sleep(std::time::Duration::from_secs(15)).await; | ||||
|                 sessions.retain(|_, session| session.is_running()); | ||||
|             } | ||||
|         }); | ||||
|         self.clear_task = Some(ScopedTask::from(task)); | ||||
|  | ||||
|         Ok(()) | ||||
|     } | ||||
|  | ||||
|     pub fn is_running(&self) -> bool { | ||||
|         self.accept_task.is_some() && self.clear_task.is_some() | ||||
|         self.listeners_cnt.load(Ordering::Relaxed) > 0 | ||||
|     } | ||||
|  | ||||
|     pub async fn list_sessions(&self) -> Vec<StorageToken> { | ||||
| @@ -86,15 +90,21 @@ impl ClientManager { | ||||
|         ret | ||||
|     } | ||||
|  | ||||
|     pub fn get_session_by_machine_id(&self, machine_id: &uuid::Uuid) -> Option<Arc<Session>> { | ||||
|         let c_url = self.storage.get_client_url_by_machine_id(machine_id)?; | ||||
|     pub fn get_session_by_machine_id( | ||||
|         &self, | ||||
|         user_id: UserIdInDb, | ||||
|         machine_id: &uuid::Uuid, | ||||
|     ) -> Option<Arc<Session>> { | ||||
|         let c_url = self | ||||
|             .storage | ||||
|             .get_client_url_by_machine_id(user_id, machine_id)?; | ||||
|         self.client_sessions | ||||
|             .get(&c_url) | ||||
|             .map(|item| item.value().clone()) | ||||
|     } | ||||
|  | ||||
|     pub async fn list_machine_by_token(&self, token: String) -> Vec<url::Url> { | ||||
|         self.storage.list_token_clients(&token) | ||||
|     pub async fn list_machine_by_user_id(&self, user_id: UserIdInDb) -> Vec<url::Url> { | ||||
|         self.storage.list_user_clients(user_id) | ||||
|     } | ||||
|  | ||||
|     pub async fn get_heartbeat_requests(&self, client_url: &url::Url) -> Option<HeartbeatRequest> { | ||||
| @@ -118,6 +128,7 @@ mod tests { | ||||
|         }, | ||||
|         web_client::WebClient, | ||||
|     }; | ||||
|     use sqlx::Executor; | ||||
|  | ||||
|     use crate::{client_manager::ClientManager, db::Db}; | ||||
|  | ||||
| @@ -125,10 +136,16 @@ mod tests { | ||||
|     async fn test_client() { | ||||
|         let listener = UdpTunnelListener::new("udp://0.0.0.0:54333".parse().unwrap()); | ||||
|         let mut mgr = ClientManager::new(Db::memory_db().await); | ||||
|         mgr.serve(Box::new(listener)).await.unwrap(); | ||||
|         mgr.add_listener(Box::new(listener)).await.unwrap(); | ||||
|  | ||||
|         mgr.db() | ||||
|             .inner() | ||||
|             .execute("INSERT INTO users (username, password) VALUES ('test', 'test')") | ||||
|             .await | ||||
|             .unwrap(); | ||||
|  | ||||
|         let connector = UdpTunnelConnector::new("udp://127.0.0.1:54333".parse().unwrap()); | ||||
|         let _c = WebClient::new(connector, "test"); | ||||
|         let _c = WebClient::new(connector, "test", "test"); | ||||
|  | ||||
|         wait_for_condition( | ||||
|             || async { mgr.client_sessions.len() == 1 }, | ||||
|   | ||||
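
The test above exercises the refactored listener API end to end. For reference, a minimal sketch of the same flow outside the test harness, assuming the crate-local Db and ClientManager types and easytier's UDP tunnel listener (the function name and port are placeholders):

    use easytier::tunnel::udp::UdpTunnelListener;
    use crate::{client_manager::ClientManager, db::Db};

    async fn demo(db: Db) -> anyhow::Result<()> {
        let mut mgr = ClientManager::new(db);
        // each add_listener() spawns an accept loop and bumps listeners_cnt
        mgr.add_listener(Box::new(UdpTunnelListener::new(
            "udp://0.0.0.0:54333".parse().unwrap(),
        )))
        .await?;
        assert!(mgr.is_running()); // true while at least one accept loop is alive
        Ok(())
    }
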
| @@ -1,5 +1,6 @@ | ||||
| use std::{fmt::Debug, str::FromStr as _, sync::Arc}; | ||||
|  | ||||
| use anyhow::Context; | ||||
| use easytier::{ | ||||
|     common::scoped_task::ScopedTask, | ||||
|     proto::{ | ||||
| @@ -68,6 +69,66 @@ struct SessionRpcService { | ||||
|     data: SharedSessionData, | ||||
| } | ||||
|  | ||||
| impl SessionRpcService { | ||||
|     async fn handle_heartbeat( | ||||
|         &self, | ||||
|         req: HeartbeatRequest, | ||||
|     ) -> rpc_types::error::Result<HeartbeatResponse> { | ||||
|         let mut data = self.data.write().await; | ||||
|  | ||||
|         let Ok(storage) = Storage::try_from(data.storage.clone()) else { | ||||
|             tracing::error!("Failed to get storage"); | ||||
|             return Ok(HeartbeatResponse {}); | ||||
|         }; | ||||
|  | ||||
|         let machine_id: uuid::Uuid = | ||||
|             req.machine_id | ||||
|                 .clone() | ||||
|                 .map(Into::into) | ||||
|                 .ok_or(anyhow::anyhow!( | ||||
|                     "Machine id is not set correctly, expect uuid but got: {:?}", | ||||
|                     req.machine_id | ||||
|                 ))?; | ||||
|  | ||||
|         let user_id = storage | ||||
|             .db() | ||||
|             .get_user_id_by_token(req.user_token.clone()) | ||||
|             .await | ||||
|             .with_context(|| { | ||||
|                 format!( | ||||
|                     "Failed to get user id by token from db: {:?}", | ||||
|                     req.user_token | ||||
|                 ) | ||||
|             })? | ||||
|             .ok_or(anyhow::anyhow!( | ||||
|                 "User not found by token: {:?}", | ||||
|                 req.user_token | ||||
|             ))?; | ||||
|  | ||||
|         if data.req.replace(req.clone()).is_none() { | ||||
|             assert!(data.storage_token.is_none()); | ||||
|             data.storage_token = Some(StorageToken { | ||||
|                 token: req.user_token.clone().into(), | ||||
|                 client_url: data.client_url.clone(), | ||||
|                 machine_id, | ||||
|                 user_id, | ||||
|             }); | ||||
|         } | ||||
|  | ||||
|         let Ok(report_time) = chrono::DateTime::<chrono::Local>::from_str(&req.report_time) else { | ||||
|             tracing::error!("Failed to parse report time: {:?}", req.report_time); | ||||
|             return Ok(HeartbeatResponse {}); | ||||
|         }; | ||||
|         storage.update_client( | ||||
|             data.storage_token.as_ref().unwrap().clone(), | ||||
|             report_time.timestamp(), | ||||
|         ); | ||||
|  | ||||
|         let _ = data.notifier.send(req); | ||||
|         Ok(HeartbeatResponse {}) | ||||
|     } | ||||
| } | ||||
|  | ||||
| #[async_trait::async_trait] | ||||
| impl WebServerService for SessionRpcService { | ||||
|     type Controller = BaseController; | ||||
| @@ -77,34 +138,13 @@ impl WebServerService for SessionRpcService { | ||||
|         _: BaseController, | ||||
|         req: HeartbeatRequest, | ||||
|     ) -> rpc_types::error::Result<HeartbeatResponse> { | ||||
|         let mut data = self.data.write().await; | ||||
|         if data.req.replace(req.clone()).is_none() { | ||||
|             assert!(data.storage_token.is_none()); | ||||
|             data.storage_token = Some(StorageToken { | ||||
|                 token: req.user_token.clone().into(), | ||||
|                 client_url: data.client_url.clone(), | ||||
|                 machine_id: req | ||||
|                     .machine_id | ||||
|                     .clone() | ||||
|                     .map(Into::into) | ||||
|                     .unwrap_or(uuid::Uuid::new_v4()), | ||||
|             }); | ||||
|         let ret = self.handle_heartbeat(req).await; | ||||
|         if ret.is_err() { | ||||
|             tracing::warn!("Failed to handle heartbeat: {:?}", ret); | ||||
|             // sleep for a while to avoid client busy loop | ||||
|             tokio::time::sleep(std::time::Duration::from_secs(2)).await; | ||||
|         } | ||||
|  | ||||
|         if let Ok(storage) = Storage::try_from(data.storage.clone()) { | ||||
|             let Ok(report_time) = chrono::DateTime::<chrono::Local>::from_str(&req.report_time) | ||||
|             else { | ||||
|                 tracing::error!("Failed to parse report time: {:?}", req.report_time); | ||||
|                 return Ok(HeartbeatResponse {}); | ||||
|             }; | ||||
|             storage.update_client( | ||||
|                 data.storage_token.as_ref().unwrap().clone(), | ||||
|                 report_time.timestamp(), | ||||
|             ); | ||||
|         } | ||||
|  | ||||
|         let _ = data.notifier.send(req); | ||||
|         Ok(HeartbeatResponse {}) | ||||
|         ret | ||||
|     } | ||||
| } | ||||
|  | ||||
|   | ||||
| @@ -2,7 +2,7 @@ use std::sync::{Arc, Weak}; | ||||
|  | ||||
| use dashmap::DashMap; | ||||
|  | ||||
| use crate::db::Db; | ||||
| use crate::db::{Db, UserIdInDb}; | ||||
|  | ||||
| // use this to maintain Storage | ||||
| #[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] | ||||
| @@ -10,21 +10,19 @@ pub struct StorageToken { | ||||
|     pub token: String, | ||||
|     pub client_url: url::Url, | ||||
|     pub machine_id: uuid::Uuid, | ||||
|     pub user_id: UserIdInDb, | ||||
| } | ||||
|  | ||||
| #[derive(Debug, Clone)] | ||||
| struct ClientInfo { | ||||
|     client_url: url::Url, | ||||
|     machine_id: uuid::Uuid, | ||||
|     token: String, | ||||
|     storage_token: StorageToken, | ||||
|     report_time: i64, | ||||
| } | ||||
|  | ||||
| #[derive(Debug)] | ||||
| pub struct StorageInner { | ||||
|     // some map for indexing | ||||
|     token_clients_map: DashMap<String, DashMap<uuid::Uuid, ClientInfo>>, | ||||
|     machine_client_url_map: DashMap<uuid::Uuid, ClientInfo>, | ||||
|     user_clients_map: DashMap<UserIdInDb, DashMap<uuid::Uuid, ClientInfo>>, | ||||
|     pub db: Db, | ||||
| } | ||||
|  | ||||
| @@ -43,8 +41,7 @@ impl TryFrom<WeakRefStorage> for Storage { | ||||
| impl Storage { | ||||
|     pub fn new(db: Db) -> Self { | ||||
|         Storage(Arc::new(StorageInner { | ||||
|             token_clients_map: DashMap::new(), | ||||
|             machine_client_url_map: DashMap::new(), | ||||
|             user_clients_map: DashMap::new(), | ||||
|             db, | ||||
|         })) | ||||
|     } | ||||
| @@ -54,17 +51,22 @@ impl Storage { | ||||
|         machine_id: &uuid::Uuid, | ||||
|         client_url: &url::Url, | ||||
|     ) { | ||||
|         map.remove_if(&machine_id, |_, v| v.client_url == *client_url); | ||||
|         map.remove_if(&machine_id, |_, v| { | ||||
|             v.storage_token.client_url == *client_url | ||||
|         }); | ||||
|     } | ||||
|  | ||||
|     fn update_mid_to_client_info_map( | ||||
|         map: &DashMap<uuid::Uuid, ClientInfo>, | ||||
|         client_info: &ClientInfo, | ||||
|     ) { | ||||
|         map.entry(client_info.machine_id) | ||||
|         map.entry(client_info.storage_token.machine_id) | ||||
|             .and_modify(|e| { | ||||
|                 if e.report_time < client_info.report_time { | ||||
|                     assert_eq!(e.machine_id, client_info.machine_id); | ||||
|                     assert_eq!( | ||||
|                         e.storage_token.machine_id, | ||||
|                         client_info.storage_token.machine_id | ||||
|                     ); | ||||
|                     *e = client_info.clone(); | ||||
|                 } | ||||
|             }) | ||||
| @@ -74,53 +76,51 @@ impl Storage { | ||||
|     pub fn update_client(&self, stoken: StorageToken, report_time: i64) { | ||||
|         let inner = self | ||||
|             .0 | ||||
|             .token_clients_map | ||||
|             .entry(stoken.token.clone()) | ||||
|             .user_clients_map | ||||
|             .entry(stoken.user_id) | ||||
|             .or_insert_with(DashMap::new); | ||||
|  | ||||
|         let client_info = ClientInfo { | ||||
|             client_url: stoken.client_url.clone(), | ||||
|             machine_id: stoken.machine_id, | ||||
|             token: stoken.token.clone(), | ||||
|             storage_token: stoken.clone(), | ||||
|             report_time, | ||||
|         }; | ||||
|  | ||||
|         Self::update_mid_to_client_info_map(&inner, &client_info); | ||||
|         Self::update_mid_to_client_info_map(&self.0.machine_client_url_map, &client_info); | ||||
|     } | ||||
|  | ||||
|     pub fn remove_client(&self, stoken: &StorageToken) { | ||||
|         self.0.token_clients_map.remove_if(&stoken.token, |_, set| { | ||||
|             Self::remove_mid_to_client_info_map(set, &stoken.machine_id, &stoken.client_url); | ||||
|             set.is_empty() | ||||
|         }); | ||||
|  | ||||
|         Self::remove_mid_to_client_info_map( | ||||
|             &self.0.machine_client_url_map, | ||||
|             &stoken.machine_id, | ||||
|             &stoken.client_url, | ||||
|         ); | ||||
|         self.0 | ||||
|             .user_clients_map | ||||
|             .remove_if(&stoken.user_id, |_, set| { | ||||
|                 Self::remove_mid_to_client_info_map(set, &stoken.machine_id, &stoken.client_url); | ||||
|                 set.is_empty() | ||||
|             }); | ||||
|     } | ||||
|  | ||||
|     pub fn weak_ref(&self) -> WeakRefStorage { | ||||
|         Arc::downgrade(&self.0) | ||||
|     } | ||||
|  | ||||
|     pub fn get_client_url_by_machine_id(&self, machine_id: &uuid::Uuid) -> Option<url::Url> { | ||||
|         self.0 | ||||
|             .machine_client_url_map | ||||
|             .get(&machine_id) | ||||
|             .map(|info| info.client_url.clone()) | ||||
|     pub fn get_client_url_by_machine_id( | ||||
|         &self, | ||||
|         user_id: UserIdInDb, | ||||
|         machine_id: &uuid::Uuid, | ||||
|     ) -> Option<url::Url> { | ||||
|         self.0.user_clients_map.get(&user_id).and_then(|info_map| { | ||||
|             info_map | ||||
|                 .get(machine_id) | ||||
|                 .map(|info| info.storage_token.client_url.clone()) | ||||
|         }) | ||||
|     } | ||||
|  | ||||
|     pub fn list_token_clients(&self, token: &str) -> Vec<url::Url> { | ||||
|     pub fn list_user_clients(&self, user_id: UserIdInDb) -> Vec<url::Url> { | ||||
|         self.0 | ||||
|             .token_clients_map | ||||
|             .get(token) | ||||
|             .user_clients_map | ||||
|             .get(&user_id) | ||||
|             .map(|info_map| { | ||||
|                 info_map | ||||
|                     .iter() | ||||
|                     .map(|info| info.value().client_url.clone()) | ||||
|                     .map(|info| info.value().storage_token.client_url.clone()) | ||||
|                     .collect() | ||||
|             }) | ||||
|             .unwrap_or_default() | ||||
|   | ||||
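
Since ClientInfo now embeds the full StorageToken and the index is keyed by user id, lookups need both the user id and the machine id. A minimal sketch of the new Storage API, assuming a Db handle and a user id obtained elsewhere (the token string and URL are placeholders):

    use crate::client_manager::storage::{Storage, StorageToken};
    use crate::db::{Db, UserIdInDb};

    fn demo(db: Db, user_id: UserIdInDb) {
        let storage = Storage::new(db);
        let machine_id = uuid::Uuid::new_v4();
        let stoken = StorageToken {
            token: "user-token".to_string(),
            client_url: "udp://1.2.3.4:11010".parse().unwrap(),
            machine_id,
            user_id,
        };
        storage.update_client(stoken.clone(), chrono::Local::now().timestamp());
        // lookups are scoped by user id, no longer by raw token
        assert!(storage
            .get_client_url_by_machine_id(user_id, &machine_id)
            .is_some());
        assert_eq!(storage.list_user_clients(user_id).len(), 1);
        storage.remove_client(&stoken);
    }
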
| @@ -12,7 +12,7 @@ use sqlx::{migrate::MigrateDatabase as _, types::chrono, Sqlite, SqlitePool}; | ||||
|  | ||||
| use crate::migrator; | ||||
|  | ||||
| type UserIdInDb = i32; | ||||
| pub type UserIdInDb = i32; | ||||
|  | ||||
| pub enum ListNetworkProps { | ||||
|     All, | ||||
|   | ||||
| @@ -5,14 +5,17 @@ extern crate rust_i18n; | ||||
|  | ||||
| use std::sync::Arc; | ||||
|  | ||||
| use clap::{command, Parser}; | ||||
| use clap::Parser; | ||||
| use easytier::{ | ||||
|     common::{ | ||||
|         config::{ConfigLoader, ConsoleLoggerConfig, FileLoggerConfig, TomlConfigLoader}, | ||||
|         config::{ConsoleLoggerConfig, FileLoggerConfig, LoggingConfigLoader}, | ||||
|         constants::EASYTIER_VERSION, | ||||
|         error::Error, | ||||
|         network::{local_ipv4, local_ipv6}, | ||||
|     }, | ||||
|     tunnel::{ | ||||
|         tcp::TcpTunnelListener, udp::UdpTunnelListener, websocket::WSTunnelListener, TunnelListener, | ||||
|     }, | ||||
|     tunnel::{tcp::TcpTunnelListener, udp::UdpTunnelListener, TunnelListener}, | ||||
|     utils::{init_logger, setup_panic_handler}, | ||||
| }; | ||||
|  | ||||
| @@ -21,10 +24,13 @@ mod db; | ||||
| mod migrator; | ||||
| mod restful; | ||||
|  | ||||
| #[cfg(feature = "embed")] | ||||
| mod web; | ||||
|  | ||||
| rust_i18n::i18n!("locales", fallback = "en"); | ||||
|  | ||||
| #[derive(Parser, Debug)] | ||||
| #[command(name = "easytier-core", author, version = EASYTIER_VERSION , about, long_about = None)] | ||||
| #[command(name = "easytier-web", author, version = EASYTIER_VERSION , about, long_about = None)] | ||||
| struct Cli { | ||||
|     #[arg(short, long, default_value = "et.db", help = t!("cli.db").to_string())] | ||||
|     db: String, | ||||
| @@ -70,20 +76,83 @@ struct Cli { | ||||
|         help = t!("cli.api_server_port").to_string(), | ||||
|     )] | ||||
|     api_server_port: u16, | ||||
|  | ||||
|     #[cfg(feature = "embed")] | ||||
|     #[arg( | ||||
|         long, | ||||
|         short='l', | ||||
|         help = t!("cli.web_server_port").to_string(), | ||||
|     )] | ||||
|     web_server_port: Option<u16>, | ||||
|  | ||||
|     #[cfg(feature = "embed")] | ||||
|     #[arg( | ||||
|         long, | ||||
|         help = t!("cli.no_web").to_string(), | ||||
|         default_value = "false" | ||||
|     )] | ||||
|     no_web: bool, | ||||
|  | ||||
|     #[cfg(feature = "embed")] | ||||
|     #[arg( | ||||
|         long, | ||||
|         help = t!("cli.api_host").to_string() | ||||
|     )] | ||||
|     api_host: Option<url::Url>, | ||||
| } | ||||
|  | ||||
| pub fn get_listener_by_url( | ||||
|     l: &url::Url, | ||||
| ) -> Result<Box<dyn TunnelListener>, Error> { | ||||
| impl LoggingConfigLoader for &Cli { | ||||
|     fn get_console_logger_config(&self) -> ConsoleLoggerConfig { | ||||
|         ConsoleLoggerConfig { | ||||
|             level: self.console_log_level.clone(), | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     fn get_file_logger_config(&self) -> FileLoggerConfig { | ||||
|         FileLoggerConfig { | ||||
|             dir: self.file_log_dir.clone(), | ||||
|             level: self.file_log_level.clone(), | ||||
|             file: None, | ||||
|         } | ||||
|     } | ||||
| } | ||||
|  | ||||
| pub fn get_listener_by_url(l: &url::Url) -> Result<Box<dyn TunnelListener>, Error> { | ||||
|     Ok(match l.scheme() { | ||||
|         "tcp" => Box::new(TcpTunnelListener::new(l.clone())), | ||||
|         "udp" => Box::new(UdpTunnelListener::new(l.clone())), | ||||
|         "ws" => Box::new(WSTunnelListener::new(l.clone())), | ||||
|         _ => { | ||||
|             return Err(Error::InvalidUrl(l.to_string())); | ||||
|         } | ||||
|     }) | ||||
| } | ||||
|  | ||||
| async fn get_dual_stack_listener( | ||||
|     protocol: &str, | ||||
|     port: u16, | ||||
| ) -> Result< | ||||
|     ( | ||||
|         Option<Box<dyn TunnelListener>>, | ||||
|         Option<Box<dyn TunnelListener>>, | ||||
|     ), | ||||
|     Error, | ||||
| > { | ||||
|     let is_protocol_support_dual_stack = | ||||
|         protocol.trim().to_lowercase() == "tcp" || protocol.trim().to_lowercase() == "udp"; | ||||
|     let v6_listener = if is_protocol_support_dual_stack && local_ipv6().await.is_ok() { | ||||
|         get_listener_by_url(&format!("{}://[::0]:{}", protocol, port).parse().unwrap()).ok() | ||||
|     } else { | ||||
|         None | ||||
|     }; | ||||
|     let v4_listener = if let Ok(_) = local_ipv4().await { | ||||
|         get_listener_by_url(&format!("{}://0.0.0.0:{}", protocol, port).parse().unwrap()).ok() | ||||
|     } else { | ||||
|         None | ||||
|     }; | ||||
|     Ok((v6_listener, v4_listener)) | ||||
| } | ||||
|  | ||||
| #[tokio::main] | ||||
| async fn main() { | ||||
|     let locale = sys_locale::get_locale().unwrap_or_else(|| String::from("en-US")); | ||||
| @@ -91,35 +160,71 @@ async fn main() { | ||||
|     setup_panic_handler(); | ||||
|  | ||||
|     let cli = Cli::parse(); | ||||
|     let config = TomlConfigLoader::default(); | ||||
|     config.set_console_logger_config(ConsoleLoggerConfig { | ||||
|         level: cli.console_log_level, | ||||
|     }); | ||||
|     config.set_file_logger_config(FileLoggerConfig { | ||||
|         dir: cli.file_log_dir, | ||||
|         level: cli.file_log_level, | ||||
|         file: None, | ||||
|     }); | ||||
|     init_logger(config, false).unwrap(); | ||||
|     init_logger(&cli, false).unwrap(); | ||||
|  | ||||
|     // let db = db::Db::new(":memory:").await.unwrap(); | ||||
|     let db = db::Db::new(cli.db).await.unwrap(); | ||||
|  | ||||
|     let listener = get_listener_by_url( | ||||
|         &format!("{}://0.0.0.0:{}", cli.config_server_protocol, cli.config_server_port).parse().unwrap(), | ||||
|     ) | ||||
|     .unwrap(); | ||||
|     let mut mgr = client_manager::ClientManager::new(db.clone()); | ||||
|     mgr.serve(listener).await.unwrap(); | ||||
|     let (v6_listener, v4_listener) = | ||||
|         get_dual_stack_listener(&cli.config_server_protocol, cli.config_server_port) | ||||
|             .await | ||||
|             .unwrap(); | ||||
|     if v4_listener.is_none() && v6_listener.is_none() { | ||||
|         panic!("Listen to both IPv4 and IPv6 failed"); | ||||
|     } | ||||
|     if let Some(listener) = v6_listener { | ||||
|         mgr.add_listener(listener).await.unwrap(); | ||||
|     } | ||||
|     if let Some(listener) = v4_listener { | ||||
|         mgr.add_listener(listener).await.unwrap(); | ||||
|     } | ||||
|  | ||||
|     let mgr = Arc::new(mgr); | ||||
|  | ||||
|     let mut restful_server = restful::RestfulServer::new( | ||||
|     #[cfg(feature = "embed")] | ||||
|     let (web_router_restful, web_router_static) = if cli.no_web { | ||||
|         (None, None) | ||||
|     } else { | ||||
|         let web_router = web::build_router(cli.api_host.clone()); | ||||
|         if cli.web_server_port.is_none() || cli.web_server_port == Some(cli.api_server_port) { | ||||
|             (Some(web_router), None) | ||||
|         } else { | ||||
|             (None, Some(web_router)) | ||||
|         } | ||||
|     }; | ||||
|     #[cfg(not(feature = "embed"))] | ||||
|     let web_router_restful = None; | ||||
|  | ||||
|     let _restful_server_tasks = restful::RestfulServer::new( | ||||
|         format!("0.0.0.0:{}", cli.api_server_port).parse().unwrap(), | ||||
|         mgr.clone(), | ||||
|         db, | ||||
|         web_router_restful, | ||||
|     ) | ||||
|     .await | ||||
|     .unwrap() | ||||
|     .start() | ||||
|     .await | ||||
|     .unwrap(); | ||||
|     restful_server.start().await.unwrap(); | ||||
|  | ||||
|     #[cfg(feature = "embed")] | ||||
|     let _web_server_task = if let Some(web_router) = web_router_static { | ||||
|         Some( | ||||
|             web::WebServer::new( | ||||
|                 format!("0.0.0.0:{}", cli.web_server_port.unwrap_or(0)) | ||||
|                     .parse() | ||||
|                     .unwrap(), | ||||
|                 web_router, | ||||
|             ) | ||||
|             .await | ||||
|             .unwrap() | ||||
|             .start() | ||||
|             .await | ||||
|             .unwrap(), | ||||
|         ) | ||||
|     } else { | ||||
|         None | ||||
|     }; | ||||
|  | ||||
|     tokio::signal::ctrl_c().await.unwrap(); | ||||
| } | ||||
|   | ||||
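
get_listener_by_url() and get_dual_stack_listener() are the only entry points for binding the config-server ports. A small usage sketch of the URL-based helper, with placeholder addresses:

    fn demo_listeners() -> Result<(), Error> {
        // tcp and udp schemes map to the corresponding tunnel listeners
        let _tcp = get_listener_by_url(&"tcp://0.0.0.0:22020".parse().unwrap())?;
        let _udp = get_listener_by_url(&"udp://0.0.0.0:22020".parse().unwrap())?;
        // unsupported schemes are rejected with Error::InvalidUrl
        assert!(get_listener_by_url(&"foo://0.0.0.0:22020".parse().unwrap()).is_err());
        Ok(())
    }

Creating a listener this way does not bind the socket yet; ClientManager::add_listener() calls listen() before spawning the accept loop.
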
| @@ -9,9 +9,9 @@ use axum::http::StatusCode; | ||||
| use axum::routing::post; | ||||
| use axum::{extract::State, routing::get, Json, Router}; | ||||
| use axum_login::tower_sessions::{ExpiredDeletion, SessionManagerLayer}; | ||||
| use axum_login::{login_required, AuthManagerLayerBuilder, AuthzBackend}; | ||||
| use axum_login::{login_required, AuthManagerLayerBuilder, AuthUser, AuthzBackend}; | ||||
| use axum_messages::MessagesManagerLayer; | ||||
| use easytier::common::config::ConfigLoader; | ||||
| use easytier::common::config::{ConfigLoader, TomlConfigLoader}; | ||||
| use easytier::common::scoped_task::ScopedTask; | ||||
| use easytier::launcher::NetworkConfig; | ||||
| use easytier::proto::rpc_types; | ||||
| @@ -24,20 +24,26 @@ use tower_sessions::Expiry; | ||||
| use tower_sessions_sqlx_store::SqliteStore; | ||||
| use users::{AuthSession, Backend}; | ||||
|  | ||||
| use crate::client_manager::session::Session; | ||||
| use crate::client_manager::storage::StorageToken; | ||||
| use crate::client_manager::ClientManager; | ||||
| use crate::db::Db; | ||||
|  | ||||
| /// Embed assets for web dashboard, build frontend first | ||||
| #[cfg(feature = "embed")] | ||||
| #[derive(rust_embed::RustEmbed, Clone)] | ||||
| #[folder = "frontend/dist/"] | ||||
| struct Assets; | ||||
|  | ||||
| pub struct RestfulServer { | ||||
|     bind_addr: SocketAddr, | ||||
|     client_mgr: Arc<ClientManager>, | ||||
|     db: Db, | ||||
|  | ||||
|     serve_task: Option<ScopedTask<()>>, | ||||
|     delete_task: Option<ScopedTask<tower_sessions::session_store::Result<()>>>, | ||||
|  | ||||
|     // serve_task: Option<ScopedTask<()>>, | ||||
|     // delete_task: Option<ScopedTask<tower_sessions::session_store::Result<()>>>, | ||||
|     network_api: NetworkApi, | ||||
|  | ||||
|     web_router: Option<Router>, | ||||
| } | ||||
|  | ||||
| type AppStateInner = Arc<ClientManager>; | ||||
| @@ -62,6 +68,17 @@ struct GenerateConfigResponse { | ||||
|     toml_config: Option<String>, | ||||
| } | ||||
|  | ||||
| #[derive(Debug, serde::Deserialize, serde::Serialize)] | ||||
| struct ParseConfigRequest { | ||||
|     toml_config: String, | ||||
| } | ||||
|  | ||||
| #[derive(Debug, serde::Deserialize, serde::Serialize)] | ||||
| struct ParseConfigResponse { | ||||
|     error: Option<String>, | ||||
|     config: Option<NetworkConfig>, | ||||
| } | ||||
|  | ||||
| #[derive(Debug, serde::Deserialize, serde::Serialize)] | ||||
| pub struct Error { | ||||
|     message: String, | ||||
| @@ -87,6 +104,7 @@ impl RestfulServer { | ||||
|         bind_addr: SocketAddr, | ||||
|         client_mgr: Arc<ClientManager>, | ||||
|         db: Db, | ||||
|         web_router: Option<Router>, | ||||
|     ) -> anyhow::Result<Self> { | ||||
|         assert!(client_mgr.is_running()); | ||||
|  | ||||
| @@ -96,23 +114,13 @@ impl RestfulServer { | ||||
|             bind_addr, | ||||
|             client_mgr, | ||||
|             db, | ||||
|             serve_task: None, | ||||
|             delete_task: None, | ||||
|             // serve_task: None, | ||||
|             // delete_task: None, | ||||
|             network_api, | ||||
|             web_router, | ||||
|         }) | ||||
|     } | ||||
|  | ||||
|     async fn get_session_by_machine_id( | ||||
|         client_mgr: &ClientManager, | ||||
|         machine_id: &uuid::Uuid, | ||||
|     ) -> Result<Arc<Session>, HttpHandleError> { | ||||
|         let Some(result) = client_mgr.get_session_by_machine_id(machine_id) else { | ||||
|             return Err((StatusCode::NOT_FOUND, other_error("No such session").into())); | ||||
|         }; | ||||
|  | ||||
|         Ok(result) | ||||
|     } | ||||
|  | ||||
|     async fn handle_list_all_sessions( | ||||
|         auth_session: AuthSession, | ||||
|         State(client_mgr): AppState, | ||||
| @@ -135,9 +143,7 @@ impl RestfulServer { | ||||
|             return Err((StatusCode::UNAUTHORIZED, other_error("No such user").into())); | ||||
|         }; | ||||
|  | ||||
|         let machines = client_mgr | ||||
|             .list_machine_by_token(user.tokens[0].clone()) | ||||
|             .await; | ||||
|         let machines = client_mgr.list_machine_by_user_id(user.id().clone()).await; | ||||
|  | ||||
|         Ok(GetSummaryJsonResp { | ||||
|             device_count: machines.len() as u32, | ||||
| @@ -163,7 +169,34 @@ impl RestfulServer { | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     pub async fn start(&mut self) -> Result<(), anyhow::Error> { | ||||
|     async fn handle_parse_config( | ||||
|         Json(req): Json<ParseConfigRequest>, | ||||
|     ) -> Result<Json<ParseConfigResponse>, HttpHandleError> { | ||||
|         let config = TomlConfigLoader::new_from_str(&req.toml_config) | ||||
|             .and_then(|config| NetworkConfig::new_from_config(&config)); | ||||
|         match config { | ||||
|             Ok(c) => Ok(ParseConfigResponse { | ||||
|                 error: None, | ||||
|                 config: Some(c), | ||||
|             } | ||||
|             .into()), | ||||
|             Err(e) => Ok(ParseConfigResponse { | ||||
|                 error: Some(format!("{:?}", e)), | ||||
|                 config: None, | ||||
|             } | ||||
|             .into()), | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     pub async fn start( | ||||
|         mut self, | ||||
|     ) -> Result< | ||||
|         ( | ||||
|             ScopedTask<()>, | ||||
|             ScopedTask<tower_sessions::session_store::Result<()>>, | ||||
|         ), | ||||
|         anyhow::Error, | ||||
|     > { | ||||
|         let listener = TcpListener::bind(self.bind_addr).await?; | ||||
|  | ||||
|         // Session layer. | ||||
| @@ -173,14 +206,13 @@ impl RestfulServer { | ||||
|         let session_store = SqliteStore::new(self.db.inner()); | ||||
|         session_store.migrate().await?; | ||||
|  | ||||
|         self.delete_task.replace( | ||||
|         let delete_task: ScopedTask<tower_sessions::session_store::Result<()>> = | ||||
|             tokio::task::spawn( | ||||
|                 session_store | ||||
|                     .clone() | ||||
|                     .continuously_delete_expired(tokio::time::Duration::from_secs(60)), | ||||
|             ) | ||||
|             .into(), | ||||
|         ); | ||||
|             .into(); | ||||
|  | ||||
|         // Generate a cryptographic key to sign the session cookie. | ||||
|         let key = Key::generate(); | ||||
| @@ -214,16 +246,24 @@ impl RestfulServer { | ||||
|                 "/api/v1/generate-config", | ||||
|                 post(Self::handle_generate_config), | ||||
|             ) | ||||
|             .route("/api/v1/parse-config", post(Self::handle_parse_config)) | ||||
|             .layer(MessagesManagerLayer) | ||||
|             .layer(auth_layer) | ||||
|             .layer(tower_http::cors::CorsLayer::very_permissive()) | ||||
|             .layer(compression_layer); | ||||
|  | ||||
|         let task = tokio::spawn(async move { | ||||
|             axum::serve(listener, app).await.unwrap(); | ||||
|         }); | ||||
|         self.serve_task = Some(task.into()); | ||||
|         #[cfg(feature = "embed")] | ||||
|         let app = if let Some(web_router) = self.web_router.take() { | ||||
|             app.merge(web_router) | ||||
|         } else { | ||||
|             app | ||||
|         }; | ||||
|  | ||||
|         Ok(()) | ||||
|         let serve_task: ScopedTask<()> = tokio::spawn(async move { | ||||
|             axum::serve(listener, app).await.unwrap(); | ||||
|         }) | ||||
|         .into(); | ||||
|  | ||||
|         Ok((serve_task, delete_task)) | ||||
|     } | ||||
| } | ||||
|   | ||||
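
The new /api/v1/parse-config route is the counterpart of /api/v1/generate-config: it turns a TOML string back into a NetworkConfig. A hedged sketch of calling it from an external client; reqwest (with its json feature) and serde_json are assumed on the caller's side and are not dependencies of this crate, 11211 matches the API port used by the frontend dev proxy, and depending on the auth layering a session cookie may also be required:

    async fn parse_config(toml_config: &str) -> anyhow::Result<()> {
        let resp: serde_json::Value = reqwest::Client::new()
            .post("http://127.0.0.1:11211/api/v1/parse-config")
            .json(&serde_json::json!({ "toml_config": toml_config }))
            .send()
            .await?
            .json()
            .await?;
        // on success `error` is null and `config` carries the parsed NetworkConfig
        println!("error={:?} config={:?}", resp["error"], resp["config"]);
        Ok(())
    }
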
| @@ -5,7 +5,6 @@ use axum::http::StatusCode; | ||||
| use axum::routing::{delete, post}; | ||||
| use axum::{extract::State, routing::get, Json, Router}; | ||||
| use axum_login::AuthUser; | ||||
| use dashmap::DashSet; | ||||
| use easytier::launcher::NetworkConfig; | ||||
| use easytier::proto::common::Void; | ||||
| use easytier::proto::rpc_types::controller::BaseController; | ||||
| @@ -13,7 +12,7 @@ use easytier::proto::web::*; | ||||
|  | ||||
| use crate::client_manager::session::Session; | ||||
| use crate::client_manager::ClientManager; | ||||
| use crate::db::ListNetworkProps; | ||||
| use crate::db::{ListNetworkProps, UserIdInDb}; | ||||
|  | ||||
| use super::users::AuthSession; | ||||
| use super::{ | ||||
| @@ -81,12 +80,24 @@ impl NetworkApi { | ||||
|         Self {} | ||||
|     } | ||||
|  | ||||
|     fn get_user_id(auth_session: &AuthSession) -> Result<UserIdInDb, (StatusCode, Json<Error>)> { | ||||
|         let Some(user_id) = auth_session.user.as_ref().map(|x| x.id()) else { | ||||
|             return Err(( | ||||
|                 StatusCode::UNAUTHORIZED, | ||||
|                 other_error(format!("No user id found")).into(), | ||||
|             )); | ||||
|         }; | ||||
|         Ok(user_id) | ||||
|     } | ||||
|  | ||||
|     async fn get_session_by_machine_id( | ||||
|         auth_session: &AuthSession, | ||||
|         client_mgr: &ClientManager, | ||||
|         machine_id: &uuid::Uuid, | ||||
|     ) -> Result<Arc<Session>, HttpHandleError> { | ||||
|         let Some(result) = client_mgr.get_session_by_machine_id(machine_id) else { | ||||
|         let user_id = Self::get_user_id(auth_session)?; | ||||
|  | ||||
|         let Some(result) = client_mgr.get_session_by_machine_id(user_id, machine_id) else { | ||||
|             return Err(( | ||||
|                 StatusCode::NOT_FOUND, | ||||
|                 other_error(format!("No such session: {}", machine_id)).into(), | ||||
| @@ -289,23 +300,13 @@ impl NetworkApi { | ||||
|         auth_session: AuthSession, | ||||
|         State(client_mgr): AppState, | ||||
|     ) -> Result<Json<ListMachineJsonResp>, HttpHandleError> { | ||||
|         let tokens = auth_session | ||||
|             .user | ||||
|             .as_ref() | ||||
|             .map(|x| x.tokens.clone()) | ||||
|             .unwrap_or_default(); | ||||
|         let user_id = Self::get_user_id(&auth_session)?; | ||||
|  | ||||
|         let client_urls = DashSet::new(); | ||||
|         for token in tokens { | ||||
|             let urls = client_mgr.list_machine_by_token(token).await; | ||||
|             for url in urls { | ||||
|                 client_urls.insert(url); | ||||
|             } | ||||
|         } | ||||
|         let client_urls = client_mgr.list_machine_by_user_id(user_id).await; | ||||
|  | ||||
|         let mut machines = vec![]; | ||||
|         for item in client_urls.iter() { | ||||
|             let client_url = item.key().clone(); | ||||
|             let client_url = item.clone(); | ||||
|             let session = client_mgr.get_heartbeat_requests(&client_url).await; | ||||
|             machines.push(ListMachineItem { | ||||
|                 client_url: Some(client_url), | ||||
|   | ||||
							
								
								
									
easytier-web/src/web/mod.rs (new file, 86 lines)
							| @@ -0,0 +1,86 @@ | ||||
| use axum::{ | ||||
|     extract::State, | ||||
|     http::header, | ||||
|     response::{IntoResponse, Response}, | ||||
|     routing, Router, | ||||
| }; | ||||
| use axum_embed::ServeEmbed; | ||||
| use easytier::common::scoped_task::ScopedTask; | ||||
| use rust_embed::RustEmbed; | ||||
| use std::net::SocketAddr; | ||||
| use tokio::net::TcpListener; | ||||
|  | ||||
| /// Embed assets for web dashboard, build frontend first | ||||
| #[derive(RustEmbed, Clone)] | ||||
| #[folder = "frontend/dist/"] | ||||
| struct Assets; | ||||
|  | ||||
| #[derive(Debug, serde::Deserialize, serde::Serialize)] | ||||
| struct ApiMetaResponse { | ||||
|     api_host: String, | ||||
| } | ||||
|  | ||||
| async fn handle_api_meta(State(api_host): State<url::Url>) -> impl IntoResponse { | ||||
|     Response::builder() | ||||
|         .header( | ||||
|             header::CONTENT_TYPE, | ||||
|             "application/javascript; charset=utf-8", | ||||
|         ) | ||||
|         .header(header::CACHE_CONTROL, "no-cache, no-store, must-revalidate") | ||||
|         .header(header::PRAGMA, "no-cache") | ||||
|         .header(header::EXPIRES, "0") | ||||
|         .body(format!( | ||||
|             "window.apiMeta = {}", | ||||
|             serde_json::to_string(&ApiMetaResponse { | ||||
|                 api_host: api_host.to_string() | ||||
|             }) | ||||
|             .unwrap(), | ||||
|         )) | ||||
|         .unwrap() | ||||
| } | ||||
|  | ||||
| pub fn build_router(api_host: Option<url::Url>) -> Router { | ||||
|     let service = ServeEmbed::<Assets>::new(); | ||||
|     let router = Router::new(); | ||||
|  | ||||
|     let router = if let Some(api_host) = api_host { | ||||
|         let sub_router = Router::new() | ||||
|             .route("/api_meta.js", routing::get(handle_api_meta)) | ||||
|             .with_state(api_host); | ||||
|         router.merge(sub_router) | ||||
|     } else { | ||||
|         router | ||||
|     }; | ||||
|  | ||||
|     let router = router.fallback_service(service); | ||||
|  | ||||
|     router | ||||
| } | ||||
|  | ||||
| pub struct WebServer { | ||||
|     bind_addr: SocketAddr, | ||||
|     router: Router, | ||||
|     serve_task: Option<ScopedTask<()>>, | ||||
| } | ||||
|  | ||||
| impl WebServer { | ||||
|     pub async fn new(bind_addr: SocketAddr, router: Router) -> anyhow::Result<Self> { | ||||
|         Ok(WebServer { | ||||
|             bind_addr, | ||||
|             router, | ||||
|             serve_task: None, | ||||
|         }) | ||||
|     } | ||||
|  | ||||
|     pub async fn start(self) -> Result<ScopedTask<()>, anyhow::Error> { | ||||
|         let listener = TcpListener::bind(self.bind_addr).await?; | ||||
|         let app = self.router; | ||||
|  | ||||
|         let task = tokio::spawn(async move { | ||||
|             axum::serve(listener, app).await.unwrap(); | ||||
|         }) | ||||
|         .into(); | ||||
|  | ||||
|         Ok(task) | ||||
|     } | ||||
| } | ||||
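
main.rs only starts this WebServer when web_server_port is set and differs from api_server_port; otherwise the router is merged into the restful server. A minimal standalone sketch, with placeholder addresses (11211 for the API, 11212 for the dashboard):

    async fn serve_dashboard() -> anyhow::Result<()> {
        let api_host: url::Url = "http://127.0.0.1:11211/".parse()?;
        // Some(api_host) makes /api_meta.js point the frontend at the restful API
        let router = build_router(Some(api_host));
        let task = WebServer::new("0.0.0.0:11212".parse()?, router)
            .await?
            .start()
            .await?;
        let _keep = task; // hold the ScopedTask for as long as the dashboard should run
        tokio::signal::ctrl_c().await?;
        Ok(())
    }
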
| @@ -3,7 +3,7 @@ name = "easytier" | ||||
| description = "A full meshed p2p VPN, connecting all your devices in one network with one command." | ||||
| homepage = "https://github.com/EasyTier/EasyTier" | ||||
| repository = "https://github.com/EasyTier/EasyTier" | ||||
| version = "2.2.4" | ||||
| version = "2.3.2" | ||||
| edition = "2021" | ||||
| authors = ["kkrainbow"] | ||||
| keywords = ["vpn", "p2p", "network", "easytier"] | ||||
| @@ -64,7 +64,8 @@ bytes = "1.5.0" | ||||
| pin-project-lite = "0.2.13" | ||||
| tachyonix = "0.3.0" | ||||
|  | ||||
| quinn = { version = "0.11.0", optional = true, features = ["ring"] } | ||||
| quinn = { version = "0.11.8", optional = true, features = ["ring"] } | ||||
|  | ||||
| rustls = { version = "0.23.0", features = [ | ||||
|     "ring", | ||||
| ], default-features = false, optional = true } | ||||
| @@ -129,6 +130,7 @@ clap = { version = "4.5.30", features = [ | ||||
|     "unicode", | ||||
|     "derive", | ||||
|     "wrap_help", | ||||
|     "env", | ||||
| ] } | ||||
|  | ||||
| async-recursion = "1.0.5" | ||||
| @@ -136,7 +138,8 @@ async-recursion = "1.0.5" | ||||
| network-interface = "2.0" | ||||
|  | ||||
| # for ospf route | ||||
| petgraph = "0.7.1" | ||||
| petgraph = "0.8.1" | ||||
| hashbrown = "0.15.3" | ||||
|  | ||||
| # for wireguard | ||||
| boringtun = { package = "boringtun-easytier", version = "0.6.1", optional = true } | ||||
| @@ -152,7 +155,7 @@ humansize = "2.1.3" | ||||
|  | ||||
| base64 = "0.22" | ||||
|  | ||||
| mimalloc-rust = { version = "0.2.1", optional = true } | ||||
| mimalloc = { version = "*", optional = true } | ||||
|  | ||||
| # mips | ||||
| atomic-shim = "0.2.0" | ||||
| @@ -162,8 +165,14 @@ smoltcp = { version = "0.12.0", optional = true, default-features = false, featu | ||||
|     "medium-ip", | ||||
|     "proto-ipv4", | ||||
|     "proto-ipv6", | ||||
|     "proto-ipv4-fragmentation", | ||||
|     "fragmentation-buffer-size-8192", | ||||
|     "assembler-max-segment-count-16", | ||||
|     "reassembly-buffer-size-8192", | ||||
|     "reassembly-buffer-count-16", | ||||
|     "socket-tcp", | ||||
|     "socket-tcp-cubic", | ||||
|     "socket-udp", | ||||
|     # "socket-tcp-cubic", | ||||
|     "async", | ||||
| ] } | ||||
| parking_lot = { version = "0.12.0", optional = true } | ||||
| @@ -176,9 +185,9 @@ sys-locale = "0.3" | ||||
| ringbuf = "0.4.5" | ||||
| async-ringbuf = "0.3.1" | ||||
|  | ||||
| service-manager = {git = "https://github.com/chipsenkbeil/service-manager-rs.git", branch = "main"} | ||||
| service-manager = { git = "https://github.com/chipsenkbeil/service-manager-rs.git", branch = "main" } | ||||
|  | ||||
| async-compression = { version = "0.4.17", default-features = false, features = ["zstd", "tokio"] } | ||||
| zstd = { version = "0.13" } | ||||
|  | ||||
| kcp-sys = { git = "https://github.com/EasyTier/kcp-sys" } | ||||
|  | ||||
| @@ -187,12 +196,29 @@ prost-reflect = { version = "0.14.5", default-features = false, features = [ | ||||
| ] } | ||||
|  | ||||
| # for http connector | ||||
| http_req = { git = "https://github.com/EasyTier/http_req.git", default-features = false, features = ["rust-tls"] } | ||||
| http_req = { git = "https://github.com/EasyTier/http_req.git", default-features = false, features = [ | ||||
|     "rust-tls", | ||||
| ] } | ||||
|  | ||||
| # for dns connector | ||||
| hickory-resolver = "0.24.4" | ||||
| hickory-resolver = "0.25.2" | ||||
| hickory-proto = "0.25.2" | ||||
|  | ||||
| bounded_join_set = "0.3.0" | ||||
| # for magic dns | ||||
| hickory-client = "0.25.2" | ||||
| hickory-server = { version = "0.25.2", features = ["resolver"] } | ||||
| derive_builder = "0.20.2" | ||||
| humantime-serde = "1.1.1" | ||||
| multimap = "0.10.0" | ||||
| version-compare = "0.2.0" | ||||
|  | ||||
| jemallocator = { version = "0.5.4", optional = true } | ||||
| jemalloc-ctl = { version = "0.5.4", optional = true } | ||||
| jemalloc-sys = { version = "0.5.4", features = [ | ||||
|     "stats", | ||||
|     "profiling", | ||||
|     "unprefixed_malloc_on_supported_platforms", | ||||
| ], optional = true } | ||||
|  | ||||
| [target.'cfg(any(target_os = "linux", target_os = "macos", target_os = "windows", target_os = "freebsd"))'.dependencies] | ||||
| machine-uid = "0.5.3" | ||||
| @@ -202,6 +228,10 @@ netlink-sys = "0.8.7" | ||||
| netlink-packet-route = "0.21.0" | ||||
| netlink-packet-core = { version = "0.7.0" } | ||||
| netlink-packet-utils = "0.5.2" | ||||
| # for magic dns | ||||
| resolv-conf = "0.7.3" | ||||
| dbus = { version = "0.9.7", features = ["vendored"] } | ||||
| which = "7.0.3" | ||||
|  | ||||
| [target.'cfg(windows)'.dependencies] | ||||
| windows = { version = "0.52.0", features = [ | ||||
| @@ -212,7 +242,7 @@ windows = { version = "0.52.0", features = [ | ||||
|     "Win32_System_Ole", | ||||
|     "Win32_Networking_WinSock", | ||||
|     "Win32_System_IO", | ||||
| ]} | ||||
| ] } | ||||
| encoding = "0.2" | ||||
| winreg = "0.52" | ||||
| windows-service = "0.7.0" | ||||
| @@ -222,18 +252,28 @@ tonic-build = "0.12" | ||||
| globwalk = "0.8.1" | ||||
| regex = "1" | ||||
| prost-build = "0.13.2" | ||||
| rpc_build = { package = "easytier-rpc-build", version = "0.1.0", features = ["internal-namespace"] } | ||||
| rpc_build = { package = "easytier-rpc-build", version = "0.1.0", features = [ | ||||
|     "internal-namespace", | ||||
| ] } | ||||
| prost-reflect-build = { version = "0.14.0" } | ||||
|  | ||||
| [target.'cfg(windows)'.build-dependencies] | ||||
| reqwest = { version = "0.12.12", features = ["blocking"] } | ||||
| zip = "0.6.6" | ||||
| zip = "4.0.0" | ||||
|  | ||||
| # enable thunk-rs when compiling for x86_64 or i686 windows | ||||
| [target.x86_64-pc-windows-msvc.build-dependencies] | ||||
| thunk-rs = { git = "https://github.com/easytier/thunk.git", default-features = false, features = ["win7"] } | ||||
|  | ||||
| [target.i686-pc-windows-msvc.build-dependencies] | ||||
| thunk-rs = { git = "https://github.com/easytier/thunk.git", default-features = false, features = ["win7"] } | ||||
|  | ||||
|  | ||||
| [dev-dependencies] | ||||
| serial_test = "3.0.0" | ||||
| rstest = "0.18.2" | ||||
| rstest = "0.25.0" | ||||
| futures-util = "0.3.30" | ||||
| maplit = "1.0.2" | ||||
|  | ||||
| [target.'cfg(target_os = "linux")'.dev-dependencies] | ||||
| defguard_wireguard_rs = "0.4.2" | ||||
| @@ -241,9 +281,8 @@ tokio-socks = "0.5.2" | ||||
|  | ||||
|  | ||||
| [features] | ||||
| default = ["wireguard", "mimalloc", "websocket", "smoltcp", "tun", "socks5"] | ||||
| default = ["wireguard", "mimalloc", "websocket", "smoltcp", "tun", "socks5", "quic"] | ||||
| full = [ | ||||
|     "quic", | ||||
|     "websocket", | ||||
|     "wireguard", | ||||
|     "mimalloc", | ||||
| @@ -252,10 +291,9 @@ full = [ | ||||
|     "tun", | ||||
|     "socks5", | ||||
| ] | ||||
| mips = ["aes-gcm", "mimalloc", "wireguard", "tun", "smoltcp", "socks5"] | ||||
| wireguard = ["dep:boringtun", "dep:ring"] | ||||
| quic = ["dep:quinn", "dep:rustls", "dep:rcgen"] | ||||
| mimalloc = ["dep:mimalloc-rust"] | ||||
| mimalloc = ["dep:mimalloc"] | ||||
| aes-gcm = ["dep:aes-gcm"] | ||||
| tun = ["dep:tun"] | ||||
| websocket = [ | ||||
| @@ -267,3 +305,4 @@ websocket = [ | ||||
| ] | ||||
| smoltcp = ["dep:smoltcp", "dep:parking_lot"] | ||||
| socks5 = ["dep:smoltcp"] | ||||
| jemalloc = ["dep:jemallocator", "dep:jemalloc-ctl", "dep:jemalloc-sys"] | ||||
|   | ||||
| @@ -71,6 +71,8 @@ impl WindowsBuild { | ||||
|  | ||||
|         if target.contains("x86_64") { | ||||
|             println!("cargo:rustc-link-search=native=easytier/third_party/"); | ||||
|         } else if target.contains("i686") { | ||||
|             println!("cargo:rustc-link-search=native=easytier/third_party/i686/"); | ||||
|         } else if target.contains("aarch64") { | ||||
|             println!("cargo:rustc-link-search=native=easytier/third_party/arm64/"); | ||||
|         } | ||||
| @@ -125,6 +127,15 @@ fn check_locale() { | ||||
| } | ||||
|  | ||||
| fn main() -> Result<(), Box<dyn std::error::Error>> { | ||||
|     // enable thunk-rs when target os is windows and arch is x86_64 or i686 | ||||
|     #[cfg(target_os = "windows")] | ||||
|     if !std::env::var("TARGET") | ||||
|         .unwrap_or_default() | ||||
|         .contains("aarch64") | ||||
|     { | ||||
|         thunk::thunk(); | ||||
|     } | ||||
|  | ||||
|     #[cfg(target_os = "windows")] | ||||
|     WindowsBuild::check_for_win(); | ||||
|  | ||||
| @@ -135,6 +146,7 @@ fn main() -> Result<(), Box<dyn std::error::Error>> { | ||||
|         "src/proto/tests.proto", | ||||
|         "src/proto/cli.proto", | ||||
|         "src/proto/web.proto", | ||||
|         "src/proto/magic_dns.proto", | ||||
|     ]; | ||||
|  | ||||
|     for proto_file in proto_files.iter().chain(proto_files_reflect.iter()) { | ||||
|   | ||||
| @@ -10,9 +10,14 @@ core_clap: | ||||
|       配置服务器地址。允许格式: | ||||
|       完整URL:--config-server udp://127.0.0.1:22020/admin | ||||
|       仅用户名:--config-server admin,将使用官方的服务器 | ||||
|   machine_id: | ||||
|     en: |+ | ||||
|       the machine id to identify this machine, used for config recovery after disconnection, must be unique and fixed. default is from system. | ||||
|     zh-CN: |+ | ||||
|       Web 配置服务器通过 machine id 来识别机器,用于断线重连后的配置恢复,需要保证唯一且固定不变。默认从系统获得。 | ||||
|   config_file: | ||||
|     en: "path to the config file, NOTE: if this is set, all other options will be ignored" | ||||
|     zh-CN: "配置文件路径,注意:如果设置了这个选项,其他所有选项都将被忽略" | ||||
|     en: "path to the config file, NOTE: the options set by cmdline args will override options in config file" | ||||
|     zh-CN: "配置文件路径,注意:命令行中的配置的选项会覆盖配置文件中的选项" | ||||
|   network_name: | ||||
|     en: "network name to identify this vpn network" | ||||
|     zh-CN: "用于标识此VPN网络的网络名称" | ||||
| @@ -32,11 +37,20 @@ core_clap: | ||||
|     en: "use a public shared node to discover peers" | ||||
|     zh-CN: "使用公共共享节点来发现对等节点" | ||||
|   proxy_networks: | ||||
|     en: "export local networks to other peers in the vpn" | ||||
|     zh-CN: "将本地网络导出到VPN中的其他对等节点" | ||||
|     en: |+ | ||||
|       export local networks to other peers in the vpn,  e.g.: 10.0.0.0/24. | ||||
|       also support mapping proxy network to other cidr, e.g.: 10.0.0.0/24->192.168.0.0/24 | ||||
|       other peers can access 10.0.0.1 with ip 192.168.0.1 | ||||
|     zh-CN: |+ | ||||
|       将本地网络导出到VPN中的其他对等节点,例如:10.0.0.0/24。 | ||||
|       还支持将代理网络映射到其他CIDR,例如:10.0.0.0/24->192.168.0.0/24 | ||||
|       其他对等节点可以通过 IP 192.168.0.1 来访问 10.0.0.1 | ||||
|   rpc_portal: | ||||
|     en: "rpc portal address to listen for management. 0 means random port, 12345 means listen on 12345 of localhost, 0.0.0.0:12345 means listen on 12345 of all interfaces. default is 0 and will try 15888 first" | ||||
|     zh-CN: "用于管理的RPC门户地址。0表示随机端口,12345表示在localhost的12345上监听,0.0.0.0:12345表示在所有接口的12345上监听。默认是0,首先尝试15888" | ||||
|   rpc_portal_whitelist: | ||||
|     en: "rpc portal whitelist, only allow these addresses to access rpc portal, e.g.: 127.0.0.1,127.0.0.0/8,::1/128" | ||||
|     zh-CN: "RPC门户白名单,仅允许这些地址访问RPC门户,例如:127.0.0.1/32,127.0.0.0/8,::1/128" | ||||
|   listeners: | ||||
|     en: |+ | ||||
|         listeners to accept connections, allow format: | ||||
| @@ -149,6 +163,21 @@ core_clap: | ||||
|   disable_kcp_input: | ||||
|     en: "do not allow other nodes to use kcp to proxy tcp streams to this node. when a node with kcp proxy enabled accesses this node, the original tcp connection is preserved." | ||||
|     zh-CN: "不允许其他节点使用 KCP 代理 TCP 流到此节点。开启 KCP 代理的节点访问此节点时,依然使用原始 TCP 连接。" | ||||
|   enable_quic_proxy: | ||||
|     en: "proxy tcp streams with QUIC, improving the latency and throughput on the network with udp packet loss." | ||||
|     zh-CN: "使用 QUIC 代理 TCP 流,提高在 UDP 丢包网络上的延迟和吞吐量。" | ||||
|   disable_quic_input: | ||||
|     en: "do not allow other nodes to use QUIC to proxy tcp streams to this node. when a node with QUIC proxy enabled accesses this node, the original tcp connection is preserved." | ||||
|     zh-CN: "不允许其他节点使用 QUIC 代理 TCP 流到此节点。开启 QUIC 代理的节点访问此节点时,依然使用原始 TCP 连接。" | ||||
|   port_forward: | ||||
|     en: "forward local port to remote port in virtual network. e.g.: udp://0.0.0.0:12345/10.126.126.1:23456, means forward local udp port 12345 to 10.126.126.1:23456 in the virtual network. can specify multiple." | ||||
|     zh-CN: "将本地端口转发到虚拟网络中的远程端口。例如:udp://0.0.0.0:12345/10.126.126.1:23456,表示将本地UDP端口12345转发到虚拟网络中的10.126.126.1:23456。可以指定多个。" | ||||
|   accept_dns: | ||||
|     en: "if true, enable magic dns. with magic dns, you can access other nodes with a domain name, e.g.: <hostname>.et.net. magic dns will modify your system dns settings, enable it carefully." | ||||
|     zh-CN: "如果为true,则启用魔法DNS。使用魔法DNS,您可以使用域名访问其他节点,例如:<hostname>.et.net。魔法DNS将修改您的系统DNS设置,请谨慎启用。" | ||||
|   private_mode: | ||||
|     en: "if true, nodes with different network names or passwords from this network are not allowed to perform handshake or relay through this node." | ||||
|     zh-CN: "如果为true,则不允许使用了与本网络不相同的网络名称和密码的节点通过本节点进行握手或中转" | ||||
|  | ||||
| core_app: | ||||
|   panic_backtrace_save: | ||||
|   | ||||
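The proxy_networks help text above introduces an optional "->" mapping suffix (e.g. 10.0.0.0/24->192.168.0.0/24). Below is a stand-alone sketch of how such an argument could be split into the exported CIDR and its optional mapped CIDR; the function name and parsing details are illustrative and are not EasyTier's actual CLI parser:

// Illustrative only: split "A/len" or "A/len->B/len" into (cidr, mapped_cidr).
fn split_proxy_network(arg: &str) -> (String, Option<String>) {
    match arg.split_once("->") {
        Some((cidr, mapped)) => (cidr.trim().to_string(), Some(mapped.trim().to_string())),
        None => (arg.trim().to_string(), None),
    }
}

fn main() {
    assert_eq!(
        split_proxy_network("10.0.0.0/24->192.168.0.0/24"),
        ("10.0.0.0/24".to_string(), Some("192.168.0.0/24".to_string()))
    );
    assert_eq!(
        split_proxy_network("10.0.0.0/24"),
        ("10.0.0.0/24".to_string(), None)
    );
}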
| @@ -1,5 +1,7 @@ | ||||
| use async_compression::tokio::write::{ZstdDecoder, ZstdEncoder}; | ||||
| use tokio::io::AsyncWriteExt; | ||||
| use anyhow::Context; | ||||
| use dashmap::DashMap; | ||||
| use std::cell::RefCell; | ||||
| use zstd::bulk; | ||||
|  | ||||
| use zerocopy::{AsBytes as _, FromBytes as _}; | ||||
|  | ||||
| @@ -29,17 +31,19 @@ impl DefaultCompressor { | ||||
|         data: &[u8], | ||||
|         compress_algo: CompressorAlgo, | ||||
|     ) -> Result<Vec<u8>, Error> { | ||||
|         let buf = match compress_algo { | ||||
|             CompressorAlgo::ZstdDefault => { | ||||
|                 let mut o = ZstdEncoder::new(Vec::new()); | ||||
|                 o.write_all(data).await?; | ||||
|                 o.shutdown().await?; | ||||
|                 o.into_inner() | ||||
|             } | ||||
|             CompressorAlgo::None => data.to_vec(), | ||||
|         }; | ||||
|  | ||||
|         Ok(buf) | ||||
|         match compress_algo { | ||||
|             CompressorAlgo::ZstdDefault => CTX_MAP.with(|map_cell| { | ||||
|                 let map = map_cell.borrow(); | ||||
|                 let mut ctx_entry = map.entry(compress_algo).or_default(); | ||||
|                 ctx_entry.compress(data).with_context(|| { | ||||
|                     format!( | ||||
|                         "Failed to compress data with algorithm: {:?}", | ||||
|                         compress_algo | ||||
|                     ) | ||||
|                 }) | ||||
|             }), | ||||
|             CompressorAlgo::None => Ok(data.to_vec()), | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     pub async fn decompress_raw( | ||||
| @@ -47,17 +51,30 @@ impl DefaultCompressor { | ||||
|         data: &[u8], | ||||
|         compress_algo: CompressorAlgo, | ||||
|     ) -> Result<Vec<u8>, Error> { | ||||
|         let buf = match compress_algo { | ||||
|             CompressorAlgo::ZstdDefault => { | ||||
|                 let mut o = ZstdDecoder::new(Vec::new()); | ||||
|                 o.write_all(data).await?; | ||||
|                 o.shutdown().await?; | ||||
|                 o.into_inner() | ||||
|             } | ||||
|             CompressorAlgo::None => data.to_vec(), | ||||
|         }; | ||||
|  | ||||
|         Ok(buf) | ||||
|         match compress_algo { | ||||
|             CompressorAlgo::ZstdDefault => DCTX_MAP.with(|map_cell| { | ||||
|                 let map = map_cell.borrow(); | ||||
|                 let mut ctx_entry = map.entry(compress_algo).or_default(); | ||||
|                 for i in 1..=5 { | ||||
|                     let mut len = data.len() * 2usize.pow(i); | ||||
|                     if i == 5 && len < 64 * 1024 { | ||||
|                         len = 64 * 1024; // Ensure a minimum buffer size | ||||
|                     } | ||||
|                     match ctx_entry.decompress(data, len) { | ||||
|                         Ok(buf) => return Ok(buf), | ||||
|                         Err(e) if e.to_string().contains("buffer is too small") => { | ||||
|                             continue; // Try with a larger buffer | ||||
|                         } | ||||
|                         Err(e) => return Err(e.into()), | ||||
|                     } | ||||
|                 } | ||||
|                 Err(anyhow::anyhow!( | ||||
|                     "Failed to decompress data after multiple attempts with algorithm: {:?}", | ||||
|                     compress_algo | ||||
|                 )) | ||||
|             }), | ||||
|             CompressorAlgo::None => Ok(data.to_vec()), | ||||
|         } | ||||
|     } | ||||
| } | ||||
|  | ||||
| @@ -146,6 +163,11 @@ impl Compressor for DefaultCompressor { | ||||
|     } | ||||
| } | ||||
|  | ||||
| thread_local! { | ||||
|     static CTX_MAP: RefCell<DashMap<CompressorAlgo, bulk::Compressor<'static>>> = RefCell::new(DashMap::new()); | ||||
|     static DCTX_MAP: RefCell<DashMap<CompressorAlgo, bulk::Decompressor<'static>>> = RefCell::new(DashMap::new()); | ||||
| } | ||||
|  | ||||
| #[cfg(test)] | ||||
| pub mod tests { | ||||
|     use super::*; | ||||
| @@ -158,10 +180,21 @@ pub mod tests { | ||||
|  | ||||
|         let compressor = DefaultCompressor {}; | ||||
|  | ||||
|         println!( | ||||
|             "Uncompressed packet: {:?}, len: {}", | ||||
|             packet, | ||||
|             packet.payload_len() | ||||
|         ); | ||||
|  | ||||
|         compressor | ||||
|             .compress(&mut packet, CompressorAlgo::ZstdDefault) | ||||
|             .await | ||||
|             .unwrap(); | ||||
|         println!( | ||||
|             "Compressed packet: {:?}, len: {}", | ||||
|             packet, | ||||
|             packet.payload_len() | ||||
|         ); | ||||
|         assert_eq!(packet.peer_manager_header().unwrap().is_compressed(), true); | ||||
|  | ||||
|         compressor.decompress(&mut packet).await.unwrap(); | ||||
|   | ||||
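The new bulk decompression path retries with geometrically growing output buffers (2x, 4x, ... 32x the compressed size, with the final attempt at least 64 KiB). A stand-alone sketch of the same retry strategy against the zstd bulk API follows; it requires the `zstd` crate, and matching on the error message is an assumption that may vary between zstd versions:

use std::io;

// Same "retry with a larger output buffer" idea, written as a free function.
fn decompress_with_retries(data: &[u8]) -> io::Result<Vec<u8>> {
    let mut dctx = zstd::bulk::Decompressor::new()?;
    for i in 1..=5u32 {
        let mut capacity = data.len() * 2usize.pow(i);
        if i == 5 && capacity < 64 * 1024 {
            capacity = 64 * 1024; // final attempt uses at least 64 KiB
        }
        match dctx.decompress(data, capacity) {
            Ok(buf) => return Ok(buf),
            Err(e) if e.to_string().contains("too small") => continue,
            Err(e) => return Err(e),
        }
    }
    Err(io::Error::new(
        io::ErrorKind::Other,
        "output buffer never became large enough",
    ))
}

fn main() -> io::Result<()> {
    let original = b"hello hello hello hello hello";
    let compressed = zstd::bulk::compress(original, 3)?;
    assert_eq!(decompress_with_retries(&compressed)?, original.to_vec());
    Ok(())
}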
| @@ -2,12 +2,17 @@ use std::{ | ||||
|     net::{Ipv4Addr, SocketAddr}, | ||||
|     path::PathBuf, | ||||
|     sync::{Arc, Mutex}, | ||||
|     u64, | ||||
| }; | ||||
|  | ||||
| use anyhow::Context; | ||||
| use cidr::IpCidr; | ||||
| use serde::{Deserialize, Serialize}; | ||||
|  | ||||
| use crate::{proto::common::CompressionAlgoPb, tunnel::generate_digest_from_str}; | ||||
| use crate::{ | ||||
|     proto::common::{CompressionAlgoPb, PortForwardConfigPb, SocketType}, | ||||
|     tunnel::generate_digest_from_str, | ||||
| }; | ||||
|  | ||||
| pub type Flags = crate::proto::common::FlagsInConfig; | ||||
|  | ||||
| @@ -33,6 +38,11 @@ pub fn gen_default_flags() -> Flags { | ||||
|         enable_kcp_proxy: false, | ||||
|         disable_kcp_input: false, | ||||
|         disable_relay_kcp: true, | ||||
|         accept_dns: false, | ||||
|         private_mode: false, | ||||
|         enable_quic_proxy: false, | ||||
|         disable_quic_input: false, | ||||
|         foreign_relay_bps_limit: u64::MAX, | ||||
|     } | ||||
| } | ||||
|  | ||||
| @@ -56,24 +66,19 @@ pub trait ConfigLoader: Send + Sync { | ||||
|     fn get_dhcp(&self) -> bool; | ||||
|     fn set_dhcp(&self, dhcp: bool); | ||||
|  | ||||
|     fn add_proxy_cidr(&self, cidr: cidr::IpCidr); | ||||
|     fn remove_proxy_cidr(&self, cidr: cidr::IpCidr); | ||||
|     fn get_proxy_cidrs(&self) -> Vec<cidr::IpCidr>; | ||||
|     fn add_proxy_cidr(&self, cidr: cidr::Ipv4Cidr, mapped_cidr: Option<cidr::Ipv4Cidr>); | ||||
|     fn remove_proxy_cidr(&self, cidr: cidr::Ipv4Cidr); | ||||
|     fn get_proxy_cidrs(&self) -> Vec<ProxyNetworkConfig>; | ||||
|  | ||||
|     fn get_network_identity(&self) -> NetworkIdentity; | ||||
|     fn set_network_identity(&self, identity: NetworkIdentity); | ||||
|  | ||||
|     fn get_listener_uris(&self) -> Vec<url::Url>; | ||||
|  | ||||
|     fn get_file_logger_config(&self) -> FileLoggerConfig; | ||||
|     fn set_file_logger_config(&self, config: FileLoggerConfig); | ||||
|     fn get_console_logger_config(&self) -> ConsoleLoggerConfig; | ||||
|     fn set_console_logger_config(&self, config: ConsoleLoggerConfig); | ||||
|  | ||||
|     fn get_peers(&self) -> Vec<PeerConfig>; | ||||
|     fn set_peers(&self, peers: Vec<PeerConfig>); | ||||
|  | ||||
|     fn get_listeners(&self) -> Vec<url::Url>; | ||||
|     fn get_listeners(&self) -> Option<Vec<url::Url>>; | ||||
|     fn set_listeners(&self, listeners: Vec<url::Url>); | ||||
|  | ||||
|     fn get_mapped_listeners(&self) -> Vec<url::Url>; | ||||
| @@ -82,6 +87,9 @@ pub trait ConfigLoader: Send + Sync { | ||||
|     fn get_rpc_portal(&self) -> Option<SocketAddr>; | ||||
|     fn set_rpc_portal(&self, addr: SocketAddr); | ||||
|  | ||||
|     fn get_rpc_portal_whitelist(&self) -> Option<Vec<IpCidr>>; | ||||
|     fn set_rpc_portal_whitelist(&self, whitelist: Option<Vec<IpCidr>>); | ||||
|  | ||||
|     fn get_vpn_portal_config(&self) -> Option<VpnPortalConfig>; | ||||
|     fn set_vpn_portal_config(&self, config: VpnPortalConfig); | ||||
|  | ||||
| @@ -97,9 +105,18 @@ pub trait ConfigLoader: Send + Sync { | ||||
|     fn get_socks5_portal(&self) -> Option<url::Url>; | ||||
|     fn set_socks5_portal(&self, addr: Option<url::Url>); | ||||
|  | ||||
|     fn get_port_forwards(&self) -> Vec<PortForwardConfig>; | ||||
|     fn set_port_forwards(&self, forwards: Vec<PortForwardConfig>); | ||||
|  | ||||
|     fn dump(&self) -> String; | ||||
| } | ||||
|  | ||||
| pub trait LoggingConfigLoader { | ||||
|     fn get_file_logger_config(&self) -> FileLoggerConfig; | ||||
|  | ||||
|     fn get_console_logger_config(&self) -> ConsoleLoggerConfig; | ||||
| } | ||||
|  | ||||
| pub type NetworkSecretDigest = [u8; 32]; | ||||
|  | ||||
| #[derive(Debug, Clone, Deserialize, Serialize, Default, Eq, Hash)] | ||||
| @@ -158,7 +175,8 @@ pub struct PeerConfig { | ||||
|  | ||||
| #[derive(Debug, Clone, Deserialize, Serialize, PartialEq)] | ||||
| pub struct ProxyNetworkConfig { | ||||
|     pub cidr: String, | ||||
|     pub cidr: cidr::Ipv4Cidr,                // the CIDR of the proxy network | ||||
|     pub mapped_cidr: Option<cidr::Ipv4Cidr>, // allow remap the proxy CIDR to another CIDR | ||||
|     pub allow: Option<Vec<String>>, | ||||
| } | ||||
|  | ||||
| @@ -174,12 +192,65 @@ pub struct ConsoleLoggerConfig { | ||||
|     pub level: Option<String>, | ||||
| } | ||||
|  | ||||
| #[derive(Debug, Clone, Deserialize, Serialize, PartialEq, derive_builder::Builder)] | ||||
| pub struct LoggingConfig { | ||||
|     #[builder(setter(into, strip_option), default = None)] | ||||
|     file_logger: Option<FileLoggerConfig>, | ||||
|     #[builder(setter(into, strip_option), default = None)] | ||||
|     console_logger: Option<ConsoleLoggerConfig>, | ||||
| } | ||||
|  | ||||
| impl LoggingConfigLoader for &LoggingConfig { | ||||
|     fn get_file_logger_config(&self) -> FileLoggerConfig { | ||||
|         self.file_logger.clone().unwrap_or_default() | ||||
|     } | ||||
|  | ||||
|     fn get_console_logger_config(&self) -> ConsoleLoggerConfig { | ||||
|         self.console_logger.clone().unwrap_or_default() | ||||
|     } | ||||
| } | ||||
|  | ||||
| #[derive(Debug, Clone, Deserialize, Serialize, PartialEq)] | ||||
| pub struct VpnPortalConfig { | ||||
|     pub client_cidr: cidr::Ipv4Cidr, | ||||
|     pub wireguard_listen: SocketAddr, | ||||
| } | ||||
|  | ||||
| #[derive(Debug, Clone, Deserialize, Serialize, PartialEq)] | ||||
| pub struct PortForwardConfig { | ||||
|     pub bind_addr: SocketAddr, | ||||
|     pub dst_addr: SocketAddr, | ||||
|     pub proto: String, | ||||
| } | ||||
|  | ||||
| impl From<PortForwardConfigPb> for PortForwardConfig { | ||||
|     fn from(config: PortForwardConfigPb) -> Self { | ||||
|         PortForwardConfig { | ||||
|             bind_addr: config.bind_addr.unwrap_or_default().into(), | ||||
|             dst_addr: config.dst_addr.unwrap_or_default().into(), | ||||
|             proto: match SocketType::try_from(config.socket_type) { | ||||
|                 Ok(SocketType::Tcp) => "tcp".to_string(), | ||||
|                 Ok(SocketType::Udp) => "udp".to_string(), | ||||
|                 _ => "tcp".to_string(), | ||||
|             }, | ||||
|         } | ||||
|     } | ||||
| } | ||||
|  | ||||
| impl Into<PortForwardConfigPb> for PortForwardConfig { | ||||
|     fn into(self) -> PortForwardConfigPb { | ||||
|         PortForwardConfigPb { | ||||
|             bind_addr: Some(self.bind_addr.into()), | ||||
|             dst_addr: Some(self.dst_addr.into()), | ||||
|             socket_type: match self.proto.to_lowercase().as_str() { | ||||
|                 "tcp" => SocketType::Tcp as i32, | ||||
|                 "udp" => SocketType::Udp as i32, | ||||
|                 _ => SocketType::Tcp as i32, | ||||
|             }, | ||||
|         } | ||||
|     } | ||||
| } | ||||
|  | ||||
| #[derive(Debug, Clone, Deserialize, Serialize, PartialEq)] | ||||
| struct Config { | ||||
|     netns: Option<String>, | ||||
| @@ -196,10 +267,8 @@ struct Config { | ||||
|     peer: Option<Vec<PeerConfig>>, | ||||
|     proxy_network: Option<Vec<ProxyNetworkConfig>>, | ||||
|  | ||||
|     file_logger: Option<FileLoggerConfig>, | ||||
|     console_logger: Option<ConsoleLoggerConfig>, | ||||
|  | ||||
|     rpc_portal: Option<SocketAddr>, | ||||
|     rpc_portal_whitelist: Option<Vec<IpCidr>>, | ||||
|  | ||||
|     vpn_portal_config: Option<VpnPortalConfig>, | ||||
|  | ||||
| @@ -207,6 +276,8 @@ struct Config { | ||||
|  | ||||
|     socks5_proxy: Option<url::Url>, | ||||
|  | ||||
|     port_forward: Option<Vec<PortForwardConfig>>, | ||||
|  | ||||
|     flags: Option<serde_json::Map<String, serde_json::Value>>, | ||||
|  | ||||
|     #[serde(skip)] | ||||
| @@ -231,20 +302,23 @@ impl TomlConfigLoader { | ||||
|  | ||||
|         config.flags_struct = Some(Self::gen_flags(config.flags.clone().unwrap_or_default())); | ||||
|  | ||||
|         Ok(TomlConfigLoader { | ||||
|         let config = TomlConfigLoader { | ||||
|             config: Arc::new(Mutex::new(config)), | ||||
|         }) | ||||
|         }; | ||||
|  | ||||
|         let old_ns = config.get_network_identity(); | ||||
|         config.set_network_identity(NetworkIdentity::new( | ||||
|             old_ns.network_name, | ||||
|             old_ns.network_secret.unwrap_or_default(), | ||||
|         )); | ||||
|  | ||||
|         Ok(config) | ||||
|     } | ||||
|  | ||||
|     pub fn new(config_path: &PathBuf) -> Result<Self, anyhow::Error> { | ||||
|         let config_str = std::fs::read_to_string(config_path) | ||||
|             .with_context(|| format!("failed to read config file: {:?}", config_path))?; | ||||
|         let ret = Self::new_from_str(&config_str)?; | ||||
|         let old_ns = ret.get_network_identity(); | ||||
|         ret.set_network_identity(NetworkIdentity::new( | ||||
|             old_ns.network_name, | ||||
|             old_ns.network_secret.unwrap_or_default(), | ||||
|         )); | ||||
|  | ||||
|         Ok(ret) | ||||
|     } | ||||
| @@ -349,50 +423,52 @@ impl ConfigLoader for TomlConfigLoader { | ||||
|         self.config.lock().unwrap().dhcp = Some(dhcp); | ||||
|     } | ||||
|  | ||||
|     fn add_proxy_cidr(&self, cidr: cidr::IpCidr) { | ||||
|     fn add_proxy_cidr(&self, cidr: cidr::Ipv4Cidr, mapped_cidr: Option<cidr::Ipv4Cidr>) { | ||||
|         let mut locked_config = self.config.lock().unwrap(); | ||||
|         if locked_config.proxy_network.is_none() { | ||||
|             locked_config.proxy_network = Some(vec![]); | ||||
|         } | ||||
|         let cidr_str = cidr.to_string(); | ||||
|         if let Some(mapped_cidr) = mapped_cidr.as_ref() { | ||||
|             assert_eq!( | ||||
|                 cidr.network_length(), | ||||
|                 mapped_cidr.network_length(), | ||||
|                 "Mapped CIDR must have the same network length as the original CIDR", | ||||
|             ); | ||||
|         } | ||||
|         // insert if no duplicate | ||||
|         if !locked_config | ||||
|             .proxy_network | ||||
|             .as_ref() | ||||
|             .unwrap() | ||||
|             .iter() | ||||
|             .any(|c| c.cidr == cidr_str) | ||||
|             .any(|c| c.cidr == cidr && c.mapped_cidr == mapped_cidr) | ||||
|         { | ||||
|             locked_config | ||||
|                 .proxy_network | ||||
|                 .as_mut() | ||||
|                 .unwrap() | ||||
|                 .push(ProxyNetworkConfig { | ||||
|                     cidr: cidr_str, | ||||
|                     cidr, | ||||
|                     mapped_cidr, | ||||
|                     allow: None, | ||||
|                 }); | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     fn remove_proxy_cidr(&self, cidr: cidr::IpCidr) { | ||||
|     fn remove_proxy_cidr(&self, cidr: cidr::Ipv4Cidr) { | ||||
|         let mut locked_config = self.config.lock().unwrap(); | ||||
|         if let Some(proxy_cidrs) = &mut locked_config.proxy_network { | ||||
|             let cidr_str = cidr.to_string(); | ||||
|             proxy_cidrs.retain(|c| c.cidr != cidr_str); | ||||
|             proxy_cidrs.retain(|c| c.cidr != cidr); | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     fn get_proxy_cidrs(&self) -> Vec<cidr::IpCidr> { | ||||
|     fn get_proxy_cidrs(&self) -> Vec<ProxyNetworkConfig> { | ||||
|         self.config | ||||
|             .lock() | ||||
|             .unwrap() | ||||
|             .proxy_network | ||||
|             .as_ref() | ||||
|             .map(|v| { | ||||
|                 v.iter() | ||||
|                     .map(|c| c.cidr.parse().unwrap()) | ||||
|                     .collect::<Vec<cidr::IpCidr>>() | ||||
|             }) | ||||
|             .cloned() | ||||
|             .unwrap_or_default() | ||||
|     } | ||||
|  | ||||
| @@ -433,32 +509,6 @@ impl ConfigLoader for TomlConfigLoader { | ||||
|             .unwrap_or_default() | ||||
|     } | ||||
|  | ||||
|     fn get_file_logger_config(&self) -> FileLoggerConfig { | ||||
|         self.config | ||||
|             .lock() | ||||
|             .unwrap() | ||||
|             .file_logger | ||||
|             .clone() | ||||
|             .unwrap_or_default() | ||||
|     } | ||||
|  | ||||
|     fn set_file_logger_config(&self, config: FileLoggerConfig) { | ||||
|         self.config.lock().unwrap().file_logger = Some(config); | ||||
|     } | ||||
|  | ||||
|     fn get_console_logger_config(&self) -> ConsoleLoggerConfig { | ||||
|         self.config | ||||
|             .lock() | ||||
|             .unwrap() | ||||
|             .console_logger | ||||
|             .clone() | ||||
|             .unwrap_or_default() | ||||
|     } | ||||
|  | ||||
|     fn set_console_logger_config(&self, config: ConsoleLoggerConfig) { | ||||
|         self.config.lock().unwrap().console_logger = Some(config); | ||||
|     } | ||||
|  | ||||
|     fn get_peers(&self) -> Vec<PeerConfig> { | ||||
|         self.config.lock().unwrap().peer.clone().unwrap_or_default() | ||||
|     } | ||||
| @@ -467,13 +517,8 @@ impl ConfigLoader for TomlConfigLoader { | ||||
|         self.config.lock().unwrap().peer = Some(peers); | ||||
|     } | ||||
|  | ||||
|     fn get_listeners(&self) -> Vec<url::Url> { | ||||
|         self.config | ||||
|             .lock() | ||||
|             .unwrap() | ||||
|             .listeners | ||||
|             .clone() | ||||
|             .unwrap_or_default() | ||||
|     fn get_listeners(&self) -> Option<Vec<url::Url>> { | ||||
|         self.config.lock().unwrap().listeners.clone() | ||||
|     } | ||||
|  | ||||
|     fn set_listeners(&self, listeners: Vec<url::Url>) { | ||||
| @@ -501,6 +546,14 @@ impl ConfigLoader for TomlConfigLoader { | ||||
|         self.config.lock().unwrap().rpc_portal = Some(addr); | ||||
|     } | ||||
|  | ||||
|     fn get_rpc_portal_whitelist(&self) -> Option<Vec<IpCidr>> { | ||||
|         self.config.lock().unwrap().rpc_portal_whitelist.clone() | ||||
|     } | ||||
|  | ||||
|     fn set_rpc_portal_whitelist(&self, whitelist: Option<Vec<IpCidr>>) { | ||||
|         self.config.lock().unwrap().rpc_portal_whitelist = whitelist; | ||||
|     } | ||||
|  | ||||
|     fn get_vpn_portal_config(&self) -> Option<VpnPortalConfig> { | ||||
|         self.config.lock().unwrap().vpn_portal_config.clone() | ||||
|     } | ||||
| @@ -534,6 +587,35 @@ impl ConfigLoader for TomlConfigLoader { | ||||
|         self.config.lock().unwrap().exit_nodes = Some(nodes); | ||||
|     } | ||||
|  | ||||
|     fn get_routes(&self) -> Option<Vec<cidr::Ipv4Cidr>> { | ||||
|         self.config.lock().unwrap().routes.clone() | ||||
|     } | ||||
|  | ||||
|     fn set_routes(&self, routes: Option<Vec<cidr::Ipv4Cidr>>) { | ||||
|         self.config.lock().unwrap().routes = routes; | ||||
|     } | ||||
|  | ||||
|     fn get_socks5_portal(&self) -> Option<url::Url> { | ||||
|         self.config.lock().unwrap().socks5_proxy.clone() | ||||
|     } | ||||
|  | ||||
|     fn set_socks5_portal(&self, addr: Option<url::Url>) { | ||||
|         self.config.lock().unwrap().socks5_proxy = addr; | ||||
|     } | ||||
|  | ||||
|     fn get_port_forwards(&self) -> Vec<PortForwardConfig> { | ||||
|         self.config | ||||
|             .lock() | ||||
|             .unwrap() | ||||
|             .port_forward | ||||
|             .clone() | ||||
|             .unwrap_or_default() | ||||
|     } | ||||
|  | ||||
|     fn set_port_forwards(&self, forwards: Vec<PortForwardConfig>) { | ||||
|         self.config.lock().unwrap().port_forward = Some(forwards); | ||||
|     } | ||||
|  | ||||
|     fn dump(&self) -> String { | ||||
|         let default_flags_json = serde_json::to_string(&gen_default_flags()).unwrap(); | ||||
|         let default_flags_hashmap = | ||||
| @@ -558,22 +640,6 @@ impl ConfigLoader for TomlConfigLoader { | ||||
|         config.flags = Some(flag_map); | ||||
|         toml::to_string_pretty(&config).unwrap() | ||||
|     } | ||||
|  | ||||
|     fn get_routes(&self) -> Option<Vec<cidr::Ipv4Cidr>> { | ||||
|         self.config.lock().unwrap().routes.clone() | ||||
|     } | ||||
|  | ||||
|     fn set_routes(&self, routes: Option<Vec<cidr::Ipv4Cidr>>) { | ||||
|         self.config.lock().unwrap().routes = routes; | ||||
|     } | ||||
|  | ||||
|     fn get_socks5_portal(&self) -> Option<url::Url> { | ||||
|         self.config.lock().unwrap().socks5_proxy.clone() | ||||
|     } | ||||
|  | ||||
|     fn set_socks5_portal(&self, addr: Option<url::Url>) { | ||||
|         self.config.lock().unwrap().socks5_proxy = addr; | ||||
|     } | ||||
| } | ||||
|  | ||||
| #[cfg(test)] | ||||
| @@ -614,6 +680,11 @@ dir = "/tmp/easytier" | ||||
|  | ||||
| [console_logger] | ||||
| level = "warn" | ||||
|  | ||||
| [[port_forward]] | ||||
| bind_addr = "0.0.0.0:11011" | ||||
| dst_addr = "192.168.94.33:11011" | ||||
| proto = "tcp" | ||||
| "#; | ||||
|         let ret = TomlConfigLoader::new_from_str(config_str); | ||||
|         if let Err(e) = &ret { | ||||
| @@ -634,6 +705,14 @@ level = "warn" | ||||
|                 .collect::<Vec<String>>() | ||||
|         ); | ||||
|  | ||||
|         assert_eq!( | ||||
|             vec![PortForwardConfig { | ||||
|                 bind_addr: "0.0.0.0:11011".parse().unwrap(), | ||||
|                 dst_addr: "192.168.94.33:11011".parse().unwrap(), | ||||
|                 proto: "tcp".to_string(), | ||||
|             }], | ||||
|             ret.get_port_forwards() | ||||
|         ); | ||||
|         println!("{}", ret.dump()); | ||||
|     } | ||||
| } | ||||
|   | ||||
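For reference, a minimal serde/toml sketch of how the [[port_forward]] entries used in the test above map onto a struct. PortForward and Config here are local stand-ins, not the crate's own PortForwardConfig, and the example assumes the serde (with derive) and toml crates:

use serde::Deserialize;
use std::net::SocketAddr;

#[derive(Debug, Deserialize, PartialEq)]
struct PortForward {
    bind_addr: SocketAddr,
    dst_addr: SocketAddr,
    proto: String,
}

#[derive(Debug, Deserialize)]
struct Config {
    port_forward: Option<Vec<PortForward>>,
}

fn main() {
    let cfg: Config = toml::from_str(
        r#"
[[port_forward]]
bind_addr = "0.0.0.0:11011"
dst_addr = "192.168.94.33:11011"
proto = "tcp"
"#,
    )
    .unwrap();

    let fwd = cfg.port_forward.unwrap();
    assert_eq!(fwd[0].proto, "tcp");
    assert_eq!(fwd[0].bind_addr, "0.0.0.0:11011".parse::<SocketAddr>().unwrap());
}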
| @@ -1,21 +1,21 @@ | ||||
| macro_rules! define_global_var { | ||||
|     ($name:ident, $type:ty, $init:expr) => { | ||||
|         pub static $name: once_cell::sync::Lazy<tokio::sync::Mutex<$type>> = | ||||
|             once_cell::sync::Lazy::new(|| tokio::sync::Mutex::new($init)); | ||||
|         pub static $name: once_cell::sync::Lazy<std::sync::Mutex<$type>> = | ||||
|             once_cell::sync::Lazy::new(|| std::sync::Mutex::new($init)); | ||||
|     }; | ||||
| } | ||||
|  | ||||
| #[macro_export] | ||||
| macro_rules! use_global_var { | ||||
|     ($name:ident) => { | ||||
|         crate::common::constants::$name.lock().await.to_owned() | ||||
|         crate::common::constants::$name.lock().unwrap().to_owned() | ||||
|     }; | ||||
| } | ||||
|  | ||||
| #[macro_export] | ||||
| macro_rules! set_global_var { | ||||
|     ($name:ident, $val:expr) => { | ||||
|         *crate::common::constants::$name.lock().await = $val | ||||
|         *crate::common::constants::$name.lock().unwrap() = $val | ||||
|     }; | ||||
| } | ||||
|  | ||||
| @@ -23,6 +23,8 @@ define_global_var!(MANUAL_CONNECTOR_RECONNECT_INTERVAL_MS, u64, 1000); | ||||
|  | ||||
| define_global_var!(OSPF_UPDATE_MY_GLOBAL_FOREIGN_NETWORK_INTERVAL_SEC, u64, 10); | ||||
|  | ||||
| define_global_var!(MACHINE_UID, Option<String>, None); | ||||
|  | ||||
| pub const UDP_HOLE_PUNCH_CONNECTOR_SERVICE_ID: u32 = 2; | ||||
|  | ||||
| pub const WIN_SERVICE_WORK_DIR_REG_KEY: &str = "SOFTWARE\\EasyTier\\Service\\WorkDir"; | ||||
|   | ||||
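Because the globals now live behind std::sync::Mutex, use_global_var!/set_global_var! no longer need `.await`. A simplified stand-alone version of the same pattern, with the macro expansion written out by hand and once_cell assumed as a dependency:

use once_cell::sync::Lazy;
use std::sync::Mutex;

// A lazily initialised global guarded by a blocking mutex; readable and
// writable from both sync and async code without awaiting.
static MACHINE_UID: Lazy<Mutex<Option<String>>> = Lazy::new(|| Mutex::new(None));

fn set_machine_uid(v: Option<String>) {
    *MACHINE_UID.lock().unwrap() = v;
}

fn get_machine_uid() -> Option<String> {
    MACHINE_UID.lock().unwrap().to_owned()
}

fn main() {
    set_machine_uid(Some("my-fixed-id".to_string()));
    assert_eq!(get_machine_uid().as_deref(), Some("my-fixed-id"));
}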
							
								
								
									
easytier/src/common/dns.rs | 143 (new file)
							| @@ -0,0 +1,143 @@ | ||||
| use std::net::SocketAddr; | ||||
| use std::sync::atomic::AtomicBool; | ||||
| use std::sync::Arc; | ||||
|  | ||||
| use anyhow::Context; | ||||
| use hickory_proto::runtime::TokioRuntimeProvider; | ||||
| use hickory_proto::xfer::Protocol; | ||||
| use hickory_resolver::config::{LookupIpStrategy, NameServerConfig, ResolverConfig, ResolverOpts}; | ||||
| use hickory_resolver::name_server::{GenericConnector, TokioConnectionProvider}; | ||||
| use hickory_resolver::system_conf::read_system_conf; | ||||
| use hickory_resolver::{Resolver, TokioResolver}; | ||||
| use once_cell::sync::Lazy; | ||||
| use tokio::net::lookup_host; | ||||
|  | ||||
| use super::error::Error; | ||||
|  | ||||
| pub fn get_default_resolver_config() -> ResolverConfig { | ||||
|     let mut default_resolve_config = ResolverConfig::new(); | ||||
|     default_resolve_config.add_name_server(NameServerConfig::new( | ||||
|         "223.5.5.5:53".parse().unwrap(), | ||||
|         Protocol::Udp, | ||||
|     )); | ||||
|     default_resolve_config.add_name_server(NameServerConfig::new( | ||||
|         "180.184.1.1:53".parse().unwrap(), | ||||
|         Protocol::Udp, | ||||
|     )); | ||||
|     default_resolve_config | ||||
| } | ||||
|  | ||||
| pub static ALLOW_USE_SYSTEM_DNS_RESOLVER: Lazy<AtomicBool> = Lazy::new(|| AtomicBool::new(true)); | ||||
|  | ||||
| pub static RESOLVER: Lazy<Arc<Resolver<GenericConnector<TokioRuntimeProvider>>>> = | ||||
|     Lazy::new(|| { | ||||
|         let system_cfg = read_system_conf(); | ||||
|         let mut cfg = get_default_resolver_config(); | ||||
|         let mut opt = ResolverOpts::default(); | ||||
|         if let Ok(s) = system_cfg { | ||||
|             for ns in s.0.name_servers() { | ||||
|                 cfg.add_name_server(ns.clone()); | ||||
|             } | ||||
|             opt = s.1; | ||||
|         } | ||||
|         opt.ip_strategy = LookupIpStrategy::Ipv4AndIpv6; | ||||
|         let builder = TokioResolver::builder_with_config(cfg, TokioConnectionProvider::default()) | ||||
|             .with_options(opt); | ||||
|         Arc::new(builder.build()) | ||||
|     }); | ||||
|  | ||||
| pub async fn resolve_txt_record(domain_name: &str) -> Result<String, Error> { | ||||
|     let r = RESOLVER.clone(); | ||||
|     let response = r.txt_lookup(domain_name).await.with_context(|| { | ||||
|         format!( | ||||
|             "txt_lookup failed, domain_name: {}", | ||||
|             domain_name.to_string() | ||||
|         ) | ||||
|     })?; | ||||
|  | ||||
|     let txt_record = response.iter().next().with_context(|| { | ||||
|         format!( | ||||
|             "no txt record found, domain_name: {}", | ||||
|             domain_name.to_string() | ||||
|         ) | ||||
|     })?; | ||||
|  | ||||
|     let txt_data = String::from_utf8_lossy(&txt_record.txt_data()[0]); | ||||
|     tracing::info!(?txt_data, ?domain_name, "get txt record"); | ||||
|  | ||||
|     Ok(txt_data.to_string()) | ||||
| } | ||||
|  | ||||
| pub async fn socket_addrs( | ||||
|     url: &url::Url, | ||||
|     default_port_number: impl Fn() -> Option<u16>, | ||||
| ) -> Result<Vec<SocketAddr>, Error> { | ||||
|     let host = url.host_str().ok_or(Error::InvalidUrl(url.to_string()))?; | ||||
|     let port = url | ||||
|         .port() | ||||
|         .or_else(default_port_number) | ||||
|         .ok_or(Error::InvalidUrl(url.to_string()))?; | ||||
|     // See https://github.com/EasyTier/EasyTier/pull/947 | ||||
|     let port = match port { | ||||
|         0 => match url.scheme() { | ||||
|             "ws" => 80, | ||||
|             "wss" => 443, | ||||
|             _ => port, | ||||
|         }, | ||||
|         _ => port, | ||||
|     }; | ||||
|  | ||||
|     // if host is an ip address, return it directly | ||||
|     if let Ok(ip) = host.parse::<std::net::IpAddr>() { | ||||
|         return Ok(vec![SocketAddr::new(ip, port)]); | ||||
|     } | ||||
|  | ||||
|     if ALLOW_USE_SYSTEM_DNS_RESOLVER.load(std::sync::atomic::Ordering::Relaxed) { | ||||
|         let socket_addr = format!("{}:{}", host, port); | ||||
|         match lookup_host(socket_addr).await { | ||||
|             Ok(a) => { | ||||
|                 let a = a.collect(); | ||||
|                 tracing::debug!(?a, "system dns lookup done"); | ||||
|                 return Ok(a); | ||||
|             } | ||||
|             Err(e) => { | ||||
|                 tracing::error!(?e, "system dns lookup failed"); | ||||
|             } | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     // use hickory_resolver | ||||
|     let ret = RESOLVER.lookup_ip(host).await.with_context(|| { | ||||
|         format!( | ||||
|             "hickory dns lookup_ip failed, host: {}, port: {}", | ||||
|             host, port | ||||
|         ) | ||||
|     })?; | ||||
|     Ok(ret | ||||
|         .iter() | ||||
|         .map(|ip| SocketAddr::new(ip, port)) | ||||
|         .collect::<Vec<_>>()) | ||||
| } | ||||
|  | ||||
| #[cfg(test)] | ||||
| mod tests { | ||||
|     use crate::defer; | ||||
|  | ||||
|     use super::*; | ||||
|  | ||||
|     #[tokio::test] | ||||
|     async fn test_socket_addrs() { | ||||
|         let url = url::Url::parse("tcp://public.easytier.cn:80").unwrap(); | ||||
|         let addrs = socket_addrs(&url, || Some(80)).await.unwrap(); | ||||
|         assert_eq!(2, addrs.len(), "addrs: {:?}", addrs); | ||||
|         println!("addrs: {:?}", addrs); | ||||
|  | ||||
|         ALLOW_USE_SYSTEM_DNS_RESOLVER.store(false, std::sync::atomic::Ordering::Relaxed); | ||||
|         defer!( | ||||
|             ALLOW_USE_SYSTEM_DNS_RESOLVER.store(true, std::sync::atomic::Ordering::Relaxed); | ||||
|         ); | ||||
|         let addrs = socket_addrs(&url, || Some(80)).await.unwrap(); | ||||
|         assert_eq!(2, addrs.len(), "addrs: {:?}", addrs); | ||||
|         println!("addrs2: {:?}", addrs); | ||||
|     } | ||||
| } | ||||
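One detail worth calling out in socket_addrs above is the scheme-based fallback when the resolved port is 0 (ws maps to 80, wss to 443). A small sketch of that logic against the url crate, using an illustrative helper name rather than the module's actual function:

use url::Url;

// Mirrors the port fallback above: explicit port, then the caller's default,
// then a scheme-specific substitute when the result is 0.
fn effective_port(url: &Url, default_port: Option<u16>) -> Option<u16> {
    let port = url.port().or(default_port)?;
    Some(match (port, url.scheme()) {
        (0, "ws") => 80,
        (0, "wss") => 443,
        _ => port,
    })
}

fn main() {
    let u = Url::parse("wss://example.com").unwrap();
    // No explicit port plus a default of 0 resolves to 443 for wss.
    assert_eq!(effective_port(&u, Some(0)), Some(443));
}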
| @@ -4,8 +4,10 @@ use std::{ | ||||
|     sync::{Arc, Mutex}, | ||||
| }; | ||||
|  | ||||
| use crate::common::config::ProxyNetworkConfig; | ||||
| use crate::common::token_bucket::TokenBucketManager; | ||||
| use crate::proto::cli::PeerConnInfo; | ||||
| use crate::proto::common::PeerFeatureFlag; | ||||
| use crate::proto::common::{PeerFeatureFlag, PortForwardConfigPb}; | ||||
| use crossbeam::atomic::AtomicCell; | ||||
|  | ||||
| use super::{ | ||||
| @@ -42,6 +44,8 @@ pub enum GlobalCtxEvent { | ||||
|  | ||||
|     DhcpIpv4Changed(Option<cidr::Ipv4Inet>, Option<cidr::Ipv4Inet>), // (old, new) | ||||
|     DhcpIpv4Conflicted(Option<cidr::Ipv4Inet>), | ||||
|  | ||||
|     PortForwardAdded(PortForwardConfigPb), | ||||
| } | ||||
|  | ||||
| pub type EventBus = tokio::sync::broadcast::Sender<GlobalCtxEvent>; | ||||
| @@ -57,13 +61,13 @@ pub struct GlobalCtx { | ||||
|     event_bus: EventBus, | ||||
|  | ||||
|     cached_ipv4: AtomicCell<Option<cidr::Ipv4Inet>>, | ||||
|     cached_proxy_cidrs: AtomicCell<Option<Vec<cidr::IpCidr>>>, | ||||
|     cached_proxy_cidrs: AtomicCell<Option<Vec<ProxyNetworkConfig>>>, | ||||
|  | ||||
|     ip_collector: Arc<IPCollector>, | ||||
|     ip_collector: Mutex<Option<Arc<IPCollector>>>, | ||||
|  | ||||
|     hostname: String, | ||||
|     hostname: Mutex<String>, | ||||
|  | ||||
|     stun_info_collection: Box<dyn StunInfoCollectorTrait>, | ||||
|     stun_info_collection: Mutex<Arc<dyn StunInfoCollectorTrait>>, | ||||
|  | ||||
|     running_listeners: Mutex<Vec<url::Url>>, | ||||
|  | ||||
| @@ -72,6 +76,10 @@ pub struct GlobalCtx { | ||||
|     no_tun: bool, | ||||
|  | ||||
|     feature_flags: AtomicCell<PeerFeatureFlag>, | ||||
|  | ||||
|     quic_proxy_port: AtomicCell<Option<u16>>, | ||||
|  | ||||
|     token_bucket_manager: TokenBucketManager, | ||||
| } | ||||
|  | ||||
| impl std::fmt::Debug for GlobalCtx { | ||||
| @@ -95,7 +103,7 @@ impl GlobalCtx { | ||||
|         let net_ns = NetNS::new(config_fs.get_netns()); | ||||
|         let hostname = config_fs.get_hostname(); | ||||
|  | ||||
|         let (event_bus, _) = tokio::sync::broadcast::channel(1024); | ||||
|         let (event_bus, _) = tokio::sync::broadcast::channel(8); | ||||
|  | ||||
|         let stun_info_collection = Arc::new(StunInfoCollector::new_with_default_servers()); | ||||
|  | ||||
| @@ -118,11 +126,14 @@ impl GlobalCtx { | ||||
|             cached_ipv4: AtomicCell::new(None), | ||||
|             cached_proxy_cidrs: AtomicCell::new(None), | ||||
|  | ||||
|             ip_collector: Arc::new(IPCollector::new(net_ns, stun_info_collection.clone())), | ||||
|             ip_collector: Mutex::new(Some(Arc::new(IPCollector::new( | ||||
|                 net_ns, | ||||
|                 stun_info_collection.clone(), | ||||
|             )))), | ||||
|  | ||||
|             hostname, | ||||
|             hostname: Mutex::new(hostname), | ||||
|  | ||||
|             stun_info_collection: Box::new(stun_info_collection), | ||||
|             stun_info_collection: Mutex::new(stun_info_collection), | ||||
|  | ||||
|             running_listeners: Mutex::new(Vec::new()), | ||||
|  | ||||
| @@ -131,6 +142,9 @@ impl GlobalCtx { | ||||
|             no_tun, | ||||
|  | ||||
|             feature_flags: AtomicCell::new(feature_flags), | ||||
|             quic_proxy_port: AtomicCell::new(None), | ||||
|  | ||||
|             token_bucket_manager: TokenBucketManager::new(), | ||||
|         } | ||||
|     } | ||||
|  | ||||
| @@ -139,10 +153,13 @@ impl GlobalCtx { | ||||
|     } | ||||
|  | ||||
|     pub fn issue_event(&self, event: GlobalCtxEvent) { | ||||
|         if self.event_bus.receiver_count() != 0 { | ||||
|             self.event_bus.send(event).unwrap(); | ||||
|         } else { | ||||
|             tracing::warn!("No subscriber for event: {:?}", event); | ||||
|         if let Err(e) = self.event_bus.send(event.clone()) { | ||||
|             tracing::warn!( | ||||
|                 "Failed to send event: {:?}, error: {:?}, receiver count: {}", | ||||
|                 event, | ||||
|                 e, | ||||
|                 self.event_bus.receiver_count() | ||||
|             ); | ||||
|         } | ||||
|     } | ||||
|  | ||||
| @@ -174,29 +191,6 @@ impl GlobalCtx { | ||||
|         self.cached_ipv4.store(None); | ||||
|     } | ||||
|  | ||||
|     pub fn add_proxy_cidr(&self, cidr: cidr::IpCidr) -> Result<(), std::io::Error> { | ||||
|         self.config.add_proxy_cidr(cidr); | ||||
|         self.cached_proxy_cidrs.store(None); | ||||
|         Ok(()) | ||||
|     } | ||||
|  | ||||
|     pub fn remove_proxy_cidr(&self, cidr: cidr::IpCidr) -> Result<(), std::io::Error> { | ||||
|         self.config.remove_proxy_cidr(cidr); | ||||
|         self.cached_proxy_cidrs.store(None); | ||||
|         Ok(()) | ||||
|     } | ||||
|  | ||||
|     pub fn get_proxy_cidrs(&self) -> Vec<cidr::IpCidr> { | ||||
|         if let Some(proxy_cidrs) = self.cached_proxy_cidrs.take() { | ||||
|             self.cached_proxy_cidrs.store(Some(proxy_cidrs.clone())); | ||||
|             return proxy_cidrs; | ||||
|         } | ||||
|  | ||||
|         let ret = self.config.get_proxy_cidrs(); | ||||
|         self.cached_proxy_cidrs.store(Some(ret.clone())); | ||||
|         ret | ||||
|     } | ||||
|  | ||||
|     pub fn get_id(&self) -> uuid::Uuid { | ||||
|         self.config.get_id() | ||||
|     } | ||||
| @@ -210,26 +204,30 @@ impl GlobalCtx { | ||||
|     } | ||||
|  | ||||
|     pub fn get_ip_collector(&self) -> Arc<IPCollector> { | ||||
|         self.ip_collector.clone() | ||||
|         self.ip_collector.lock().unwrap().as_ref().unwrap().clone() | ||||
|     } | ||||
|  | ||||
|     pub fn get_hostname(&self) -> String { | ||||
|         return self.hostname.clone(); | ||||
|         return self.hostname.lock().unwrap().clone(); | ||||
|     } | ||||
|  | ||||
|     pub fn get_stun_info_collector(&self) -> impl StunInfoCollectorTrait + '_ { | ||||
|         self.stun_info_collection.as_ref() | ||||
|     pub fn set_hostname(&self, hostname: String) { | ||||
|         *self.hostname.lock().unwrap() = hostname; | ||||
|     } | ||||
|  | ||||
|     pub fn get_stun_info_collector(&self) -> Arc<dyn StunInfoCollectorTrait> { | ||||
|         self.stun_info_collection.lock().unwrap().clone() | ||||
|     } | ||||
|  | ||||
|     pub fn replace_stun_info_collector(&self, collector: Box<dyn StunInfoCollectorTrait>) { | ||||
|         // force replace the stun_info_collection without mut and drop the old one | ||||
|         let ptr = &self.stun_info_collection as *const Box<dyn StunInfoCollectorTrait>; | ||||
|         let ptr = ptr as *mut Box<dyn StunInfoCollectorTrait>; | ||||
|         unsafe { | ||||
|             std::ptr::drop_in_place(ptr); | ||||
|             #[allow(invalid_reference_casting)] | ||||
|             std::ptr::write(ptr, collector); | ||||
|         } | ||||
|         let arc_collector: Arc<dyn StunInfoCollectorTrait> = Arc::new(collector); | ||||
|         *self.stun_info_collection.lock().unwrap() = arc_collector.clone(); | ||||
|  | ||||
|         // rebuild the ip collector | ||||
|         *self.ip_collector.lock().unwrap() = Some(Arc::new(IPCollector::new( | ||||
|             self.net_ns.clone(), | ||||
|             arc_collector, | ||||
|         ))); | ||||
|     } | ||||
|  | ||||
|     pub fn get_running_listeners(&self) -> Vec<url::Url> { | ||||
| @@ -291,11 +289,26 @@ impl GlobalCtx { | ||||
|     pub fn set_feature_flags(&self, flags: PeerFeatureFlag) { | ||||
|         self.feature_flags.store(flags); | ||||
|     } | ||||
|  | ||||
|     pub fn get_quic_proxy_port(&self) -> Option<u16> { | ||||
|         self.quic_proxy_port.load() | ||||
|     } | ||||
|  | ||||
|     pub fn set_quic_proxy_port(&self, port: Option<u16>) { | ||||
|         self.quic_proxy_port.store(port); | ||||
|     } | ||||
|  | ||||
|     pub fn token_bucket_manager(&self) -> &TokenBucketManager { | ||||
|         &self.token_bucket_manager | ||||
|     } | ||||
| } | ||||
|  | ||||
| #[cfg(test)] | ||||
| pub mod tests { | ||||
|     use crate::common::{config::TomlConfigLoader, new_peer_id}; | ||||
|     use crate::{ | ||||
|         common::{config::TomlConfigLoader, new_peer_id, stun::MockStunInfoCollector}, | ||||
|         proto::common::NatType, | ||||
|     }; | ||||
|  | ||||
|     use super::*; | ||||
|  | ||||
| @@ -335,7 +348,12 @@ pub mod tests { | ||||
|         let config_fs = TomlConfigLoader::default(); | ||||
|         config_fs.set_inst_name(format!("test_{}", config_fs.get_id())); | ||||
|         config_fs.set_network_identity(network_identy.unwrap_or(NetworkIdentity::default())); | ||||
|         std::sync::Arc::new(GlobalCtx::new(config_fs)) | ||||
|  | ||||
|         let ctx = Arc::new(GlobalCtx::new(config_fs)); | ||||
|         ctx.replace_stun_info_collector(Box::new(MockStunInfoCollector { | ||||
|             udp_nat_type: NatType::Unknown, | ||||
|         })); | ||||
|         ctx | ||||
|     } | ||||
|  | ||||
|     pub fn get_mock_global_ctx() -> ArcGlobalCtx { | ||||
|   | ||||
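replace_stun_info_collector previously overwrote a Box<dyn ...> in place with raw pointer writes; storing Mutex<Arc<dyn ...>> removes the unsafe code entirely. A stand-alone sketch of that pattern with local stand-in types (not the crate's StunInfoCollectorTrait):

use std::sync::{Arc, Mutex};

trait StunCollector: Send + Sync {
    fn name(&self) -> &'static str;
}

struct RealCollector;
impl StunCollector for RealCollector {
    fn name(&self) -> &'static str { "real" }
}

struct MockCollector;
impl StunCollector for MockCollector {
    fn name(&self) -> &'static str { "mock" }
}

struct Ctx {
    stun: Mutex<Arc<dyn StunCollector>>,
}

impl Ctx {
    fn get_collector(&self) -> Arc<dyn StunCollector> {
        self.stun.lock().unwrap().clone()
    }

    fn replace_collector(&self, c: Arc<dyn StunCollector>) {
        // No &mut self and no unsafe pointer writes needed.
        *self.stun.lock().unwrap() = c;
    }
}

fn main() {
    let initial: Arc<dyn StunCollector> = Arc::new(RealCollector);
    let ctx = Ctx { stun: Mutex::new(initial) };
    ctx.replace_collector(Arc::new(MockCollector));
    assert_eq!(ctx.get_collector().name(), "mock");
}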
| @@ -12,13 +12,15 @@ impl IfConfiguerTrait for MacIfConfiger { | ||||
|         name: &str, | ||||
|         address: Ipv4Addr, | ||||
|         cidr_prefix: u8, | ||||
|         cost: Option<i32>, | ||||
|     ) -> Result<(), Error> { | ||||
|         run_shell_cmd( | ||||
|             format!( | ||||
|                 "route -n add {} -netmask {} -interface {} -hopcount 7", | ||||
|                 "route -n add {} -netmask {} -interface {} -hopcount {}", | ||||
|                 address, | ||||
|                 cidr_to_subnet_mask(cidr_prefix), | ||||
|                 name | ||||
|                 name, | ||||
|                 cost.unwrap_or(7) | ||||
|             ) | ||||
|             .as_str(), | ||||
|         ) | ||||
|   | ||||
| @@ -21,6 +21,7 @@ pub trait IfConfiguerTrait: Send + Sync { | ||||
|         _name: &str, | ||||
|         _address: Ipv4Addr, | ||||
|         _cidr_prefix: u8, | ||||
|         _cost: Option<i32>, | ||||
|     ) -> Result<(), Error> { | ||||
|         Ok(()) | ||||
|     } | ||||
| @@ -125,3 +126,6 @@ pub type IfConfiger = windows::WindowsIfConfiger; | ||||
|     target_os = "freebsd", | ||||
| )))] | ||||
| pub type IfConfiger = DummyIfConfiger; | ||||
|  | ||||
| #[cfg(target_os = "windows")] | ||||
| pub use windows::RegistryManager; | ||||
|   | ||||
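add_ipv4_route now takes an optional route cost, and each platform substitutes its own default when None is passed (7 on macOS, 65535 on Linux, 9000 on Windows). A sketch of the call shape using a local stand-in trait, not the crate's IfConfiguerTrait:

use std::net::Ipv4Addr;

// Local stand-in; only the signature and the Option<i32> fallback matter here.
trait RouteConfigurer {
    fn add_ipv4_route(
        &self,
        name: &str,
        address: Ipv4Addr,
        cidr_prefix: u8,
        cost: Option<i32>,
    ) -> Result<(), String>;
}

struct PrintConfigurer;

impl RouteConfigurer for PrintConfigurer {
    fn add_ipv4_route(
        &self,
        name: &str,
        address: Ipv4Addr,
        cidr_prefix: u8,
        cost: Option<i32>,
    ) -> Result<(), String> {
        // 65535 is used here purely as an example default metric.
        println!(
            "route add {}/{} dev {} metric {}",
            address,
            cidr_prefix,
            name,
            cost.unwrap_or(65535)
        );
        Ok(())
    }
}

fn main() {
    let cfg = PrintConfigurer;
    cfg.add_ipv4_route("et0", Ipv4Addr::new(10, 5, 5, 0), 24, Some(100)).unwrap();
    cfg.add_ipv4_route("et0", Ipv4Addr::new(10, 6, 6, 0), 24, None).unwrap();
}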
| @@ -350,6 +350,7 @@ impl IfConfiguerTrait for NetlinkIfConfiger { | ||||
|         name: &str, | ||||
|         address: Ipv4Addr, | ||||
|         cidr_prefix: u8, | ||||
|         cost: Option<i32>, | ||||
|     ) -> Result<(), Error> { | ||||
|         let mut message = RouteMessage::default(); | ||||
|  | ||||
| @@ -359,7 +360,9 @@ impl IfConfiguerTrait for NetlinkIfConfiger { | ||||
|         message.header.kind = RouteType::Unicast; | ||||
|         message.header.address_family = AddressFamily::Inet; | ||||
|         // metric | ||||
|         message.attributes.push(RouteAttribute::Priority(65535)); | ||||
|         message | ||||
|             .attributes | ||||
|             .push(RouteAttribute::Priority(cost.unwrap_or(65535) as u32)); | ||||
|         // output interface | ||||
|         message | ||||
|             .attributes | ||||
| @@ -550,7 +553,7 @@ mod tests { | ||||
|         ifcfg.set_link_status(DUMMY_IFACE_NAME, true).await.unwrap(); | ||||
|  | ||||
|         ifcfg | ||||
|             .add_ipv4_route(DUMMY_IFACE_NAME, "10.5.5.0".parse().unwrap(), 24) | ||||
|             .add_ipv4_route(DUMMY_IFACE_NAME, "10.5.5.0".parse().unwrap(), 24, None) | ||||
|             .await | ||||
|             .unwrap(); | ||||
|  | ||||
|   | ||||
| @@ -1,6 +1,10 @@ | ||||
| use std::net::Ipv4Addr; | ||||
| use std::{io, net::Ipv4Addr}; | ||||
|  | ||||
| use async_trait::async_trait; | ||||
| use winreg::{ | ||||
|     enums::{HKEY_LOCAL_MACHINE, KEY_READ, KEY_WRITE}, | ||||
|     RegKey, | ||||
| }; | ||||
|  | ||||
| use super::{cidr_to_subnet_mask, run_shell_cmd, Error, IfConfiguerTrait}; | ||||
|  | ||||
| @@ -59,16 +63,18 @@ impl IfConfiguerTrait for WindowsIfConfiger { | ||||
|         name: &str, | ||||
|         address: Ipv4Addr, | ||||
|         cidr_prefix: u8, | ||||
|         cost: Option<i32>, | ||||
|     ) -> Result<(), Error> { | ||||
|         let Some(idx) = Self::get_interface_index(name) else { | ||||
|             return Err(Error::NotFound); | ||||
|         }; | ||||
|         run_shell_cmd( | ||||
|             format!( | ||||
|                 "route ADD {} MASK {} 10.1.1.1 IF {} METRIC 9000", | ||||
|                 "route ADD {} MASK {} 10.1.1.1 IF {} METRIC {}", | ||||
|                 address, | ||||
|                 cidr_to_subnet_mask(cidr_prefix), | ||||
|                 idx | ||||
|                 idx, | ||||
|                 cost.unwrap_or(9000) | ||||
|             ) | ||||
|             .as_str(), | ||||
|         ) | ||||
| @@ -164,3 +170,220 @@ impl IfConfiguerTrait for WindowsIfConfiger { | ||||
|         .await | ||||
|     } | ||||
| } | ||||
|  | ||||
| pub struct RegistryManager; | ||||
|  | ||||
| impl RegistryManager { | ||||
|     pub const IPV4_TCPIP_INTERFACE_PREFIX: &str = | ||||
|         r"SYSTEM\CurrentControlSet\Services\Tcpip\Parameters\Interfaces\"; | ||||
|     pub const IPV6_TCPIP_INTERFACE_PREFIX: &str = | ||||
|         r"SYSTEM\CurrentControlSet\Services\Tcpip6\Parameters\Interfaces\"; | ||||
|     pub const NETBT_INTERFACE_PREFIX: &str = | ||||
|         r"SYSTEM\CurrentControlSet\Services\NetBT\Parameters\Interfaces\Tcpip_"; | ||||
|  | ||||
|     pub fn reg_delete_obsoleted_items(dev_name: &str) -> io::Result<()> { | ||||
|         use winreg::{enums::HKEY_LOCAL_MACHINE, enums::KEY_ALL_ACCESS, RegKey}; | ||||
|         let hklm = RegKey::predef(HKEY_LOCAL_MACHINE); | ||||
|         let profiles_key = hklm.open_subkey_with_flags( | ||||
|             "SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\NetworkList\\Profiles", | ||||
|             KEY_ALL_ACCESS, | ||||
|         )?; | ||||
|         let unmanaged_key = hklm.open_subkey_with_flags( | ||||
|             "SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\NetworkList\\Signatures\\Unmanaged", | ||||
|             KEY_ALL_ACCESS, | ||||
|         )?; | ||||
|         // collect subkeys to delete | ||||
|         let mut keys_to_delete = Vec::new(); | ||||
|         let mut keys_to_delete_unmanaged = Vec::new(); | ||||
|         for subkey_name in profiles_key.enum_keys().filter_map(Result::ok) { | ||||
|             let subkey = profiles_key.open_subkey(&subkey_name)?; | ||||
|             // keep profiles whose ProfileName contains "et_" or exactly matches dev_name | ||||
|             match subkey.get_value::<String, _>("ProfileName") { | ||||
|                 Ok(profile_name) => { | ||||
|                     if profile_name.contains("et_") | ||||
|                         || (!dev_name.is_empty() && dev_name == profile_name) | ||||
|                     { | ||||
|                         keys_to_delete.push(subkey_name); | ||||
|                     } | ||||
|                 } | ||||
|                 Err(e) => { | ||||
|                     tracing::error!( | ||||
|                         "Failed to read ProfileName for subkey {}: {}", | ||||
|                         subkey_name, | ||||
|                         e | ||||
|                     ); | ||||
|                 } | ||||
|             } | ||||
|         } | ||||
|         for subkey_name in unmanaged_key.enum_keys().filter_map(Result::ok) { | ||||
|             let subkey = unmanaged_key.open_subkey(&subkey_name)?; | ||||
|             // keep entries whose Description contains "et_" or exactly matches dev_name | ||||
|             match subkey.get_value::<String, _>("Description") { | ||||
|                 Ok(profile_name) => { | ||||
|                     if profile_name.contains("et_") | ||||
|                         || (!dev_name.is_empty() && dev_name == profile_name) | ||||
|                     { | ||||
|                         keys_to_delete_unmanaged.push(subkey_name); | ||||
|                     } | ||||
|                 } | ||||
|                 Err(e) => { | ||||
|                     tracing::error!( | ||||
|                         "Failed to read ProfileName for subkey {}: {}", | ||||
|                         subkey_name, | ||||
|                         e | ||||
|                     ); | ||||
|                 } | ||||
|             } | ||||
|         } | ||||
|         // delete collected subkeys | ||||
|         if !keys_to_delete.is_empty() { | ||||
|             for subkey_name in keys_to_delete { | ||||
|                 match profiles_key.delete_subkey_all(&subkey_name) { | ||||
|                     Ok(_) => tracing::trace!("Successfully deleted subkey: {}", subkey_name), | ||||
|                     Err(e) => tracing::error!("Failed to delete subkey {}: {}", subkey_name, e), | ||||
|                 } | ||||
|             } | ||||
|         } | ||||
|         if !keys_to_delete_unmanaged.is_empty() { | ||||
|             for subkey_name in keys_to_delete_unmanaged { | ||||
|                 match unmanaged_key.delete_subkey_all(&subkey_name) { | ||||
|                     Ok(_) => tracing::trace!("Successfully deleted subkey: {}", subkey_name), | ||||
|                     Err(e) => tracing::error!("Failed to delete subkey {}: {}", subkey_name, e), | ||||
|                 } | ||||
|             } | ||||
|         } | ||||
|         Ok(()) | ||||
|     } | ||||
|  | ||||
|     pub fn reg_change_catrgory_in_profile(dev_name: &str) -> io::Result<()> { | ||||
|         use winreg::{enums::HKEY_LOCAL_MACHINE, enums::KEY_ALL_ACCESS, RegKey}; | ||||
|         let hklm = RegKey::predef(HKEY_LOCAL_MACHINE); | ||||
|         let profiles_key = hklm.open_subkey_with_flags( | ||||
|             "SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\NetworkList\\Profiles", | ||||
|             KEY_ALL_ACCESS, | ||||
|         )?; | ||||
|  | ||||
|         for subkey_name in profiles_key.enum_keys().filter_map(Result::ok) { | ||||
|             let subkey = profiles_key.open_subkey_with_flags(&subkey_name, KEY_ALL_ACCESS)?; | ||||
|             match subkey.get_value::<String, _>("ProfileName") { | ||||
|                 Ok(profile_name) => { | ||||
|                     if !dev_name.is_empty() && dev_name == profile_name { | ||||
|                         match subkey.set_value("Category", &1u32) { | ||||
|                             Ok(_) => tracing::trace!("Successfully set Category in registry"), | ||||
|                             Err(e) => tracing::error!("Failed to set Category in registry: {}", e), | ||||
|                         } | ||||
|                     } | ||||
|                 } | ||||
|                 Err(e) => { | ||||
|                     tracing::error!( | ||||
|                         "Failed to read ProfileName for subkey {}: {}", | ||||
|                         subkey_name, | ||||
|                         e | ||||
|                     ); | ||||
|                 } | ||||
|             } | ||||
|         } | ||||
|         Ok(()) | ||||
|     } | ||||
|  | ||||
|     // Look up the interface GUID by the interface (connection) name | ||||
|     pub fn find_interface_guid(interface_name: &str) -> io::Result<String> { | ||||
|         // Registry path: the root key listing all network interfaces | ||||
|         let network_key_path = | ||||
|             r"SYSTEM\CurrentControlSet\Control\Network\{4D36E972-E325-11CE-BFC1-08002BE10318}"; | ||||
|  | ||||
|         let hklm = RegKey::predef(HKEY_LOCAL_MACHINE); | ||||
|         let network_key = hklm.open_subkey_with_flags(network_key_path, KEY_READ)?; | ||||
|  | ||||
|         // Iterate over every GUID subkey under this path | ||||
|         for guid in network_key.enum_keys().map_while(Result::ok) { | ||||
|             if let Ok(guid_key) = network_key.open_subkey_with_flags(&guid, KEY_READ) { | ||||
|                 // Check whether Connection\Name matches the target interface name | ||||
|                 if let Ok(conn_key) = guid_key.open_subkey_with_flags("Connection", KEY_READ) { | ||||
|                     if let Ok(name) = conn_key.get_value::<String, _>("Name") { | ||||
|                         if name == interface_name { | ||||
|                             return Ok(guid); | ||||
|                         } | ||||
|                     } | ||||
|                 } | ||||
|             } | ||||
|         } | ||||
|  | ||||
|         // No matching interface was found | ||||
|         Err(io::Error::new( | ||||
|             io::ErrorKind::NotFound, | ||||
|             "Interface not found", | ||||
|         )) | ||||
|     } | ||||
|  | ||||
|     // Open the per-interface registry key under the given prefix | ||||
|     pub fn open_interface_key(interface_guid: &str, prefix: &str) -> io::Result<RegKey> { | ||||
|         let path = format!(r"{}{}", prefix, interface_guid); | ||||
|         let hkey_local_machine = RegKey::predef(HKEY_LOCAL_MACHINE); | ||||
|         hkey_local_machine.open_subkey_with_flags(&path, KEY_WRITE) | ||||
|     } | ||||
|  | ||||
|     // Disable dynamic DNS updates. | ||||
|     // disableDynamicUpdates sets the appropriate registry values to prevent the | ||||
|     // Windows DHCP client from sending dynamic DNS updates for our interface to | ||||
|     // AD domain controllers. | ||||
|     pub fn disable_dynamic_updates(interface_guid: &str) -> io::Result<()> { | ||||
|         let prefixes = [ | ||||
|             Self::IPV4_TCPIP_INTERFACE_PREFIX, | ||||
|             Self::IPV6_TCPIP_INTERFACE_PREFIX, | ||||
|         ]; | ||||
|  | ||||
|         for prefix in &prefixes { | ||||
|             let key = match Self::open_interface_key(interface_guid, prefix) { | ||||
|                 Ok(k) => k, | ||||
|                 Err(e) => { | ||||
|                     // Mimic mute-key-not-found-if-closing: skip keys that do not exist | ||||
|                     if matches!(e.kind(), io::ErrorKind::NotFound) { | ||||
|                         continue; | ||||
|                     } else { | ||||
|                         return Err(e); | ||||
|                     } | ||||
|                 } | ||||
|             }; | ||||
|  | ||||
|             key.set_value("RegistrationEnabled", &0u32)?; | ||||
|             key.set_value("DisableDynamicUpdate", &1u32)?; | ||||
|             key.set_value("MaxNumberOfAddressesToRegister", &0u32)?; | ||||
|         } | ||||
|  | ||||
|         Ok(()) | ||||
|     } | ||||
|  | ||||
|     // Set a single DWORD value under the given registry path prefix | ||||
|     fn set_single_dword( | ||||
|         interface_guid: &str, | ||||
|         prefix: &str, | ||||
|         value_name: &str, | ||||
|         data: u32, | ||||
|     ) -> io::Result<()> { | ||||
|         let key = match Self::open_interface_key(interface_guid, prefix) { | ||||
|             Ok(k) => k, | ||||
|             Err(e) => { | ||||
|                 // Mimic the muteKeyNotFoundIfClosing behavior: ignore key-not-found errors | ||||
|                 return if matches!(e.kind(), io::ErrorKind::NotFound) { | ||||
|                     Ok(()) | ||||
|                 } else { | ||||
|                     Err(e) | ||||
|                 }; | ||||
|             } | ||||
|         }; | ||||
|  | ||||
|         key.set_value(value_name, &data)?; | ||||
|         Ok(()) | ||||
|     } | ||||
|  | ||||
|     // Disable NetBIOS name resolution requests | ||||
|     pub fn disable_netbios(interface_guid: &str) -> io::Result<()> { | ||||
|         Self::set_single_dword( | ||||
|             interface_guid, | ||||
|             Self::NETBT_INTERFACE_PREFIX, | ||||
|             "NetbiosOptions", | ||||
|             2, | ||||
|         ) | ||||
|     } | ||||
| } | ||||
|   | ||||
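| // Illustrative sketch (not part of the diff above): how these registry helpers | ||||
| // would typically be chained. `WinRegHelper` is a stand-in name for the type | ||||
| // that owns the associated functions shown above; error handling is simplified. | ||||
| // | ||||
| //     fn harden_interface(name: &str) -> std::io::Result<()> { | ||||
| //         // Resolve the adapter GUID from its friendly name, then tighten the | ||||
| //         // registry so Windows neither registers dynamic DNS records nor | ||||
| //         // answers NetBIOS name queries on this interface. | ||||
| //         let guid = WinRegHelper::find_interface_guid(name)?; | ||||
| //         WinRegHelper::disable_dynamic_updates(&guid)?; | ||||
| //         WinRegHelper::disable_netbios(&guid)?; | ||||
| //         Ok(()) | ||||
| //     } | ||||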
| @@ -4,13 +4,17 @@ use std::{ | ||||
|     io::Write as _, | ||||
|     sync::{Arc, Mutex}, | ||||
| }; | ||||
| use time::util::refresh_tz; | ||||
| use tokio::{task::JoinSet, time::timeout}; | ||||
| use tracing::Instrument; | ||||
|  | ||||
| use crate::{set_global_var, use_global_var}; | ||||
|  | ||||
| pub mod compressor; | ||||
| pub mod config; | ||||
| pub mod constants; | ||||
| pub mod defer; | ||||
| pub mod dns; | ||||
| pub mod error; | ||||
| pub mod global_ctx; | ||||
| pub mod ifcfg; | ||||
| @@ -19,13 +23,12 @@ pub mod network; | ||||
| pub mod scoped_task; | ||||
| pub mod stun; | ||||
| pub mod stun_codec_ext; | ||||
| pub mod token_bucket; | ||||
|  | ||||
| pub fn get_logger_timer<F: time::formatting::Formattable>( | ||||
|     format: F, | ||||
| ) -> tracing_subscriber::fmt::time::OffsetTime<F> { | ||||
|     unsafe { | ||||
|         time::util::local_offset::set_soundness(time::util::local_offset::Soundness::Unsound) | ||||
|     }; | ||||
|     refresh_tz(); | ||||
|     let local_offset = time::UtcOffset::current_local_offset() | ||||
|         .unwrap_or(time::UtcOffset::from_whole_seconds(0).unwrap()); | ||||
|     tracing_subscriber::fmt::time::OffsetTime::new(local_offset, format) | ||||
| @@ -87,7 +90,17 @@ pub fn join_joinset_background<T: Debug + Send + Sync + 'static>( | ||||
|     ); | ||||
| } | ||||
|  | ||||
| pub fn set_default_machine_id(mid: Option<String>) { | ||||
|     set_global_var!(MACHINE_UID, mid); | ||||
| } | ||||
|  | ||||
| pub fn get_machine_id() -> uuid::Uuid { | ||||
|     if let Some(default_mid) = use_global_var!(MACHINE_UID) { | ||||
|         let mut b = [0u8; 16]; | ||||
|         crate::tunnel::generate_digest_from_str("", &default_mid, &mut b); | ||||
|         return uuid::Uuid::from_bytes(b); | ||||
|     } | ||||
|  | ||||
|     // fall back to a machine-id file stored alongside the binary | ||||
|     let machine_id_file = std::env::current_exe() | ||||
|         .map(|x| x.with_file_name("et_machine_id")) | ||||
| @@ -108,6 +121,9 @@ pub fn get_machine_id() -> uuid::Uuid { | ||||
|     ))] | ||||
|     let gen_mid = machine_uid::get() | ||||
|         .map(|x| { | ||||
|             if x.is_empty() { | ||||
|                 return uuid::Uuid::new_v4(); | ||||
|             } | ||||
|             let mut b = [0u8; 16]; | ||||
|             crate::tunnel::generate_digest_from_str("", x.as_str(), &mut b); | ||||
|             uuid::Uuid::from_bytes(b) | ||||
|   | ||||
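| // Illustrative sketch (not part of the diff above): with the new MACHINE_UID | ||||
| // global, a caller can pin the machine id before anything reads it; the UUID | ||||
| // is then derived from a digest of that string instead of the on-disk | ||||
| // `et_machine_id` file or the OS machine id. The id string is just an example. | ||||
| // | ||||
| //     set_default_machine_id(Some("node-01".to_string())); | ||||
| //     let id = get_machine_id(); // deterministic for the same string | ||||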
| @@ -179,18 +179,16 @@ impl IPCollector { | ||||
|                 Self::do_collect_local_ip_addrs(self.net_ns.clone()).await; | ||||
|             let net_ns = self.net_ns.clone(); | ||||
|             let stun_info_collector = self.stun_info_collector.clone(); | ||||
|             task.spawn(async move { | ||||
|                 loop { | ||||
|                     let ip_addrs = Self::do_collect_local_ip_addrs(net_ns.clone()).await; | ||||
|                     *cached_ip_list.write().await = ip_addrs; | ||||
|                     tokio::time::sleep(std::time::Duration::from_secs(CACHED_IP_LIST_TIMEOUT_SEC)) | ||||
|                         .await; | ||||
|                 } | ||||
|             }); | ||||
|  | ||||
|             let cached_ip_list = self.cached_ip_list.clone(); | ||||
|             task.spawn(async move { | ||||
|                 let mut last_fetch_iface_time = std::time::Instant::now(); | ||||
|                 loop { | ||||
|                     if last_fetch_iface_time.elapsed().as_secs() > CACHED_IP_LIST_TIMEOUT_SEC { | ||||
|                         let ifaces = Self::do_collect_local_ip_addrs(net_ns.clone()).await; | ||||
|                         *cached_ip_list.write().await = ifaces; | ||||
|                         last_fetch_iface_time = std::time::Instant::now(); | ||||
|                     } | ||||
|  | ||||
|                     let stun_info = stun_info_collector.get_stun_info(); | ||||
|                     for ip in stun_info.public_ip.iter() { | ||||
|                         let Ok(ip_addr) = ip.parse::<IpAddr>() else { | ||||
| @@ -199,14 +197,20 @@ impl IPCollector { | ||||
|  | ||||
|                         match ip_addr { | ||||
|                             IpAddr::V4(v) => { | ||||
|                                 cached_ip_list.write().await.public_ipv4 = Some(v.into()) | ||||
|                                 cached_ip_list.write().await.public_ipv4.replace(v.into()); | ||||
|                             } | ||||
|                             IpAddr::V6(v) => { | ||||
|                                 cached_ip_list.write().await.public_ipv6 = Some(v.into()) | ||||
|                                 cached_ip_list.write().await.public_ipv6.replace(v.into()); | ||||
|                             } | ||||
|                         } | ||||
|                     } | ||||
|  | ||||
|                     tracing::debug!( | ||||
|                         "got public ip: {:?}, {:?}", | ||||
|                         cached_ip_list.read().await.public_ipv4, | ||||
|                         cached_ip_list.read().await.public_ipv6 | ||||
|                     ); | ||||
|  | ||||
|                     let sleep_sec = if !cached_ip_list.read().await.public_ipv4.is_none() { | ||||
|                         CACHED_IP_LIST_TIMEOUT_SEC | ||||
|                     } else { | ||||
| @@ -217,10 +221,10 @@ impl IPCollector { | ||||
|             }); | ||||
|         } | ||||
|  | ||||
|         return self.cached_ip_list.read().await.deref().clone(); | ||||
|         self.cached_ip_list.read().await.deref().clone() | ||||
|     } | ||||
|  | ||||
|     pub async fn collect_interfaces(net_ns: NetNS) -> Vec<NetworkInterface> { | ||||
|     pub async fn collect_interfaces(net_ns: NetNS, filter: bool) -> Vec<NetworkInterface> { | ||||
|         let _g = net_ns.guard(); | ||||
|         let ifaces = pnet::datalink::interfaces(); | ||||
|         let mut ret = vec![]; | ||||
| @@ -229,7 +233,7 @@ impl IPCollector { | ||||
|                 iface: iface.clone(), | ||||
|             }; | ||||
|  | ||||
|             if !f.filter_iface().await { | ||||
|             if filter && !f.filter_iface().await { | ||||
|                 continue; | ||||
|             } | ||||
|  | ||||
| @@ -243,21 +247,36 @@ impl IPCollector { | ||||
|     async fn do_collect_local_ip_addrs(net_ns: NetNS) -> GetIpListResponse { | ||||
|         let mut ret = GetIpListResponse::default(); | ||||
|  | ||||
|         let ifaces = Self::collect_interfaces(net_ns.clone()).await; | ||||
|         let ifaces = Self::collect_interfaces(net_ns.clone(), true).await; | ||||
|         let _g = net_ns.guard(); | ||||
|         for iface in ifaces { | ||||
|             for ip in iface.ips { | ||||
|                 let ip: std::net::IpAddr = ip.ip(); | ||||
|                 if ip.is_loopback() || ip.is_multicast() { | ||||
|                     continue; | ||||
|                 } | ||||
|                 match ip { | ||||
|                     std::net::IpAddr::V4(v4) => { | ||||
|                         if ip.is_loopback() || ip.is_multicast() { | ||||
|                             continue; | ||||
|                         } | ||||
|                         ret.interface_ipv4s.push(v4.into()); | ||||
|                     } | ||||
|                     _ => {} | ||||
|                 } | ||||
|             } | ||||
|         } | ||||
|  | ||||
|         let ifaces = Self::collect_interfaces(net_ns.clone(), false).await; | ||||
|         let _g = net_ns.guard(); | ||||
|         for iface in ifaces { | ||||
|             for ip in iface.ips { | ||||
|                 let ip: std::net::IpAddr = ip.ip(); | ||||
|                 match ip { | ||||
|                     std::net::IpAddr::V6(v6) => { | ||||
|                         if v6.is_multicast() || v6.is_loopback() || v6.is_unicast_link_local() { | ||||
|                             continue; | ||||
|                         } | ||||
|                         ret.interface_ipv6s.push(v6.into()); | ||||
|                     } | ||||
|                     _ => {} | ||||
|                 } | ||||
|             } | ||||
|         } | ||||
|   | ||||
| @@ -8,8 +8,6 @@ use crate::proto::common::{NatType, StunInfo}; | ||||
| use anyhow::Context; | ||||
| use chrono::Local; | ||||
| use crossbeam::atomic::AtomicCell; | ||||
| use hickory_resolver::config::{NameServerConfig, Protocol, ResolverConfig, ResolverOpts}; | ||||
| use hickory_resolver::TokioAsyncResolver; | ||||
| use rand::seq::IteratorRandom; | ||||
| use tokio::net::{lookup_host, UdpSocket}; | ||||
| use tokio::sync::{broadcast, Mutex}; | ||||
| @@ -22,45 +20,9 @@ use stun_codec::{Message, MessageClass, MessageDecoder, MessageEncoder}; | ||||
|  | ||||
| use crate::common::error::Error; | ||||
|  | ||||
| use super::dns::resolve_txt_record; | ||||
| use super::stun_codec_ext::*; | ||||
|  | ||||
| pub fn get_default_resolver_config() -> ResolverConfig { | ||||
|     let mut default_resolve_config = ResolverConfig::new(); | ||||
|     default_resolve_config.add_name_server(NameServerConfig::new( | ||||
|         "223.5.5.5:53".parse().unwrap(), | ||||
|         Protocol::Udp, | ||||
|     )); | ||||
|     default_resolve_config.add_name_server(NameServerConfig::new( | ||||
|         "180.184.1.1:53".parse().unwrap(), | ||||
|         Protocol::Udp, | ||||
|     )); | ||||
|     default_resolve_config | ||||
| } | ||||
|  | ||||
| pub async fn resolve_txt_record( | ||||
|     domain_name: &str, | ||||
|     resolver: &TokioAsyncResolver, | ||||
| ) -> Result<String, Error> { | ||||
|     let response = resolver.txt_lookup(domain_name).await.with_context(|| { | ||||
|         format!( | ||||
|             "txt_lookup failed, domain_name: {}", | ||||
|             domain_name.to_string() | ||||
|         ) | ||||
|     })?; | ||||
|  | ||||
|     let txt_record = response.iter().next().with_context(|| { | ||||
|         format!( | ||||
|             "no txt record found, domain_name: {}", | ||||
|             domain_name.to_string() | ||||
|         ) | ||||
|     })?; | ||||
|  | ||||
|     let txt_data = String::from_utf8_lossy(&txt_record.txt_data()[0]); | ||||
|     tracing::info!(?txt_data, ?domain_name, "get txt record"); | ||||
|  | ||||
|     Ok(txt_data.to_string()) | ||||
| } | ||||
|  | ||||
| struct HostResolverIter { | ||||
|     hostnames: Vec<String>, | ||||
|     ips: Vec<SocketAddr>, | ||||
| @@ -79,10 +41,7 @@ impl HostResolverIter { | ||||
|     } | ||||
|  | ||||
|     async fn get_txt_record(domain_name: &str) -> Result<Vec<String>, Error> { | ||||
|         let resolver = TokioAsyncResolver::tokio_from_system_conf().unwrap_or( | ||||
|             TokioAsyncResolver::tokio(get_default_resolver_config(), ResolverOpts::default()), | ||||
|         ); | ||||
|         let txt_data = resolve_txt_record(domain_name, &resolver).await?; | ||||
|         let txt_data = resolve_txt_record(domain_name).await?; | ||||
|         Ok(txt_data.split(" ").map(|x| x.to_string()).collect()) | ||||
|     } | ||||
|  | ||||
| @@ -802,7 +761,10 @@ impl StunInfoCollector { | ||||
|     async fn get_public_ipv6(servers: &Vec<String>) -> Option<Ipv6Addr> { | ||||
|         let mut ips = HostResolverIter::new(servers.to_vec(), 10, true); | ||||
|         while let Some(ip) = ips.next().await { | ||||
|             let udp = Arc::new(UdpSocket::bind(format!("[::]:0")).await.unwrap()); | ||||
|             let Ok(udp_socket) = UdpSocket::bind(format!("[::]:0")).await else { | ||||
|                 break; | ||||
|             }; | ||||
|             let udp = Arc::new(udp_socket); | ||||
|             let ret = StunClientBuilder::new(udp.clone()) | ||||
|                 .new_stun_client(ip) | ||||
|                 .bind_request(false, false) | ||||
| @@ -928,7 +890,7 @@ impl StunInfoCollectorTrait for MockStunInfoCollector { | ||||
|             last_update_time: std::time::Instant::now().elapsed().as_secs() as i64, | ||||
|             min_port: 100, | ||||
|             max_port: 200, | ||||
|             public_ip: vec!["127.0.0.1".to_string()], | ||||
|             public_ip: vec!["127.0.0.1".to_string(), "::1".to_string()], | ||||
|         } | ||||
|     } | ||||
|  | ||||
| @@ -993,9 +955,18 @@ mod tests { | ||||
|     async fn test_txt_public_stun_server() { | ||||
|         let stun_servers = vec!["txt:stun.easytier.cn".to_string()]; | ||||
|         let detector = UdpNatTypeDetector::new(stun_servers, 1); | ||||
|         let ret = detector.detect_nat_type(0).await; | ||||
|         println!("{:#?}, {:?}", ret, ret.as_ref().unwrap().nat_type()); | ||||
|         assert!(!ret.unwrap().stun_resps.is_empty()); | ||||
|         for _ in 0..5 { | ||||
|             let ret = detector.detect_nat_type(0).await; | ||||
|             println!("{:#?}, {:?}", ret, ret.as_ref().unwrap().nat_type()); | ||||
|             if ret.is_ok() { | ||||
|                 assert!(!ret.unwrap().stun_resps.is_empty()); | ||||
|                 return; | ||||
|             } | ||||
|         } | ||||
|         debug_assert!( | ||||
|             false, | ||||
|             "should not reach here, stun server should be available" | ||||
|         ); | ||||
|     } | ||||
|  | ||||
|     #[tokio::test] | ||||
|   | ||||
							
								
								
									
easytier/src/common/token_bucket.rs (new file, 312 lines added)
							| @@ -0,0 +1,312 @@ | ||||
| use atomic_shim::AtomicU64; | ||||
| use dashmap::DashMap; | ||||
| use std::sync::atomic::Ordering; | ||||
| use std::sync::{Arc, Mutex}; | ||||
| use std::time::{Duration, Instant}; | ||||
| use tokio::time; | ||||
|  | ||||
| use crate::common::scoped_task::ScopedTask; | ||||
| use crate::proto::common::LimiterConfig; | ||||
|  | ||||
| /// Token Bucket rate limiter using atomic operations | ||||
| pub struct TokenBucket { | ||||
|     available_tokens: AtomicU64, // Current token count (atomic) | ||||
|     last_refill_time: AtomicU64, // Last refill time as micros since epoch | ||||
|     config: BucketConfig,        // Immutable configuration | ||||
|     refill_task: Mutex<Option<ScopedTask<()>>>, // Background refill task | ||||
|     start_time: Instant,         // Bucket creation time | ||||
| } | ||||
|  | ||||
| #[derive(Clone, Copy)] | ||||
| pub struct BucketConfig { | ||||
|     capacity: u64,             // Maximum token capacity | ||||
|     fill_rate: u64,            // Tokens added per second | ||||
|     refill_interval: Duration, // Time between refill operations | ||||
| } | ||||
|  | ||||
| impl From<LimiterConfig> for BucketConfig { | ||||
|     fn from(cfg: LimiterConfig) -> Self { | ||||
|         let burst_rate = 1.max(cfg.burst_rate.unwrap_or(1)); | ||||
|         let fill_rate = 8196.max(cfg.bps.unwrap_or(u64::MAX / burst_rate)); | ||||
|         let refill_interval = cfg | ||||
|             .fill_duration_ms | ||||
|             .map(|x| Duration::from_millis(1.max(x))) | ||||
|             .unwrap_or(Duration::from_millis(10)); | ||||
|         BucketConfig { | ||||
|             capacity: burst_rate * fill_rate, | ||||
|             fill_rate: fill_rate, | ||||
|             refill_interval, | ||||
|         } | ||||
|     } | ||||
| } | ||||
|  | ||||
| impl TokenBucket { | ||||
|     pub fn new(capacity: u64, bps: u64, refill_interval: Duration) -> Arc<Self> { | ||||
|         let config = BucketConfig { | ||||
|             capacity, | ||||
|             fill_rate: bps, | ||||
|             refill_interval, | ||||
|         }; | ||||
|         Self::new_from_cfg(config) | ||||
|     } | ||||
|  | ||||
|     /// Creates a new Token Bucket rate limiter from a `BucketConfig` | ||||
|     /// | ||||
|     /// The config carries the bucket `capacity` in bytes, the `fill_rate` in | ||||
|     /// bytes per second, and the `refill_interval` (10-50ms is recommended). | ||||
|     pub fn new_from_cfg(config: BucketConfig) -> Arc<Self> { | ||||
|         // Create Arc instance with placeholder task | ||||
|         let arc_self = Arc::new(Self { | ||||
|             available_tokens: AtomicU64::new(config.capacity), | ||||
|             last_refill_time: AtomicU64::new(0), | ||||
|             config, | ||||
|             refill_task: Mutex::new(None), | ||||
|             start_time: std::time::Instant::now(), | ||||
|         }); | ||||
|  | ||||
|         // Start background refill task | ||||
|         let arc_clone = arc_self.clone(); | ||||
|         let refill_task = tokio::spawn(async move { | ||||
|             let mut interval = time::interval(arc_clone.config.refill_interval); | ||||
|             loop { | ||||
|                 interval.tick().await; | ||||
|                 arc_clone.refill(); | ||||
|             } | ||||
|         }); | ||||
|  | ||||
|         // Replace placeholder task with actual one | ||||
|         arc_self | ||||
|             .refill_task | ||||
|             .lock() | ||||
|             .unwrap() | ||||
|             .replace(refill_task.into()); | ||||
|         arc_self | ||||
|     } | ||||
|  | ||||
|     /// Internal refill method (called only by background task) | ||||
|     fn refill(&self) { | ||||
|         let now_micros = self.elapsed_micros(); | ||||
|         let prev_time = self.last_refill_time.swap(now_micros, Ordering::Acquire); | ||||
|  | ||||
|         // Calculate elapsed time in seconds | ||||
|         let elapsed_secs = (now_micros.saturating_sub(prev_time)) as f64 / 1_000_000.0; | ||||
|  | ||||
|         // Calculate tokens to add | ||||
|         let tokens_to_add = (self.config.fill_rate as f64 * elapsed_secs) as u64; | ||||
|         if tokens_to_add == 0 { | ||||
|             return; | ||||
|         } | ||||
|  | ||||
|         // Add tokens without exceeding capacity | ||||
|         let mut current = self.available_tokens.load(Ordering::Relaxed); | ||||
|         loop { | ||||
|             let new = current | ||||
|                 .saturating_add(tokens_to_add) | ||||
|                 .min(self.config.capacity); | ||||
|             match self.available_tokens.compare_exchange_weak( | ||||
|                 current, | ||||
|                 new, | ||||
|                 Ordering::Release, | ||||
|                 Ordering::Relaxed, | ||||
|             ) { | ||||
|                 Ok(_) => break, | ||||
|                 Err(actual) => current = actual, | ||||
|             } | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     /// Calculate microseconds since bucket creation | ||||
|     fn elapsed_micros(&self) -> u64 { | ||||
|         self.start_time.elapsed().as_micros() as u64 | ||||
|     } | ||||
|  | ||||
|     /// Attempt to consume tokens without blocking | ||||
|     /// | ||||
|     /// # Returns | ||||
|     /// `true` if tokens were consumed, `false` if insufficient tokens | ||||
|     pub fn try_consume(&self, tokens: u64) -> bool { | ||||
|         // Fast path for oversized packets | ||||
|         if tokens > self.config.capacity { | ||||
|             return false; | ||||
|         } | ||||
|  | ||||
|         let mut current = self.available_tokens.load(Ordering::Relaxed); | ||||
|         loop { | ||||
|             if current < tokens { | ||||
|                 return false; | ||||
|             } | ||||
|  | ||||
|             let new = current - tokens; | ||||
|             match self.available_tokens.compare_exchange_weak( | ||||
|                 current, | ||||
|                 new, | ||||
|                 Ordering::AcqRel, | ||||
|                 Ordering::Relaxed, | ||||
|             ) { | ||||
|                 Ok(_) => return true, | ||||
|                 Err(actual) => current = actual, | ||||
|             } | ||||
|         } | ||||
|     } | ||||
| } | ||||
|  | ||||
| pub struct TokenBucketManager { | ||||
|     buckets: Arc<DashMap<String, Arc<TokenBucket>>>, | ||||
|  | ||||
|     retain_task: ScopedTask<()>, | ||||
| } | ||||
|  | ||||
| impl TokenBucketManager { | ||||
|     /// Creates a new TokenBucketManager | ||||
|     pub fn new() -> Self { | ||||
|         let buckets = Arc::new(DashMap::new()); | ||||
|  | ||||
|         let buckets_clone = buckets.clone(); | ||||
|         let retain_task = tokio::spawn(async move { | ||||
|             loop { | ||||
|                 // Retain only buckets that are still in use | ||||
|                 buckets_clone.retain(|_, bucket| Arc::<TokenBucket>::strong_count(bucket) <= 1); | ||||
|                 // Sleep for a while before next retention check | ||||
|                 tokio::time::sleep(Duration::from_secs(60)).await; | ||||
|             } | ||||
|         }); | ||||
|  | ||||
|         Self { | ||||
|             buckets, | ||||
|             retain_task: retain_task.into(), | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     /// Get or create a token bucket for the given key | ||||
|     pub fn get_or_create(&self, key: &str, cfg: BucketConfig) -> Arc<TokenBucket> { | ||||
|         self.buckets | ||||
|             .entry(key.to_string()) | ||||
|             .or_insert_with(|| TokenBucket::new_from_cfg(cfg)) | ||||
|             .clone() | ||||
|     } | ||||
| } | ||||
|  | ||||
| #[cfg(test)] | ||||
| mod tests { | ||||
|     use super::*; | ||||
|     use tokio::time::{sleep, Duration}; | ||||
|  | ||||
|     /// Test initial state after creation | ||||
|     #[tokio::test] | ||||
|     async fn test_initial_state() { | ||||
|         let bucket = TokenBucket::new(1000, 1000, Duration::from_millis(10)); | ||||
|  | ||||
|         // Should have full capacity initially | ||||
|         assert!(bucket.try_consume(1000)); | ||||
|         assert!(!bucket.try_consume(1)); // Should be empty now | ||||
|     } | ||||
|  | ||||
|     /// Test token consumption behavior | ||||
|     #[tokio::test] | ||||
|     async fn test_consumption() { | ||||
|         let bucket = TokenBucket::new(1500, 1000, Duration::from_millis(10)); | ||||
|  | ||||
|         // First packet should succeed | ||||
|         assert!(bucket.try_consume(1000)); | ||||
|  | ||||
|         // Second packet should fail (only 500 left) | ||||
|         assert!(!bucket.try_consume(600)); | ||||
|  | ||||
|         // Should be able to take remaining tokens | ||||
|         assert!(bucket.try_consume(500)); | ||||
|     } | ||||
|  | ||||
|     /// Test background refill functionality | ||||
|     #[tokio::test] | ||||
|     async fn test_refill() { | ||||
|         let bucket = TokenBucket::new(1000, 1000, Duration::from_millis(10)); | ||||
|  | ||||
|         // Drain the bucket | ||||
|         assert!(bucket.try_consume(1000)); | ||||
|         assert!(!bucket.try_consume(1)); | ||||
|  | ||||
|         // Wait for refill (1 refill interval + buffer) | ||||
|         sleep(Duration::from_millis(25)).await; | ||||
|  | ||||
|         // Should have approximately 20 tokens (1000 tokens/s * 0.02s) | ||||
|         assert!(bucket.try_consume(15)); | ||||
|         assert!(!bucket.try_consume(10)); // But not full capacity | ||||
|     } | ||||
|  | ||||
|     /// Test capacity enforcement | ||||
|     #[tokio::test] | ||||
|     async fn test_capacity_limit() { | ||||
|         let bucket = TokenBucket::new(500, 1000, Duration::from_millis(10)); | ||||
|  | ||||
|         // Wait longer than refill interval | ||||
|         sleep(Duration::from_millis(50)).await; | ||||
|  | ||||
|         // Should not exceed capacity despite time passed | ||||
|         assert!(bucket.try_consume(500)); | ||||
|         assert!(!bucket.try_consume(1)); | ||||
|     } | ||||
|  | ||||
|     /// Test high load with concurrent access | ||||
|     #[tokio::test] | ||||
|     async fn test_concurrent_access() { | ||||
|         let bucket = TokenBucket::new(10_000, 1_000_000, Duration::from_millis(10)); | ||||
|         let mut handles = vec![]; | ||||
|  | ||||
|         // Spawn 100 tasks to consume tokens concurrently | ||||
|         for _ in 0..100 { | ||||
|             let bucket = bucket.clone(); | ||||
|             handles.push(tokio::spawn(async move { | ||||
|                 for _ in 0..100 { | ||||
|                     let _ = bucket.try_consume(10); | ||||
|                 } | ||||
|             })); | ||||
|         } | ||||
|  | ||||
|         // Wait for all tasks to complete | ||||
|         for handle in handles { | ||||
|             handle.await.unwrap(); | ||||
|         } | ||||
|  | ||||
|         // Verify we didn't exceed capacity | ||||
|         let tokens_left = bucket.available_tokens.load(Ordering::Relaxed); | ||||
|         assert!( | ||||
|             tokens_left <= 10_000, | ||||
|             "Tokens exceeded capacity: {}", | ||||
|             tokens_left | ||||
|         ); | ||||
|     } | ||||
|  | ||||
|     /// Test behavior when packet size exceeds capacity | ||||
|     #[tokio::test] | ||||
|     async fn test_oversized_packet() { | ||||
|         let bucket = TokenBucket::new(1500, 1000, Duration::from_millis(10)); | ||||
|  | ||||
|         // Packet larger than capacity should be rejected | ||||
|         assert!(!bucket.try_consume(1600)); | ||||
|  | ||||
|         // Regular packets should still work | ||||
|         assert!(bucket.try_consume(1000)); | ||||
|     } | ||||
|  | ||||
|     /// Test refill precision with small intervals | ||||
|     #[tokio::test] | ||||
|     async fn test_refill_precision() { | ||||
|         let bucket = TokenBucket::new(10_000, 10_000, Duration::from_micros(100)); // 100μs interval | ||||
|  | ||||
|         // Drain most tokens | ||||
|         assert!(bucket.try_consume(9900)); | ||||
|  | ||||
|         // Wait for multiple refills | ||||
|         sleep(Duration::from_millis(1)).await; | ||||
|  | ||||
|         // Roughly 100 tokens left over plus ~10 refilled (10,000 tokens/s * 0.001 s) | ||||
|         let tokens = bucket.available_tokens.load(Ordering::Relaxed); | ||||
|         assert!( | ||||
|             tokens >= 100 && tokens <= 200, | ||||
|             "Unexpected token count: {}", | ||||
|             tokens | ||||
|         ); | ||||
|     } | ||||
| } | ||||
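| // Illustrative usage sketch for the new rate limiter (not part of the file | ||||
| // above); the numbers are examples only. In practice a shared bucket per peer | ||||
| // key would usually come from TokenBucketManager::get_or_create rather than | ||||
| // being built directly. | ||||
| // | ||||
| //     // 1 MB burst capacity, refilled at 512 KB/s in 10 ms steps by the | ||||
| //     // background task spawned inside new(). | ||||
| //     let bucket = TokenBucket::new(1_000_000, 512_000, Duration::from_millis(10)); | ||||
| //     if bucket.try_consume(1400) { | ||||
| //         // enough tokens for this packet: let it through | ||||
| //     } else { | ||||
| //         // rate limit hit: drop or queue the packet | ||||
| //     } | ||||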
| @@ -12,29 +12,31 @@ use std::{ | ||||
| }; | ||||
|  | ||||
| use crate::{ | ||||
|     common::{error::Error, global_ctx::ArcGlobalCtx, PeerId}, | ||||
|     common::{error::Error, global_ctx::ArcGlobalCtx, stun::StunInfoCollectorTrait, PeerId}, | ||||
|     peers::{ | ||||
|         peer_manager::PeerManager, peer_rpc::PeerRpcManager, | ||||
|         peer_conn::PeerConnId, | ||||
|         peer_manager::PeerManager, | ||||
|         peer_rpc::PeerRpcManager, | ||||
|         peer_rpc_service::DirectConnectorManagerRpcServer, | ||||
|         peer_task::{PeerTaskLauncher, PeerTaskManager}, | ||||
|     }, | ||||
|     proto::{ | ||||
|         peer_rpc::{ | ||||
|             DirectConnectorRpc, DirectConnectorRpcClientFactory, DirectConnectorRpcServer, | ||||
|             GetIpListRequest, GetIpListResponse, | ||||
|             GetIpListRequest, GetIpListResponse, SendV6HolePunchPacketRequest, | ||||
|         }, | ||||
|         rpc_types::controller::BaseController, | ||||
|     }, | ||||
|     tunnel::IpVersion, | ||||
|     tunnel::{udp::UdpTunnelConnector, IpVersion}, | ||||
| }; | ||||
|  | ||||
| use crate::proto::cli::PeerConnInfo; | ||||
| use anyhow::Context; | ||||
| use rand::Rng; | ||||
| use tokio::{task::JoinSet, time::timeout}; | ||||
| use tracing::Instrument; | ||||
| use tokio::{net::UdpSocket, task::JoinSet, time::timeout}; | ||||
| use url::Host; | ||||
|  | ||||
| use super::create_connector_by_url; | ||||
| use super::{create_connector_by_url, udp_hole_punch}; | ||||
|  | ||||
| pub const DIRECT_CONNECTOR_SERVICE_ID: u32 = 1; | ||||
| pub const DIRECT_CONNECTOR_BLACKLIST_TIMEOUT_SEC: u64 = 300; | ||||
| @@ -77,7 +79,7 @@ impl PeerManagerForDirectConnector for PeerManager { | ||||
| struct DstBlackListItem(PeerId, String); | ||||
|  | ||||
| #[derive(Hash, Eq, PartialEq, Clone)] | ||||
| struct DstListenerUrlBlackListItem(PeerId, url::Url); | ||||
| struct DstListenerUrlBlackListItem(PeerId, String); | ||||
|  | ||||
| struct DirectConnectorManagerData { | ||||
|     global_ctx: ArcGlobalCtx, | ||||
| @@ -93,95 +95,114 @@ impl DirectConnectorManagerData { | ||||
|             dst_listener_blacklist: timedmap::TimedMap::new(), | ||||
|         } | ||||
|     } | ||||
| } | ||||
|  | ||||
| impl std::fmt::Debug for DirectConnectorManagerData { | ||||
|     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { | ||||
|         f.debug_struct("DirectConnectorManagerData") | ||||
|             .field("peer_manager", &self.peer_manager) | ||||
|             .finish() | ||||
|     } | ||||
| } | ||||
|     async fn remote_send_v6_hole_punch_packet( | ||||
|         &self, | ||||
|         dst_peer_id: PeerId, | ||||
|         local_socket: &UdpSocket, | ||||
|         remote_url: &url::Url, | ||||
|     ) -> Result<(), Error> { | ||||
|         let global_ctx = self.peer_manager.get_global_ctx(); | ||||
|         let listener_port = remote_url.port().ok_or(anyhow::anyhow!( | ||||
|             "failed to parse port from remote url: {}", | ||||
|             remote_url | ||||
|         ))?; | ||||
|         let connector_ip = global_ctx | ||||
|             .get_stun_info_collector() | ||||
|             .get_stun_info() | ||||
|             .public_ip | ||||
|             .iter() | ||||
|             .find(|x| x.contains(":")) | ||||
|             .ok_or(anyhow::anyhow!( | ||||
|                 "failed to get public ipv6 address from stun info" | ||||
|             ))? | ||||
|             .parse::<std::net::Ipv6Addr>() | ||||
|             .with_context(|| { | ||||
|                 format!( | ||||
|                     "failed to parse public ipv6 address from stun info: {:?}", | ||||
|                     global_ctx.get_stun_info_collector().get_stun_info() | ||||
|                 ) | ||||
|             })?; | ||||
|         let connector_addr = SocketAddr::new( | ||||
|             std::net::IpAddr::V6(connector_ip), | ||||
|             local_socket.local_addr()?.port(), | ||||
|         ); | ||||
|  | ||||
| pub struct DirectConnectorManager { | ||||
|     global_ctx: ArcGlobalCtx, | ||||
|     data: Arc<DirectConnectorManagerData>, | ||||
|  | ||||
|     tasks: JoinSet<()>, | ||||
| } | ||||
|  | ||||
| impl DirectConnectorManager { | ||||
|     pub fn new(global_ctx: ArcGlobalCtx, peer_manager: Arc<PeerManager>) -> Self { | ||||
|         Self { | ||||
|             global_ctx: global_ctx.clone(), | ||||
|             data: Arc::new(DirectConnectorManagerData::new(global_ctx, peer_manager)), | ||||
|             tasks: JoinSet::new(), | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     pub fn run(&mut self) { | ||||
|         if self.global_ctx.get_flags().disable_p2p { | ||||
|             return; | ||||
|         } | ||||
|  | ||||
|         self.run_as_server(); | ||||
|         self.run_as_client(); | ||||
|     } | ||||
|  | ||||
|     pub fn run_as_server(&mut self) { | ||||
|         self.data | ||||
|         let rpc_stub = self | ||||
|             .peer_manager | ||||
|             .get_peer_rpc_mgr() | ||||
|             .rpc_server() | ||||
|             .registry() | ||||
|             .register( | ||||
|                 DirectConnectorRpcServer::new(DirectConnectorManagerRpcServer::new( | ||||
|                     self.global_ctx.clone(), | ||||
|                 )), | ||||
|                 &self.data.global_ctx.get_network_name(), | ||||
|             ); | ||||
|     } | ||||
|  | ||||
|     pub fn run_as_client(&mut self) { | ||||
|         let data = self.data.clone(); | ||||
|         let my_peer_id = self.data.peer_manager.my_peer_id(); | ||||
|         self.tasks.spawn( | ||||
|             async move { | ||||
|                 loop { | ||||
|                     let peers = data.peer_manager.list_peers().await; | ||||
|                     let mut tasks = JoinSet::new(); | ||||
|                     for peer_id in peers { | ||||
|                         if peer_id == my_peer_id | ||||
|                             || data.peer_manager.has_directly_connected_conn(peer_id) | ||||
|                         { | ||||
|                             continue; | ||||
|                         } | ||||
|                         tasks.spawn(Self::do_try_direct_connect(data.clone(), peer_id)); | ||||
|                     } | ||||
|  | ||||
|                     while let Some(task_ret) = tasks.join_next().await { | ||||
|                         tracing::debug!(?task_ret, ?my_peer_id, "direct connect task ret"); | ||||
|                     } | ||||
|                     tokio::time::sleep(std::time::Duration::from_secs(5)).await; | ||||
|                 } | ||||
|             } | ||||
|             .instrument( | ||||
|                 tracing::info_span!("direct_connector_client", my_id = ?self.global_ctx.id), | ||||
|             ), | ||||
|             .rpc_client() | ||||
|             .scoped_client::<DirectConnectorRpcClientFactory<BaseController>>( | ||||
|             self.peer_manager.my_peer_id(), | ||||
|             dst_peer_id, | ||||
|             global_ctx.get_network_name(), | ||||
|         ); | ||||
|  | ||||
|         rpc_stub | ||||
|             .send_v6_hole_punch_packet( | ||||
|                 BaseController::default(), | ||||
|                 SendV6HolePunchPacketRequest { | ||||
|                     listener_port: listener_port as u32, | ||||
|                     connector_addr: Some(connector_addr.into()), | ||||
|                 }, | ||||
|             ) | ||||
|             .await | ||||
|             .with_context(|| { | ||||
|                 format!( | ||||
|                     "do rpc, send v6 hole punch packet to peer {} at {}", | ||||
|                     dst_peer_id, remote_url | ||||
|                 ) | ||||
|             })?; | ||||
|  | ||||
|         Ok(()) | ||||
|     } | ||||
|  | ||||
|     async fn do_try_connect_to_ip( | ||||
|         data: Arc<DirectConnectorManagerData>, | ||||
|     async fn connect_to_public_ipv6( | ||||
|         &self, | ||||
|         dst_peer_id: PeerId, | ||||
|         addr: String, | ||||
|     ) -> Result<(), Error> { | ||||
|         let connector = create_connector_by_url(&addr, &data.global_ctx, IpVersion::Both).await?; | ||||
|         let (peer_id, conn_id) = timeout( | ||||
|             std::time::Duration::from_secs(3), | ||||
|             data.peer_manager.try_direct_connect(connector), | ||||
|         remote_url: &url::Url, | ||||
|     ) -> Result<(PeerId, PeerConnId), Error> { | ||||
|         let local_socket = Arc::new( | ||||
|             UdpSocket::bind("[::]:0") | ||||
|                 .await | ||||
|                 .with_context(|| format!("failed to bind local socket for {}", remote_url))?, | ||||
|         ); | ||||
|  | ||||
|         // Ask the remote peer to send a v6 hole-punch packet; regardless of | ||||
|         // the result, continue with the connection attempt. | ||||
|         let _ = self | ||||
|             .remote_send_v6_hole_punch_packet(dst_peer_id, &local_socket, &remote_url) | ||||
|             .await; | ||||
|  | ||||
|         let udp_connector = UdpTunnelConnector::new(remote_url.clone()); | ||||
|         let remote_addr = super::check_scheme_and_get_socket_addr::<SocketAddr>( | ||||
|             &remote_url, | ||||
|             "udp", | ||||
|             IpVersion::V6, | ||||
|         ) | ||||
|         .await??; | ||||
|         .await?; | ||||
|         let ret = udp_connector | ||||
|             .try_connect_with_socket(local_socket, remote_addr) | ||||
|             .await?; | ||||
|  | ||||
|         // NOTICE: must add as directly connected tunnel | ||||
|         self.peer_manager.add_client_tunnel(ret, true).await | ||||
|     } | ||||
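|     // Informal summary of connect_to_public_ipv6 above (drawn from the code | ||||
|     // itself, no new behavior): bind a local UDP socket on [::]:0; ask the | ||||
|     // remote peer via RPC to send a hole-punch packet from its listener port | ||||
|     // toward our public IPv6 address and this socket's port, which opens the | ||||
|     // remote-side firewall state; then connect the UDP tunnel over the same | ||||
|     // socket and register the result as a directly connected tunnel. | ||||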
|  | ||||
|     async fn do_try_connect_to_ip(&self, dst_peer_id: PeerId, addr: String) -> Result<(), Error> { | ||||
|         let connector = create_connector_by_url(&addr, &self.global_ctx, IpVersion::Both).await?; | ||||
|         let remote_url = connector.remote_url(); | ||||
|         let (peer_id, conn_id) = | ||||
|             if remote_url.scheme() == "udp" && matches!(remote_url.host(), Some(Host::Ipv6(_))) { | ||||
|                 self.connect_to_public_ipv6(dst_peer_id, &remote_url) | ||||
|                     .await? | ||||
|             } else { | ||||
|                 timeout( | ||||
|                     std::time::Duration::from_secs(3), | ||||
|                     self.peer_manager.try_direct_connect(connector), | ||||
|                 ) | ||||
|                 .await?? | ||||
|             }; | ||||
|  | ||||
|         if peer_id != dst_peer_id && !TESTING.load(Ordering::Relaxed) { | ||||
|             tracing::info!( | ||||
| @@ -190,7 +211,7 @@ impl DirectConnectorManager { | ||||
|                 dst_peer_id, | ||||
|                 peer_id | ||||
|             ); | ||||
|             data.peer_manager | ||||
|             self.peer_manager | ||||
|                 .get_peer_map() | ||||
|                 .close_peer_conn(peer_id, &conn_id) | ||||
|                 .await?; | ||||
| @@ -200,21 +221,44 @@ impl DirectConnectorManager { | ||||
|         Ok(()) | ||||
|     } | ||||
|  | ||||
|     #[tracing::instrument] | ||||
|     #[tracing::instrument(skip(self))] | ||||
|     async fn try_connect_to_ip( | ||||
|         data: Arc<DirectConnectorManagerData>, | ||||
|         self: Arc<DirectConnectorManagerData>, | ||||
|         dst_peer_id: PeerId, | ||||
|         addr: String, | ||||
|     ) -> Result<(), Error> { | ||||
|         let mut rand_gen = rand::rngs::OsRng::default(); | ||||
|         let backoff_ms = vec![1000, 2000]; | ||||
|         let backoff_ms = vec![1000, 2000, 4000]; | ||||
|         let mut backoff_idx = 0; | ||||
|  | ||||
|         tracing::debug!(?dst_peer_id, ?addr, "try_connect_to_ip start"); | ||||
|  | ||||
|         self.dst_listener_blacklist.cleanup(); | ||||
|  | ||||
|         if self | ||||
|             .dst_listener_blacklist | ||||
|             .contains(&DstListenerUrlBlackListItem( | ||||
|                 dst_peer_id.clone(), | ||||
|                 addr.clone(), | ||||
|             )) | ||||
|         { | ||||
|             return Err(Error::UrlInBlacklist); | ||||
|         } | ||||
|  | ||||
|         loop { | ||||
|             let ret = Self::do_try_connect_to_ip(data.clone(), dst_peer_id, addr.clone()).await; | ||||
|             if self.peer_manager.has_directly_connected_conn(dst_peer_id) { | ||||
|                 return Ok(()); | ||||
|             } | ||||
|  | ||||
|             tracing::debug!(?dst_peer_id, ?addr, "try_connect_to_ip start one round"); | ||||
|             let ret = self.do_try_connect_to_ip(dst_peer_id, addr.clone()).await; | ||||
|             tracing::debug!(?ret, ?dst_peer_id, ?addr, "try_connect_to_ip return"); | ||||
|             if matches!(ret, Err(Error::UrlInBlacklist) | Ok(_)) { | ||||
|                 return ret; | ||||
|             if ret.is_ok() { | ||||
|                 return Ok(()); | ||||
|             } | ||||
|  | ||||
|             if self.peer_manager.has_directly_connected_conn(dst_peer_id) { | ||||
|                 return Ok(()); | ||||
|             } | ||||
|  | ||||
|             if backoff_idx < backoff_ms.len() { | ||||
| @@ -230,49 +274,29 @@ impl DirectConnectorManager { | ||||
|                 backoff_idx += 1; | ||||
|                 continue; | ||||
|             } else { | ||||
|                 self.dst_listener_blacklist.insert( | ||||
|                     DstListenerUrlBlackListItem(dst_peer_id.clone(), addr), | ||||
|                     (), | ||||
|                     std::time::Duration::from_secs(DIRECT_CONNECTOR_BLACKLIST_TIMEOUT_SEC), | ||||
|                 ); | ||||
|                 return ret; | ||||
|             } | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     #[tracing::instrument] | ||||
|     async fn do_try_direct_connect_internal( | ||||
|         data: Arc<DirectConnectorManagerData>, | ||||
|     fn spawn_direct_connect_task( | ||||
|         self: &Arc<DirectConnectorManagerData>, | ||||
|         dst_peer_id: PeerId, | ||||
|         ip_list: GetIpListResponse, | ||||
|     ) -> Result<(), Error> { | ||||
|         data.dst_listener_blacklist.cleanup(); | ||||
|  | ||||
|         let enable_ipv6 = data.global_ctx.get_flags().enable_ipv6; | ||||
|         let available_listeners = ip_list | ||||
|             .listeners | ||||
|             .into_iter() | ||||
|             .map(Into::<url::Url>::into) | ||||
|             .filter_map(|l| if l.scheme() != "ring" { Some(l) } else { None }) | ||||
|             .filter(|l| l.port().is_some() && l.host().is_some()) | ||||
|             .filter(|l| { | ||||
|                 !data | ||||
|                     .dst_listener_blacklist | ||||
|                     .contains(&DstListenerUrlBlackListItem(dst_peer_id.clone(), l.clone())) | ||||
|             }) | ||||
|             .filter(|l| enable_ipv6 || !matches!(l.host().unwrap().to_owned(), Host::Ipv6(_))) | ||||
|             .collect::<Vec<_>>(); | ||||
|  | ||||
|         tracing::debug!(?available_listeners, "got available listeners"); | ||||
|  | ||||
|         if available_listeners.is_empty() { | ||||
|             return Err(anyhow::anyhow!("peer {} have no valid listener", dst_peer_id).into()); | ||||
|         } | ||||
|  | ||||
|         // if have default listener, use it first | ||||
|         let listener = available_listeners | ||||
|             .iter() | ||||
|             .find(|l| l.scheme() == data.global_ctx.get_flags().default_protocol) | ||||
|             .unwrap_or(available_listeners.get(0).unwrap()); | ||||
|  | ||||
|         let mut tasks = bounded_join_set::JoinSet::new(2); | ||||
|  | ||||
|         let listener_host = listener.socket_addrs(|| None)?.pop(); | ||||
|         ip_list: &GetIpListResponse, | ||||
|         listener: &url::Url, | ||||
|         tasks: &mut JoinSet<Result<(), Error>>, | ||||
|     ) { | ||||
|         let Ok(mut addrs) = listener.socket_addrs(|| None) else { | ||||
|             tracing::error!(?listener, "failed to parse socket address from listener"); | ||||
|             return; | ||||
|         }; | ||||
|         let listener_host = addrs.pop(); | ||||
|         tracing::info!(?listener_host, ?listener, "try direct connect to peer"); | ||||
|         match listener_host { | ||||
|             Some(SocketAddr::V4(s_addr)) => { | ||||
|                 if s_addr.ip().is_unspecified() { | ||||
| @@ -284,7 +308,7 @@ impl DirectConnectorManager { | ||||
|                             let mut addr = (*listener).clone(); | ||||
|                             if addr.set_host(Some(ip.to_string().as_str())).is_ok() { | ||||
|                                 tasks.spawn(Self::try_connect_to_ip( | ||||
|                                     data.clone(), | ||||
|                                     self.clone(), | ||||
|                                     dst_peer_id.clone(), | ||||
|                                     addr.to_string(), | ||||
|                                 )); | ||||
| @@ -299,7 +323,7 @@ impl DirectConnectorManager { | ||||
|                         }); | ||||
|                 } else if !s_addr.ip().is_loopback() || TESTING.load(Ordering::Relaxed) { | ||||
|                     tasks.spawn(Self::try_connect_to_ip( | ||||
|                         data.clone(), | ||||
|                         self.clone(), | ||||
|                         dst_peer_id.clone(), | ||||
|                         listener.to_string(), | ||||
|                     )); | ||||
| @@ -330,7 +354,7 @@ impl DirectConnectorManager { | ||||
|                                 .is_ok() | ||||
|                             { | ||||
|                                 tasks.spawn(Self::try_connect_to_ip( | ||||
|                                     data.clone(), | ||||
|                                     self.clone(), | ||||
|                                     dst_peer_id.clone(), | ||||
|                                     addr.to_string(), | ||||
|                                 )); | ||||
| @@ -345,7 +369,7 @@ impl DirectConnectorManager { | ||||
|                         }); | ||||
|                 } else if !s_addr.ip().is_loopback() || TESTING.load(Ordering::Relaxed) { | ||||
|                     tasks.spawn(Self::try_connect_to_ip( | ||||
|                         data.clone(), | ||||
|                         self.clone(), | ||||
|                         dst_peer_id.clone(), | ||||
|                         listener.to_string(), | ||||
|                     )); | ||||
| @@ -355,64 +379,230 @@ impl DirectConnectorManager { | ||||
|                 tracing::error!(?p, ?listener, "failed to parse ip version from listener"); | ||||
|             } | ||||
|         } | ||||
|     } | ||||
|  | ||||
|         let mut has_succ = false; | ||||
|         while let Some(ret) = tasks.join_next().await { | ||||
|             match ret { | ||||
|                 Ok(Ok(_)) => { | ||||
|                     has_succ = true; | ||||
|                     tracing::info!( | ||||
|                         ?dst_peer_id, | ||||
|                         ?listener, | ||||
|                         "try direct connect to peer success" | ||||
|                     ); | ||||
|                     break; | ||||
|                 } | ||||
|                 Ok(Err(e)) => { | ||||
|                     tracing::info!(?e, "try direct connect to peer failed"); | ||||
|                 } | ||||
|                 Err(e) => { | ||||
|                     tracing::error!(?e, "try direct connect to peer task join failed"); | ||||
|                 } | ||||
|             } | ||||
|     #[tracing::instrument(skip(self))] | ||||
|     async fn do_try_direct_connect_internal( | ||||
|         self: &Arc<DirectConnectorManagerData>, | ||||
|         dst_peer_id: PeerId, | ||||
|         ip_list: GetIpListResponse, | ||||
|     ) -> Result<(), Error> { | ||||
|         let enable_ipv6 = self.global_ctx.get_flags().enable_ipv6; | ||||
|         let available_listeners = ip_list | ||||
|             .listeners | ||||
|             .clone() | ||||
|             .into_iter() | ||||
|             .map(Into::<url::Url>::into) | ||||
|             .filter_map(|l| if l.scheme() != "ring" { Some(l) } else { None }) | ||||
|             .filter(|l| l.port().is_some() && l.host().is_some()) | ||||
|             .filter(|l| enable_ipv6 || !matches!(l.host().unwrap().to_owned(), Host::Ipv6(_))) | ||||
|             .collect::<Vec<_>>(); | ||||
|  | ||||
|         tracing::debug!(?available_listeners, "got available listeners"); | ||||
|  | ||||
|         if available_listeners.is_empty() { | ||||
|             return Err(anyhow::anyhow!("peer {} have no valid listener", dst_peer_id).into()); | ||||
|         } | ||||
|  | ||||
|         if !has_succ { | ||||
|             data.dst_listener_blacklist.insert( | ||||
|                 DstListenerUrlBlackListItem(dst_peer_id.clone(), listener.clone()), | ||||
|                 (), | ||||
|                 std::time::Duration::from_secs(DIRECT_CONNECTOR_BLACKLIST_TIMEOUT_SEC), | ||||
|         let default_protocol = self.global_ctx.get_flags().default_protocol; | ||||
|         // Sort the available listeners by priority: the default protocol ranks | ||||
|         // highest, udp second, and everything else last. The sort is ascending, | ||||
|         // so the highest-priority scheme ends up at the back of the vec and is | ||||
|         // popped first by the loop below. | ||||
|         let mut available_listeners = available_listeners; | ||||
|         available_listeners.sort_by_key(|l| { | ||||
|             let scheme = l.scheme(); | ||||
|             if scheme == default_protocol { | ||||
|                 3 | ||||
|             } else if scheme == "udp" { | ||||
|                 2 | ||||
|             } else { | ||||
|                 1 | ||||
|             } | ||||
|         }); | ||||
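|         // For example, assuming default_protocol == "tcp" (illustration only): | ||||
|         //   ["ws://a", "udp://b", "tcp://c"] gets keys [1, 2, 3], so the tcp | ||||
|         //   listener ends up at the back and is popped/tried first below. | ||||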
|  | ||||
|         while !available_listeners.is_empty() { | ||||
|             let mut tasks = JoinSet::new(); | ||||
|             let mut listener_list = vec![]; | ||||
|  | ||||
|             let cur_scheme = available_listeners.last().unwrap().scheme().to_owned(); | ||||
|             while let Some(listener) = available_listeners.last() { | ||||
|                 if listener.scheme() != cur_scheme { | ||||
|                     break; | ||||
|                 } | ||||
|  | ||||
|                 tracing::debug!("try direct connect to peer with listener: {}", listener); | ||||
|                 self.spawn_direct_connect_task( | ||||
|                     dst_peer_id.clone(), | ||||
|                     &ip_list, | ||||
|                     &listener, | ||||
|                     &mut tasks, | ||||
|                 ); | ||||
|  | ||||
|                 listener_list.push(listener.clone().to_string()); | ||||
|                 available_listeners.pop(); | ||||
|             } | ||||
|  | ||||
|             let ret = tasks.join_all().await; | ||||
|             tracing::debug!( | ||||
|                 ?ret, | ||||
|                 ?dst_peer_id, | ||||
|                 ?cur_scheme, | ||||
|                 ?listener_list, | ||||
|                 "all tasks finished for current scheme" | ||||
|             ); | ||||
|  | ||||
|             if self.peer_manager.has_directly_connected_conn(dst_peer_id) { | ||||
|                 tracing::info!( | ||||
|                     "direct connect to peer {} success, has direct conn", | ||||
|                     dst_peer_id | ||||
|                 ); | ||||
|                 return Ok(()); | ||||
|             } | ||||
|         } | ||||
|  | ||||
|         Ok(()) | ||||
|     } | ||||
|  | ||||
|     #[tracing::instrument] | ||||
|     #[tracing::instrument(skip(self))] | ||||
|     async fn do_try_direct_connect( | ||||
|         data: Arc<DirectConnectorManagerData>, | ||||
|         self: Arc<DirectConnectorManagerData>, | ||||
|         dst_peer_id: PeerId, | ||||
|     ) -> Result<(), Error> { | ||||
|         let peer_manager = data.peer_manager.clone(); | ||||
|         tracing::debug!("try direct connect to peer: {}", dst_peer_id); | ||||
|         let mut backoff = | ||||
|             udp_hole_punch::BackOff::new(vec![1000, 2000, 2000, 5000, 5000, 10000, 30000, 60000]); | ||||
|         loop { | ||||
|             let peer_manager = self.peer_manager.clone(); | ||||
|             tracing::debug!("try direct connect to peer: {}", dst_peer_id); | ||||
|  | ||||
|         let rpc_stub = peer_manager | ||||
|             .get_peer_rpc_mgr() | ||||
|             .rpc_client() | ||||
|             .scoped_client::<DirectConnectorRpcClientFactory<BaseController>>( | ||||
|             let rpc_stub = peer_manager | ||||
|                 .get_peer_rpc_mgr() | ||||
|                 .rpc_client() | ||||
|                 .scoped_client::<DirectConnectorRpcClientFactory<BaseController>>( | ||||
|                 peer_manager.my_peer_id(), | ||||
|                 dst_peer_id, | ||||
|                 data.global_ctx.get_network_name(), | ||||
|                 self.global_ctx.get_network_name(), | ||||
|             ); | ||||
|  | ||||
|         let ip_list = rpc_stub | ||||
|             .get_ip_list(BaseController::default(), GetIpListRequest {}) | ||||
|             let ip_list = rpc_stub | ||||
|                 .get_ip_list(BaseController::default(), GetIpListRequest {}) | ||||
|                 .await | ||||
|                 .with_context(|| format!("get ip list from peer {}", dst_peer_id))?; | ||||
|  | ||||
|             tracing::info!(ip_list = ?ip_list, dst_peer_id = ?dst_peer_id, "got ip list"); | ||||
|  | ||||
|             let ret = self | ||||
|                 .do_try_direct_connect_internal(dst_peer_id, ip_list) | ||||
|                 .await; | ||||
|             tracing::info!(?ret, ?dst_peer_id, "do_try_direct_connect return"); | ||||
|  | ||||
|             if peer_manager.has_directly_connected_conn(dst_peer_id) { | ||||
|                 tracing::info!( | ||||
|                     "direct connect to peer {} success, has direct conn", | ||||
|                     dst_peer_id | ||||
|                 ); | ||||
|                 return Ok(()); | ||||
|             } | ||||
|  | ||||
|             tokio::time::sleep(Duration::from_millis(backoff.next_backoff())).await; | ||||
|         } | ||||
|     } | ||||
| } | ||||
|  | ||||
| impl std::fmt::Debug for DirectConnectorManagerData { | ||||
|     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { | ||||
|         f.debug_struct("DirectConnectorManagerData") | ||||
|             .field("peer_manager", &self.peer_manager) | ||||
|             .finish() | ||||
|     } | ||||
| } | ||||
|  | ||||
| pub struct DirectConnectorManager { | ||||
|     global_ctx: ArcGlobalCtx, | ||||
|     data: Arc<DirectConnectorManagerData>, | ||||
|     client: PeerTaskManager<DirectConnectorLauncher>, | ||||
|     tasks: JoinSet<()>, | ||||
| } | ||||
|  | ||||
| #[derive(Clone)] | ||||
| struct DirectConnectorLauncher(Arc<DirectConnectorManagerData>); | ||||
|  | ||||
| #[async_trait::async_trait] | ||||
| impl PeerTaskLauncher for DirectConnectorLauncher { | ||||
|     type Data = Arc<DirectConnectorManagerData>; | ||||
|     type CollectPeerItem = PeerId; | ||||
|     type TaskRet = (); | ||||
|  | ||||
|     fn new_data(&self, _peer_mgr: Arc<PeerManager>) -> Self::Data { | ||||
|         self.0.clone() | ||||
|     } | ||||
|  | ||||
|     async fn collect_peers_need_task(&self, data: &Self::Data) -> Vec<Self::CollectPeerItem> { | ||||
|         let my_peer_id = data.peer_manager.my_peer_id(); | ||||
|         data.peer_manager | ||||
|             .list_peers() | ||||
|             .await | ||||
|             .with_context(|| format!("get ip list from peer {}", dst_peer_id))?; | ||||
|             .into_iter() | ||||
|             .filter(|peer_id| { | ||||
|                 *peer_id != my_peer_id && !data.peer_manager.has_directly_connected_conn(*peer_id) | ||||
|             }) | ||||
|             .collect() | ||||
|     } | ||||
|  | ||||
|         tracing::info!(ip_list = ?ip_list, dst_peer_id = ?dst_peer_id, "got ip list"); | ||||
|     async fn launch_task( | ||||
|         &self, | ||||
|         data: &Self::Data, | ||||
|         item: Self::CollectPeerItem, | ||||
|     ) -> tokio::task::JoinHandle<Result<Self::TaskRet, anyhow::Error>> { | ||||
|         let data = data.clone(); | ||||
|         tokio::spawn(async move { data.do_try_direct_connect(item).await.map_err(Into::into) }) | ||||
|     } | ||||
|  | ||||
|         Self::do_try_direct_connect_internal(data, dst_peer_id, ip_list).await | ||||
|     async fn all_task_done(&self, _data: &Self::Data) {} | ||||
|  | ||||
|     fn loop_interval_ms(&self) -> u64 { | ||||
|         5000 | ||||
|     } | ||||
| } | ||||
|  | ||||
| impl DirectConnectorManager { | ||||
|     pub fn new(global_ctx: ArcGlobalCtx, peer_manager: Arc<PeerManager>) -> Self { | ||||
|         let data = Arc::new(DirectConnectorManagerData::new( | ||||
|             global_ctx.clone(), | ||||
|             peer_manager.clone(), | ||||
|         )); | ||||
|         let client = PeerTaskManager::new(DirectConnectorLauncher(data.clone()), peer_manager); | ||||
|         Self { | ||||
|             global_ctx, | ||||
|             data, | ||||
|             client, | ||||
|             tasks: JoinSet::new(), | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     pub fn run(&mut self) { | ||||
|         if self.global_ctx.get_flags().disable_p2p { | ||||
|             return; | ||||
|         } | ||||
|  | ||||
|         self.run_as_server(); | ||||
|         self.run_as_client(); | ||||
|     } | ||||
|  | ||||
|     pub fn run_as_server(&mut self) { | ||||
|         self.data | ||||
|             .peer_manager | ||||
|             .get_peer_rpc_mgr() | ||||
|             .rpc_server() | ||||
|             .registry() | ||||
|             .register( | ||||
|                 DirectConnectorRpcServer::new(DirectConnectorManagerRpcServer::new( | ||||
|                     self.global_ctx.clone(), | ||||
|                 )), | ||||
|                 &self.data.global_ctx.get_network_name(), | ||||
|             ); | ||||
|     } | ||||
|  | ||||
|     pub fn run_as_client(&mut self) { | ||||
|         self.client.start(); | ||||
|     } | ||||
| } | ||||
|  | ||||
| @@ -491,6 +681,13 @@ mod tests { | ||||
|  | ||||
|         wait_route_appear(p_a.clone(), p_c.clone()).await.unwrap(); | ||||
|  | ||||
|         p_c.get_global_ctx() | ||||
|             .get_ip_collector() | ||||
|             .collect_ip_addrs() | ||||
|             .await; | ||||
|  | ||||
|         tokio::time::sleep(std::time::Duration::from_secs(4)).await; | ||||
|  | ||||
|         let mut dm_a = DirectConnectorManager::new(p_a.get_global_ctx(), p_a.clone()); | ||||
|         let mut dm_c = DirectConnectorManager::new(p_c.get_global_ctx(), p_c.clone()); | ||||
|  | ||||
| @@ -525,6 +722,7 @@ mod tests { | ||||
|  | ||||
|     #[tokio::test] | ||||
|     async fn direct_connector_scheme_blacklist() { | ||||
|         TESTING.store(true, std::sync::atomic::Ordering::Relaxed); | ||||
|         let p_a = create_mock_peer_manager().await; | ||||
|         let data = Arc::new(DirectConnectorManagerData::new( | ||||
|             p_a.get_global_ctx(), | ||||
| @@ -539,7 +737,7 @@ mod tests { | ||||
|             .interface_ipv4s | ||||
|             .push("127.0.0.1".parse::<std::net::Ipv4Addr>().unwrap().into()); | ||||
|  | ||||
|         DirectConnectorManager::do_try_direct_connect_internal(data.clone(), 1, ip_list.clone()) | ||||
|         data.do_try_direct_connect_internal(1, ip_list.clone()) | ||||
|             .await | ||||
|             .unwrap(); | ||||
|  | ||||
|   | ||||
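Note on the launcher above: DirectConnectorLauncher plugs into PeerTaskManager, whose implementation is not part of this diff. A minimal sketch (simplified trait, u32 peer ids, not the actual PeerTaskManager code) of how such a launcher can be driven: every loop_interval_ms() the driver collects the peers that still need a direct connection and spawns one task per peer, skipping peers whose task is still in flight.

    use std::{collections::HashSet, sync::{Arc, Mutex}, time::Duration};

    #[async_trait::async_trait]
    trait Launcher: Clone + Send + Sync + 'static {
        async fn collect_peers_need_task(&self) -> Vec<u32>;
        async fn launch_task(&self, peer_id: u32);
        fn loop_interval_ms(&self) -> u64 {
            5000
        }
    }

    async fn drive<L: Launcher>(launcher: L) {
        let running: Arc<Mutex<HashSet<u32>>> = Arc::new(Mutex::new(HashSet::new()));
        loop {
            for peer_id in launcher.collect_peers_need_task().await {
                // skip peers that already have a task in flight
                if !running.lock().unwrap().insert(peer_id) {
                    continue;
                }
                let (l, running) = (launcher.clone(), running.clone());
                tokio::spawn(async move {
                    l.launch_task(peer_id).await;
                    // make the peer eligible again once its task finishes
                    running.lock().unwrap().remove(&peer_id);
                });
            }
            tokio::time::sleep(Duration::from_millis(launcher.loop_interval_ms())).await;
        }
    }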
| @@ -2,19 +2,15 @@ use std::{net::SocketAddr, sync::Arc}; | ||||
|  | ||||
| use crate::{ | ||||
|     common::{ | ||||
|         dns::{resolve_txt_record, RESOLVER}, | ||||
|         error::Error, | ||||
|         global_ctx::ArcGlobalCtx, | ||||
|         stun::{get_default_resolver_config, resolve_txt_record}, | ||||
|     }, | ||||
|     tunnel::{IpVersion, Tunnel, TunnelConnector, TunnelError, PROTO_PORT_OFFSET}, | ||||
| }; | ||||
| use anyhow::Context; | ||||
| use dashmap::DashSet; | ||||
| use hickory_resolver::{ | ||||
|     config::{ResolverConfig, ResolverOpts}, | ||||
|     proto::rr::rdata::SRV, | ||||
|     TokioAsyncResolver, | ||||
| }; | ||||
| use hickory_resolver::proto::rr::rdata::SRV; | ||||
| use rand::{seq::SliceRandom, Rng as _}; | ||||
|  | ||||
| use crate::proto::common::TunnelInfo; | ||||
| @@ -43,9 +39,6 @@ pub struct DNSTunnelConnector { | ||||
|     bind_addrs: Vec<SocketAddr>, | ||||
|     global_ctx: ArcGlobalCtx, | ||||
|     ip_version: IpVersion, | ||||
|  | ||||
|     default_resolve_config: ResolverConfig, | ||||
|     default_resolve_opts: ResolverOpts, | ||||
| } | ||||
|  | ||||
| impl DNSTunnelConnector { | ||||
| @@ -55,9 +48,6 @@ impl DNSTunnelConnector { | ||||
|             bind_addrs: Vec::new(), | ||||
|             global_ctx, | ||||
|             ip_version: IpVersion::Both, | ||||
|  | ||||
|             default_resolve_config: get_default_resolver_config(), | ||||
|             default_resolve_opts: ResolverOpts::default(), | ||||
|         } | ||||
|     } | ||||
|  | ||||
| @@ -66,12 +56,7 @@ impl DNSTunnelConnector { | ||||
|         &self, | ||||
|         domain_name: &str, | ||||
|     ) -> Result<Box<dyn TunnelConnector>, Error> { | ||||
|         let resolver = | ||||
|             TokioAsyncResolver::tokio_from_system_conf().unwrap_or(TokioAsyncResolver::tokio( | ||||
|                 self.default_resolve_config.clone(), | ||||
|                 self.default_resolve_opts.clone(), | ||||
|             )); | ||||
|         let txt_data = resolve_txt_record(domain_name, &resolver) | ||||
|         let txt_data = resolve_txt_record(domain_name) | ||||
|             .await | ||||
|             .with_context(|| format!("resolve txt record failed, domain_name: {}", domain_name))?; | ||||
|  | ||||
| @@ -126,12 +111,6 @@ impl DNSTunnelConnector { | ||||
|     ) -> Result<Box<dyn TunnelConnector>, Error> { | ||||
|         tracing::info!("handle_srv_record: {}", domain_name); | ||||
|  | ||||
|         let resolver = | ||||
|             TokioAsyncResolver::tokio_from_system_conf().unwrap_or(TokioAsyncResolver::tokio( | ||||
|                 self.default_resolve_config.clone(), | ||||
|                 self.default_resolve_opts.clone(), | ||||
|             )); | ||||
|  | ||||
|         let srv_domains = PROTO_PORT_OFFSET | ||||
|             .iter() | ||||
|             .map(|(p, _)| (format!("_easytier._{}.{}", p, domain_name), *p)) // _easytier._udp.{domain_name} | ||||
| @@ -141,7 +120,7 @@ impl DNSTunnelConnector { | ||||
|         let srv_lookup_tasks = srv_domains | ||||
|             .iter() | ||||
|             .map(|(srv_domain, protocol)| { | ||||
|                 let resolver = resolver.clone(); | ||||
|                 let resolver = RESOLVER.clone(); | ||||
|                 let responses = responses.clone(); | ||||
|                 async move { | ||||
|                     let response = resolver.srv_lookup(srv_domain).await.with_context(|| { | ||||
|   | ||||
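The hunks above drop the per-call TokioAsyncResolver construction in favor of a shared RESOLVER and a simplified resolve_txt_record(domain_name) from common::dns, neither of which is shown in this diff. A hedged sketch of what such a shared resolver could look like, assuming once_cell::sync::Lazy is available and using ResolverConfig::default() in place of the project's get_default_resolver_config(); the real helper's signature and error handling may differ.

    use hickory_resolver::{
        config::{ResolverConfig, ResolverOpts},
        TokioAsyncResolver,
    };
    use once_cell::sync::Lazy;

    // process-wide resolver: system config when readable, defaults otherwise
    static RESOLVER: Lazy<TokioAsyncResolver> = Lazy::new(|| {
        TokioAsyncResolver::tokio_from_system_conf().unwrap_or_else(|_| {
            TokioAsyncResolver::tokio(ResolverConfig::default(), ResolverOpts::default())
        })
    });

    // hypothetical helper mirroring the call sites above
    async fn resolve_txt_record(domain_name: &str) -> anyhow::Result<String> {
        let lookup = RESOLVER.txt_lookup(domain_name).await?;
        let txt = lookup
            .iter()
            .next()
            .ok_or_else(|| anyhow::anyhow!("no TXT record for {domain_name}"))?;
        Ok(txt.to_string())
    }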
| @@ -1,15 +1,21 @@ | ||||
| use std::{collections::BTreeSet, sync::Arc}; | ||||
| use std::{ | ||||
|     collections::BTreeSet, | ||||
|     sync::{Arc, Weak}, | ||||
| }; | ||||
|  | ||||
| use anyhow::Context; | ||||
| use dashmap::{DashMap, DashSet}; | ||||
| use tokio::{ | ||||
|     sync::{broadcast::Receiver, mpsc, Mutex}, | ||||
|     sync::{ | ||||
|         broadcast::{error::RecvError, Receiver}, | ||||
|         mpsc, Mutex, | ||||
|     }, | ||||
|     task::JoinSet, | ||||
|     time::timeout, | ||||
| }; | ||||
|  | ||||
| use crate::{ | ||||
|     common::PeerId, | ||||
|     common::{join_joinset_background, PeerId}, | ||||
|     peers::peer_conn::PeerConnId, | ||||
|     proto::{ | ||||
|         cli::{ | ||||
| @@ -50,7 +56,7 @@ struct ReconnResult { | ||||
| struct ConnectorManagerData { | ||||
|     connectors: ConnectorMap, | ||||
|     reconnecting: DashSet<String>, | ||||
|     peer_manager: Arc<PeerManager>, | ||||
|     peer_manager: Weak<PeerManager>, | ||||
|     alive_conn_urls: Arc<DashSet<String>>, | ||||
|     // user removed connector urls | ||||
|     removed_conn_urls: Arc<DashSet<String>>, | ||||
| @@ -75,7 +81,7 @@ impl ManualConnectorManager { | ||||
|             data: Arc::new(ConnectorManagerData { | ||||
|                 connectors, | ||||
|                 reconnecting: DashSet::new(), | ||||
|                 peer_manager, | ||||
|                 peer_manager: Arc::downgrade(&peer_manager), | ||||
|                 alive_conn_urls: Arc::new(DashSet::new()), | ||||
|                 removed_conn_urls: Arc::new(DashSet::new()), | ||||
|                 net_ns: global_ctx.net_ns.clone(), | ||||
| @@ -179,8 +185,35 @@ impl ManualConnectorManager { | ||||
|         mut event_recv: Receiver<GlobalCtxEvent>, | ||||
|     ) { | ||||
|         loop { | ||||
|             let event = event_recv.recv().await.expect("event_recv got error"); | ||||
|             Self::handle_event(&event, &data).await; | ||||
|             match event_recv.recv().await { | ||||
|                 Ok(event) => { | ||||
|                     Self::handle_event(&event, &data).await; | ||||
|                 } | ||||
|                 Err(RecvError::Lagged(n)) => { | ||||
|                     tracing::warn!("event_recv lagged: {}, rebuild alive conn list", n); | ||||
|                     event_recv = event_recv.resubscribe(); | ||||
|                     data.alive_conn_urls.clear(); | ||||
|                     let Some(pm) = data.peer_manager.upgrade() else { | ||||
|                         tracing::warn!("peer manager is gone, exit"); | ||||
|                         break; | ||||
|                     }; | ||||
|                     for x in pm.get_peer_map().get_alive_conns().iter().map(|x| { | ||||
|                         x.tunnel | ||||
|                             .clone() | ||||
|                             .unwrap_or_default() | ||||
|                             .remote_addr | ||||
|                             .unwrap_or_default() | ||||
|                             .to_string() | ||||
|                     }) { | ||||
|                         data.alive_conn_urls.insert(x); | ||||
|                     } | ||||
|                     continue; | ||||
|                 } | ||||
|                 Err(RecvError::Closed) => { | ||||
|                     tracing::warn!("event_recv closed, exit"); | ||||
|                     break; | ||||
|                 } | ||||
|             } | ||||
|         } | ||||
|     } | ||||
|  | ||||
| @@ -190,6 +223,8 @@ impl ManualConnectorManager { | ||||
|             use_global_var!(MANUAL_CONNECTOR_RECONNECT_INTERVAL_MS), | ||||
|         )); | ||||
|         let (reconn_result_send, mut reconn_result_recv) = mpsc::channel(100); | ||||
|         let tasks = Arc::new(std::sync::Mutex::new(JoinSet::new())); | ||||
|         join_joinset_background(tasks.clone(), "connector_reconnect_tasks".to_string()); | ||||
|  | ||||
|         loop { | ||||
|             tokio::select! { | ||||
| @@ -205,7 +240,7 @@ impl ManualConnectorManager { | ||||
|                         let insert_succ = data.reconnecting.insert(dead_url.clone()); | ||||
|                         assert!(insert_succ); | ||||
|  | ||||
|                         tokio::spawn(async move { | ||||
|                         tasks.lock().unwrap().spawn(async move { | ||||
|                             let reconn_ret = Self::conn_reconnect(data_clone.clone(), dead_url.clone(), connector.clone()).await; | ||||
|                             sender.send(reconn_ret).await.unwrap(); | ||||
|  | ||||
| @@ -271,7 +306,6 @@ impl ManualConnectorManager { | ||||
|  | ||||
|     async fn collect_dead_conns(data: Arc<ConnectorManagerData>) -> BTreeSet<String> { | ||||
|         Self::handle_remove_connector(data.clone()); | ||||
|  | ||||
|         let all_urls: BTreeSet<String> = data | ||||
|             .connectors | ||||
|             .iter() | ||||
| @@ -309,8 +343,13 @@ impl ManualConnectorManager { | ||||
|             connector.lock().await.remote_url().clone(), | ||||
|         )); | ||||
|         tracing::info!("reconnect try connect... conn: {:?}", connector); | ||||
|         let (peer_id, conn_id) = data | ||||
|             .peer_manager | ||||
|         let Some(pm) = data.peer_manager.upgrade() else { | ||||
|             return Err(Error::AnyhowError(anyhow::anyhow!( | ||||
|                 "peer manager is gone, cannot reconnect" | ||||
|             ))); | ||||
|         }; | ||||
|  | ||||
|         let (peer_id, conn_id) = pm | ||||
|             .try_direct_connect(connector.lock().await.as_mut()) | ||||
|             .await?; | ||||
|         tracing::info!("reconnect succ: {} {} {}", peer_id, conn_id, dead_url); | ||||
|   | ||||
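For context on the RecvError::Lagged branch above: tokio broadcast receivers report how many events were dropped when a consumer falls behind, and the usual recovery is to resubscribe (skipping the backlog) and rebuild derived state from the source of truth instead of replaying missed events. A self-contained illustration of that pattern (not the EasyTier code):

    use tokio::sync::broadcast::{self, error::RecvError};

    async fn consume(mut rx: broadcast::Receiver<String>) {
        loop {
            match rx.recv().await {
                Ok(event) => println!("event: {event}"),
                Err(RecvError::Lagged(n)) => {
                    eprintln!("missed {n} events, resubscribing and resyncing state");
                    // resubscribe starts from the current tail of the channel
                    rx = rx.resubscribe();
                    // ...rebuild derived state here, e.g. re-list alive connections...
                }
                Err(RecvError::Closed) => break,
            }
        }
    }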
| @@ -43,8 +43,8 @@ async fn set_bind_addr_for_peer_connector( | ||||
|         connector.set_bind_addrs(bind_addrs); | ||||
|     } else { | ||||
|         let mut bind_addrs = vec![]; | ||||
|         for ipv6 in ips.interface_ipv6s { | ||||
|             let socket_addr = SocketAddrV6::new(ipv6.into(), 0, 0, 0).into(); | ||||
|         for ipv6 in ips.interface_ipv6s.iter().chain(ips.public_ipv6.iter()) { | ||||
|             let socket_addr = SocketAddrV6::new(std::net::Ipv6Addr::from(*ipv6), 0, 0, 0).into(); | ||||
|             bind_addrs.push(socket_addr); | ||||
|         } | ||||
|         connector.set_bind_addrs(bind_addrs); | ||||
| @@ -60,7 +60,8 @@ pub async fn create_connector_by_url( | ||||
|     let url = url::Url::parse(url).map_err(|_| Error::InvalidUrl(url.to_owned()))?; | ||||
|     let mut connector: Box<dyn TunnelConnector + 'static> = match url.scheme() { | ||||
|         "tcp" => { | ||||
|             let dst_addr = check_scheme_and_get_socket_addr::<SocketAddr>(&url, "tcp", ip_version)?; | ||||
|             let dst_addr = | ||||
|                 check_scheme_and_get_socket_addr::<SocketAddr>(&url, "tcp", ip_version).await?; | ||||
|             let mut connector = TcpTunnelConnector::new(url); | ||||
|             if global_ctx.config.get_flags().bind_device { | ||||
|                 set_bind_addr_for_peer_connector( | ||||
| @@ -73,7 +74,8 @@ pub async fn create_connector_by_url( | ||||
|             Box::new(connector) | ||||
|         } | ||||
|         "udp" => { | ||||
|             let dst_addr = check_scheme_and_get_socket_addr::<SocketAddr>(&url, "udp", ip_version)?; | ||||
|             let dst_addr = | ||||
|                 check_scheme_and_get_socket_addr::<SocketAddr>(&url, "udp", ip_version).await?; | ||||
|             let mut connector = UdpTunnelConnector::new(url); | ||||
|             if global_ctx.config.get_flags().bind_device { | ||||
|                 set_bind_addr_for_peer_connector( | ||||
| @@ -90,13 +92,14 @@ pub async fn create_connector_by_url( | ||||
|             Box::new(connector) | ||||
|         } | ||||
|         "ring" => { | ||||
|             check_scheme_and_get_socket_addr::<uuid::Uuid>(&url, "ring", IpVersion::Both)?; | ||||
|             check_scheme_and_get_socket_addr::<uuid::Uuid>(&url, "ring", IpVersion::Both).await?; | ||||
|             let connector = RingTunnelConnector::new(url); | ||||
|             Box::new(connector) | ||||
|         } | ||||
|         #[cfg(feature = "quic")] | ||||
|         "quic" => { | ||||
|             let dst_addr = check_scheme_and_get_socket_addr::<SocketAddr>(&url, "quic", ip_version)?; | ||||
|             let dst_addr = | ||||
|                 check_scheme_and_get_socket_addr::<SocketAddr>(&url, "quic", ip_version).await?; | ||||
|             let mut connector = QUICTunnelConnector::new(url); | ||||
|             if global_ctx.config.get_flags().bind_device { | ||||
|                 set_bind_addr_for_peer_connector( | ||||
| @@ -110,7 +113,8 @@ pub async fn create_connector_by_url( | ||||
|         } | ||||
|         #[cfg(feature = "wireguard")] | ||||
|         "wg" => { | ||||
|             let dst_addr = check_scheme_and_get_socket_addr::<SocketAddr>(&url, "wg", ip_version)?; | ||||
|             let dst_addr = | ||||
|                 check_scheme_and_get_socket_addr::<SocketAddr>(&url, "wg", ip_version).await?; | ||||
|             let nid = global_ctx.get_network_identity(); | ||||
|             let wg_config = WgConfig::new_from_network_identity( | ||||
|                 &nid.network_name, | ||||
| @@ -130,7 +134,7 @@ pub async fn create_connector_by_url( | ||||
|         #[cfg(feature = "websocket")] | ||||
|         "ws" | "wss" => { | ||||
|             use crate::tunnel::FromUrl; | ||||
|             let dst_addr = SocketAddr::from_url(url.clone(), ip_version)?; | ||||
|             let dst_addr = SocketAddr::from_url(url.clone(), ip_version).await?; | ||||
|             let mut connector = crate::tunnel::websocket::WSTunnelConnector::new(url); | ||||
|             if global_ctx.config.get_flags().bind_device { | ||||
|                 set_bind_addr_for_peer_connector( | ||||
|   | ||||
| @@ -56,8 +56,8 @@ impl From<NatType> for UdpNatType { | ||||
|     fn from(nat_type: NatType) -> Self { | ||||
|         match nat_type { | ||||
|             NatType::Unknown => UdpNatType::Unknown, | ||||
|             NatType::NoPat | NatType::OpenInternet => UdpNatType::Open(nat_type), | ||||
|             NatType::FullCone | NatType::Restricted | NatType::PortRestricted => { | ||||
|             NatType::OpenInternet => UdpNatType::Open(nat_type), | ||||
|             NatType::NoPat | NatType::FullCone | NatType::Restricted | NatType::PortRestricted => { | ||||
|                 UdpNatType::Cone(nat_type) | ||||
|             } | ||||
|             NatType::Symmetric | NatType::SymUdpFirewall => UdpNatType::HardSymmetric(nat_type), | ||||
| @@ -495,6 +495,7 @@ impl PunchHoleServerCommon { | ||||
|             .udp_nat_type | ||||
|     } | ||||
|  | ||||
|     #[async_recursion::async_recursion] | ||||
|     pub(crate) async fn select_listener( | ||||
|         &self, | ||||
|         use_new_listener: bool, | ||||
| @@ -515,24 +516,28 @@ impl PunchHoleServerCommon { | ||||
|         let mut locked = all_listener_sockets.lock().await; | ||||
|  | ||||
|         let listener = if use_last { | ||||
|             locked.last_mut()? | ||||
|             Some(locked.last_mut()?) | ||||
|         } else { | ||||
|             // use the listener that is active most recently | ||||
|             locked | ||||
|                 .iter_mut() | ||||
|                 .max_by_key(|listener| listener.last_active_time.load())? | ||||
|                 .filter(|l| !l.mapped_addr.ip().is_unspecified()) | ||||
|                 .max_by_key(|listener| listener.last_active_time.load()) | ||||
|         }; | ||||
|  | ||||
|         if listener.mapped_addr.ip().is_unspecified() { | ||||
|             tracing::info!("listener mapped addr is unspecified, trying to get mapped addr"); | ||||
|             listener.mapped_addr = self | ||||
|                 .get_global_ctx() | ||||
|                 .get_stun_info_collector() | ||||
|                 .get_udp_port_mapping(listener.mapped_addr.port()) | ||||
|                 .await | ||||
|                 .ok()?; | ||||
|         if listener.is_none() || listener.as_ref().unwrap().mapped_addr.ip().is_unspecified() { | ||||
|             tracing::warn!( | ||||
|                 ?use_new_listener, | ||||
|                 "no available udp hole punching listener with mapped address" | ||||
|             ); | ||||
|             if !use_new_listener { | ||||
|                 return self.select_listener(true).await; | ||||
|             } else { | ||||
|                 return None; | ||||
|             } | ||||
|         } | ||||
|  | ||||
|         let listener = listener.unwrap(); | ||||
|         Some((listener.get_socket().await, listener.mapped_addr)) | ||||
|     } | ||||
|  | ||||
|   | ||||
| @@ -143,7 +143,7 @@ impl UdpHolePunchRpc for UdpHolePunchServer { | ||||
| } | ||||
|  | ||||
| #[derive(Debug)] | ||||
| struct BackOff { | ||||
| pub struct BackOff { | ||||
|     backoffs_ms: Vec<u64>, | ||||
|     current_idx: usize, | ||||
| } | ||||
| @@ -221,7 +221,7 @@ impl UdpHoePunchConnectorData { | ||||
|             Ok(Some(tunnel)) => { | ||||
|                 tracing::info!(?tunnel, "hole punching get tunnel success"); | ||||
|  | ||||
|                 if let Err(e) = self.peer_mgr.add_client_tunnel(tunnel).await { | ||||
|                 if let Err(e) = self.peer_mgr.add_client_tunnel(tunnel, false).await { | ||||
|                     tracing::warn!(?e, "add client tunnel failed"); | ||||
|                     op(true); | ||||
|                     false | ||||
|   | ||||
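BackOff is made pub above but its methods are not part of this diff; the reconnect loop earlier in the file calls backoff.next_backoff() to pick the next sleep interval in milliseconds. A minimal sketch consistent with the two fields shown (the real implementation may wrap around or behave differently once the schedule is exhausted):

    pub struct BackOff {
        backoffs_ms: Vec<u64>,
        current_idx: usize,
    }

    impl BackOff {
        pub fn new(backoffs_ms: Vec<u64>) -> Self {
            Self { backoffs_ms, current_idx: 0 }
        }

        // Return the next delay, clamping to the last entry once exhausted.
        pub fn next_backoff(&mut self) -> u64 {
            let idx = self.current_idx.min(self.backoffs_ms.len().saturating_sub(1));
            self.current_idx = self.current_idx.saturating_add(1);
            *self.backoffs_ms.get(idx).unwrap_or(&1000)
        }
    }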
| @@ -3,12 +3,14 @@ use std::{ | ||||
|     fmt::Write, | ||||
|     net::{IpAddr, SocketAddr}, | ||||
|     path::PathBuf, | ||||
|     str::FromStr, | ||||
|     sync::Mutex, | ||||
|     time::Duration, | ||||
|     vec, | ||||
| }; | ||||
|  | ||||
| use anyhow::Context; | ||||
| use cidr::Ipv4Inet; | ||||
| use clap::{command, Args, Parser, Subcommand}; | ||||
| use humansize::format_size; | ||||
| use service_manager::*; | ||||
| @@ -51,6 +53,15 @@ struct Cli { | ||||
|     #[arg(short, long, default_value = "false", help = "verbose output")] | ||||
|     verbose: bool, | ||||
|  | ||||
|     #[arg( | ||||
|         short = 'o', | ||||
|         long = "output", | ||||
|         value_enum, | ||||
|         default_value = "table", | ||||
|         help = "output format" | ||||
|     )] | ||||
|     output_format: OutputFormat, | ||||
|  | ||||
|     #[command(subcommand)] | ||||
|     sub_command: SubCommand, | ||||
| } | ||||
| @@ -77,23 +88,23 @@ enum SubCommand { | ||||
|     Proxy, | ||||
| } | ||||
|  | ||||
| #[derive(clap::ValueEnum, Debug, Clone, PartialEq)] | ||||
| enum OutputFormat { | ||||
|     Table, | ||||
|     Json, | ||||
| } | ||||
|  | ||||
| #[derive(Args, Debug)] | ||||
| struct PeerArgs { | ||||
|     #[command(subcommand)] | ||||
|     sub_command: Option<PeerSubCommand>, | ||||
| } | ||||
|  | ||||
| #[derive(Args, Debug)] | ||||
| struct PeerListArgs { | ||||
|     #[arg(short, long)] | ||||
|     verbose: bool, | ||||
| } | ||||
|  | ||||
| #[derive(Subcommand, Debug)] | ||||
| enum PeerSubCommand { | ||||
|     Add, | ||||
|     Remove, | ||||
|     List(PeerListArgs), | ||||
|     List, | ||||
|     ListForeign, | ||||
|     ListGlobalForeign, | ||||
| } | ||||
| @@ -193,14 +204,15 @@ struct InstallArgs { | ||||
|  | ||||
| type Error = anyhow::Error; | ||||
|  | ||||
| struct CommandHandler { | ||||
| struct CommandHandler<'a> { | ||||
|     client: Mutex<RpcClient>, | ||||
|     verbose: bool, | ||||
|     output_format: &'a OutputFormat, | ||||
| } | ||||
|  | ||||
| type RpcClient = StandAloneClient<TcpTunnelConnector>; | ||||
|  | ||||
| impl CommandHandler { | ||||
| impl CommandHandler<'_> { | ||||
|     async fn get_peer_manager_client( | ||||
|         &self, | ||||
|     ) -> Result<Box<dyn PeerManageRpc<Controller = BaseController>>, Error> { | ||||
| @@ -294,9 +306,12 @@ impl CommandHandler { | ||||
|         println!("remove peer"); | ||||
|     } | ||||
|  | ||||
|     async fn handle_peer_list(&self, _args: &PeerArgs) -> Result<(), Error> { | ||||
|         #[derive(tabled::Tabled)] | ||||
|     async fn handle_peer_list(&self) -> Result<(), Error> { | ||||
|         #[derive(tabled::Tabled, serde::Serialize)] | ||||
|         struct PeerTableItem { | ||||
|             #[tabled(rename = "ipv4")] | ||||
|             cidr: String, | ||||
|             #[tabled(skip)] | ||||
|             ipv4: String, | ||||
|             hostname: String, | ||||
|             cost: String, | ||||
| @@ -314,7 +329,12 @@ impl CommandHandler { | ||||
|             fn from(p: PeerRoutePair) -> Self { | ||||
|                 let route = p.route.clone().unwrap_or_default(); | ||||
|                 PeerTableItem { | ||||
|                     ipv4: route.ipv4_addr.map(|ip| ip.to_string()).unwrap_or_default(), | ||||
|                     cidr: route.ipv4_addr.map(|ip| ip.to_string()).unwrap_or_default(), | ||||
|                     ipv4: route | ||||
|                         .ipv4_addr | ||||
|                         .map(|ip: easytier::proto::common::Ipv4Inet| ip.address.unwrap_or_default()) | ||||
|                         .map(|ip| ip.to_string()) | ||||
|                         .unwrap_or_default(), | ||||
|                     hostname: route.hostname.clone(), | ||||
|                     cost: cost_to_str(route.cost), | ||||
|                     lat_ms: if route.cost == 1 { | ||||
| @@ -344,7 +364,10 @@ impl CommandHandler { | ||||
|         impl From<NodeInfo> for PeerTableItem { | ||||
|             fn from(p: NodeInfo) -> Self { | ||||
|                 PeerTableItem { | ||||
|                     ipv4: p.ipv4_addr.clone(), | ||||
|                     cidr: p.ipv4_addr.clone(), | ||||
|                     ipv4: Ipv4Inet::from_str(&p.ipv4_addr) | ||||
|                         .map(|ip| ip.address().to_string()) | ||||
|                         .unwrap_or_default(), | ||||
|                     hostname: p.hostname.clone(), | ||||
|                     cost: "Local".to_string(), | ||||
|                     lat_ms: "-".to_string(), | ||||
| @@ -366,7 +389,7 @@ impl CommandHandler { | ||||
|         let mut items: Vec<PeerTableItem> = vec![]; | ||||
|         let peer_routes = self.list_peer_route_pair().await?; | ||||
|         if self.verbose { | ||||
|             println!("{:#?}", peer_routes); | ||||
|             println!("{}", serde_json::to_string_pretty(&peer_routes)?); | ||||
|             return Ok(()); | ||||
|         } | ||||
|  | ||||
| @@ -382,7 +405,7 @@ impl CommandHandler { | ||||
|             items.push(p.into()); | ||||
|         } | ||||
|  | ||||
|         println!("{}", tabled::Table::new(items).with(Style::modern())); | ||||
|         print_output(&items, self.output_format)?; | ||||
|  | ||||
|         Ok(()) | ||||
|     } | ||||
| @@ -404,8 +427,9 @@ impl CommandHandler { | ||||
|             .list_foreign_network(BaseController::default(), request) | ||||
|             .await?; | ||||
|         let network_map = response; | ||||
|         if self.verbose { | ||||
|             println!("{:#?}", network_map); | ||||
|         if self.verbose || *self.output_format == OutputFormat::Json { | ||||
|             let json = serde_json::to_string_pretty(&network_map.foreign_networks)?; | ||||
|             println!("{}", json); | ||||
|             return Ok(()); | ||||
|         } | ||||
|  | ||||
| @@ -445,8 +469,11 @@ impl CommandHandler { | ||||
|         let response = client | ||||
|             .list_global_foreign_network(BaseController::default(), request) | ||||
|             .await?; | ||||
|         if self.verbose { | ||||
|             println!("{:#?}", response); | ||||
|         if self.verbose || *self.output_format == OutputFormat::Json { | ||||
|             println!( | ||||
|                 "{}", | ||||
|                 serde_json::to_string_pretty(&response.foreign_networks)? | ||||
|             ); | ||||
|             return Ok(()); | ||||
|         } | ||||
|  | ||||
| @@ -464,7 +491,7 @@ impl CommandHandler { | ||||
|     } | ||||
|  | ||||
|     async fn handle_route_list(&self) -> Result<(), Error> { | ||||
|         #[derive(tabled::Tabled)] | ||||
|         #[derive(tabled::Tabled, serde::Serialize)] | ||||
|         struct RouteTableItem { | ||||
|             ipv4: String, | ||||
|             hostname: String, | ||||
| @@ -491,6 +518,23 @@ impl CommandHandler { | ||||
|             .await? | ||||
|             .node_info | ||||
|             .ok_or(anyhow::anyhow!("node info not found"))?; | ||||
|         let peer_routes = self.list_peer_route_pair().await?; | ||||
|  | ||||
|         if self.verbose { | ||||
|             #[derive(serde::Serialize)] | ||||
|             struct VerboseItem { | ||||
|                 node_info: NodeInfo, | ||||
|                 peer_routes: Vec<PeerRoutePair>, | ||||
|             } | ||||
|             println!( | ||||
|                 "{}", | ||||
|                 serde_json::to_string_pretty(&VerboseItem { | ||||
|                     node_info, | ||||
|                     peer_routes | ||||
|                 })? | ||||
|             ); | ||||
|             return Ok(()); | ||||
|         } | ||||
|  | ||||
|         items.push(RouteTableItem { | ||||
|             ipv4: node_info.ipv4_addr.clone(), | ||||
| @@ -510,7 +554,6 @@ impl CommandHandler { | ||||
|  | ||||
|             version: node_info.version.clone(), | ||||
|         }); | ||||
|         let peer_routes = self.list_peer_route_pair().await?; | ||||
|         for p in peer_routes.iter() { | ||||
|             let Some(next_hop_pair) = peer_routes.iter().find(|pair| { | ||||
|                 pair.route.clone().unwrap_or_default().peer_id | ||||
| @@ -634,7 +677,7 @@ impl CommandHandler { | ||||
|             } | ||||
|         } | ||||
|  | ||||
|         println!("{}", tabled::Table::new(items).with(Style::modern())); | ||||
|         print_output(&items, self.output_format)?; | ||||
|  | ||||
|         Ok(()) | ||||
|     } | ||||
| @@ -645,6 +688,10 @@ impl CommandHandler { | ||||
|         let response = client | ||||
|             .list_connector(BaseController::default(), request) | ||||
|             .await?; | ||||
|         if self.verbose || *self.output_format == OutputFormat::Json { | ||||
|             println!("{}", serde_json::to_string_pretty(&response.connectors)?); | ||||
|             return Ok(()); | ||||
|         } | ||||
|         println!("response: {:#?}", response); | ||||
|         Ok(()) | ||||
|     } | ||||
| @@ -912,6 +959,21 @@ impl Service { | ||||
|     } | ||||
| } | ||||
|  | ||||
| fn print_output<T>(items: &[T], format: &OutputFormat) -> Result<(), Error> | ||||
| where | ||||
|     T: tabled::Tabled + serde::Serialize, | ||||
| { | ||||
|     match format { | ||||
|         OutputFormat::Table => { | ||||
|             println!("{}", tabled::Table::new(items).with(Style::modern())); | ||||
|         } | ||||
|         OutputFormat::Json => { | ||||
|             println!("{}", serde_json::to_string_pretty(items)?); | ||||
|         } | ||||
|     } | ||||
|     Ok(()) | ||||
| } | ||||
|  | ||||
| #[tokio::main] | ||||
| #[tracing::instrument] | ||||
| async fn main() -> Result<(), Error> { | ||||
| @@ -924,6 +986,7 @@ async fn main() -> Result<(), Error> { | ||||
|     let handler = CommandHandler { | ||||
|         client: Mutex::new(client), | ||||
|         verbose: cli.verbose, | ||||
|         output_format: &cli.output_format, | ||||
|     }; | ||||
|  | ||||
|     match cli.sub_command { | ||||
| @@ -934,12 +997,8 @@ async fn main() -> Result<(), Error> { | ||||
|             Some(PeerSubCommand::Remove) => { | ||||
|                 println!("remove peer"); | ||||
|             } | ||||
|             Some(PeerSubCommand::List(arg)) => { | ||||
|                 if arg.verbose { | ||||
|                     println!("{:#?}", handler.list_peer_route_pair().await?); | ||||
|                 } else { | ||||
|                     handler.handle_peer_list(&peer_args).await?; | ||||
|                 } | ||||
|             Some(PeerSubCommand::List) => { | ||||
|                 handler.handle_peer_list().await?; | ||||
|             } | ||||
|             Some(PeerSubCommand::ListForeign) => { | ||||
|                 handler.handle_foreign_network_list().await?; | ||||
| @@ -948,7 +1007,7 @@ async fn main() -> Result<(), Error> { | ||||
|                 handler.handle_global_foreign_network_list().await?; | ||||
|             } | ||||
|             None => { | ||||
|                 handler.handle_peer_list(&peer_args).await?; | ||||
|                 handler.handle_peer_list().await?; | ||||
|             } | ||||
|         }, | ||||
|         SubCommand::Connector(conn_args) => match conn_args.sub_command { | ||||
| @@ -975,7 +1034,14 @@ async fn main() -> Result<(), Error> { | ||||
|                 loop { | ||||
|                     let ret = collector.get_stun_info(); | ||||
|                     if ret.udp_nat_type != NatType::Unknown as i32 { | ||||
|                         println!("stun info: {:#?}", ret); | ||||
|                         if cli.output_format == OutputFormat::Json { | ||||
|                             match serde_json::to_string_pretty(&ret) { | ||||
|                                 Ok(json) => println!("{}", json), | ||||
|                                 Err(e) => eprintln!("Error serializing to JSON: {}", e), | ||||
|                             } | ||||
|                         } else { | ||||
|                             println!("stun info: {:#?}", ret); | ||||
|                         } | ||||
|                         break; | ||||
|                     } | ||||
|                     tokio::time::sleep(Duration::from_millis(200)).await; | ||||
| @@ -993,27 +1059,46 @@ async fn main() -> Result<(), Error> { | ||||
|                 ) | ||||
|                 .await?; | ||||
|  | ||||
|             #[derive(tabled::Tabled)] | ||||
|             #[derive(tabled::Tabled, serde::Serialize)] | ||||
|             struct PeerCenterTableItem { | ||||
|                 node_id: String, | ||||
|                 direct_peers: String, | ||||
|                 #[tabled(rename = "direct_peers")] | ||||
|                 #[serde(skip_serializing)] | ||||
|                 direct_peers_str: String, | ||||
|                 #[tabled(skip)] | ||||
|                 direct_peers: Vec<DirectPeerItem>, | ||||
|             } | ||||
|  | ||||
|             #[derive(serde::Serialize)] | ||||
|             struct DirectPeerItem { | ||||
|                 node_id: String, | ||||
|                 latency_ms: i32, | ||||
|             } | ||||
|  | ||||
|             let mut table_rows = vec![]; | ||||
|             for (k, v) in resp.global_peer_map.iter() { | ||||
|                 let node_id = k; | ||||
|                 let direct_peers = v | ||||
|                 let direct_peers_strs = v | ||||
|                     .direct_peers | ||||
|                     .iter() | ||||
|                     .map(|(k, v)| format!("{}: {:?}ms", k, v.latency_ms,)) | ||||
|                     .collect::<Vec<_>>(); | ||||
|                 let direct_peers: Vec<_> = v | ||||
|                     .direct_peers | ||||
|                     .iter() | ||||
|                     .map(|(k, v)| DirectPeerItem { | ||||
|                         node_id: k.to_string(), | ||||
|                         latency_ms: v.latency_ms, | ||||
|                     }) | ||||
|                     .collect(); | ||||
|                 table_rows.push(PeerCenterTableItem { | ||||
|                     node_id: node_id.to_string(), | ||||
|                     direct_peers: direct_peers.join("\n"), | ||||
|                     direct_peers_str: direct_peers_strs.join("\n"), | ||||
|                     direct_peers, | ||||
|                 }); | ||||
|             } | ||||
|  | ||||
|             println!("{}", tabled::Table::new(table_rows).with(Style::modern())); | ||||
|             print_output(&table_rows, &cli.output_format)?; | ||||
|         } | ||||
|         SubCommand::VpnPortal => { | ||||
|             let vpn_portal_client = handler.get_vpn_portal_client().await?; | ||||
| @@ -1045,6 +1130,11 @@ async fn main() -> Result<(), Error> { | ||||
|                 .ok_or(anyhow::anyhow!("node info not found"))?; | ||||
|             match sub_cmd.sub_command { | ||||
|                 Some(NodeSubCommand::Info) | None => { | ||||
|                     if cli.verbose || cli.output_format == OutputFormat::Json { | ||||
|                         println!("{}", serde_json::to_string_pretty(&node_info)?); | ||||
|                         return Ok(()); | ||||
|                     } | ||||
|  | ||||
|                     let stun_info = node_info.stun_info.clone().unwrap_or_default(); | ||||
|                     let ip_list = node_info.ip_list.clone().unwrap_or_default(); | ||||
|  | ||||
| @@ -1168,25 +1258,21 @@ async fn main() -> Result<(), Error> { | ||||
|         } | ||||
|         SubCommand::Proxy => { | ||||
|             let mut entries = vec![]; | ||||
|             let client = handler.get_tcp_proxy_client("tcp").await?; | ||||
|             let ret = client | ||||
|                 .list_tcp_proxy_entry(BaseController::default(), Default::default()) | ||||
|                 .await; | ||||
|             entries.extend(ret.unwrap_or_default().entries); | ||||
|  | ||||
|             let client = handler.get_tcp_proxy_client("kcp_src").await?; | ||||
|             let ret = client | ||||
|                 .list_tcp_proxy_entry(BaseController::default(), Default::default()) | ||||
|                 .await; | ||||
|             entries.extend(ret.unwrap_or_default().entries); | ||||
|             for client_type in &["tcp", "kcp_src", "kcp_dst", "quic_src", "quic_dst"] { | ||||
|                 let client = handler.get_tcp_proxy_client(client_type).await?; | ||||
|                 let ret = client | ||||
|                     .list_tcp_proxy_entry(BaseController::default(), Default::default()) | ||||
|                     .await; | ||||
|                 entries.extend(ret.unwrap_or_default().entries); | ||||
|             } | ||||
|  | ||||
|             let client = handler.get_tcp_proxy_client("kcp_dst").await?; | ||||
|             let ret = client | ||||
|                 .list_tcp_proxy_entry(BaseController::default(), Default::default()) | ||||
|                 .await; | ||||
|             entries.extend(ret.unwrap_or_default().entries); | ||||
|             if cli.verbose { | ||||
|                 println!("{}", serde_json::to_string_pretty(&entries)?); | ||||
|                 return Ok(()); | ||||
|             } | ||||
|  | ||||
|             #[derive(tabled::Tabled)] | ||||
|             #[derive(tabled::Tabled, serde::Serialize)] | ||||
|             struct TableItem { | ||||
|                 src: String, | ||||
|                 dst: String, | ||||
| @@ -1215,7 +1301,7 @@ async fn main() -> Result<(), Error> { | ||||
|                 }) | ||||
|                 .collect::<Vec<_>>(); | ||||
|  | ||||
|             println!("{}", tabled::Table::new(table_rows).with(Style::modern())); | ||||
|             print_output(&table_rows, &cli.output_format)?; | ||||
|         } | ||||
|     } | ||||
|  | ||||
|   | ||||
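A small usage sketch for the print_output helper and the -o/--output flag introduced above (assumes the OutputFormat enum and print_output from this diff are in scope): any row type deriving both tabled::Tabled and serde::Serialize can be rendered as a table or as pretty JSON.

    #[derive(tabled::Tabled, serde::Serialize)]
    struct ExampleRow {
        name: String,
        value: u32,
    }

    fn demo(format: &OutputFormat) -> Result<(), anyhow::Error> {
        let rows = vec![
            ExampleRow { name: "a".into(), value: 1 },
            ExampleRow { name: "b".into(), value: 2 },
        ];
        // prints a modern-style table for OutputFormat::Table, pretty JSON otherwise
        print_output(&rows, format)
    }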
										
											
File diff suppressed because it is too large
							| @@ -1,7 +1,7 @@ | ||||
| use std::{ | ||||
|     mem::MaybeUninit, | ||||
|     net::{IpAddr, Ipv4Addr, SocketAddrV4}, | ||||
|     sync::Arc, | ||||
|     sync::{Arc, Weak}, | ||||
|     thread, | ||||
|     time::Duration, | ||||
| }; | ||||
| @@ -34,7 +34,7 @@ use super::{ | ||||
|  | ||||
| #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] | ||||
| struct IcmpNatKey { | ||||
|     dst_ip: std::net::IpAddr, | ||||
|     real_dst_ip: std::net::IpAddr, | ||||
|     icmp_id: u16, | ||||
|     icmp_seq: u16, | ||||
| } | ||||
| @@ -45,15 +45,22 @@ struct IcmpNatEntry { | ||||
|     my_peer_id: PeerId, | ||||
|     src_ip: IpAddr, | ||||
|     start_time: std::time::Instant, | ||||
|     mapped_dst_ip: std::net::Ipv4Addr, | ||||
| } | ||||
|  | ||||
| impl IcmpNatEntry { | ||||
|     fn new(src_peer_id: PeerId, my_peer_id: PeerId, src_ip: IpAddr) -> Result<Self, Error> { | ||||
|     fn new( | ||||
|         src_peer_id: PeerId, | ||||
|         my_peer_id: PeerId, | ||||
|         src_ip: IpAddr, | ||||
|         mapped_dst_ip: Ipv4Addr, | ||||
|     ) -> Result<Self, Error> { | ||||
|         Ok(Self { | ||||
|             src_peer_id, | ||||
|             my_peer_id, | ||||
|             src_ip, | ||||
|             start_time: std::time::Instant::now(), | ||||
|             mapped_dst_ip, | ||||
|         }) | ||||
|     } | ||||
| } | ||||
| @@ -65,10 +72,10 @@ type NewPacketReceiver = tokio::sync::mpsc::UnboundedReceiver<IcmpNatKey>; | ||||
| #[derive(Debug)] | ||||
| pub struct IcmpProxy { | ||||
|     global_ctx: ArcGlobalCtx, | ||||
|     peer_manager: Arc<PeerManager>, | ||||
|     peer_manager: Weak<PeerManager>, | ||||
|  | ||||
|     cidr_set: CidrSet, | ||||
|     socket: std::sync::Mutex<Option<socket2::Socket>>, | ||||
|     socket: std::sync::Mutex<Option<Arc<socket2::Socket>>>, | ||||
|  | ||||
|     nat_table: IcmpNatTable, | ||||
|  | ||||
| @@ -78,7 +85,10 @@ pub struct IcmpProxy { | ||||
|     icmp_sender: Arc<std::sync::Mutex<Option<UnboundedSender<ZCPacket>>>>, | ||||
| } | ||||
|  | ||||
| fn socket_recv(socket: &Socket, buf: &mut [MaybeUninit<u8>]) -> Result<(usize, IpAddr), Error> { | ||||
| fn socket_recv( | ||||
|     socket: &Socket, | ||||
|     buf: &mut [MaybeUninit<u8>], | ||||
| ) -> Result<(usize, IpAddr), std::io::Error> { | ||||
|     let (size, addr) = socket.recv_from(buf)?; | ||||
|     let addr = match addr.as_socket() { | ||||
|         None => IpAddr::V4(Ipv4Addr::UNSPECIFIED), | ||||
| @@ -87,15 +97,32 @@ fn socket_recv(socket: &Socket, buf: &mut [MaybeUninit<u8>]) -> Result<(usize, I | ||||
|     Ok((size, addr)) | ||||
| } | ||||
|  | ||||
| fn socket_recv_loop(socket: Socket, nat_table: IcmpNatTable, sender: UnboundedSender<ZCPacket>) { | ||||
| fn socket_recv_loop( | ||||
|     socket: Arc<Socket>, | ||||
|     nat_table: IcmpNatTable, | ||||
|     sender: UnboundedSender<ZCPacket>, | ||||
| ) { | ||||
|     let mut buf = [0u8; 8192]; | ||||
|     let data: &mut [MaybeUninit<u8>] = unsafe { std::mem::transmute(&mut buf[..]) }; | ||||
|  | ||||
|     loop { | ||||
|         let Ok((len, peer_ip)) = socket_recv(&socket, data) else { | ||||
|             continue; | ||||
|         let (len, peer_ip) = match socket_recv(&socket, data) { | ||||
|             Ok((len, peer_ip)) => (len, peer_ip), | ||||
|             Err(e) => { | ||||
|                 tracing::error!("recv icmp packet failed: {:?}", e); | ||||
|                 if sender.is_closed() { | ||||
|                     break; | ||||
|                 } else { | ||||
|                     continue; | ||||
|                 } | ||||
|             } | ||||
|         }; | ||||
|  | ||||
|         if len <= 0 { | ||||
|             tracing::error!("recv empty packet, len: {}", len); | ||||
|             return; | ||||
|         } | ||||
|  | ||||
|         if !peer_ip.is_ipv4() { | ||||
|             continue; | ||||
|         } | ||||
| @@ -114,7 +141,7 @@ fn socket_recv_loop(socket: Socket, nat_table: IcmpNatTable, sender: UnboundedSe | ||||
|         } | ||||
|  | ||||
|         let key = IcmpNatKey { | ||||
|             dst_ip: peer_ip, | ||||
|             real_dst_ip: peer_ip, | ||||
|             icmp_id: icmp_packet.get_identifier(), | ||||
|             icmp_seq: icmp_packet.get_sequence_number(), | ||||
|         }; | ||||
| @@ -128,12 +155,11 @@ fn socket_recv_loop(socket: Socket, nat_table: IcmpNatTable, sender: UnboundedSe | ||||
|             continue; | ||||
|         }; | ||||
|  | ||||
|         let src_v4 = ipv4_packet.get_source(); | ||||
|         let payload_len = len - ipv4_packet.get_header_length() as usize * 4; | ||||
|         let id = ipv4_packet.get_identification(); | ||||
|         let _ = compose_ipv4_packet( | ||||
|             &mut buf[..], | ||||
|             &src_v4, | ||||
|             &v.mapped_dst_ip, | ||||
|             &dest_ip, | ||||
|             IpNextHeaderProtocols::Icmp, | ||||
|             payload_len, | ||||
| @@ -176,7 +202,7 @@ impl IcmpProxy { | ||||
|         let cidr_set = CidrSet::new(global_ctx.clone()); | ||||
|         let ret = Self { | ||||
|             global_ctx, | ||||
|             peer_manager, | ||||
|             peer_manager: Arc::downgrade(&peer_manager), | ||||
|             cidr_set, | ||||
|             socket: std::sync::Mutex::new(None), | ||||
|  | ||||
| @@ -208,7 +234,7 @@ impl IcmpProxy { | ||||
|         let socket = self.create_raw_socket(); | ||||
|         match socket { | ||||
|             Ok(socket) => { | ||||
|                 self.socket.lock().unwrap().replace(socket); | ||||
|                 self.socket.lock().unwrap().replace(Arc::new(socket)); | ||||
|             } | ||||
|             Err(e) => { | ||||
|                 tracing::warn!("create icmp socket failed: {:?}", e); | ||||
| @@ -241,7 +267,7 @@ impl IcmpProxy { | ||||
|         let (sender, mut receiver) = tokio::sync::mpsc::unbounded_channel(); | ||||
|         self.icmp_sender.lock().unwrap().replace(sender.clone()); | ||||
|         if let Some(socket) = self.socket.lock().unwrap().as_ref() { | ||||
|             let socket = socket.try_clone()?; | ||||
|             let socket = socket.clone(); | ||||
|             let nat_table = self.nat_table.clone(); | ||||
|             thread::spawn(|| { | ||||
|                 socket_recv_loop(socket, nat_table, sender); | ||||
| @@ -254,7 +280,11 @@ impl IcmpProxy { | ||||
|                 while let Some(msg) = receiver.recv().await { | ||||
|                     let hdr = msg.peer_manager_header().unwrap(); | ||||
|                     let to_peer_id = hdr.to_peer_id.into(); | ||||
|                     let ret = peer_manager.send_msg(msg, to_peer_id).await; | ||||
|                     let Some(pm) = peer_manager.upgrade() else { | ||||
|                         tracing::warn!("peer manager is gone, icmp proxy send loop exit"); | ||||
|                         return; | ||||
|                     }; | ||||
|                     let ret = pm.send_msg(msg, to_peer_id).await; | ||||
|                     if ret.is_err() { | ||||
|                         tracing::error!("send icmp packet to peer failed: {:?}", ret); | ||||
|                     } | ||||
| @@ -271,9 +301,12 @@ impl IcmpProxy { | ||||
|             } | ||||
|         }); | ||||
|  | ||||
|         self.peer_manager | ||||
|             .add_packet_process_pipeline(Box::new(self.clone())) | ||||
|             .await; | ||||
|         let Some(pm) = self.peer_manager.upgrade() else { | ||||
|             tracing::warn!("peer manager is gone, icmp proxy init failed"); | ||||
|             return Err(anyhow::anyhow!("peer manager is gone").into()); | ||||
|         }; | ||||
|  | ||||
|         pm.add_packet_process_pipeline(Box::new(self.clone())).await; | ||||
|         Ok(()) | ||||
|     } | ||||
|  | ||||
| @@ -361,7 +394,11 @@ impl IcmpProxy { | ||||
|             return None; | ||||
|         } | ||||
|  | ||||
|         if !self.cidr_set.contains_v4(ipv4.get_destination()) | ||||
|         let mut real_dst_ip = ipv4.get_destination(); | ||||
|  | ||||
|         if !self | ||||
|             .cidr_set | ||||
|             .contains_v4(ipv4.get_destination(), &mut real_dst_ip) | ||||
|             && !is_exit_node | ||||
|             && !(self.global_ctx.no_tun() | ||||
|                 && Some(ipv4.get_destination()) | ||||
| @@ -416,7 +453,7 @@ impl IcmpProxy { | ||||
|         let icmp_seq = icmp_packet.get_sequence_number(); | ||||
|  | ||||
|         let key = IcmpNatKey { | ||||
|             dst_ip: ipv4.get_destination().into(), | ||||
|             real_dst_ip: real_dst_ip.into(), | ||||
|             icmp_id, | ||||
|             icmp_seq, | ||||
|         }; | ||||
| @@ -425,6 +462,7 @@ impl IcmpProxy { | ||||
|             hdr.from_peer_id.into(), | ||||
|             hdr.to_peer_id.into(), | ||||
|             ipv4.get_source().into(), | ||||
|             ipv4.get_destination(), | ||||
|         ) | ||||
|         .ok()?; | ||||
|  | ||||
| @@ -432,10 +470,24 @@ impl IcmpProxy { | ||||
|             tracing::info!("icmp nat table entry replaced: {:?}", old); | ||||
|         } | ||||
|  | ||||
|         if let Err(e) = self.send_icmp_packet(ipv4.get_destination(), &icmp_packet) { | ||||
|         if let Err(e) = self.send_icmp_packet(real_dst_ip, &icmp_packet) { | ||||
|             tracing::error!("send icmp packet failed: {:?}", e); | ||||
|         } | ||||
|  | ||||
|         Some(()) | ||||
|     } | ||||
| } | ||||
|  | ||||
| impl Drop for IcmpProxy { | ||||
|     fn drop(&mut self) { | ||||
|         tracing::info!( | ||||
|             "dropping icmp proxy, {:?}", | ||||
|             self.socket.lock().unwrap().as_ref() | ||||
|         ); | ||||
|         self.socket.lock().unwrap().as_ref().and_then(|s| { | ||||
|             tracing::info!("shutting down icmp socket"); | ||||
|             let _ = s.shutdown(std::net::Shutdown::Both); | ||||
|             Some(()) | ||||
|         }); | ||||
|     } | ||||
| } | ||||
|   | ||||
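The ConnectorManagerData and IcmpProxy changes above switch from Arc<PeerManager> to Weak<PeerManager> so long-lived background loops no longer keep the peer manager alive. A minimal illustration of the pattern (using Weak<String> as a stand-in type): upgrade on each iteration and exit once the owner has dropped.

    use std::sync::Weak;

    async fn send_loop(peer_manager: Weak<String> /* stand-in for Weak<PeerManager> */) {
        loop {
            let Some(pm) = peer_manager.upgrade() else {
                eprintln!("peer manager is gone, exiting loop");
                return;
            };
            println!("still alive: {pm}");
            drop(pm); // don't hold the strong reference across the sleep
            tokio::time::sleep(std::time::Duration::from_secs(1)).await;
        }
    }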
| @@ -45,11 +45,13 @@ impl IpPacket { | ||||
|         // make sure the fragment doesn't overlap with existing fragments | ||||
|         for f in &self.fragments { | ||||
|             if f.offset <= fragment.offset && fragment.offset < f.offset + f.data.len() as u16 { | ||||
|                 tracing::trace!("fragment overlap 1, f.offset = {}, fragment.offset = {}, f.data.len() = {}, fragment.data.len() = {}", f.offset, fragment.offset, f.data.len(), fragment.data.len()); | ||||
|                 return; | ||||
|             } | ||||
|             if fragment.offset <= f.offset | ||||
|                 && f.offset < fragment.offset + fragment.data.len() as u16 | ||||
|             { | ||||
|                 tracing::trace!("fragment overlap 2, f.offset = {}, fragment.offset = {}, f.data.len() = {}, fragment.data.len() = {}", f.offset, fragment.offset, f.data.len(), fragment.data.len()); | ||||
|                 return; | ||||
|             } | ||||
|         } | ||||
| @@ -151,6 +153,13 @@ impl IpReassembler { | ||||
|             id, | ||||
|         }; | ||||
|  | ||||
|         tracing::trace!( | ||||
|             ?key, | ||||
|             "add fragment, offset = {}, total_length = {}", | ||||
|             fragment.offset, | ||||
|             total_length | ||||
|         ); | ||||
|  | ||||
|         let mut entry = self.packets.entry(key.clone()).or_insert_with(|| { | ||||
|             let packet = IpPacket::new(source, destination); | ||||
|             let timestamp = Instant::now(); | ||||
|   | ||||
| @@ -20,7 +20,7 @@ use pnet::packet::{ | ||||
|     Packet as _, | ||||
| }; | ||||
| use prost::Message; | ||||
| use tokio::{io::copy_bidirectional, task::JoinSet}; | ||||
| use tokio::{io::copy_bidirectional, select, task::JoinSet}; | ||||
|  | ||||
| use super::{ | ||||
|     tcp_proxy::{NatDstConnector, NatDstTcpConnector, TcpProxy}, | ||||
| @@ -106,8 +106,8 @@ async fn handle_kcp_output( | ||||
|  | ||||
| #[derive(Debug, Clone)] | ||||
| pub struct NatDstKcpConnector { | ||||
|     kcp_endpoint: Arc<KcpEndpoint>, | ||||
|     peer_mgr: Arc<PeerManager>, | ||||
|     pub(crate) kcp_endpoint: Arc<KcpEndpoint>, | ||||
|     pub(crate) peer_mgr: Weak<PeerManager>, | ||||
| } | ||||
|  | ||||
| #[async_trait::async_trait] | ||||
| @@ -120,35 +120,68 @@ impl NatDstConnector for NatDstKcpConnector { | ||||
|             dst: Some(nat_dst.into()), | ||||
|         }; | ||||
|  | ||||
|         let (dst_peers, _) = match nat_dst { | ||||
|             SocketAddr::V4(addr) => { | ||||
|                 let ip = addr.ip(); | ||||
|                 self.peer_mgr.get_msg_dst_peer(&ip).await | ||||
|             } | ||||
|         let Some(peer_mgr) = self.peer_mgr.upgrade() else { | ||||
|             return Err(anyhow::anyhow!("peer manager is not available").into()); | ||||
|         }; | ||||
|  | ||||
|         let dst_peer_id = match nat_dst { | ||||
|             SocketAddr::V4(addr) => peer_mgr.get_peer_map().get_peer_id_by_ipv4(addr.ip()).await, | ||||
|             SocketAddr::V6(_) => return Err(anyhow::anyhow!("ipv6 is not supported").into()), | ||||
|         }; | ||||
|  | ||||
|         tracing::trace!("kcp nat dst: {:?}, dst peers: {:?}", nat_dst, dst_peers); | ||||
|         let Some(dst_peer) = dst_peer_id else { | ||||
|             return Err(anyhow::anyhow!("no peer found for nat dst: {}", nat_dst).into()); | ||||
|         }; | ||||
|  | ||||
|         if dst_peers.len() != 1 { | ||||
|             return Err(anyhow::anyhow!("no dst peer found for nat dst: {}", nat_dst).into()); | ||||
|         tracing::trace!("kcp nat dst: {:?}, dst peers: {:?}", nat_dst, dst_peer); | ||||
|  | ||||
|         let mut connect_tasks: JoinSet<std::result::Result<ConnId, anyhow::Error>> = JoinSet::new(); | ||||
|         let mut retry_remain = 5; | ||||
|         loop { | ||||
|             select! { | ||||
|                 Some(Ok(Ok(ret))) = connect_tasks.join_next() => { | ||||
|                     // just wait for the previous connection to finish | ||||
|                     let stream = KcpStream::new(&self.kcp_endpoint, ret) | ||||
|                         .ok_or(anyhow::anyhow!("failed to create kcp stream"))?; | ||||
|                     return Ok(stream); | ||||
|                 } | ||||
|                 _ = tokio::time::sleep(Duration::from_millis(200)), if !connect_tasks.is_empty() && retry_remain > 0 => { | ||||
|                     // no successful connection yet, trigger another connection attempt | ||||
|                 } | ||||
|                 else => { | ||||
|                     // got error in connect_tasks, continue to retry | ||||
|                     if retry_remain == 0 && connect_tasks.is_empty() { | ||||
|                         break; | ||||
|                     } | ||||
|                 } | ||||
|             } | ||||
|  | ||||
|             // create a new connection task | ||||
|             if retry_remain == 0 { | ||||
|                 continue; | ||||
|             } | ||||
|             retry_remain -= 1; | ||||
|  | ||||
|             let kcp_endpoint = self.kcp_endpoint.clone(); | ||||
|             let my_peer_id = peer_mgr.my_peer_id(); | ||||
|             let conn_data_clone = conn_data.clone(); | ||||
|  | ||||
|             connect_tasks.spawn(async move { | ||||
|                 kcp_endpoint | ||||
|                     .connect( | ||||
|                         Duration::from_secs(10), | ||||
|                         my_peer_id, | ||||
|                         dst_peer, | ||||
|                         Bytes::from(conn_data_clone.encode_to_vec()), | ||||
|                     ) | ||||
|                     .await | ||||
|                     .with_context(|| { | ||||
|                         format!("failed to connect to nat dst: {}", nat_dst.to_string()) | ||||
|                     }) | ||||
|             }); | ||||
|         } | ||||
|  | ||||
|         let ret = self | ||||
|             .kcp_endpoint | ||||
|             .connect( | ||||
|                 Duration::from_secs(10), | ||||
|                 self.peer_mgr.my_peer_id(), | ||||
|                 dst_peers[0], | ||||
|                 Bytes::from(conn_data.encode_to_vec()), | ||||
|             ) | ||||
|             .await | ||||
|             .with_context(|| format!("failed to connect to nat dst: {}", nat_dst.to_string()))?; | ||||
|  | ||||
|         let stream = KcpStream::new(&self.kcp_endpoint, ret) | ||||
|             .ok_or(anyhow::anyhow!("failed to create kcp stream"))?; | ||||
|  | ||||
|         Ok(stream) | ||||
|         Err(anyhow::anyhow!("failed to connect to nat dst: {}", nat_dst).into()) | ||||
|     } | ||||
|  | ||||
|     fn check_packet_from_peer_fast(&self, _cidr_set: &CidrSet, _global_ctx: &GlobalCtx) -> bool { | ||||
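The new connect() above races several KCP connection attempts, starting another one every 200 ms until one succeeds or the retry budget is exhausted. A simplified, self-contained sketch of the same idea (hypothetical try_connect in place of kcp_endpoint.connect, not the actual code):

    use std::time::Duration;
    use tokio::task::JoinSet;

    // hypothetical single attempt; stands in for kcp_endpoint.connect(...)
    async fn try_connect(attempt: u32) -> anyhow::Result<u32> {
        tokio::time::sleep(Duration::from_millis(50)).await;
        Ok(attempt)
    }

    // Start a new attempt every 200ms (up to max_attempts), keep them all
    // running in parallel, and return the first one that succeeds.
    async fn connect_racing(max_attempts: u32) -> anyhow::Result<u32> {
        let mut tasks: JoinSet<anyhow::Result<u32>> = JoinSet::new();
        let mut started = 0u32;
        loop {
            if started < max_attempts {
                started += 1;
                tasks.spawn(try_connect(started));
            }
            tokio::select! {
                Some(joined) = tasks.join_next() => match joined {
                    Ok(Ok(conn)) => return Ok(conn),
                    // attempt failed or panicked: loop and maybe start another one
                    _ => {}
                },
                _ = tokio::time::sleep(Duration::from_millis(200)), if started < max_attempts => {
                    // no result yet: loop around and launch one more parallel attempt
                }
                else => return Err(anyhow::anyhow!("all {started} attempts failed")),
            }
        }
    }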
| @@ -161,8 +194,9 @@ impl NatDstConnector for NatDstKcpConnector { | ||||
|         _global_ctx: &GlobalCtx, | ||||
|         hdr: &PeerManagerHeader, | ||||
|         _ipv4: &Ipv4Packet, | ||||
|         _real_dst_ip: &mut Ipv4Addr, | ||||
|     ) -> bool { | ||||
|         return hdr.from_peer_id == hdr.to_peer_id; | ||||
|         return hdr.from_peer_id == hdr.to_peer_id && hdr.is_kcp_src_modified(); | ||||
|     } | ||||
|  | ||||
|     fn transport_type(&self) -> TcpProxyEntryTransportType { | ||||
| @@ -173,32 +207,41 @@ impl NatDstConnector for NatDstKcpConnector { | ||||
| #[derive(Clone)] | ||||
| struct TcpProxyForKcpSrc(Arc<TcpProxy<NatDstKcpConnector>>); | ||||
|  | ||||
| pub struct KcpProxySrc { | ||||
|     kcp_endpoint: Arc<KcpEndpoint>, | ||||
|     peer_manager: Arc<PeerManager>, | ||||
|  | ||||
|     tcp_proxy: TcpProxyForKcpSrc, | ||||
|     tasks: JoinSet<()>, | ||||
| #[async_trait::async_trait] | ||||
| pub(crate) trait TcpProxyForKcpSrcTrait: Send + Sync + 'static { | ||||
|     type Connector: NatDstConnector; | ||||
|     fn get_tcp_proxy(&self) -> &Arc<TcpProxy<Self::Connector>>; | ||||
|     async fn check_dst_allow_kcp_input(&self, dst_ip: &Ipv4Addr) -> bool; | ||||
| } | ||||
|  | ||||
| impl TcpProxyForKcpSrc { | ||||
| #[async_trait::async_trait] | ||||
| impl TcpProxyForKcpSrcTrait for TcpProxyForKcpSrc { | ||||
|     type Connector = NatDstKcpConnector; | ||||
|  | ||||
|     fn get_tcp_proxy(&self) -> &Arc<TcpProxy<Self::Connector>> { | ||||
|         &self.0 | ||||
|     } | ||||
|  | ||||
|     async fn check_dst_allow_kcp_input(&self, dst_ip: &Ipv4Addr) -> bool { | ||||
|         let peer_map: Arc<crate::peers::peer_map::PeerMap> = | ||||
|             self.0.get_peer_manager().get_peer_map(); | ||||
|         let Some(dst_peer_id) = peer_map.get_peer_id_by_ipv4(dst_ip).await else { | ||||
|             return false; | ||||
|         }; | ||||
|         let Some(feature_flag) = peer_map.get_peer_feature_flag(dst_peer_id).await else { | ||||
|         let Some(peer_info) = peer_map.get_route_peer_info(dst_peer_id).await else { | ||||
|             return false; | ||||
|         }; | ||||
|         feature_flag.kcp_input | ||||
|         peer_info.feature_flag.map(|x| x.kcp_input).unwrap_or(false) | ||||
|     } | ||||
| } | ||||
|  | ||||
| #[async_trait::async_trait] | ||||
| impl NicPacketFilter for TcpProxyForKcpSrc { | ||||
| impl<C: NatDstConnector, T: TcpProxyForKcpSrcTrait<Connector = C>> NicPacketFilter for T { | ||||
|     async fn try_process_packet_from_nic(&self, zc_packet: &mut ZCPacket) -> bool { | ||||
|         let ret = self.0.try_process_packet_from_nic(zc_packet).await; | ||||
|         let ret = self | ||||
|             .get_tcp_proxy() | ||||
|             .try_process_packet_from_nic(zc_packet) | ||||
|             .await; | ||||
|         if ret { | ||||
|             return true; | ||||
|         } | ||||
| @@ -225,29 +268,45 @@ impl NicPacketFilter for TcpProxyForKcpSrc { | ||||
|             } | ||||
|         } else { | ||||
|             // if this is not a SYN packet, only allow already-established proxy connections | ||||
|             if !self.0.is_tcp_proxy_connection(SocketAddr::new( | ||||
|                 IpAddr::V4(ip_packet.get_source()), | ||||
|                 tcp_packet.get_source(), | ||||
|             )) { | ||||
|             if !self | ||||
|                 .get_tcp_proxy() | ||||
|                 .is_tcp_proxy_connection(SocketAddr::new( | ||||
|                     IpAddr::V4(ip_packet.get_source()), | ||||
|                     tcp_packet.get_source(), | ||||
|                 )) | ||||
|             { | ||||
|                 return false; | ||||
|             } | ||||
|         } | ||||
|  | ||||
|         if let Some(my_ipv4) = self.0.get_global_ctx().get_ipv4() { | ||||
|         if let Some(my_ipv4) = self.get_tcp_proxy().get_global_ctx().get_ipv4() { | ||||
|             // this is a net-to-net packet; only allow it when smoltcp is enabled, | ||||
|             // because the syn-ack packet will not go through (and be handled by) the tun device | ||||
|             // when the source ip is in the local network | ||||
|             if ip_packet.get_source() != my_ipv4.address() && !self.0.is_smoltcp_enabled() { | ||||
|             if ip_packet.get_source() != my_ipv4.address() | ||||
|                 && !self.get_tcp_proxy().is_smoltcp_enabled() | ||||
|             { | ||||
|                 return false; | ||||
|             } | ||||
|         }; | ||||
|  | ||||
|         zc_packet.mut_peer_manager_header().unwrap().to_peer_id = self.0.get_my_peer_id().into(); | ||||
|  | ||||
|         let hdr = zc_packet.mut_peer_manager_header().unwrap(); | ||||
|         hdr.to_peer_id = self.get_tcp_proxy().get_my_peer_id().into(); | ||||
|         if self.get_tcp_proxy().get_transport_type() == TcpProxyEntryTransportType::Kcp { | ||||
|             hdr.set_kcp_src_modified(true); | ||||
|         } | ||||
|         true | ||||
|     } | ||||
| } | ||||
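Turning the concrete `impl NicPacketFilter for TcpProxyForKcpSrc` into a blanket `impl<C, T> NicPacketFilter for T` means every front-end that implements `TcpProxyForKcpSrcTrait` (the KCP wrapper here, the QUIC wrapper in the new file below) gets the NIC-side packet rewriting for free. A stripped-down sketch of the pattern, with placeholder trait and type names standing in for the EasyTier ones:

```rust
// Sketch of the blanket-impl pattern used above; `Connector`, `Proxy`,
// `ProxyFrontend`, and `PacketFilter` are stand-ins for the EasyTier types.
use std::sync::Arc;

trait Connector: Send + Sync + 'static {}

struct Proxy<C: Connector> {
    connector: C,
}

trait ProxyFrontend: Send + Sync + 'static {
    type Connector: Connector;
    fn proxy(&self) -> &Arc<Proxy<Self::Connector>>;
}

trait PacketFilter {
    fn process(&self, pkt: &mut Vec<u8>) -> bool;
}

// One blanket impl covers every front-end (KCP, QUIC, ...).
impl<C: Connector, T: ProxyFrontend<Connector = C>> PacketFilter for T {
    fn process(&self, pkt: &mut Vec<u8>) -> bool {
        // Delegate to the shared proxy; the real code rewrites the peer header here.
        let _proxy = self.proxy();
        !pkt.is_empty()
    }
}

struct KcpConnector;
impl Connector for KcpConnector {}

struct KcpFrontend(Arc<Proxy<KcpConnector>>);
impl ProxyFrontend for KcpFrontend {
    type Connector = KcpConnector;
    fn proxy(&self) -> &Arc<Proxy<Self::Connector>> {
        &self.0
    }
}

fn main() {
    let fe = KcpFrontend(Arc::new(Proxy { connector: KcpConnector }));
    assert!(fe.process(&mut vec![0u8]));
}
```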
|  | ||||
| pub struct KcpProxySrc { | ||||
|     kcp_endpoint: Arc<KcpEndpoint>, | ||||
|     peer_manager: Arc<PeerManager>, | ||||
|  | ||||
|     tcp_proxy: TcpProxyForKcpSrc, | ||||
|     tasks: JoinSet<()>, | ||||
| } | ||||
|  | ||||
| impl KcpProxySrc { | ||||
|     pub async fn new(peer_manager: Arc<PeerManager>) -> Self { | ||||
|         let mut kcp_endpoint = create_kcp_endpoint(); | ||||
| @@ -268,7 +327,7 @@ impl KcpProxySrc { | ||||
|             peer_manager.clone(), | ||||
|             NatDstKcpConnector { | ||||
|                 kcp_endpoint: kcp_endpoint.clone(), | ||||
|                 peer_mgr: peer_manager.clone(), | ||||
|                 peer_mgr: Arc::downgrade(&peer_manager), | ||||
|             }, | ||||
|         ); | ||||
|  | ||||
| @@ -299,12 +358,17 @@ impl KcpProxySrc { | ||||
|     pub fn get_tcp_proxy(&self) -> Arc<TcpProxy<NatDstKcpConnector>> { | ||||
|         self.tcp_proxy.0.clone() | ||||
|     } | ||||
|  | ||||
|     pub fn get_kcp_endpoint(&self) -> Arc<KcpEndpoint> { | ||||
|         self.kcp_endpoint.clone() | ||||
|     } | ||||
| } | ||||
|  | ||||
| pub struct KcpProxyDst { | ||||
|     kcp_endpoint: Arc<KcpEndpoint>, | ||||
|     peer_manager: Arc<PeerManager>, | ||||
|     proxy_entries: Arc<DashMap<ConnId, TcpProxyEntry>>, | ||||
|     cidr_set: Arc<CidrSet>, | ||||
|     tasks: JoinSet<()>, | ||||
| } | ||||
|  | ||||
| @@ -320,11 +384,12 @@ impl KcpProxyDst { | ||||
|             output_receiver, | ||||
|             false, | ||||
|         )); | ||||
|  | ||||
|         let cidr_set = CidrSet::new(peer_manager.get_global_ctx()); | ||||
|         Self { | ||||
|             kcp_endpoint: Arc::new(kcp_endpoint), | ||||
|             peer_manager, | ||||
|             proxy_entries: Arc::new(DashMap::new()), | ||||
|             cidr_set: Arc::new(cidr_set), | ||||
|             tasks, | ||||
|         } | ||||
|     } | ||||
| @@ -334,6 +399,7 @@ impl KcpProxyDst { | ||||
|         mut kcp_stream: KcpStream, | ||||
|         global_ctx: ArcGlobalCtx, | ||||
|         proxy_entries: Arc<DashMap<ConnId, TcpProxyEntry>>, | ||||
|         cidr_set: Arc<CidrSet>, | ||||
|     ) -> Result<()> { | ||||
|         let mut conn_data = kcp_stream.conn_data().clone(); | ||||
|         let parsed_conn_data = KcpConnData::decode(&mut conn_data) | ||||
| @@ -346,6 +412,16 @@ impl KcpProxyDst { | ||||
|             ))? | ||||
|             .into(); | ||||
|  | ||||
|         match dst_socket.ip() { | ||||
|             IpAddr::V4(dst_v4_ip) => { | ||||
|                 let mut real_ip = dst_v4_ip; | ||||
|                 if cidr_set.contains_v4(dst_v4_ip, &mut real_ip) { | ||||
|                     dst_socket.set_ip(real_ip.into()); | ||||
|                 } | ||||
|             } | ||||
|             _ => {} | ||||
|         }; | ||||
|  | ||||
|         let conn_id = kcp_stream.conn_id(); | ||||
|         proxy_entries.insert( | ||||
|             conn_id, | ||||
| @@ -387,6 +463,7 @@ impl KcpProxyDst { | ||||
|         let kcp_endpoint = self.kcp_endpoint.clone(); | ||||
|         let global_ctx = self.peer_manager.get_global_ctx().clone(); | ||||
|         let proxy_entries = self.proxy_entries.clone(); | ||||
|         let cidr_set = self.cidr_set.clone(); | ||||
|         self.tasks.spawn(async move { | ||||
|             while let Ok(conn) = kcp_endpoint.accept().await { | ||||
|                 let stream = KcpStream::new(&kcp_endpoint, conn) | ||||
| @@ -395,8 +472,10 @@ impl KcpProxyDst { | ||||
|  | ||||
|                 let global_ctx = global_ctx.clone(); | ||||
|                 let proxy_entries = proxy_entries.clone(); | ||||
|                 let cidr_set = cidr_set.clone(); | ||||
|                 tokio::spawn(async move { | ||||
|                     let _ = Self::handle_one_in_stream(stream, global_ctx, proxy_entries).await; | ||||
|                     let _ = Self::handle_one_in_stream(stream, global_ctx, proxy_entries, cidr_set) | ||||
|                         .await; | ||||
|                 }); | ||||
|             } | ||||
|         }); | ||||
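The destination side keeps its previous shape: one accept loop that clones the shared state (`global_ctx`, `proxy_entries`, and now `cidr_set`) into a per-connection task. A generic tokio sketch of that accept-and-spawn pattern, with a plain `TcpListener` and a placeholder `SharedState` standing in for the KCP endpoint and the cloned Arcs:

```rust
// Generic accept-and-spawn sketch; TcpListener stands in for the KCP endpoint.
use std::sync::Arc;
use tokio::net::TcpListener;

#[derive(Default)]
struct SharedState; // stands in for global_ctx / proxy_entries / cidr_set

async fn handle_one_in_stream(
    _stream: tokio::net::TcpStream,
    _state: Arc<SharedState>,
) -> std::io::Result<()> {
    // the real code decodes the connection data and proxies it to the NAT dst
    Ok(())
}

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let listener = TcpListener::bind("127.0.0.1:0").await?;
    let state = Arc::new(SharedState::default());

    let acceptor = tokio::spawn(async move {
        while let Ok((stream, _addr)) = listener.accept().await {
            // clone the shared state into each per-connection task
            let state = state.clone();
            tokio::spawn(async move {
                let _ = handle_one_in_stream(stream, state).await;
            });
        }
    });

    acceptor.abort(); // demo only: don't block forever here
    Ok(())
}
```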
|   | ||||
| @@ -1,3 +1,4 @@ | ||||
| use dashmap::DashMap; | ||||
| use std::sync::{Arc, Mutex}; | ||||
| use tokio::task::JoinSet; | ||||
|  | ||||
| @@ -17,11 +18,15 @@ pub mod socks5; | ||||
|  | ||||
| pub mod kcp_proxy; | ||||
|  | ||||
| pub mod quic_proxy; | ||||
|  | ||||
| #[derive(Debug)] | ||||
| pub(crate) struct CidrSet { | ||||
|     global_ctx: ArcGlobalCtx, | ||||
|     cidr_set: Arc<Mutex<Vec<cidr::IpCidr>>>, | ||||
|     cidr_set: Arc<Mutex<Vec<cidr::Ipv4Cidr>>>, | ||||
|     tasks: JoinSet<()>, | ||||
|  | ||||
|     mapped_to_real: Arc<DashMap<cidr::Ipv4Cidr, cidr::Ipv4Cidr>>, | ||||
| } | ||||
|  | ||||
| impl CidrSet { | ||||
| @@ -30,6 +35,8 @@ impl CidrSet { | ||||
|             global_ctx, | ||||
|             cidr_set: Arc::new(Mutex::new(vec![])), | ||||
|             tasks: JoinSet::new(), | ||||
|  | ||||
|             mapped_to_real: Arc::new(DashMap::new()), | ||||
|         }; | ||||
|         ret.run_cidr_updater(); | ||||
|         ret | ||||
| @@ -38,15 +45,23 @@ impl CidrSet { | ||||
|     fn run_cidr_updater(&mut self) { | ||||
|         let global_ctx = self.global_ctx.clone(); | ||||
|         let cidr_set = self.cidr_set.clone(); | ||||
|         let mapped_to_real = self.mapped_to_real.clone(); | ||||
|         self.tasks.spawn(async move { | ||||
|             let mut last_cidrs = vec![]; | ||||
|             loop { | ||||
|                 let cidrs = global_ctx.get_proxy_cidrs(); | ||||
|                 let cidrs = global_ctx.config.get_proxy_cidrs(); | ||||
|                 if cidrs != last_cidrs { | ||||
|                     last_cidrs = cidrs.clone(); | ||||
|                     mapped_to_real.clear(); | ||||
|                     cidr_set.lock().unwrap().clear(); | ||||
|                     for cidr in cidrs.iter() { | ||||
|                         cidr_set.lock().unwrap().push(cidr.clone()); | ||||
|                         let real_cidr = cidr.cidr; | ||||
|                         let mapped = cidr.mapped_cidr.unwrap_or(real_cidr.clone()); | ||||
|                         cidr_set.lock().unwrap().push(mapped.clone()); | ||||
|  | ||||
|                         if mapped != real_cidr { | ||||
|                             mapped_to_real.insert(mapped.clone(), real_cidr.clone()); | ||||
|                         } | ||||
|                     } | ||||
|                 } | ||||
|                 tokio::time::sleep(std::time::Duration::from_secs(1)).await; | ||||
| @@ -54,11 +69,23 @@ impl CidrSet { | ||||
|         }); | ||||
|     } | ||||
|  | ||||
|     pub fn contains_v4(&self, ip: std::net::Ipv4Addr) -> bool { | ||||
|         let ip = ip.into(); | ||||
|     pub fn contains_v4(&self, ipv4: std::net::Ipv4Addr, real_ip: &mut std::net::Ipv4Addr) -> bool { | ||||
|         let ip = ipv4.into(); | ||||
|         let s = self.cidr_set.lock().unwrap(); | ||||
|         for cidr in s.iter() { | ||||
|             if cidr.contains(&ip) { | ||||
|                 if let Some(real_cidr) = self.mapped_to_real.get(&cidr).map(|v| v.value().clone()) { | ||||
|                     let origin_network_bits = real_cidr.first().address().to_bits(); | ||||
|                     let network_mask = cidr.mask().to_bits(); | ||||
|  | ||||
|                     let mut converted_ip = ipv4.to_bits(); | ||||
|                     converted_ip &= !network_mask; | ||||
|                     converted_ip |= origin_network_bits; | ||||
|  | ||||
|                     *real_ip = std::net::Ipv4Addr::from(converted_ip); | ||||
|                 } else { | ||||
|                     *real_ip = ipv4; | ||||
|                 } | ||||
|                 return true; | ||||
|             } | ||||
|         } | ||||
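`contains_v4` now also translates a destination that falls inside a mapped CIDR back into the real subnet: it keeps the host bits of the incoming address and replaces the network bits with those of the real CIDR. A standalone sketch of the same bit math (std only, plain u32 masks instead of the cidr crate; it assumes, as the code above effectively does, that the mapped and real CIDRs share a prefix length):

```rust
use std::net::Ipv4Addr;

/// Map `ip` from the `mapped` network into the `real` network, keeping the
/// host bits. Mirrors the bit manipulation in CidrSet::contains_v4.
fn map_to_real(ip: Ipv4Addr, mapped: (Ipv4Addr, u8), real: (Ipv4Addr, u8)) -> Ipv4Addr {
    debug_assert_eq!(mapped.1, real.1, "assumes equal prefix lengths");
    let mask = u32::MAX << (32 - mapped.1 as u32);

    let host_bits = u32::from(ip) & !mask;
    let real_net = u32::from(real.0) & mask;
    Ipv4Addr::from(real_net | host_bits)
}

fn main() {
    // e.g. a real proxy subnet 192.168.1.0/24 published to the network as 10.10.1.0/24
    let mapped = ("10.10.1.0".parse().unwrap(), 24);
    let real = ("192.168.1.0".parse().unwrap(), 24);

    let dst: Ipv4Addr = "10.10.1.23".parse().unwrap();
    assert_eq!(
        map_to_real(dst, mapped, real),
        "192.168.1.23".parse::<Ipv4Addr>().unwrap()
    );
}
```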
|   | ||||
							
								
								
									
easytier/src/gateway/quic_proxy.rs (new file, 443 lines)
							| @@ -0,0 +1,443 @@ | ||||
| use std::net::{IpAddr, Ipv4Addr}; | ||||
| use std::sync::{Arc, Mutex, Weak}; | ||||
| use std::{net::SocketAddr, pin::Pin}; | ||||
|  | ||||
| use anyhow::Context; | ||||
| use dashmap::DashMap; | ||||
| use pnet::packet::ipv4::Ipv4Packet; | ||||
| use prost::Message as _; | ||||
| use quinn::{Endpoint, Incoming}; | ||||
| use tokio::io::{copy_bidirectional, AsyncRead, AsyncReadExt, AsyncWrite}; | ||||
| use tokio::net::TcpStream; | ||||
| use tokio::task::JoinSet; | ||||
| use tokio::time::timeout; | ||||
|  | ||||
| use crate::common::error::Result; | ||||
| use crate::common::global_ctx::{ArcGlobalCtx, GlobalCtx}; | ||||
| use crate::common::join_joinset_background; | ||||
| use crate::defer; | ||||
| use crate::gateway::kcp_proxy::TcpProxyForKcpSrcTrait; | ||||
| use crate::gateway::tcp_proxy::{NatDstConnector, NatDstTcpConnector, TcpProxy}; | ||||
| use crate::gateway::CidrSet; | ||||
| use crate::peers::peer_manager::PeerManager; | ||||
| use crate::proto::cli::{ | ||||
|     ListTcpProxyEntryRequest, ListTcpProxyEntryResponse, TcpProxyEntry, TcpProxyEntryState, | ||||
|     TcpProxyEntryTransportType, TcpProxyRpc, | ||||
| }; | ||||
| use crate::proto::common::ProxyDstInfo; | ||||
| use crate::proto::rpc_types; | ||||
| use crate::proto::rpc_types::controller::BaseController; | ||||
| use crate::tunnel::packet_def::PeerManagerHeader; | ||||
| use crate::tunnel::quic::{configure_client, make_server_endpoint}; | ||||
|  | ||||
| pub struct QUICStream { | ||||
|     endpoint: Option<quinn::Endpoint>, | ||||
|     connection: Option<quinn::Connection>, | ||||
|     sender: quinn::SendStream, | ||||
|     receiver: quinn::RecvStream, | ||||
| } | ||||
|  | ||||
| impl AsyncRead for QUICStream { | ||||
|     fn poll_read( | ||||
|         self: std::pin::Pin<&mut Self>, | ||||
|         cx: &mut std::task::Context<'_>, | ||||
|         buf: &mut tokio::io::ReadBuf<'_>, | ||||
|     ) -> std::task::Poll<std::io::Result<()>> { | ||||
|         let this = self.get_mut(); | ||||
|         Pin::new(&mut this.receiver).poll_read(cx, buf) | ||||
|     } | ||||
| } | ||||
|  | ||||
| impl AsyncWrite for QUICStream { | ||||
|     fn poll_write( | ||||
|         self: std::pin::Pin<&mut Self>, | ||||
|         cx: &mut std::task::Context<'_>, | ||||
|         buf: &[u8], | ||||
|     ) -> std::task::Poll<std::io::Result<usize>> { | ||||
|         let this = self.get_mut(); | ||||
|         AsyncWrite::poll_write(Pin::new(&mut this.sender), cx, buf) | ||||
|     } | ||||
|  | ||||
|     fn poll_flush( | ||||
|         self: std::pin::Pin<&mut Self>, | ||||
|         cx: &mut std::task::Context<'_>, | ||||
|     ) -> std::task::Poll<std::io::Result<()>> { | ||||
|         let this = self.get_mut(); | ||||
|         Pin::new(&mut this.sender).poll_flush(cx) | ||||
|     } | ||||
|  | ||||
|     fn poll_shutdown( | ||||
|         self: std::pin::Pin<&mut Self>, | ||||
|         cx: &mut std::task::Context<'_>, | ||||
|     ) -> std::task::Poll<std::io::Result<()>> { | ||||
|         let this = self.get_mut(); | ||||
|         Pin::new(&mut this.sender).poll_shutdown(cx) | ||||
|     } | ||||
| } | ||||
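`QUICStream` exists only to forward tokio's `AsyncRead`/`AsyncWrite` poll methods to the underlying quinn recv/send halves, so `copy_bidirectional` can treat it like any other byte stream. The delegation pattern, shown here as a sketch over split tokio duplex halves rather than quinn streams (`BiStream` is a hypothetical stand-in, not the EasyTier type):

```rust
use std::pin::Pin;
use std::task::{Context, Poll};

use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt, ReadBuf};

// Wraps separate read/write halves and forwards the poll_* calls,
// the same shape as QUICStream over quinn's RecvStream/SendStream.
struct BiStream<R, W> {
    reader: R,
    writer: W,
}

impl<R: AsyncRead + Unpin, W: Unpin> AsyncRead for BiStream<R, W> {
    fn poll_read(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &mut ReadBuf<'_>,
    ) -> Poll<std::io::Result<()>> {
        Pin::new(&mut self.get_mut().reader).poll_read(cx, buf)
    }
}

impl<R: Unpin, W: AsyncWrite + Unpin> AsyncWrite for BiStream<R, W> {
    fn poll_write(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<std::io::Result<usize>> {
        Pin::new(&mut self.get_mut().writer).poll_write(cx, buf)
    }
    fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<std::io::Result<()>> {
        Pin::new(&mut self.get_mut().writer).poll_flush(cx)
    }
    fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<std::io::Result<()>> {
        Pin::new(&mut self.get_mut().writer).poll_shutdown(cx)
    }
}

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let (a, b) = tokio::io::duplex(64);
    let (read_half, write_half) = tokio::io::split(a);
    let mut stream = BiStream { reader: read_half, writer: write_half };

    let (mut peer_read, mut peer_write) = tokio::io::split(b);
    stream.write_all(b"ping").await?;
    let mut buf = [0u8; 4];
    peer_read.read_exact(&mut buf).await?;
    assert_eq!(&buf, b"ping");

    peer_write.write_all(b"pong").await?;
    stream.read_exact(&mut buf).await?;
    assert_eq!(&buf, b"pong");
    Ok(())
}
```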
|  | ||||
| #[derive(Debug, Clone)] | ||||
| pub struct NatDstQUICConnector { | ||||
|     pub(crate) peer_mgr: Weak<PeerManager>, | ||||
| } | ||||
|  | ||||
| #[async_trait::async_trait] | ||||
| impl NatDstConnector for NatDstQUICConnector { | ||||
|     type DstStream = QUICStream; | ||||
|  | ||||
|     #[tracing::instrument(skip(self), level = "debug", name = "NatDstQUICConnector::connect")] | ||||
|     async fn connect(&self, src: SocketAddr, nat_dst: SocketAddr) -> Result<Self::DstStream> { | ||||
|         let Some(peer_mgr) = self.peer_mgr.upgrade() else { | ||||
|             return Err(anyhow::anyhow!("peer manager is not available").into()); | ||||
|         }; | ||||
|  | ||||
|         let IpAddr::V4(dst_ipv4) = nat_dst.ip() else { | ||||
|             return Err(anyhow::anyhow!("src must be an IPv4 address").into()); | ||||
|         }; | ||||
|  | ||||
|         let Some(dst_peer) = peer_mgr.get_peer_map().get_peer_id_by_ipv4(&dst_ipv4).await else { | ||||
|             return Err(anyhow::anyhow!("no peer found for dst: {}", nat_dst).into()); | ||||
|         }; | ||||
|  | ||||
|         let Some(dst_peer_info) = peer_mgr.get_peer_map().get_route_peer_info(dst_peer).await | ||||
|         else { | ||||
|             return Err(anyhow::anyhow!("no peer info found for dst peer: {}", dst_peer).into()); | ||||
|         }; | ||||
|  | ||||
|         let Some(dst_ipv4): Option<Ipv4Addr> = dst_peer_info.ipv4_addr.map(Into::into) else { | ||||
|             return Err(anyhow::anyhow!("no ipv4 found for dst peer: {}", dst_peer).into()); | ||||
|         }; | ||||
|  | ||||
|         let Some(quic_port) = dst_peer_info.quic_port else { | ||||
|             return Err(anyhow::anyhow!("no quic port found for dst peer: {}", dst_peer).into()); | ||||
|         }; | ||||
|  | ||||
|         let mut endpoint = Endpoint::client("0.0.0.0:0".parse().unwrap()) | ||||
|             .with_context(|| format!("failed to create QUIC endpoint for src: {}", src))?; | ||||
|         endpoint.set_default_client_config(configure_client()); | ||||
|  | ||||
|         // connect to server | ||||
|         let connection = { | ||||
|             let _g = peer_mgr.get_global_ctx().net_ns.guard(); | ||||
|             endpoint | ||||
|                 .connect( | ||||
|                     SocketAddr::new(dst_ipv4.into(), quic_port as u16), | ||||
|                     "localhost", | ||||
|                 ) | ||||
|                 .unwrap() | ||||
|                 .await | ||||
|                 .with_context(|| { | ||||
|                     format!( | ||||
|                         "failed to connect to NAT destination {} from {}, real dst: {}", | ||||
|                         nat_dst, src, dst_ipv4 | ||||
|                     ) | ||||
|                 })? | ||||
|         }; | ||||
|  | ||||
|         let (mut w, r) = connection | ||||
|             .open_bi() | ||||
|             .await | ||||
|             .with_context(|| "open_bi failed")?; | ||||
|  | ||||
|         let proxy_dst_info = ProxyDstInfo { | ||||
|             dst_addr: Some(nat_dst.into()), | ||||
|         }; | ||||
|         let proxy_dst_info_buf = proxy_dst_info.encode_to_vec(); | ||||
|         let buf_len = proxy_dst_info_buf.len() as u8; | ||||
|         w.write(&buf_len.to_le_bytes()) | ||||
|             .await | ||||
|             .with_context(|| "failed to write proxy dst info buf len to QUIC stream")?; | ||||
|         w.write(&proxy_dst_info_buf) | ||||
|             .await | ||||
|             .with_context(|| "failed to write proxy dst info to QUIC stream")?; | ||||
|  | ||||
|         Ok(QUICStream { | ||||
|             endpoint: Some(endpoint), | ||||
|             connection: Some(connection), | ||||
|             sender: w, | ||||
|             receiver: r, | ||||
|         }) | ||||
|     } | ||||
|  | ||||
|     fn check_packet_from_peer_fast(&self, _cidr_set: &CidrSet, _global_ctx: &GlobalCtx) -> bool { | ||||
|         true | ||||
|     } | ||||
|  | ||||
|     fn check_packet_from_peer( | ||||
|         &self, | ||||
|         _cidr_set: &CidrSet, | ||||
|         _global_ctx: &GlobalCtx, | ||||
|         hdr: &PeerManagerHeader, | ||||
|         _ipv4: &Ipv4Packet, | ||||
|         _real_dst_ip: &mut Ipv4Addr, | ||||
|     ) -> bool { | ||||
|         return hdr.from_peer_id == hdr.to_peer_id && !hdr.is_kcp_src_modified(); | ||||
|     } | ||||
|  | ||||
|     fn transport_type(&self) -> TcpProxyEntryTransportType { | ||||
|         TcpProxyEntryTransportType::Quic | ||||
|     } | ||||
| } | ||||
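Before any payload bytes flow, the client prefixes the QUIC stream with a one-byte length followed by the protobuf-encoded `ProxyDstInfo`, telling the far side which NAT destination to dial; `handle_connection` below reads the same frame back with `read_u8` plus `read_exact`. A sketch of that framing over an in-memory tokio duplex, with plain bytes standing in for the encoded protobuf message:

```rust
use tokio::io::{AsyncReadExt, AsyncWriteExt};

#[tokio::main]
async fn main() -> std::io::Result<()> {
    // In the real code `payload` is ProxyDstInfo::encode_to_vec().
    let payload = b"127.0.0.1:8080".to_vec();
    assert!(payload.len() <= u8::MAX as usize, "one-byte length prefix");

    let (mut client, mut server) = tokio::io::duplex(256);

    // Sender: length prefix, then the encoded message.
    client.write_u8(payload.len() as u8).await?;
    client.write_all(&payload).await?;

    // Receiver: read the length, then exactly that many bytes.
    let len = server.read_u8().await?;
    let mut buf = vec![0u8; len as usize];
    server.read_exact(&mut buf).await?;

    assert_eq!(buf, payload);
    Ok(())
}
```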
|  | ||||
| #[derive(Clone)] | ||||
| struct TcpProxyForQUICSrc(Arc<TcpProxy<NatDstQUICConnector>>); | ||||
|  | ||||
| #[async_trait::async_trait] | ||||
| impl TcpProxyForKcpSrcTrait for TcpProxyForQUICSrc { | ||||
|     type Connector = NatDstQUICConnector; | ||||
|  | ||||
|     fn get_tcp_proxy(&self) -> &Arc<TcpProxy<Self::Connector>> { | ||||
|         &self.0 | ||||
|     } | ||||
|  | ||||
|     async fn check_dst_allow_kcp_input(&self, dst_ip: &Ipv4Addr) -> bool { | ||||
|         let peer_map: Arc<crate::peers::peer_map::PeerMap> = | ||||
|             self.0.get_peer_manager().get_peer_map(); | ||||
|         let Some(dst_peer_id) = peer_map.get_peer_id_by_ipv4(dst_ip).await else { | ||||
|             return false; | ||||
|         }; | ||||
|         let Some(peer_info) = peer_map.get_route_peer_info(dst_peer_id).await else { | ||||
|             return false; | ||||
|         }; | ||||
|         let Some(quic_port) = peer_info.quic_port else { | ||||
|             return false; | ||||
|         }; | ||||
|         quic_port > 0 | ||||
|     } | ||||
| } | ||||
|  | ||||
| pub struct QUICProxySrc { | ||||
|     peer_manager: Arc<PeerManager>, | ||||
|     tcp_proxy: TcpProxyForQUICSrc, | ||||
| } | ||||
|  | ||||
| impl QUICProxySrc { | ||||
|     pub async fn new(peer_manager: Arc<PeerManager>) -> Self { | ||||
|         let tcp_proxy = TcpProxy::new( | ||||
|             peer_manager.clone(), | ||||
|             NatDstQUICConnector { | ||||
|                 peer_mgr: Arc::downgrade(&peer_manager), | ||||
|             }, | ||||
|         ); | ||||
|  | ||||
|         Self { | ||||
|             peer_manager, | ||||
|             tcp_proxy: TcpProxyForQUICSrc(tcp_proxy), | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     pub async fn start(&self) { | ||||
|         self.peer_manager | ||||
|             .add_nic_packet_process_pipeline(Box::new(self.tcp_proxy.clone())) | ||||
|             .await; | ||||
|         self.peer_manager | ||||
|             .add_packet_process_pipeline(Box::new(self.tcp_proxy.0.clone())) | ||||
|             .await; | ||||
|         self.tcp_proxy.0.start(false).await.unwrap(); | ||||
|     } | ||||
|  | ||||
|     pub fn get_tcp_proxy(&self) -> Arc<TcpProxy<NatDstQUICConnector>> { | ||||
|         self.tcp_proxy.0.clone() | ||||
|     } | ||||
| } | ||||
|  | ||||
| pub struct QUICProxyDst { | ||||
|     global_ctx: Arc<GlobalCtx>, | ||||
|     endpoint: Arc<quinn::Endpoint>, | ||||
|     proxy_entries: Arc<DashMap<SocketAddr, TcpProxyEntry>>, | ||||
|     tasks: Arc<Mutex<JoinSet<()>>>, | ||||
| } | ||||
|  | ||||
| impl QUICProxyDst { | ||||
|     pub fn new(global_ctx: ArcGlobalCtx) -> Result<Self> { | ||||
|         let _g = global_ctx.net_ns.guard(); | ||||
|         let (endpoint, _) = make_server_endpoint("0.0.0.0:0".parse().unwrap()) | ||||
|             .map_err(|e| anyhow::anyhow!("failed to create QUIC endpoint: {}", e))?; | ||||
|         let tasks = Arc::new(Mutex::new(JoinSet::new())); | ||||
|         join_joinset_background(tasks.clone(), "QUICProxyDst tasks".to_string()); | ||||
|         Ok(Self { | ||||
|             global_ctx, | ||||
|             endpoint: Arc::new(endpoint), | ||||
|             proxy_entries: Arc::new(DashMap::new()), | ||||
|             tasks, | ||||
|         }) | ||||
|     } | ||||
|  | ||||
|     pub async fn start(&self) -> Result<()> { | ||||
|         let endpoint = self.endpoint.clone(); | ||||
|         let tasks = Arc::downgrade(&self.tasks.clone()); | ||||
|         let ctx = self.global_ctx.clone(); | ||||
|         let cidr_set = Arc::new(CidrSet::new(ctx.clone())); | ||||
|         let proxy_entries = self.proxy_entries.clone(); | ||||
|  | ||||
|         let task = async move { | ||||
|             loop { | ||||
|                 match endpoint.accept().await { | ||||
|                     Some(conn) => { | ||||
|                         let Some(tasks) = tasks.upgrade() else { | ||||
|                             tracing::warn!( | ||||
|                                 "QUICProxyDst tasks is not available, stopping accept loop" | ||||
|                             ); | ||||
|                             return; | ||||
|                         }; | ||||
|                         tasks | ||||
|                             .lock() | ||||
|                             .unwrap() | ||||
|                             .spawn(Self::handle_connection_with_timeout( | ||||
|                                 conn, | ||||
|                                 ctx.clone(), | ||||
|                                 cidr_set.clone(), | ||||
|                                 proxy_entries.clone(), | ||||
|                             )); | ||||
|                     } | ||||
|                     None => { | ||||
|                         return; | ||||
|                     } | ||||
|                 } | ||||
|             } | ||||
|         }; | ||||
|  | ||||
|         self.tasks.lock().unwrap().spawn(task); | ||||
|  | ||||
|         Ok(()) | ||||
|     } | ||||
|  | ||||
|     pub fn local_addr(&self) -> Result<SocketAddr> { | ||||
|         self.endpoint.local_addr().map_err(Into::into) | ||||
|     } | ||||
|  | ||||
|     async fn handle_connection_with_timeout( | ||||
|         conn: Incoming, | ||||
|         ctx: Arc<GlobalCtx>, | ||||
|         cidr_set: Arc<CidrSet>, | ||||
|         proxy_entries: Arc<DashMap<SocketAddr, TcpProxyEntry>>, | ||||
|     ) { | ||||
|         let remote_addr = conn.remote_address(); | ||||
|         defer!( | ||||
|             proxy_entries.remove(&remote_addr); | ||||
|         ); | ||||
|         let ret = timeout( | ||||
|             std::time::Duration::from_secs(10), | ||||
|             Self::handle_connection(conn, ctx, cidr_set, remote_addr, proxy_entries.clone()), | ||||
|         ) | ||||
|         .await; | ||||
|  | ||||
|         match ret { | ||||
|             Ok(Ok((mut quic_stream, mut tcp_stream))) => { | ||||
|                 let ret = copy_bidirectional(&mut quic_stream, &mut tcp_stream).await; | ||||
|                 tracing::info!( | ||||
|                     "QUIC connection handled, result: {:?}, remote addr: {:?}", | ||||
|                     ret, | ||||
|                     quic_stream.connection.as_ref().map(|c| c.remote_address()) | ||||
|                 ); | ||||
|             } | ||||
|             Ok(Err(e)) => { | ||||
|                 tracing::error!("Failed to handle QUIC connection: {}", e); | ||||
|             } | ||||
|             Err(_) => { | ||||
|                 tracing::warn!("Timeout while handling QUIC connection"); | ||||
|             } | ||||
|         } | ||||
|     } | ||||
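Note that `handle_connection_with_timeout` bounds only the handshake (accepting the bi-directional stream, decoding `ProxyDstInfo`, and dialing the NAT destination) to 10 seconds, while `copy_bidirectional` then runs for the lifetime of the connection. A minimal sketch of that shape, with tokio duplex streams standing in for the QUIC and TCP sides:

```rust
use std::time::Duration;
use tokio::io::{copy_bidirectional, duplex};
use tokio::time::timeout;

#[tokio::main]
async fn main() {
    // Only the setup/handshake phase is bounded by the timeout...
    let setup = timeout(Duration::from_secs(10), async {
        // stand-in for accept_bi + ProxyDstInfo decode + connecting the NAT dst
        let (quic_side, quic_peer) = duplex(64);
        let (tcp_side, tcp_peer) = duplex(64);
        drop((quic_peer, tcp_peer)); // demo only: peers hang up immediately
        std::io::Result::Ok((quic_side, tcp_side))
    })
    .await;

    match setup {
        Ok(Ok((mut quic_stream, mut tcp_stream))) => {
            // ...while the relay itself runs as long as the connection lives.
            let copied = copy_bidirectional(&mut quic_stream, &mut tcp_stream).await;
            println!("relay finished: {:?}", copied);
        }
        Ok(Err(e)) => eprintln!("handshake failed: {e}"),
        Err(_) => eprintln!("handshake timed out"),
    }
}
```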
|  | ||||
|     async fn handle_connection( | ||||
|         incoming: Incoming, | ||||
|         ctx: ArcGlobalCtx, | ||||
|         cidr_set: Arc<CidrSet>, | ||||
|         proxy_entry_key: SocketAddr, | ||||
|         proxy_entries: Arc<DashMap<SocketAddr, TcpProxyEntry>>, | ||||
|     ) -> Result<(QUICStream, TcpStream)> { | ||||
|         let conn = incoming.await.with_context(|| "accept failed")?; | ||||
|         let addr = conn.remote_address(); | ||||
|         tracing::info!("Accepted QUIC connection from {}", addr); | ||||
|         let (w, mut r) = conn.accept_bi().await.with_context(|| "accept_bi failed")?; | ||||
|         let len = r | ||||
|             .read_u8() | ||||
|             .await | ||||
|             .with_context(|| "failed to read proxy dst info buf len")?; | ||||
|         let mut buf = vec![0u8; len as usize]; | ||||
|         r.read_exact(&mut buf) | ||||
|             .await | ||||
|             .with_context(|| "failed to read proxy dst info")?; | ||||
|  | ||||
|         let proxy_dst_info = | ||||
|             ProxyDstInfo::decode(&buf[..]).with_context(|| "failed to decode proxy dst info")?; | ||||
|  | ||||
|         let dst_socket: SocketAddr = proxy_dst_info | ||||
|             .dst_addr | ||||
|             .map(Into::into) | ||||
|             .ok_or_else(|| anyhow::anyhow!("no dst addr in proxy dst info"))?; | ||||
|  | ||||
|         let SocketAddr::V4(mut dst_socket) = dst_socket else { | ||||
|             return Err(anyhow::anyhow!("NAT destination must be an IPv4 address").into()); | ||||
|         }; | ||||
|  | ||||
|         let mut real_ip = *dst_socket.ip(); | ||||
|         if cidr_set.contains_v4(*dst_socket.ip(), &mut real_ip) { | ||||
|             dst_socket.set_ip(real_ip); | ||||
|         } | ||||
|  | ||||
|         if Some(*dst_socket.ip()) == ctx.get_ipv4().map(|ip| ip.address()) && ctx.no_tun() { | ||||
|             dst_socket = format!("127.0.0.1:{}", dst_socket.port()).parse().unwrap(); | ||||
|         } | ||||
|  | ||||
|         proxy_entries.insert( | ||||
|             proxy_entry_key, | ||||
|             TcpProxyEntry { | ||||
|                 src: Some(addr.into()), | ||||
|                 dst: Some(SocketAddr::V4(dst_socket).into()), | ||||
|                 start_time: chrono::Local::now().timestamp() as u64, | ||||
|                 state: TcpProxyEntryState::ConnectingDst.into(), | ||||
|                 transport_type: TcpProxyEntryTransportType::Quic.into(), | ||||
|             }, | ||||
|         ); | ||||
|  | ||||
|         let connector = NatDstTcpConnector {}; | ||||
|  | ||||
|         let dst_stream = { | ||||
|             let _g = ctx.net_ns.guard(); | ||||
|             connector | ||||
|                 .connect("0.0.0.0:0".parse().unwrap(), dst_socket.into()) | ||||
|                 .await? | ||||
|         }; | ||||
|  | ||||
|         if let Some(mut e) = proxy_entries.get_mut(&proxy_entry_key) { | ||||
|             e.state = TcpProxyEntryState::Connected.into(); | ||||
|         } | ||||
|  | ||||
|         let quic_stream = QUICStream { | ||||
|             endpoint: None, | ||||
|             connection: Some(conn), | ||||
|             sender: w, | ||||
|             receiver: r, | ||||
|         }; | ||||
|  | ||||
|         Ok((quic_stream, dst_stream)) | ||||
|     } | ||||
| } | ||||
|  | ||||
| #[derive(Clone)] | ||||
| pub struct QUICProxyDstRpcService(Weak<DashMap<SocketAddr, TcpProxyEntry>>); | ||||
|  | ||||
| impl QUICProxyDstRpcService { | ||||
|     pub fn new(quic_proxy_dst: &QUICProxyDst) -> Self { | ||||
|         Self(Arc::downgrade(&quic_proxy_dst.proxy_entries)) | ||||
|     } | ||||
| } | ||||
|  | ||||
| #[async_trait::async_trait] | ||||
| impl TcpProxyRpc for QUICProxyDstRpcService { | ||||
|     type Controller = BaseController; | ||||
|     async fn list_tcp_proxy_entry( | ||||
|         &self, | ||||
|         _: BaseController, | ||||
|         _request: ListTcpProxyEntryRequest, | ||||
|     ) -> std::result::Result<ListTcpProxyEntryResponse, rpc_types::error::Error> { | ||||
|         let mut reply = ListTcpProxyEntryResponse::default(); | ||||
|         if let Some(tcp_proxy) = self.0.upgrade() { | ||||
|             for item in tcp_proxy.iter() { | ||||
|                 reply.entries.push(item.value().clone()); | ||||
|             } | ||||
|         } | ||||
|         Ok(reply) | ||||
|     } | ||||
| } | ||||
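`QUICProxyDstRpcService` holds only a `Weak` reference to the `proxy_entries` map, so the RPC service never keeps connection state alive after the proxy itself is dropped; listing just upgrades the weak pointer and copies the entries, returning an empty list if the proxy is gone. A cut-down sketch of the same ownership pattern using std types instead of DashMap (all names here are hypothetical):

```rust
use std::collections::HashMap;
use std::sync::{Arc, Mutex, Weak};

#[derive(Clone, Debug)]
struct Entry {
    dst: String,
}

struct ProxyDst {
    entries: Arc<Mutex<HashMap<u64, Entry>>>,
}

#[derive(Clone)]
struct RpcService(Weak<Mutex<HashMap<u64, Entry>>>);

impl RpcService {
    fn new(proxy: &ProxyDst) -> Self {
        Self(Arc::downgrade(&proxy.entries))
    }

    fn list(&self) -> Vec<Entry> {
        // If the proxy is gone, return an empty list instead of erroring.
        match self.0.upgrade() {
            Some(map) => map.lock().unwrap().values().cloned().collect(),
            None => Vec::new(),
        }
    }
}

fn main() {
    let proxy = ProxyDst { entries: Arc::new(Mutex::new(HashMap::new())) };
    let rpc = RpcService::new(&proxy);

    proxy.entries.lock().unwrap().insert(1, Entry { dst: "10.0.0.2:80".into() });
    assert_eq!(rpc.list().len(), 1);

    drop(proxy);
    assert!(rpc.list().is_empty()); // the Weak no longer upgrades
}
```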
Some files were not shown because too many files have changed in this diff.