diff --git a/.github/update.log b/.github/update.log index 856724ba13..63192c5882 100644 --- a/.github/update.log +++ b/.github/update.log @@ -1057,3 +1057,4 @@ Update On Thu Jul 10 20:38:34 CEST 2025 Update On Fri Jul 11 20:40:18 CEST 2025 Update On Sat Jul 12 20:36:26 CEST 2025 Update On Sun Jul 13 20:36:25 CEST 2025 +Update On Mon Jul 14 14:45:36 CEST 2025 diff --git a/clash-meta/go.mod b/clash-meta/go.mod index 82b14af295..4bd156cc50 100644 --- a/clash-meta/go.mod +++ b/clash-meta/go.mod @@ -31,7 +31,7 @@ require ( github.com/metacubex/sing-shadowsocks v0.2.11-0.20250621023810-0e9ef9dd0c92 github.com/metacubex/sing-shadowsocks2 v0.2.5-0.20250621023950-93d605a2143d github.com/metacubex/sing-shadowtls v0.0.0-20250503063515-5d9f966d17a2 - github.com/metacubex/sing-tun v0.4.7-0.20250708161036-39e27fac35ed + github.com/metacubex/sing-tun v0.4.7-0.20250611091011-60774779fdd8 github.com/metacubex/sing-vmess v0.2.2 github.com/metacubex/sing-wireguard v0.0.0-20250503063753-2dc62acc626f github.com/metacubex/smux v0.0.0-20250503055512-501391591dee @@ -82,7 +82,6 @@ require ( github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect github.com/gobwas/httphead v0.1.0 // indirect github.com/gobwas/pool v0.2.1 // indirect - github.com/godbus/dbus/v5 v5.1.0 // indirect github.com/google/btree v1.1.3 // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38 // indirect @@ -93,7 +92,7 @@ require ( github.com/mailru/easyjson v0.7.7 // indirect github.com/mdlayher/socket v0.4.1 // indirect github.com/metacubex/gvisor v0.0.0-20250324165734-5857f47bd43b // indirect - github.com/metacubex/nftables v0.0.0-20250708020511-be959b9e8902 // indirect + github.com/metacubex/nftables v0.0.0-20250503052935-30a69ab87793 // indirect github.com/oasisprotocol/deoxysii v0.0.0-20220228165953-2091330c22b7 // indirect github.com/onsi/ginkgo/v2 v2.9.5 // indirect github.com/pierrec/lz4/v4 v4.1.14 // indirect diff --git 
a/clash-meta/go.sum b/clash-meta/go.sum index 8097b74bdc..f089d65209 100644 --- a/clash-meta/go.sum +++ b/clash-meta/go.sum @@ -58,8 +58,6 @@ github.com/gobwas/pool v0.2.1 h1:xfeeEhW7pwmX8nuLVlqbzVc7udMDrwetjEv+TZIz1og= github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= github.com/gobwas/ws v1.4.0 h1:CTaoG1tojrh4ucGPcoJFiAQUAsEWekEWvLy7GsVNqGs= github.com/gobwas/ws v1.4.0/go.mod h1:G3gNqMNtPppf5XUz7O4shetPpcZ1VJ7zt18dlUeakrc= -github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= -github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gofrs/uuid/v5 v5.3.2 h1:2jfO8j3XgSwlz/wHqemAEugfnTlikAYHhnqQ8Xh4fE0= github.com/gofrs/uuid/v5 v5.3.2/go.mod h1:CDOjlDMVAtN56jqyRUZh58JT31Tiw7/oQyEXZV+9bD8= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -112,8 +110,8 @@ github.com/metacubex/gopacket v1.1.20-0.20230608035415-7e2f98a3e759 h1:cjd4biTvO github.com/metacubex/gopacket v1.1.20-0.20230608035415-7e2f98a3e759/go.mod h1:UHOv2xu+RIgLwpXca7TLrXleEd4oR3sPatW6IF8wU88= github.com/metacubex/gvisor v0.0.0-20250324165734-5857f47bd43b h1:RUh4OdVPz/jDrM9MQ2ySuqu2aeBqcA8rtfWUYLZ8RtI= github.com/metacubex/gvisor v0.0.0-20250324165734-5857f47bd43b/go.mod h1:8LpS0IJW1VmWzUm3ylb0e2SK5QDm5lO/2qwWLZgRpBU= -github.com/metacubex/nftables v0.0.0-20250708020511-be959b9e8902 h1:+OzINlacUgggoeiEBNtXwDzedhlijf6sYiBfavBdDac= -github.com/metacubex/nftables v0.0.0-20250708020511-be959b9e8902/go.mod h1:Hw5GBK4x+A1fhll+OQdPpbsS0YGHqxFZFxr17PFd2zg= +github.com/metacubex/nftables v0.0.0-20250503052935-30a69ab87793 h1:1Qpuy+sU3DmyX9HwI+CrBT/oLNJngvBorR2RbajJcqo= +github.com/metacubex/nftables v0.0.0-20250503052935-30a69ab87793/go.mod h1:RjRNb4G52yAgfR+Oe/kp9G4PJJ97Fnj89eY1BFO3YyA= github.com/metacubex/quic-go v0.53.1-0.20250628094454-fda5262d1d9c h1:ABQzmOaZddM3q0OYeoZEc0XF+KW+dUdPNvY/c5rsunI= github.com/metacubex/quic-go 
v0.53.1-0.20250628094454-fda5262d1d9c/go.mod h1:eWlAK3zsKI0P8UhYpXlIsl3mtW4D6MpMNuYLIu8CKWI= github.com/metacubex/randv2 v0.2.0 h1:uP38uBvV2SxYfLj53kuvAjbND4RUDfFJjwr4UigMiLs= @@ -131,8 +129,8 @@ github.com/metacubex/sing-shadowsocks2 v0.2.5-0.20250621023950-93d605a2143d h1:E github.com/metacubex/sing-shadowsocks2 v0.2.5-0.20250621023950-93d605a2143d/go.mod h1:+ukTd0OPFglT3bnKAYTJWYPbuox6HYNXE235r5tHdUk= github.com/metacubex/sing-shadowtls v0.0.0-20250503063515-5d9f966d17a2 h1:gXU+MYPm7Wme3/OAY2FFzVq9d9GxPHOqu5AQfg/ddhI= github.com/metacubex/sing-shadowtls v0.0.0-20250503063515-5d9f966d17a2/go.mod h1:mbfboaXauKJNIHJYxQRa+NJs4JU9NZfkA+I33dS2+9E= -github.com/metacubex/sing-tun v0.4.7-0.20250708161036-39e27fac35ed h1:biW5cGsM+953MpQhowBn7qKmIcHwtqWu7ygDH4xNJSI= -github.com/metacubex/sing-tun v0.4.7-0.20250708161036-39e27fac35ed/go.mod h1:t6aROwOw+QJxnWNTlffNAdZZGpri4JEiswoltkrWT64= +github.com/metacubex/sing-tun v0.4.7-0.20250611091011-60774779fdd8 h1:4zWKqxTx75TbfW2EmlQ3hxM6RTRg2PYOAVMCnU4I61I= +github.com/metacubex/sing-tun v0.4.7-0.20250611091011-60774779fdd8/go.mod h1:2YywXPWW8Z97kTH7RffOeykKzU+l0aiKlglWV1PAS64= github.com/metacubex/sing-vmess v0.2.2 h1:nG6GIKF1UOGmlzs+BIetdGHkFZ20YqFVIYp5Htqzp+4= github.com/metacubex/sing-vmess v0.2.2/go.mod h1:CVDNcdSLVYFgTHQlubr88d8CdqupAUDqLjROos+H9xk= github.com/metacubex/sing-wireguard v0.0.0-20250503063753-2dc62acc626f h1:Sr/DYKYofKHKc4GF3qkRGNuj6XA6c0eqPgEDN+VAsYU= diff --git a/clash-nyanpasu/frontend/nyanpasu/package.json b/clash-nyanpasu/frontend/nyanpasu/package.json index 553c90b0c0..dcc1d72e87 100644 --- a/clash-nyanpasu/frontend/nyanpasu/package.json +++ b/clash-nyanpasu/frontend/nyanpasu/package.json @@ -78,7 +78,7 @@ "change-case": "5.4.4", "clsx": "2.1.1", "core-js": "3.44.0", - "filesize": "10.1.6", + "filesize": "11.0.1", "meta-json-schema": "1.19.11", "monaco-yaml": "5.4.0", "nanoid": "5.1.5", diff --git a/clash-nyanpasu/manifest/version.json b/clash-nyanpasu/manifest/version.json index 2d4a5e5d1f..a361e0fe75 
100644 --- a/clash-nyanpasu/manifest/version.json +++ b/clash-nyanpasu/manifest/version.json @@ -5,7 +5,7 @@ "mihomo_alpha": "alpha-fb464bb", "clash_rs": "v0.8.1", "clash_premium": "2023-09-05-gdcc8d87", - "clash_rs_alpha": "0.8.1-alpha+sha.13e4adc" + "clash_rs_alpha": "0.8.1-alpha+sha.31b7349" }, "arch_template": { "mihomo": { @@ -69,5 +69,5 @@ "linux-armv7hf": "clash-armv7-unknown-linux-gnueabihf" } }, - "updated_at": "2025-07-12T22:21:06.807Z" + "updated_at": "2025-07-13T22:21:20.442Z" } diff --git a/clash-nyanpasu/pnpm-lock.yaml b/clash-nyanpasu/pnpm-lock.yaml index b357a5ce48..bf1683ffca 100644 --- a/clash-nyanpasu/pnpm-lock.yaml +++ b/clash-nyanpasu/pnpm-lock.yaml @@ -406,8 +406,8 @@ importers: specifier: 3.44.0 version: 3.44.0 filesize: - specifier: 10.1.6 - version: 10.1.6 + specifier: 11.0.1 + version: 11.0.1 meta-json-schema: specifier: 1.19.11 version: 1.19.11 @@ -557,8 +557,8 @@ importers: specifier: 1.8.2 version: 1.8.2 filesize: - specifier: 10.1.6 - version: 10.1.6 + specifier: 11.0.1 + version: 11.0.1 p-retry: specifier: 6.2.1 version: 6.2.1 @@ -5108,8 +5108,8 @@ packages: filelist@1.0.4: resolution: {integrity: sha512-w1cEuf3S+DrLCQL7ET6kz+gmlJdbq9J7yXCSjK/OZCPA+qEN1WyF4ZAf0YYJa4/shHJra2t/d/r8SV4Ji+x+8Q==} - filesize@10.1.6: - resolution: {integrity: sha512-sJslQKU2uM33qH5nqewAwVB2QgR6w1aMNsYUp3aN5rMRyXEwJGmZvaWzeJFNTOXWlHQyBFCWrdj3fV/fsTOX8w==} + filesize@11.0.1: + resolution: {integrity: sha512-ua1SLPcFgqf1lICRVqTA5d8T6kqg2ZTIm0BImnUk4pZzfAlwhqO9zKv7GCE5FGl3zIbBZZSmq7yLikFNsi5eXw==} engines: {node: '>= 10.4.0'} fill-range@7.1.1: @@ -13727,7 +13727,7 @@ snapshots: dependencies: minimatch: 5.1.6 - filesize@10.1.6: {} + filesize@11.0.1: {} fill-range@7.1.1: dependencies: diff --git a/clash-nyanpasu/scripts/package.json b/clash-nyanpasu/scripts/package.json index 334a12d4aa..00081c0216 100644 --- a/clash-nyanpasu/scripts/package.json +++ b/clash-nyanpasu/scripts/package.json @@ -7,7 +7,7 @@ "@types/figlet": "1.7.0", "@types/semver": "7.7.0", 
"figlet": "1.8.2", - "filesize": "10.1.6", + "filesize": "11.0.1", "p-retry": "6.2.1", "semver": "7.7.2", "zod": "3.25.76" diff --git a/clash-verge-rev/.github/workflows/dev.yml b/clash-verge-rev/.github/workflows/dev.yml index 53b771cc0f..4819828cda 100644 --- a/clash-verge-rev/.github/workflows/dev.yml +++ b/clash-verge-rev/.github/workflows/dev.yml @@ -2,12 +2,28 @@ name: Development Test on: workflow_dispatch: + inputs: + run_windows: + description: "运行 Windows" + required: false + type: boolean + default: true + run_macos_aarch64: + description: "运行 macOS aarch64" + required: false + type: boolean + default: true + run_macos_x86_64: + description: "运行 macOS x86_64" + required: false + type: boolean + default: true + permissions: write-all env: CARGO_INCREMENTAL: 0 RUST_BACKTRACE: short concurrency: - # only allow per workflow per commit (and not pr) to run at a time group: "${{ github.workflow }} - ${{ github.head_ref || github.ref }}" cancel-in-progress: ${{ github.ref != 'refs/heads/main' }} @@ -20,46 +36,60 @@ jobs: - os: windows-latest target: x86_64-pc-windows-msvc bundle: nsis + id: windows + input: run_windows - os: macos-latest target: aarch64-apple-darwin bundle: dmg + id: macos-aarch64 + input: run_macos_aarch64 - os: macos-latest target: x86_64-apple-darwin bundle: dmg + id: macos-x86_64 + input: run_macos_x86_64 runs-on: ${{ matrix.os }} steps: - name: Checkout Repository + if: github.event.inputs[matrix.input] == 'true' uses: actions/checkout@v4 - name: Install Rust Stable + if: github.event.inputs[matrix.input] == 'true' uses: dtolnay/rust-toolchain@stable - name: Add Rust Target + if: github.event.inputs[matrix.input] == 'true' run: rustup target add ${{ matrix.target }} - name: Rust Cache + if: github.event.inputs[matrix.input] == 'true' uses: Swatinem/rust-cache@v2 with: workspaces: src-tauri save-if: false - name: Install Node + if: github.event.inputs[matrix.input] == 'true' uses: actions/setup-node@v4 with: node-version: "20" - uses: 
pnpm/action-setup@v4 name: Install pnpm + if: github.event.inputs[matrix.input] == 'true' with: run_install: false - name: Pnpm install and check + if: github.event.inputs[matrix.input] == 'true' run: | pnpm i pnpm run prebuild ${{ matrix.target }} - name: Tauri build + if: github.event.inputs[matrix.input] == 'true' uses: tauri-apps/tauri-action@v0 env: NODE_OPTIONS: "--max_old_space_size=4096" @@ -77,7 +107,7 @@ jobs: args: --target ${{ matrix.target }} -b ${{ matrix.bundle }} - name: Upload Artifacts - if: matrix.os == 'macos-latest' + if: matrix.os == 'macos-latest' && github.event.inputs[matrix.input] == 'true' uses: actions/upload-artifact@v4 with: name: ${{ matrix.target }} @@ -85,7 +115,7 @@ jobs: if-no-files-found: error - name: Upload Artifacts - if: matrix.os == 'windows-latest' + if: matrix.os == 'windows-latest' && github.event.inputs[matrix.input] == 'true' uses: actions/upload-artifact@v4 with: name: ${{ matrix.target }} diff --git a/clash-verge-rev/UPDATELOG.md b/clash-verge-rev/UPDATELOG.md index a70b632e9d..f7c1d5cd71 100644 --- a/clash-verge-rev/UPDATELOG.md +++ b/clash-verge-rev/UPDATELOG.md @@ -13,6 +13,7 @@ - 修复`DNS`覆写 `fallback` `proxy server` `nameserver` `direct Nameserver` 字段支持留空 - 修复`DNS`覆写 `nameserver-policy` 字段无法正确识别 `geo` 库 - 修复搜索框输入特殊字符崩溃 +- 修复 Windows 下 Start UP 名称与 exe 名称不统一 ### ✨ 新增功能 diff --git a/clash-verge-rev/src-tauri/src/utils/autostart.rs b/clash-verge-rev/src-tauri/src/utils/autostart.rs index df04b8100a..f13009727d 100644 --- a/clash-verge-rev/src-tauri/src/utils/autostart.rs +++ b/clash-verge-rev/src-tauri/src/utils/autostart.rs @@ -39,10 +39,20 @@ pub fn get_exe_path() -> Result { pub fn create_shortcut() -> Result<()> { let exe_path = get_exe_path()?; let startup_dir = get_startup_dir()?; - let shortcut_path = startup_dir.join("Clash-Verge.lnk"); + let old_shortcut_path = startup_dir.join("Clash-Verge.lnk"); + let new_shortcut_path = startup_dir.join("Clash Verge.lnk"); - // 如果快捷方式已存在,直接返回成功 - if shortcut_path.exists() 
{ + // 移除旧的快捷方式 + if old_shortcut_path.exists() { + if let Err(e) = fs::remove_file(&old_shortcut_path) { + info!(target: "app", "移除旧快捷方式失败: {e}"); + } else { + info!(target: "app", "成功移除旧快捷方式"); + } + } + + // 如果新快捷方式已存在,直接返回成功 + if new_shortcut_path.exists() { info!(target: "app", "启动快捷方式已存在"); return Ok(()); } @@ -53,7 +63,7 @@ pub fn create_shortcut() -> Result<()> { $Shortcut = $WshShell.CreateShortcut('{}'); \ $Shortcut.TargetPath = '{}'; \ $Shortcut.Save()", - shortcut_path.to_string_lossy().replace("\\", "\\\\"), + new_shortcut_path.to_string_lossy().replace("\\", "\\\\"), exe_path.to_string_lossy().replace("\\", "\\\\") ); @@ -77,18 +87,29 @@ pub fn create_shortcut() -> Result<()> { #[cfg(target_os = "windows")] pub fn remove_shortcut() -> Result<()> { let startup_dir = get_startup_dir()?; - let shortcut_path = startup_dir.join("Clash-Verge.lnk"); + let old_shortcut_path = startup_dir.join("Clash-Verge.lnk"); + let new_shortcut_path = startup_dir.join("Clash Verge.lnk"); - // 如果快捷方式不存在,直接返回成功 - if !shortcut_path.exists() { - info!(target: "app", "启动快捷方式不存在,无需删除"); - return Ok(()); + let mut removed_any = false; + + // 删除旧的快捷方式 + if old_shortcut_path.exists() { + fs::remove_file(&old_shortcut_path).map_err(|e| anyhow!("删除旧快捷方式失败: {}", e))?; + info!(target: "app", "成功删除旧启动快捷方式"); + removed_any = true; } - // 删除快捷方式 - fs::remove_file(&shortcut_path).map_err(|e| anyhow!("删除快捷方式失败: {}", e))?; + // 删除新的快捷方式 + if new_shortcut_path.exists() { + fs::remove_file(&new_shortcut_path).map_err(|e| anyhow!("删除快捷方式失败: {}", e))?; + info!(target: "app", "成功删除启动快捷方式"); + removed_any = true; + } + + if !removed_any { + info!(target: "app", "启动快捷方式不存在,无需删除"); + } - info!(target: "app", "成功删除启动快捷方式"); Ok(()) } @@ -96,9 +117,9 @@ pub fn remove_shortcut() -> Result<()> { #[cfg(target_os = "windows")] pub fn is_shortcut_enabled() -> Result { let startup_dir = get_startup_dir()?; - let shortcut_path = startup_dir.join("Clash-Verge.lnk"); + let new_shortcut_path = 
startup_dir.join("Clash Verge.lnk"); - Ok(shortcut_path.exists()) + Ok(new_shortcut_path.exists()) } // 非 Windows 平台使用的空方法 diff --git a/filebrowser/CHANGELOG.md b/filebrowser/CHANGELOG.md index e2a68f7dee..fedfbbfd89 100644 --- a/filebrowser/CHANGELOG.md +++ b/filebrowser/CHANGELOG.md @@ -2,6 +2,19 @@ All notable changes to this project will be documented in this file. See [standard-version](https://github.com/conventional-changelog/standard-version) for commit guidelines. +## [2.40.0](https://github.com/filebrowser/filebrowser/compare/v2.39.0...v2.40.0) (2025-07-13) + + +### Features + +* add font size botton to text editor ([#5290](https://github.com/filebrowser/filebrowser/issues/5290)) ([035084d](https://github.com/filebrowser/filebrowser/commit/035084d8e83243065fad69bfac1b69559fbad5fb)) + + +### Bug Fixes + +* invalid path when uploading files ([9072cbc](https://github.com/filebrowser/filebrowser/commit/9072cbce340da55477906f5419a4cfb6d6937dc0)) +* Only left click should drag the image in extended image view ([b8454bb](https://github.com/filebrowser/filebrowser/commit/b8454bb2e41ca2848b926b66354468ba4b1c7ba5)) + ## [2.39.0](https://github.com/filebrowser/filebrowser/compare/v2.38.0...v2.39.0) (2025-07-13) diff --git a/filebrowser/docker/alpine/healthcheck.sh b/filebrowser/docker/alpine/healthcheck.sh index 70e71bf6df..f86550dcc4 100644 --- a/filebrowser/docker/alpine/healthcheck.sh +++ b/filebrowser/docker/alpine/healthcheck.sh @@ -2,8 +2,8 @@ set -e -PORT=${FB_PORT:-$(cat /tmp/FB_CONFIG | sh /JSON.sh | grep '\["port"\]' | awk '{print $2}')} -ADDRESS=${FB_ADDRESS:-$(cat /tmp/FB_CONFIG | sh /JSON.sh | grep '\["address"\]' | awk '{print $2}' | sed 's/"//g')} +PORT=${FB_PORT:-$(cat /config/settings.json | sh /JSON.sh | grep '\["port"\]' | awk '{print $2}')} +ADDRESS=${FB_ADDRESS:-$(cat /config/settings.json | sh /JSON.sh | grep '\["address"\]' | awk '{print $2}' | sed 's/"//g')} ADDRESS=${ADDRESS:-localhost} -wget -q --spider http://$ADDRESS:$PORT/health || exit 
1 \ No newline at end of file +wget -q --spider http://$ADDRESS:$PORT/health || exit 1 diff --git a/filebrowser/docker/alpine/init.sh b/filebrowser/docker/alpine/init.sh index 5ea2cf6e44..a4ac72ae4a 100755 --- a/filebrowser/docker/alpine/init.sh +++ b/filebrowser/docker/alpine/init.sh @@ -32,7 +32,4 @@ if [ -z "$config_file" ]; then set -- --config=/config/settings.json "$@" fi -# Create a symlink to the config file for compatibility with the healthcheck script -ln -s "$config_file" /tmp/FB_CONFIG - -exec filebrowser "$@" \ No newline at end of file +exec filebrowser "$@" diff --git a/filebrowser/frontend/src/api/tus.ts b/filebrowser/frontend/src/api/tus.ts index 64efe69ad3..fbfef4da49 100644 --- a/filebrowser/frontend/src/api/tus.ts +++ b/filebrowser/frontend/src/api/tus.ts @@ -1,5 +1,5 @@ import * as tus from "tus-js-client"; -import { baseURL, tusEndpoint, tusSettings } from "@/utils/constants"; +import { baseURL, tusEndpoint, tusSettings, origin } from "@/utils/constants"; import { useAuthStore } from "@/stores/auth"; import { useUploadStore } from "@/stores/upload"; import { removePrefix } from "@/api/utils"; @@ -35,7 +35,7 @@ export async function upload( } return new Promise((resolve, reject) => { const upload = new tus.Upload(content, { - endpoint: `${baseURL}${resourcePath}`, + endpoint: `${origin}${baseURL}${resourcePath}`, chunkSize: tusSettings.chunkSize, retryDelays: computeRetryDelays(tusSettings), parallelUploads: 1, diff --git a/filebrowser/frontend/src/components/files/ExtendedImage.vue b/filebrowser/frontend/src/components/files/ExtendedImage.vue index e872bfbed0..88b7830470 100644 --- a/filebrowser/frontend/src/components/files/ExtendedImage.vue +++ b/filebrowser/frontend/src/components/files/ExtendedImage.vue @@ -172,7 +172,8 @@ const setCenter = () => { imgex.value.style.top = position.value.center.y + "px"; }; -const mousedownStart = (event: Event) => { +const mousedownStart = (event: MouseEvent) => { + if (event.button !== 0) return; 
lastX.value = null; lastY.value = null; inDrag.value = true; @@ -184,8 +185,10 @@ const mouseMove = (event: MouseEvent) => { event.preventDefault(); }; const mouseUp = (event: Event) => { + if (inDrag.value) { + event.preventDefault(); + } inDrag.value = false; - event.preventDefault(); }; const touchStart = (event: TouchEvent) => { lastX.value = null; diff --git a/filebrowser/frontend/src/views/files/Editor.vue b/filebrowser/frontend/src/views/files/Editor.vue index 958670336f..f40fa39286 100644 --- a/filebrowser/frontend/src/views/files/Editor.vue +++ b/filebrowser/frontend/src/views/files/Editor.vue @@ -4,6 +4,18 @@ {{ fileStore.req?.name ?? "" }} + + {{ fontSize }}px + + (null); +const fontSize = ref(parseInt(localStorage.getItem("editorFontSize") || "14")); const isPreview = ref(false); const previewContent = ref(""); @@ -121,6 +134,7 @@ onMounted(() => { editor.value!.setTheme("ace/theme/twilight"); } + editor.value.setFontSize(fontSize.value); editor.value.focus(); }); @@ -186,6 +200,21 @@ const save = async () => { $showError(e); } }; + +const increaseFontSize = () => { + fontSize.value += 1; + editor.value?.setFontSize(fontSize.value); + localStorage.setItem("editorFontSize", fontSize.value.toString()); +}; + +const decreaseFontSize = () => { + if (fontSize.value > 1) { + fontSize.value -= 1; + editor.value?.setFontSize(fontSize.value); + localStorage.setItem("editorFontSize", fontSize.value.toString()); + } +}; + const close = () => { if (!editor.value?.session.getUndoManager().isClean()) { layoutStore.showHover("discardEditorChanges"); @@ -202,3 +231,10 @@ const preview = () => { isPreview.value = !isPreview.value; }; + + diff --git a/filebrowser/http/tus_handlers.go b/filebrowser/http/tus_handlers.go index c5eec9adf6..002d78b596 100644 --- a/filebrowser/http/tus_handlers.go +++ b/filebrowser/http/tus_handlers.go @@ -147,7 +147,8 @@ func tusPostHandler() handleFunc { // Enables the user to utilize the PATCH endpoint for uploading file data 
registerUpload(file.RealPath(), uploadLength) - w.Header().Set("Location", "/api/tus/"+r.URL.Path) + // Signal the frontend to reuse the current request URL + w.Header().Set("Location", "") return http.StatusCreated, nil }) diff --git a/lede/target/linux/bcm53xx/patches-6.6/180-usb-xhci-add-support-for-performing-fake-doorbell.patch b/lede/target/linux/bcm53xx/patches-6.6/180-usb-xhci-add-support-for-performing-fake-doorbell.patch index aef8435148..f8d8f0069b 100644 --- a/lede/target/linux/bcm53xx/patches-6.6/180-usb-xhci-add-support-for-performing-fake-doorbell.patch +++ b/lede/target/linux/bcm53xx/patches-6.6/180-usb-xhci-add-support-for-performing-fake-doorbell.patch @@ -85,7 +85,7 @@ it on BCM4708 family. /* * Reset a halted HC. * -@@ -481,6 +524,15 @@ static int xhci_run_finished(struct xhci +@@ -498,6 +541,15 @@ static int xhci_run_finished(struct xhci return -ENODEV; } @@ -103,11 +103,11 @@ it on BCM4708 family. if (xhci->quirks & XHCI_NEC_HOST) --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h -@@ -1662,6 +1662,7 @@ struct xhci_hcd { - #define XHCI_WRITE_64_HI_LO BIT_ULL(47) +@@ -1660,6 +1660,7 @@ struct xhci_hcd { #define XHCI_CDNS_SCTX_QUIRK BIT_ULL(48) #define XHCI_ETRON_HOST BIT_ULL(49) -+#define XHCI_FAKE_DOORBELL BIT_ULL(50) + #define XHCI_LIMIT_ENDPOINT_INTERVAL_9 BIT_ULL(50) ++#define XHCI_FAKE_DOORBELL BIT_ULL(51) unsigned int num_active_eps; unsigned int limit_active_eps; diff --git a/mihomo/go.mod b/mihomo/go.mod index 82b14af295..4bd156cc50 100644 --- a/mihomo/go.mod +++ b/mihomo/go.mod @@ -31,7 +31,7 @@ require ( github.com/metacubex/sing-shadowsocks v0.2.11-0.20250621023810-0e9ef9dd0c92 github.com/metacubex/sing-shadowsocks2 v0.2.5-0.20250621023950-93d605a2143d github.com/metacubex/sing-shadowtls v0.0.0-20250503063515-5d9f966d17a2 - github.com/metacubex/sing-tun v0.4.7-0.20250708161036-39e27fac35ed + github.com/metacubex/sing-tun v0.4.7-0.20250611091011-60774779fdd8 github.com/metacubex/sing-vmess v0.2.2 
github.com/metacubex/sing-wireguard v0.0.0-20250503063753-2dc62acc626f github.com/metacubex/smux v0.0.0-20250503055512-501391591dee @@ -82,7 +82,6 @@ require ( github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect github.com/gobwas/httphead v0.1.0 // indirect github.com/gobwas/pool v0.2.1 // indirect - github.com/godbus/dbus/v5 v5.1.0 // indirect github.com/google/btree v1.1.3 // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38 // indirect @@ -93,7 +92,7 @@ require ( github.com/mailru/easyjson v0.7.7 // indirect github.com/mdlayher/socket v0.4.1 // indirect github.com/metacubex/gvisor v0.0.0-20250324165734-5857f47bd43b // indirect - github.com/metacubex/nftables v0.0.0-20250708020511-be959b9e8902 // indirect + github.com/metacubex/nftables v0.0.0-20250503052935-30a69ab87793 // indirect github.com/oasisprotocol/deoxysii v0.0.0-20220228165953-2091330c22b7 // indirect github.com/onsi/ginkgo/v2 v2.9.5 // indirect github.com/pierrec/lz4/v4 v4.1.14 // indirect diff --git a/mihomo/go.sum b/mihomo/go.sum index 8097b74bdc..f089d65209 100644 --- a/mihomo/go.sum +++ b/mihomo/go.sum @@ -58,8 +58,6 @@ github.com/gobwas/pool v0.2.1 h1:xfeeEhW7pwmX8nuLVlqbzVc7udMDrwetjEv+TZIz1og= github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= github.com/gobwas/ws v1.4.0 h1:CTaoG1tojrh4ucGPcoJFiAQUAsEWekEWvLy7GsVNqGs= github.com/gobwas/ws v1.4.0/go.mod h1:G3gNqMNtPppf5XUz7O4shetPpcZ1VJ7zt18dlUeakrc= -github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= -github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gofrs/uuid/v5 v5.3.2 h1:2jfO8j3XgSwlz/wHqemAEugfnTlikAYHhnqQ8Xh4fE0= github.com/gofrs/uuid/v5 v5.3.2/go.mod h1:CDOjlDMVAtN56jqyRUZh58JT31Tiw7/oQyEXZV+9bD8= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -112,8 +110,8 @@ github.com/metacubex/gopacket 
v1.1.20-0.20230608035415-7e2f98a3e759 h1:cjd4biTvO github.com/metacubex/gopacket v1.1.20-0.20230608035415-7e2f98a3e759/go.mod h1:UHOv2xu+RIgLwpXca7TLrXleEd4oR3sPatW6IF8wU88= github.com/metacubex/gvisor v0.0.0-20250324165734-5857f47bd43b h1:RUh4OdVPz/jDrM9MQ2ySuqu2aeBqcA8rtfWUYLZ8RtI= github.com/metacubex/gvisor v0.0.0-20250324165734-5857f47bd43b/go.mod h1:8LpS0IJW1VmWzUm3ylb0e2SK5QDm5lO/2qwWLZgRpBU= -github.com/metacubex/nftables v0.0.0-20250708020511-be959b9e8902 h1:+OzINlacUgggoeiEBNtXwDzedhlijf6sYiBfavBdDac= -github.com/metacubex/nftables v0.0.0-20250708020511-be959b9e8902/go.mod h1:Hw5GBK4x+A1fhll+OQdPpbsS0YGHqxFZFxr17PFd2zg= +github.com/metacubex/nftables v0.0.0-20250503052935-30a69ab87793 h1:1Qpuy+sU3DmyX9HwI+CrBT/oLNJngvBorR2RbajJcqo= +github.com/metacubex/nftables v0.0.0-20250503052935-30a69ab87793/go.mod h1:RjRNb4G52yAgfR+Oe/kp9G4PJJ97Fnj89eY1BFO3YyA= github.com/metacubex/quic-go v0.53.1-0.20250628094454-fda5262d1d9c h1:ABQzmOaZddM3q0OYeoZEc0XF+KW+dUdPNvY/c5rsunI= github.com/metacubex/quic-go v0.53.1-0.20250628094454-fda5262d1d9c/go.mod h1:eWlAK3zsKI0P8UhYpXlIsl3mtW4D6MpMNuYLIu8CKWI= github.com/metacubex/randv2 v0.2.0 h1:uP38uBvV2SxYfLj53kuvAjbND4RUDfFJjwr4UigMiLs= @@ -131,8 +129,8 @@ github.com/metacubex/sing-shadowsocks2 v0.2.5-0.20250621023950-93d605a2143d h1:E github.com/metacubex/sing-shadowsocks2 v0.2.5-0.20250621023950-93d605a2143d/go.mod h1:+ukTd0OPFglT3bnKAYTJWYPbuox6HYNXE235r5tHdUk= github.com/metacubex/sing-shadowtls v0.0.0-20250503063515-5d9f966d17a2 h1:gXU+MYPm7Wme3/OAY2FFzVq9d9GxPHOqu5AQfg/ddhI= github.com/metacubex/sing-shadowtls v0.0.0-20250503063515-5d9f966d17a2/go.mod h1:mbfboaXauKJNIHJYxQRa+NJs4JU9NZfkA+I33dS2+9E= -github.com/metacubex/sing-tun v0.4.7-0.20250708161036-39e27fac35ed h1:biW5cGsM+953MpQhowBn7qKmIcHwtqWu7ygDH4xNJSI= -github.com/metacubex/sing-tun v0.4.7-0.20250708161036-39e27fac35ed/go.mod h1:t6aROwOw+QJxnWNTlffNAdZZGpri4JEiswoltkrWT64= +github.com/metacubex/sing-tun v0.4.7-0.20250611091011-60774779fdd8 
h1:4zWKqxTx75TbfW2EmlQ3hxM6RTRg2PYOAVMCnU4I61I= +github.com/metacubex/sing-tun v0.4.7-0.20250611091011-60774779fdd8/go.mod h1:2YywXPWW8Z97kTH7RffOeykKzU+l0aiKlglWV1PAS64= github.com/metacubex/sing-vmess v0.2.2 h1:nG6GIKF1UOGmlzs+BIetdGHkFZ20YqFVIYp5Htqzp+4= github.com/metacubex/sing-vmess v0.2.2/go.mod h1:CVDNcdSLVYFgTHQlubr88d8CdqupAUDqLjROos+H9xk= github.com/metacubex/sing-wireguard v0.0.0-20250503063753-2dc62acc626f h1:Sr/DYKYofKHKc4GF3qkRGNuj6XA6c0eqPgEDN+VAsYU= diff --git a/nodepass/.github/workflows/docker.yml b/nodepass/.github/workflows/docker.yml new file mode 100644 index 0000000000..cb7a8731ca --- /dev/null +++ b/nodepass/.github/workflows/docker.yml @@ -0,0 +1,66 @@ +name: Docker + +on: + push: + tags: [ 'v*.*.*' ] + +env: + REGISTRY: ghcr.io + IMAGE_NAME: ${{ github.repository }} + VERSION: ${{ github.ref_name }} + +jobs: + build: + runs-on: ubuntu-latest + permissions: + contents: read + packages: write + id-token: write + + steps: + - name: Checkout repository + uses: actions/checkout@v4.2.2 + + - name: Install cosign + if: github.event_name != 'pull_request' + uses: sigstore/cosign-installer@v3.8.2 + with: + cosign-release: 'v2.2.4' + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3.10.0 + + - name: Log into registry ${{ env.REGISTRY }} + if: github.event_name != 'pull_request' + uses: docker/login-action@v3.4.0 + with: + registry: ${{ env.REGISTRY }} + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Extract Docker metadata + id: meta + uses: docker/metadata-action@v5.7.0 + with: + images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} + + - name: Build and push Docker image + id: build-and-push + uses: docker/build-push-action@v6.16.0 + with: + context: . 
+ push: ${{ github.event_name != 'pull_request' }} + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + cache-from: type=gha + cache-to: type=gha,mode=max + platforms: linux/amd64,linux/arm64 + build-args: VERSION=${{ env.VERSION }} + provenance: false + + - name: Sign the published Docker image + if: ${{ github.event_name != 'pull_request' }} + env: + TAGS: ${{ steps.meta.outputs.tags }} + DIGEST: ${{ steps.build-and-push.outputs.digest }} + run: echo ${{ steps.meta.outputs.tags }} | tr ',' '\n' | xargs -I {} cosign sign --yes {}@${DIGEST} diff --git a/nodepass/.github/workflows/release.yml b/nodepass/.github/workflows/release.yml new file mode 100644 index 0000000000..2943d01698 --- /dev/null +++ b/nodepass/.github/workflows/release.yml @@ -0,0 +1,24 @@ +name: Release + +on: + push: + tags: [ 'v*.*.*' ] + +jobs: + goreleaser: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4.2.2 + with: + fetch-depth: 0 + - name: Set up Go + uses: actions/setup-go@v5.4.0 + - name: Run GoReleaser + uses: goreleaser/goreleaser-action@v6.3.0 + with: + distribution: goreleaser + version: 'latest' + args: release --clean + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/nodepass/.goreleaser.yml b/nodepass/.goreleaser.yml new file mode 100644 index 0000000000..db2a018ddd --- /dev/null +++ b/nodepass/.goreleaser.yml @@ -0,0 +1,36 @@ +version: 2 + +builds: + - env: + - CGO_ENABLED=0 + main: ./cmd/nodepass + goos: + - darwin + - freebsd + - linux + - windows + goarch: + - 386 + - arm + - amd64 + - arm64 + - mips + - mipsle + - mips64 + - mips64le + goarm: + - 6 + - 7 + gomips: + - hardfloat + - softfloat + flags: + - -trimpath + ldflags: + - -s -w -X main.version={{ .Tag }} + +archives: + - formats: [tar.gz] +release: + prerelease: true + mode: replace diff --git a/nodepass/CODE_OF_CONDUCT.md b/nodepass/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000..f33c19ff37 --- /dev/null +++ 
b/nodepass/CODE_OF_CONDUCT.md @@ -0,0 +1,40 @@ +# Code of Conduct + +## Our Pledge + +We are committed to providing a friendly, safe and welcoming environment for all, regardless of age, disability, ethnicity, gender identity, level of experience, nationality, personal appearance, race, religion, or sexual orientation. + +## Our Standards + +**Positive behaviors include:** +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +**Unacceptable behaviors include:** +* Harassment, trolling, or discriminatory comments +* Personal attacks or insulting/derogatory language +* Publishing others' private information without permission +* Any conduct that could reasonably be considered inappropriate in a professional setting + +## Enforcement + +Project maintainers are responsible for clarifying standards and may take appropriate corrective action in response to unacceptable behavior, including: + +* Warning the individual +* Temporary restriction from project spaces +* Permanent ban from the project community + +## Scope + +This Code of Conduct applies to all project spaces, including GitHub repositories, issue trackers, social media accounts, and any events where individuals represent the project. + +## Reporting + +If you experience or witness unacceptable behavior, please report it to **team@mail.nodepass.eu**. All reports will be handled confidentially. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant](https://www.contributor-covenant.org), version 2.1. diff --git a/nodepass/CONTRIBUTING.md b/nodepass/CONTRIBUTING.md new file mode 100644 index 0000000000..df6bfde271 --- /dev/null +++ b/nodepass/CONTRIBUTING.md @@ -0,0 +1,319 @@ +# Contributing to NodePass + +Thank you for your interest in contributing to NodePass! 
We welcome all kinds of contributions, from bug reports and feature requests to code improvements and documentation updates. + +## Table of Contents + +- [Organization](#organization) +- [Code of Conduct](#code-of-conduct) +- [Getting Started](#getting-started) +- [Development Setup](#development-setup) +- [Contributing Guidelines](#contributing-guidelines) +- [Code Style and Standards](#code-style-and-standards) +- [Testing](#testing) +- [Documentation](#documentation) +- [Submitting Changes](#submitting-changes) +- [Community and Support](#community-and-support) + +## Code of Conduct + +Please read and follow our [Code of Conduct](CODE_OF_CONDUCT.md). We are committed to providing a welcoming and inclusive environment for all contributors. + +## Getting Started + +### Prerequisites + +- **Go** (as specified in `go.mod`) +- **Git** for version control +- **Docker** (optional, for container-based development and testing) +- Basic knowledge of TCP/UDP networking concepts +- Familiarity with TLS/SSL concepts for security features + +### Understanding the Architecture + +NodePass is built on a three-tier architecture: + +- **Server Mode**: Accepts incoming tunnel connections with configurable security +- **Client Mode**: Establishes outbound connections to tunnel servers +- **Master Mode**: Provides RESTful API for dynamic instance management + +Key components: +- `/cmd/nodepass/`: Main application entry point and core dispatch logic +- `/internal/`: Core implementation packages (server, client, master, common utilities) +- `/docs/`: Comprehensive documentation in English and Chinese +- External dependencies: NodePassProject ecosystem libraries for certificates, connections, logging, and pooling + +## Development Setup + +### 1. 
Fork and Clone + +```bash +# Fork the repository on GitHub, then clone your fork +git clone https://github.com/YOUR_USERNAME/nodepass.git +cd nodepass + +# Add the upstream repository +git remote add upstream https://github.com/yosebyte/nodepass.git +``` + +### 2. Install Dependencies + +```bash +# Download and install dependencies +go mod download + +# Verify dependencies +go mod verify +``` + +### 3. Build and Test + +```bash +# Build the application +go build -o nodepass ./cmd/nodepass + +# Test the build +./nodepass "server://localhost:10101/127.0.0.1:8080?log=debug&tls=0" +``` + +### 4. Development with Docker (Optional) + +```bash +# Build development container +docker build --build-arg VERSION=dev -t nodepass:dev . + +# Run in container +docker run --rm -p 10101:10101 nodepass:dev "server://:10101/127.0.0.1:8080?log=debug&tls=0" +``` + +## Contributing Guidelines + +### Types of Contributions + +- **🐛 Bug Reports**: Help us identify and fix issues +- **✨ Feature Requests**: Suggest new features or improvements +- **📝 Documentation**: Improve existing docs or add new ones +- **🔧 Code Contributions**: Bug fixes, feature implementations, refactoring +- **🌐 Translations**: Help translate documentation to other languages +- **🧪 Testing**: Add test cases and improve test coverage + +### Reporting Issues + +When reporting bugs or requesting features, please: + +1. **Search existing issues** to avoid duplicates +2. **Use our issue templates** when available +3. **Provide detailed information**: + - NodePass version and build information + - Operating system and architecture + - Network configuration details + - Complete command-line arguments used + - Expected vs. actual behavior + - Relevant log output (use `log=debug` for detailed logs) + - Steps to reproduce the issue + +### Feature Requests + +For new features: + +1. **Check the roadmap** and existing feature requests +2. **Describe the use case** clearly +3. **Explain the expected behavior** +4. 
**Consider backwards compatibility** +5. **Discuss implementation approach** if you plan to contribute code + +## Code Style and Standards + +### Go Code Style + +We follow standard Go conventions with project-specific guidelines: + +- Write idiomatic Go code following [Effective Go](https://golang.org/doc/effective_go.html) +- Use `gofmt` for consistent formatting and `go vet` to catch common errors +- Follow the single responsibility principle and prefer composition over inheritance +- Use descriptive variable and function names with proper Go naming conventions +- Include both Chinese and English comments for public APIs (maintaining project tradition) +- Implement proper error handling with context wrapping +- Use the project's logging framework consistently with appropriate log levels +- Protect shared state with mutexes and use channels for goroutine coordination +- Always handle goroutine cleanup with proper defer and recover patterns + +### Configuration and CLI + +- Use URL-based configuration syntax: `scheme://[password@]host:port/target?param=value` +- Support environment variables for sensitive configuration +- Provide sensible defaults for all optional parameters +- Validate configuration early in the application lifecycle + +### Performance Considerations + +- Minimize allocations in hot paths +- Use connection pooling for frequent connections +- Implement graceful degradation under load +- Profile memory and CPU usage for critical paths +- Use buffered I/O where appropriate + +## Testing + +### Testing Strategy + +Currently, the project focuses on integration testing through real-world usage scenarios. We welcome contributions to improve test coverage: + +#### Manual Testing + +1. **Basic Functionality**: Test server, client, and master modes with debug logging +2. **TLS Modes**: Verify all three TLS security levels (0, 1, 2) +3. 
**Protocol Support**: Test TCP tunneling and UDP forwarding with various applications + +#### Future Testing Goals + +We encourage contributions in these areas: +- Unit Tests for individual functions and methods +- Integration Tests for component interactions +- Benchmark Tests for performance regression detection +- Fuzzing Tests for security and robustness +- End-to-End Tests for complete workflow validation + +### Testing Guidelines + +When adding tests: +- Use Go's standard testing package with `*_test.go` naming convention +- Write table-driven tests where applicable +- Include both positive and negative test cases +- Test error conditions and edge cases +- Use meaningful test names that describe the scenario + +## Documentation + +### Documentation Standards + +- **Write in clear, simple English** +- **Include practical examples** for all features +- **Maintain both English and Chinese versions** when possible +- **Use consistent formatting** and structure +- **Test all code examples** to ensure they work + +### Documentation Structure + +- `README.md`: Project overview and quick start +- `docs/en/`: English documentation +- `docs/zh/`: Chinese documentation +- Inline code comments for complex logic +- API documentation for master mode endpoints + +### Contributing to Documentation + +1. **Update both language versions** when possible +2. **Test all examples** before submitting +3. **Use proper Markdown formatting** +4. **Include relevant screenshots** for UI components +5. **Cross-reference related documentation** + +## Submitting Changes + +### Pull Request Process + +1. **Create a feature branch** from the latest `main`: + ```bash + git checkout main + git pull upstream main + git checkout -b feature/your-feature-name + ``` + +2. **Make your changes** following the guidelines above + +3. **Test your changes** thoroughly by building the application and running real scenarios + +4. 
**Commit your changes** with descriptive messages using [Conventional Commits](https://www.conventionalcommits.org/) format: + - `feat`: New feature + - `fix`: Bug fix + - `docs`: Documentation changes + - `refactor`: Code refactoring + - `perf`: Performance improvements + - `test`: Adding or updating tests + +5. **Push to your fork** and create a pull request + +### Commit Message Guidelines + +Use [Conventional Commits](https://www.conventionalcommits.org/) format: + +``` +[optional scope]: + +[optional body] + +[optional footer(s)] +``` + +**Examples:** +- `feat(server): add support for IPv6 addresses` +- `fix(client): resolve connection timeout issues in high-latency networks` +- `docs: update installation guide with Docker instructions` +- `refactor(common): simplify address parsing logic` + +### Pull Request Guidelines + +**Before submitting:** +- [ ] Code follows the project style guidelines +- [ ] All tests pass (or explain why they should be skipped) +- [ ] Documentation is updated if needed +- [ ] Commit messages follow the conventional format +- [ ] No merge conflicts with the main branch + +**In your pull request:** +- [ ] Provide a clear description of changes +- [ ] Reference any related issues +- [ ] Include testing instructions +- [ ] Add screenshots for UI changes +- [ ] List any breaking changes + +### Review Process + +1. **Automated checks** run on all pull requests +2. **Code review** by project maintainers +3. **Testing** in various environments +4. **Documentation review** for user-facing changes +5. 
**Final approval** and merge + +## Community and Support + +### Communication Channels + +- **GitHub Issues**: Bug reports and feature requests +- **Telegram Channel**: [@NodePassChannel](https://t.me/NodePassChannel) - Updates and announcements +- **Telegram Group**: [@NodePassGroup](https://t.me/NodePassGroup) - Community discussion +- **Discord**: [Join our server](https://discord.gg/2cnXcnDMGc) - Real-time chat + +### Getting Help + +If you need help: + +1. **Check the documentation** in the `docs/` directory +2. **Search existing issues** for similar problems +3. **Ask in our community channels** for general questions +4. **Create a GitHub issue** for bugs or feature requests + +### Recognition + +We appreciate all contributions! Contributors will be: + +- **Listed in our contributors** section +- **Mentioned in release notes** for significant contributions +- **Invited to become maintainers** for consistent, high-quality contributions + +### Maintainer Responsibilities + +Current maintainers handle: + +- **Code review** and pull request management +- **Release planning** and version management +- **Community management** and support +- **Security** issue handling +- **Roadmap** planning and prioritization + +--- + +Thank you for contributing to NodePass! Your contributions help make universal TCP/UDP tunneling more accessible and reliable for everyone. + +For questions about contributing, please reach out through our community channels or create a GitHub issue. diff --git a/nodepass/Dockerfile b/nodepass/Dockerfile new file mode 100644 index 0000000000..ee62f6a6c2 --- /dev/null +++ b/nodepass/Dockerfile @@ -0,0 +1,11 @@ +FROM golang:alpine AS builder +RUN apk update && apk add --no-cache ca-certificates +WORKDIR /root +ADD . . 
+ARG VERSION +WORKDIR /root/cmd/nodepass +RUN env CGO_ENABLED=0 go build -v -trimpath -ldflags "-s -w -X main.version=${VERSION}" +FROM scratch +COPY --from=builder /etc/ssl/certs /etc/ssl/certs +COPY --from=builder /root/cmd/nodepass/nodepass /nodepass +ENTRYPOINT ["/nodepass"] diff --git a/nodepass/LICENSE b/nodepass/LICENSE new file mode 100644 index 0000000000..214e20c278 --- /dev/null +++ b/nodepass/LICENSE @@ -0,0 +1,28 @@ +BSD 3-Clause License + +Copyright (c) 2025, 𝐘𝐨𝐬𝐞𝐛𝐲𝐭𝐞 + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
\ No newline at end of file diff --git a/nodepass/README.md b/nodepass/README.md new file mode 100644 index 0000000000..4419e73acf --- /dev/null +++ b/nodepass/README.md @@ -0,0 +1,120 @@ +
+ nodepass + +[![Mentioned in Awesome Go](https://awesome.re/mentioned-badge.svg)](https://github.com/avelino/awesome-go) +[![GitHub release](https://img.shields.io/github/v/release/yosebyte/nodepass)](https://github.com/yosebyte/nodepass/releases) +[![GitHub downloads](https://img.shields.io/github/downloads/yosebyte/nodepass/total.svg)](https://github.com/yosebyte/nodepass/releases) +[![Go Report Card](https://goreportcard.com/badge/github.com/yosebyte/nodepass)](https://goreportcard.com/report/github.com/yosebyte/nodepass) +[![License](https://img.shields.io/badge/License-BSD_3--Clause-blue.svg)](https://opensource.org/licenses/BSD-3-Clause) +[![Go Reference](https://pkg.go.dev/badge/github.com/yosebyte/nodepass.svg)](https://pkg.go.dev/github.com/yosebyte/nodepass) +[![Ask DeepWiki](https://deepwiki.com/badge.svg)](https://deepwiki.com/yosebyte/nodepass) +![GitHub last commit](https://img.shields.io/github/last-commit/yosebyte/nodepass) + +English | [简体中文](README_zh.md) +
+ +**NodePass** is an open-source, lightweight, enterprise-grade TCP/UDP network tunneling solution featuring an all-in-one architecture with separation of control and data channels, along with flexible and high-performance instance control. It supports zero-configuration deployment, intelligent connection pooling, tiered TLS encryption, and seamless protocol conversion. Designed for DevOps professionals and system administrators to effortlessly handle complex network scenarios including firewall traversal, NAT bypassing, and advanced tunnel management. + +## 💎 Key Features + +- **🔀 Multiple Operating Modes** + - Server mode accepting incoming tunnels with configurable security + - Client mode for establishing outbound connections to tunnel servers + - Master mode with RESTful API for dynamic instance management + +- **🌍 Protocol Support** + - TCP tunneling with persistent connection handling + - UDP datagram forwarding with configurable buffer sizes + - Intelligent routing mechanisms for both protocols + +- **🛡️ Security Options** + - TLS Mode 0: Unencrypted mode for maximum speed in trusted networks + - TLS Mode 1: Self-signed certificates for quick secure setup + - TLS Mode 2: Custom certificate validation for enterprise security + +- **⚡ Performance Features** + - Smart connection pooling with real-time capacity adaptation + - Dynamic interval adjustment based on network conditions + - Minimal resource footprint even under heavy load + +- **🧰 Simple Configuration** + - Zero configuration files required + - Simple command-line parameters + - Environment variables for fine-tuning performance + +## 📋 Quick Start + +### 📥 Installation + +- **Pre-built Binaries**: Download from [releases page](https://github.com/yosebyte/nodepass/releases). 
+- **Container Image**: `docker pull ghcr.io/yosebyte/nodepass:latest` + +### 🚀 Basic Usage + +**Server Mode** +```bash +nodepass "server://:10101/127.0.0.1:8080?log=debug&tls=1" +``` + +**Client Mode** +```bash +nodepass "client://server:10101/127.0.0.1:8080?min=128" +``` + +**Master Mode (API)** +```bash +nodepass "master://:10101/api?log=debug&tls=1" +``` + +## 📚 Documentation + +Explore the complete documentation to learn more about NodePass: + +- [Installation Guide](/docs/en/installation.md) +- [Usage Instructions](/docs/en/usage.md) +- [Configuration Options](/docs/en/configuration.md) +- [API Reference](/docs/en/api.md) +- [Examples](/docs/en/examples.md) +- [How It Works](/docs/en/how-it-works.md) +- [Troubleshooting](/docs/en/troubleshooting.md) + +## 🌐 Ecosystem + +The [NodePassProject](https://github.com/NodePassProject) organization develops various frontend applications and auxiliary tools to enhance the NodePass experience: + +- **[NodePassDash](https://github.com/NodePassProject/NodePassDash)**: A modern NodePass management interface that provides master management, instance management, traffic statistics, history records, and more. + +- **[NodePanel](https://github.com/NodePassProject/NodePanel)**: A lightweight frontend panel that provides visual tunnel management, deployable on Vercel or Cloudflare Pages. + +- **[npsh](https://github.com/NodePassProject/npsh)**: A collection of one-click scripts that provide simple deployment for API or Dashboard with flexible configuration and management. + +## 💬 Discussion + +- Follow our [Telegram Channel](https://t.me/NodePassChannel) for updates and community support. + +- Join our [Discord](https://discord.gg/2cnXcnDMGc) and [Telegram Group](https://t.me/NodePassGroup) to share experiences and ideas. + +## 📄 License + +Project `NodePass` is licensed under the [BSD 3-Clause License](LICENSE). + +## ⚖️ Disclaimer + +This project is provided "as is" without any warranties. 
Users assume all risks and must comply with local laws for legal use only. Developers are not liable for any direct, indirect, incidental, or consequential damages. Secondary development requires commitment to legal use and self-responsibility for legal compliance. Developers reserve the right to modify software features and this disclaimer at any time. Final interpretation rights belong to developers. + +## 🤝 Sponsors + + + + + + +
+ + + +
+ +## ⭐ Stargazers + +[![Stargazers over time](https://starchart.cc/yosebyte/nodepass.svg?variant=adaptive)](https://starchart.cc/yosebyte/nodepass) diff --git a/nodepass/README_zh.md b/nodepass/README_zh.md new file mode 100644 index 0000000000..8e6fbfbf9c --- /dev/null +++ b/nodepass/README_zh.md @@ -0,0 +1,120 @@ +
+ nodepass + +[![Mentioned in Awesome Go](https://awesome.re/mentioned-badge.svg)](https://github.com/avelino/awesome-go) +[![GitHub release](https://img.shields.io/github/v/release/yosebyte/nodepass)](https://github.com/yosebyte/nodepass/releases) +[![GitHub downloads](https://img.shields.io/github/downloads/yosebyte/nodepass/total.svg)](https://github.com/yosebyte/nodepass/releases) +[![Go Report Card](https://goreportcard.com/badge/github.com/yosebyte/nodepass)](https://goreportcard.com/report/github.com/yosebyte/nodepass) +[![License](https://img.shields.io/badge/License-BSD_3--Clause-blue.svg)](https://opensource.org/licenses/BSD-3-Clause) +[![Go Reference](https://pkg.go.dev/badge/github.com/yosebyte/nodepass.svg)](https://pkg.go.dev/github.com/yosebyte/nodepass) +[![Ask DeepWiki](https://deepwiki.com/badge.svg)](https://deepwiki.com/yosebyte/nodepass) +![GitHub last commit](https://img.shields.io/github/last-commit/yosebyte/nodepass) + +[English](README.md) | 简体中文 +
+ +**NodePass** 是一款开源、轻量的企业级 TCP/UDP 网络隧道解决方案,采用多合一架构设计,通过控制通道与数据通道分离,实现灵活、高性能的实例管控。支持零配置文件部署,内置智能连接池、分级 TLS 加密和无缝协议转换。专为 DevOps 工程师和系统管理员打造,助力轻松应对防火墙穿透、NAT 绕过和高级隧道管理等复杂网络场景。 + +## 💎 核心功能 + +- **🔀 多种操作模式** + - 服务端模式接受传入隧道连接并提供可配置的安全选项 + - 客户端模式用于建立与隧道服务端的出站连接 + - 主控模式提供RESTful API进行动态实例管理 + +- **🌍 协议支持** + - TCP隧道传输与持久连接管理 + - UDP数据报转发与可配置的缓冲区大小 + - 两种协议的智能路由机制 + +- **🛡️ 安全选项** + - TLS模式0:在可信网络中获得最大速度的无加密模式 + - TLS模式1:使用自签名证书提供快速安全设置 + - TLS模式2:使用自定义证书验证实现企业级安全 + +- **⚡ 性能特性** + - 智能连接池,具备实时容量自适应功能 + - 基于网络状况的动态间隔调整 + - 高负载下保持最小资源占用 + +- **🧰 简单配置** + - 零配置文件设计 + - 简洁的命令行参数 + - 环境变量支持性能精细调优 + +## 📋 快速开始 + +### 📥 安装方法 + +- **预编译二进制文件**: 从[发布页面](https://github.com/yosebyte/nodepass/releases)下载。 +- **容器镜像**: `docker pull ghcr.io/yosebyte/nodepass:latest` + +### 🚀 基本用法 + +**服务端模式** +```bash +nodepass "server://:10101/127.0.0.1:8080?log=debug&tls=1" +``` + +**客户端模式** +```bash +nodepass "client://server:10101/127.0.0.1:8080?min=128" +``` + +**主控模式 (API)** +```bash +nodepass "master://:10101/api?log=debug&tls=1" +``` + +## 📚 文档 + +探索完整文档以了解更多关于NodePass的信息: + +- [安装指南](/docs/zh/installation.md) +- [使用说明](/docs/zh/usage.md) +- [配置选项](/docs/zh/configuration.md) +- [API参考](/docs/zh/api.md) +- [使用示例](/docs/zh/examples.md) +- [工作原理](/docs/zh/how-it-works.md) +- [故障排除](/docs/zh/troubleshooting.md) + +## 🌐 生态系统 + +[NodePassProject](https://github.com/NodePassProject) 组织开发了各种前端应用和辅助工具来增强 NodePass 体验: + +- **[NodePassDash](https://github.com/NodePassProject/NodePassDash)**: 现代化的 NodePass 管理界面,提供主控管理、实例管理、流量统计、历史记录等功能。 + +- **[NodePanel](https://github.com/NodePassProject/NodePanel)**: 轻量化的前端面板,提供可视化的隧道管理功能,在 Vercel 或 Cloudflare Pages 轻松部署。 + +- **[npsh](https://github.com/NodePassProject/npsh)**: 简单易用的 NodePass 一键脚本合集,包括 API 主控、Dash 面板的安装部署、灵活配置和辅助管理。 + +## 💬 讨论 + +- 关注我们的 [Telegram 频道](https://t.me/NodePassChannel) 获取最新更新和社区支持。 + +- 加入我们的 [Discord](https://discord.gg/2cnXcnDMGc) 和 [Telegram 群组](https://t.me/NodePassGroup) 分享经验和想法。 + +## 📄 许可协议 + +`NodePass`项目根据[BSD 
3-Clause许可证](LICENSE)授权。 + +## ⚖️ 免责声明 + +本项目以“现状”提供,开发者不提供任何明示或暗示的保证。用户使用风险自担,需遵守当地法律法规,仅限合法用途。开发者对任何直接、间接、偶然或后果性损害概不负责。进行二次开发须承诺合法使用并自负法律责任。开发者保留随时修改软件功能及本声明的权利。最终解释权归开发者所有。 + +## 🤝 赞助商 + + + + + + +
+ + + +
+ +## ⭐ Star趋势 + +[![Stargazers over time](https://starchart.cc/yosebyte/nodepass.svg?variant=adaptive)](https://starchart.cc/yosebyte/nodepass) diff --git a/nodepass/SECURITY.md b/nodepass/SECURITY.md new file mode 100644 index 0000000000..a9d2c6cea9 --- /dev/null +++ b/nodepass/SECURITY.md @@ -0,0 +1,161 @@ +# Security Policy + +## Supported Versions + +We provide security updates for the following versions of NodePass: + +| Version | Supported | +| ------- | ------------------ | +| Latest | :white_check_mark: | + +## Security Features + +NodePass implements multiple security layers: + +### TLS Encryption Modes + +- **TLS Mode 0**: Unencrypted mode for trusted networks (highest performance) +- **TLS Mode 1**: Self-signed certificates with TLS 1.3 (balanced security) +- **TLS Mode 2**: Custom certificate validation for enterprise security + +### Network Security + +- Password-based tunnel authentication +- Connection pooling with capacity limits +- Graceful degradation under load +- Configurable timeout and retry mechanisms + +## Reporting Security Vulnerabilities + +We take security seriously. If you discover a security vulnerability in NodePass, please report it responsibly. + +### Where to Report + +- **Email**: team@mail.nodepass.eu +- **Subject**: [SECURITY] Brief description of the issue + +### What to Include + +Please provide the following information: + +1. **Description** of the vulnerability +2. **Steps to reproduce** the issue +3. **Potential impact** and affected versions +4. **Your contact information** for follow-up +5. **Any proof-of-concept code** (if applicable) + +### Response Process + +1. **Acknowledgment**: We will acknowledge receipt within 48 hours +2. **Assessment**: Initial assessment within 5 business days +3. **Updates**: Regular updates on investigation progress +4. 
**Resolution**: Security patch and public disclosure coordination + +### Responsible Disclosure + +- Please **do not** create public GitHub issues for security vulnerabilities +- Give us reasonable time to investigate and patch the issue +- We will coordinate public disclosure timing with you +- Security researchers will be credited in our security advisories + +## Security Best Practices + +### For Users + +- **Use TLS Mode 1 or 2** in production environments +- **Choose strong passwords** for tunnel authentication +- **Keep NodePass updated** to the latest version +- **Monitor logs** for suspicious activity +- **Limit network exposure** by binding to specific interfaces +- **Use firewall rules** to restrict access to tunnel ports + +### For Developers + +- **Validate all inputs** including URL parameters and network data +- **Use secure coding practices** following Go security guidelines +- **Implement proper error handling** without leaking sensitive information +- **Test security features** thoroughly before release +- **Follow the principle of least privilege** in code design + +## Security Architecture + +### Network Layer + +- TLS 1.3 encryption for secure data transmission +- Certificate validation and auto-reload capabilities +- Protection against common network attacks + +### Application Layer + +- Input validation and sanitization +- Secure memory handling for sensitive data +- Proper resource cleanup and connection management + +### Operational Security + +- Minimal container image based on scratch +- No unnecessary dependencies or services +- Clear separation of concerns between components + +## Known Security Considerations + +### TLS Mode 0 Usage + +- Only use in completely trusted networks +- Not recommended for internet-facing deployments +- Provides maximum performance at the cost of encryption + +### Master API Security + +- Secure the API endpoint with proper authentication +- Use reverse proxy for additional security layers +- Monitor API 
access and implement rate limiting + +## Security Updates + +Security updates are released as: + +- **Patch releases** for critical vulnerabilities +- **Minor releases** for security enhancements +- **Documentation updates** for security best practices + +Subscribe to our release notifications: +- [GitHub Releases](https://github.com/yosebyte/nodepass/releases) +- [Telegram Channel](https://t.me/NodePassChannel) + +## Ecosystem Security + +### NodePassProject Libraries + +Our core dependencies are maintained by the NodePassProject organization: + +- **cert**: Certificate generation and management +- **conn**: Secure connection handling +- **logs**: Secure logging with sensitive data protection +- **pool**: Connection pool management with resource limits + +### Third-Party Dependencies + +- We minimize external dependencies +- All dependencies are regularly audited for security issues +- Updates are applied promptly when security issues are discovered + +## Contact Information + +For security-related questions or concerns: + +- **Security Team**: team@mail.nodepass.eu +- **General Issues**: [GitHub Issues](https://github.com/yosebyte/nodepass/issues) +- **Community**: [Telegram Group](https://t.me/NodePassGroup) + +## Attribution + +We appreciate security researchers who help improve NodePass security. Contributors to our security will be acknowledged in: + +- Security advisories +- Release notes +- Our contributors list + +--- + +**Note**: This security policy applies to the NodePass core project. For security issues in ecosystem projects (NodePassDash, NodePanel, etc.), please refer to their respective repositories in the [NodePassProject](https://github.com/NodePassProject) organization. 
diff --git a/nodepass/cmd/nodepass/core.go b/nodepass/cmd/nodepass/core.go new file mode 100644 index 0000000000..2155e18112 --- /dev/null +++ b/nodepass/cmd/nodepass/core.go @@ -0,0 +1,101 @@ +package main + +import ( + "crypto/tls" + "net/url" + "time" + + "github.com/NodePassProject/cert" + "github.com/yosebyte/nodepass/internal" +) + +// coreDispatch 根据URL方案分派到不同的运行模式 +func coreDispatch(parsedURL *url.URL) { + var core interface{ Run() } + + switch scheme := parsedURL.Scheme; scheme { + case "server", "master": + tlsCode, tlsConfig := getTLSProtocol(parsedURL) + if scheme == "server" { + core = internal.NewServer(parsedURL, tlsCode, tlsConfig, logger) + } else { + core = internal.NewMaster(parsedURL, tlsCode, tlsConfig, logger, version) + } + case "client": + core = internal.NewClient(parsedURL, logger) + default: + logger.Error("Unknown core: %v", scheme) + getExitInfo() + } + + core.Run() +} + +// getTLSProtocol 获取TLS配置 +func getTLSProtocol(parsedURL *url.URL) (string, *tls.Config) { + // 生成基本TLS配置 + tlsConfig, err := cert.NewTLSConfig("yosebyte/nodepass:" + version) + if err != nil { + logger.Error("Generate failed: %v", err) + logger.Warn("TLS code-0: nil cert") + return "0", nil + } + + tlsConfig.MinVersion = tls.VersionTLS13 + tlsCode := parsedURL.Query().Get("tls") + + switch tlsCode { + case "0": + // 不使用加密 + logger.Info("TLS code-0: unencrypted") + return tlsCode, nil + + case "1": + // 使用内存中的证书 + logger.Info("TLS code-1: RAM cert with TLS 1.3") + return tlsCode, tlsConfig + + case "2": + // 使用自定义证书 + crtFile, keyFile := parsedURL.Query().Get("crt"), parsedURL.Query().Get("key") + cert, err := tls.LoadX509KeyPair(crtFile, keyFile) + if err != nil { + logger.Error("Cert load failed: %v", err) + logger.Warn("TLS code-1: RAM cert with TLS 1.3") + return "1", tlsConfig + } + + // 缓存证书并设置自动重载 + cachedCert := cert + lastReload := time.Now() + tlsConfig = &tls.Config{ + MinVersion: tls.VersionTLS13, + GetCertificate: func(clientHello *tls.ClientHelloInfo) 
(*tls.Certificate, error) { + // 定期重载证书 + if time.Since(lastReload) >= internal.ReloadInterval { + newCert, err := tls.LoadX509KeyPair(crtFile, keyFile) + if err != nil { + logger.Error("Cert reload failed: %v", err) + } else { + logger.Debug("TLS cert reloaded: %v", crtFile) + cachedCert = newCert + } + lastReload = time.Now() + } + return &cachedCert, nil + }, + } + + if cert.Leaf != nil { + logger.Info("TLS code-2: %v with TLS 1.3", cert.Leaf.Subject.CommonName) + } else { + logger.Warn("TLS code-2: unknown cert name with TLS 1.3") + } + return tlsCode, tlsConfig + + default: + // 默认不使用加密 + logger.Warn("TLS code-0: unencrypted") + return "0", nil + } +} diff --git a/nodepass/cmd/nodepass/main.go b/nodepass/cmd/nodepass/main.go new file mode 100644 index 0000000000..01488f4ff5 --- /dev/null +++ b/nodepass/cmd/nodepass/main.go @@ -0,0 +1,91 @@ +package main + +import ( + "net/url" + "os" + "runtime" + + "github.com/NodePassProject/logs" +) + +var ( + // 全局日志记录器 + logger = logs.NewLogger(logs.Info, true) + // 程序版本 + version = "dev" +) + +// main 程序入口 +func main() { + parsedURL := getParsedURL(os.Args) + initLogLevel(parsedURL.Query().Get("log")) + coreDispatch(parsedURL) +} + +// getParsedURL 解析URL参数 +func getParsedURL(args []string) *url.URL { + if len(args) < 2 { + getExitInfo() + } + + parsedURL, err := url.Parse(args[1]) + if err != nil { + logger.Error("URL parse: %v", err) + getExitInfo() + } + + return parsedURL +} + +// initLogLevel 初始化日志级别 +func initLogLevel(level string) { + switch level { + case "debug": + logger.SetLogLevel(logs.Debug) + logger.Debug("Init log level: DEBUG") + case "warn": + logger.SetLogLevel(logs.Warn) + logger.Warn("Init log level: WARN") + case "error": + logger.SetLogLevel(logs.Error) + logger.Error("Init log level: ERROR") + default: + logger.SetLogLevel(logs.Info) + logger.Info("Init log level: INFO") + } +} + +// getExitInfo 输出帮助信息并退出程序 +func getExitInfo() { + logger.SetLogLevel(logs.Info) + logger.Info(`Version: %v %v/%v + 
+╭─────────────────────────────────────────────────────────╮
+│ ░░█▀█░█▀█░░▀█░█▀▀░█▀█░█▀█░█▀▀░█▀▀░░ │
+│ ░░█░█░█░█░█▀█░█▀▀░█▀▀░█▀█░▀▀█░▀▀█░░ │
+│ ░░▀░▀░▀▀▀░▀▀▀░▀▀▀░▀░░░▀░▀░▀▀▀░▀▀▀░░ │
+├─────────────────────────────────────────────────────────┤
+│ >Universal TCP/UDP Tunneling Solution │
+│ >https://github.com/yosebyte/nodepass │
+├─────────────────────────────────────────────────────────┤
+│ Usage: nodepass "" │
+├─────────────────────────────────────────────────────────┤
+│ server://password@tunnel/target?log=X&tls=X&crt=X&key=X │
+│ client://password@tunnel/target?log=X&min=X&max=X │
+│ master://host:port/prefix?log=X&tls=X&crt=X&key=X │
+├──────────┬─────────────────────────┬────────────────────┤
+│ Keys │ Values │ Description │
+├──────────┼─────────────────────────┼────────────────────┤
+│ tunnel │ host:port (IP | domain) │ Tunnel address │
+│ target │ host:port (IP | domain) │ Target address │
+│ log │ debug | warn | error │ Default level info │
+│ tls │ 0 off | 1 on | 2 verify │ Default TLS code-0 │
+│ crt │ │ Custom certificate │
+│ key │ │ Custom private key │
+│ min │ │ Min pool capacity │
+│ max │ │ Max pool capacity │
+│ prefix │ │ Master API prefix │
+╰──────────┴─────────────────────────┴────────────────────╯
+`, version, runtime.GOOS, runtime.GOARCH) + os.Exit(1) +} diff --git a/nodepass/docs/en/api.md b/nodepass/docs/en/api.md new file mode 100644 index 0000000000..8852b8957b --- /dev/null +++ b/nodepass/docs/en/api.md @@ -0,0 +1,1127 @@ +# NodePass API Reference + +## Overview + +NodePass offers a RESTful API in Master Mode that enables programmatic control and integration with frontend applications. This section provides comprehensive documentation of the API endpoints, integration patterns, and best practices. 
+ +## Master Mode API + +When running NodePass in Master Mode (`master://`), it exposes a REST API that allows frontend applications to: + +1. Create and manage NodePass server and client instances +2. Monitor connection status and statistics +3. Control running instances (start, stop, restart) +4. Configure auto-start policies for automatic instance management +5. Configure behavior through parameters + +### Base URL + +``` +master:///?& +``` + +Where: +- `` is the address specified in the master mode URL (e.g., `0.0.0.0:9090`) +- `` is the optional API prefix (if not specified, `/api` will be used as the prefix) + +### Starting Master Mode + +To start NodePass in Master Mode with default settings: + +```bash +nodepass "master://0.0.0.0:9090?log=info" +``` + +With custom API prefix and TLS enabled: + +```bash +nodepass "master://0.0.0.0:9090/admin?log=info&tls=1" +``` + +### Available Endpoints + +| Endpoint | Method | Description | +|----------|--------|-------------| +| `/instances` | GET | List all NodePass instances | +| `/instances` | POST | Create a new NodePass instance | +| `/instances/{id}` | GET | Get details about a specific instance | +| `/instances/{id}` | PATCH | Update instance state or control operations | +| `/instances/{id}` | PUT | Update instance URL configuration | +| `/instances/{id}` | DELETE | Remove a specific instance | +| `/events` | GET | Subscribe to instance events using SSE | +| `/info` | GET | Get master service information | +| `/openapi.json` | GET | OpenAPI specification | +| `/docs` | GET | Swagger UI documentation | + +### API Authentication + +The Master API now supports API Key authentication to prevent unauthorized access. The system automatically generates an API Key on first startup. + +#### API Key Features + +1. **Automatic Generation**: Created automatically when master mode is first started +2. **Persistent Storage**: The API Key is saved along with other instance configurations in the `nodepass.gob` file +3. 
**Retention After Restart**: The API Key remains the same after restarting the master +4. **Selective Protection**: Only critical API endpoints are protected, public documentation remains accessible + +#### Protected Endpoints + +The following endpoints require API Key authentication: +- `/instances` (all methods) +- `/instances/{id}` (all methods: GET, PATCH, PUT, DELETE) +- `/events` +- `/info` + +The following endpoints are publicly accessible (no API Key required): +- `/openapi.json` +- `/docs` + +#### How to Use the API Key + +Include the API Key in your API requests: + +```javascript +// Using an API Key for instance management requests +async function getInstances() { + const response = await fetch(`${API_URL}/instances`, { + method: 'GET', + headers: { + 'X-API-Key': 'your-api-key-here' + } + }); + + return await response.json(); +} +``` + +#### How to Get and Regenerate API Key + +The API Key can be found in the system startup logs, and can be regenerated using: + +```javascript +// Regenerate the API Key (requires knowing the current API Key) +async function regenerateApiKey() { + const response = await fetch(`${API_URL}/instances/${apiKeyID}`, { + method: 'PATCH', + headers: { + 'Content-Type': 'application/json', + 'X-API-Key': 'current-api-key' + }, + body: JSON.stringify({ action: 'restart' }) + }); + + const result = await response.json(); + return result.url; // The new API Key +} +``` + +**Note**: The API Key ID is fixed as `********` (eight asterisks). In the internal implementation, this is a special instance ID used to store and manage the API Key. + +## Frontend Integration Guidelines + +When integrating NodePass with frontend applications, consider the following important points: + +### Instance Persistence + +NodePass Master Mode now supports instance persistence using the gob serialization format. 
Instances and their states are saved to a `nodepass.gob` file in the same directory as the executable, and automatically restored when the master restarts. + +Key persistence features: +- Instance configurations are automatically saved to disk +- Instance state (running/stopped) is preserved +- Auto-start policies are preserved across master restarts +- Traffic statistics are retained between restarts +- Instances with auto-start policy enabled will automatically start when master restarts +- No need for manual re-registration after restart + +**Note:** While instance configurations are now persisted, frontend applications should still maintain their own record of instance configurations as a backup strategy. + +### Instance ID Persistence + +With NodePass now using gob format for persistent storage of instance state, instance IDs **no longer change** after a master restart. This means: + +1. Frontend applications can safely use instance IDs as unique identifiers +2. Instance configurations, states, and statistics are automatically restored after restart +3. No need to implement logic for handling instance ID changes + +This greatly simplifies frontend integration by eliminating the previous complexity of handling instance recreation and ID mapping. + +### Auto-start Policy Management + +NodePass now supports configurable auto-start policies for instances, allowing for automatic instance management and improved reliability. The auto-start policy feature enables: + +1. **Automatic Instance Recovery**: Instances with auto-start policy enabled will automatically start when the master service restarts +2. **Selective Auto-start**: Configure which instances should auto-start based on their importance or role +3. **Persistent Policy Storage**: Auto-start policies are saved and restored across master restarts +4. 
**Fine-grained Control**: Each instance can have its own auto-start policy setting + +#### How Auto-start Policy Works + +- **Policy Assignment**: Each instance has a `restart` boolean field that determines its auto-start behavior +- **Master Startup**: When the master starts, it automatically launches all instances with `restart: true` +- **Policy Persistence**: Auto-start policies are saved in the same `nodepass.gob` file as other instance data +- **Runtime Management**: Auto-start policies can be modified while instances are running + +#### Best Practices for Auto-start Policy + +1. **Enable for Server Instances**: Server instances typically should have auto-start policy enabled for high availability +2. **Selective Client Auto-start**: Enable auto-start policy for critical client connections only +3. **Testing Scenarios**: Disable auto-start policy for temporary or testing instances +4. **Load Balancing**: Use auto-start policies to maintain minimum instance counts for load distribution + +```javascript +// Example: Configure auto-start policies based on instance role +async function configureAutoStartPolicies(instances) { + for (const instance of instances) { + // Enable auto-start for servers and critical clients + const shouldAutoStart = instance.type === 'server' || + instance.tags?.includes('critical'); + + await setAutoStartPolicy(instance.id, shouldAutoStart); + } +} +``` + +### Instance Lifecycle Management + +For proper lifecycle management: + +1. 
**Creation**: Store instance configurations and URLs + ```javascript + async function createNodePassInstance(config) { + const response = await fetch(`${API_URL}/instances`, { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ + url: `server://0.0.0.0:${config.port}/${config.target}?tls=${config.tls}` + }) + }); + + const data = await response.json(); + + // Configure auto-start policy for new instance based on type + if (data.success) { + const shouldAutoStart = config.type === 'server' || config.critical === true; + await setAutoStartPolicy(data.data.id, shouldAutoStart); + } + + // Store in frontend persistence + saveInstanceConfig({ + id: data.data.id, + originalConfig: config, + url: data.data.url + }); + + return data; + } + ``` + +2. **Status Monitoring**: Monitor instance state changes + + NodePass provides two methods for monitoring instance status: + + A. **Using SSE (Recommended)**: Receive real-time events via persistent connection + ```javascript + function connectToEventSource() { + const eventSource = new EventSource(`${API_URL}/events`, { + // If authentication is needed, native EventSource doesn't support custom headers + // Need to use fetch API to implement a custom SSE client + }); + + // If using API Key, use custom implementation instead of native EventSource + // Example using native EventSource (for non-protected endpoints) + eventSource.addEventListener('instance', (event) => { + const data = JSON.parse(event.data); + + switch (data.type) { + case 'initial': + console.log('Initial instance state:', data.instance); + updateInstanceUI(data.instance); + break; + case 'create': + console.log('Instance created:', data.instance); + addInstanceToUI(data.instance); + break; + case 'update': + console.log('Instance updated:', data.instance); + updateInstanceUI(data.instance); + break; + case 'delete': + console.log('Instance deleted:', data.instance); + removeInstanceFromUI(data.instance.id); + break; + case 
'log': + console.log(`Instance ${data.instance.id} log:`, data.logs); + appendLogToInstanceUI(data.instance.id, data.logs); + break; + case 'shutdown': + console.log('Master service is shutting down'); + // Close the event source and show notification + eventSource.close(); + showShutdownNotification(); + break; + } + }); + + eventSource.addEventListener('error', (error) => { + console.error('SSE connection error:', error); + // Attempt to reconnect after a delay + setTimeout(() => { + eventSource.close(); + connectToEventSource(); + }, 5000); + }); + + return eventSource; + } + + // Example of creating SSE connection with API Key + function connectToEventSourceWithApiKey(apiKey) { + // Native EventSource doesn't support custom headers, need to use fetch API + fetch(`${API_URL}/events`, { + method: 'GET', + headers: { + 'X-API-Key': apiKey, + 'Cache-Control': 'no-cache' + } + }).then(response => { + if (!response.ok) { + throw new Error(`HTTP error: ${response.status}`); + } + + const reader = response.body.getReader(); + const decoder = new TextDecoder(); + let buffer = ''; + + function processStream() { + reader.read().then(({ value, done }) => { + if (done) { + console.log('Connection closed'); + // Try to reconnect + setTimeout(() => connectToEventSourceWithApiKey(apiKey), 5000); + return; + } + + buffer += decoder.decode(value, { stream: true }); + + const lines = buffer.split('\n\n'); + buffer = lines.pop() || ''; + + for (const line of lines) { + if (line.trim() === '') continue; + + const eventMatch = line.match(/^event: (.+)$/m); + const dataMatch = line.match(/^data: (.+)$/m); + + if (eventMatch && dataMatch) { + const data = JSON.parse(dataMatch[1]); + // Process events - see switch code above + } + } + + processStream(); + }).catch(error => { + console.error('Read error:', error); + // Try to reconnect + setTimeout(() => connectToEventSourceWithApiKey(apiKey), 5000); + }); + } + + processStream(); + }).catch(error => { + console.error('Connection 
error:', error); + // Try to reconnect + setTimeout(() => connectToEventSourceWithApiKey(apiKey), 5000); + }); + } + ``` + + B. **Traditional Polling (Alternative)**: Use in environments where SSE is not supported + ```javascript + function startInstanceMonitoring(instanceId, interval = 5000) { + return setInterval(async () => { + try { + const response = await fetch(`${API_URL}/instances/${instanceId}`); + const data = await response.json(); + + if (data.success) { + updateInstanceStatus(instanceId, data.data.status); + updateInstanceMetrics(instanceId, { + connections: data.data.connections, + pool_size: data.data.pool_size, + uptime: data.data.uptime + }); + } + } catch (error) { + markInstanceUnreachable(instanceId); + } + }, interval); + } + ``` + + **Recommendation:** Prefer the SSE approach as it provides more efficient real-time monitoring and reduces server load. Only use the polling approach for client environments with specific compatibility needs or where SSE is not supported. + +3. **Instance Alias Management**: Set readable names for instances + ```javascript + // Batch set instance aliases + async function setInstanceAliases(instances) { + for (const instance of instances) { + // Generate meaningful aliases based on instance type and purpose + const alias = `${instance.type}-${instance.region || 'default'}-${instance.port || 'auto'}`; + await updateInstanceAlias(instance.id, alias); + } + } + + // Find instance by alias + async function findInstanceByAlias(targetAlias) { + const response = await fetch(`${API_URL}/instances`, { + headers: { 'X-API-Key': apiKey } + }); + const data = await response.json(); + + if (data.success) { + return data.data.find(instance => instance.alias === targetAlias); + } + return null; + } + ``` + +4. 
**Control Operations**: Start, stop, restart instances + ```javascript + async function controlInstance(instanceId, action) { + // action can be: start, stop, restart + const response = await fetch(`${API_URL}/instances/${instanceId}`, { + method: 'PATCH', // Note: API has been updated to use PATCH instead of PUT + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ action }) + }); + + const data = await response.json(); + return data.success; + } + + // Update instance alias + async function updateInstanceAlias(instanceId, alias) { + const response = await fetch(`${API_URL}/instances/${instanceId}`, { + method: 'PATCH', + headers: { + 'Content-Type': 'application/json', + 'X-API-Key': apiKey // If API Key is enabled + }, + body: JSON.stringify({ alias }) + }); + + const data = await response.json(); + return data.success; + } + + // Update instance URL configuration + async function updateInstanceURL(instanceId, newURL) { + const response = await fetch(`${API_URL}/instances/${instanceId}`, { + method: 'PUT', + headers: { + 'Content-Type': 'application/json', + 'X-API-Key': apiKey // If API Key is enabled + }, + body: JSON.stringify({ url: newURL }) + }); + + const data = await response.json(); + return data.success; + } + ``` + +5. 
**Auto-start Policy Management**: Configure automatic startup behavior + ```javascript + async function setAutoStartPolicy(instanceId, enableAutoStart) { + const response = await fetch(`${API_URL}/instances/${instanceId}`, { + method: 'PATCH', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ restart: enableAutoStart }) + }); + + const data = await response.json(); + return data.success; + } + + // Combined operation: control instance and update auto-start policy + async function controlInstanceWithAutoStart(instanceId, action, enableAutoStart) { + const response = await fetch(`${API_URL}/instances/${instanceId}`, { + method: 'PATCH', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ + action: action, + restart: enableAutoStart + }) + }); + + const data = await response.json(); + return data.success; + } + + // Combined operation: update alias, control instance and auto-start policy + async function updateInstanceComplete(instanceId, alias, action, enableAutoStart) { + const response = await fetch(`${API_URL}/instances/${instanceId}`, { + method: 'PATCH', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ + alias: alias, + action: action, + restart: enableAutoStart + }) + }); + + const data = await response.json(); + return data.success; + } + ``` + +#### Complete Auto-start Policy Usage Example + +Here's a comprehensive example showing how to implement auto-start policy management in a real-world scenario: + +```javascript +// Scenario: Setting up a load-balanced server cluster with auto-start policies +async function setupServerCluster(serverConfigs) { + const clusterInstances = []; + + for (const config of serverConfigs) { + try { + // Create server instance + const instance = await createNodePassInstance({ + type: 'server', + port: config.port, + target: config.target, + critical: config.isPrimary, // Primary servers are critical + tls: config.enableTLS + }); + + if (instance.success) 
{ + // Set meaningful instance alias + const alias = `${config.role}-server-${config.port}`; + await updateInstanceAlias(instance.data.id, alias); + + // Configure auto-start policy based on server role + const autoStartPolicy = config.isPrimary || config.role === 'essential'; + await setAutoStartPolicy(instance.data.id, autoStartPolicy); + + // Start the instance + await controlInstance(instance.data.id, 'start'); + + clusterInstances.push({ + id: instance.data.id, + alias: alias, + role: config.role, + autoStartEnabled: autoStartPolicy + }); + + console.log(`Server ${alias} created with auto-start policy: ${autoStartPolicy}`); + } + } catch (error) { + console.error(`Failed to create server ${config.role}:`, error); + } + } + + return clusterInstances; +} + +// Monitor cluster health and adjust auto-start policies dynamically +async function monitorClusterHealth(clusterInstances) { + const healthyInstances = []; + + for (const cluster of clusterInstances) { + const instance = await fetch(`${API_URL}/instances/${cluster.id}`); + const data = await instance.json(); + + if (data.success && data.data.status === 'running') { + healthyInstances.push(cluster); + } else { + // If a critical instance is down, enable auto-start for backup instances + if (cluster.role === 'primary') { + await enableBackupInstanceAutoStart(clusterInstances); + } + } + } + + return healthyInstances; +} + +async function enableBackupInstanceAutoStart(clusterInstances) { + const backupInstances = clusterInstances.filter(c => c.role === 'backup'); + for (const backup of backupInstances) { + await setAutoStartPolicy(backup.id, true); + console.log(`Enabled auto-start policy for backup instance: ${backup.id}`); + } +} +``` + +### Real-time Event Monitoring with SSE + +NodePass now supports Server-Sent Events (SSE) for real-time monitoring of instance state changes. This allows frontend applications to receive instant notifications about instance creation, updates, and deletions without polling. 
+ +#### Using the SSE Endpoint + +The SSE endpoint is available at: +``` +GET /events +``` + +This endpoint establishes a persistent connection that delivers events in real-time using the SSE protocol format. + +#### Event Types + +The following event types are supported: + +1. `initial` - Sent when a connection is established, containing the current state of all instances +2. `create` - Sent when a new instance is created +3. `update` - Sent when an instance is updated (status change, start/stop operations) +4. `delete` - Sent when an instance is deleted +5. `shutdown` - Sent when the master service is about to shut down, notifying frontend applications to close their connections +6. `log` - Sent when an instance produces new log content, including the log text + +#### Handling Instance Logs + +The new `log` event type allows for real-time reception and display of instance log output. This is useful for monitoring and debugging: + +```javascript +// Handle log events +function appendLogToInstanceUI(instanceId, logText) { + // Find or create log container + let logContainer = document.getElementById(`logs-${instanceId}`); + if (!logContainer) { + logContainer = document.createElement('div'); + logContainer.id = `logs-${instanceId}`; + document.getElementById('instance-container').appendChild(logContainer); + } + + // Create new log entry + const logEntry = document.createElement('div'); + logEntry.className = 'log-entry'; + + // Can parse ANSI color codes or format logs here + logEntry.textContent = logText; + + // Add to container + logContainer.appendChild(logEntry); + + // Scroll to latest log + logContainer.scrollTop = logContainer.scrollHeight; +} +``` + +When implementing log handling, consider the following best practices: + +1. **Buffer Management**: Limit the number of log entries to prevent memory issues +2. **ANSI Color Parsing**: Parse ANSI color codes in logs for better readability +3. 
**Filtering Options**: Provide options to filter logs by severity or content +4. **Search Functionality**: Allow users to search within instance logs +5. **Log Persistence**: Optionally save logs to local storage for review after page refresh + +#### JavaScript Client Implementation + +Here's an example of how to consume the SSE endpoint in a JavaScript frontend: + +```javascript +function connectToEventSource() { + const eventSource = new EventSource(`${API_URL}/events`, { + // If authentication is needed, native EventSource doesn't support custom headers + // Need to use fetch API to implement a custom SSE client + }); + + // If using API Key, use custom implementation instead of native EventSource + // Example using native EventSource (for non-protected endpoints) + eventSource.addEventListener('instance', (event) => { + const data = JSON.parse(event.data); + + switch (data.type) { + case 'initial': + console.log('Initial instance state:', data.instance); + updateInstanceUI(data.instance); + break; + case 'create': + console.log('Instance created:', data.instance); + addInstanceToUI(data.instance); + break; + case 'update': + console.log('Instance updated:', data.instance); + updateInstanceUI(data.instance); + break; + case 'delete': + console.log('Instance deleted:', data.instance); + removeInstanceFromUI(data.instance.id); + break; + case 'log': + console.log(`Instance ${data.instance.id} log:`, data.logs); + appendLogToInstanceUI(data.instance.id, data.logs); + break; + case 'shutdown': + console.log('Master service is shutting down'); + // Close the event source and show notification + eventSource.close(); + showShutdownNotification(); + break; + } + }); + + eventSource.addEventListener('error', (error) => { + console.error('SSE connection error:', error); + // Attempt to reconnect after a delay + setTimeout(() => { + eventSource.close(); + connectToEventSource(); + }, 5000); + }); + + return eventSource; +} + +// Example of creating SSE connection with API 
Key +function connectToEventSourceWithApiKey(apiKey) { + // Native EventSource doesn't support custom headers, need to use fetch API + fetch(`${API_URL}/events`, { + method: 'GET', + headers: { + 'X-API-Key': apiKey, + 'Cache-Control': 'no-cache' + } + }).then(response => { + if (!response.ok) { + throw new Error(`HTTP error: ${response.status}`); + } + + const reader = response.body.getReader(); + const decoder = new TextDecoder(); + let buffer = ''; + + function processStream() { + reader.read().then(({ value, done }) => { + if (done) { + console.log('Connection closed'); + // Try to reconnect + setTimeout(() => connectToEventSourceWithApiKey(apiKey), 5000); + return; + } + + buffer += decoder.decode(value, { stream: true }); + + const lines = buffer.split('\n\n'); + buffer = lines.pop() || ''; + + for (const line of lines) { + if (line.trim() === '') continue; + + const eventMatch = line.match(/^event: (.+)$/m); + const dataMatch = line.match(/^data: (.+)$/m); + + if (eventMatch && dataMatch) { + const data = JSON.parse(dataMatch[1]); + // Process events - see switch code above + } + } + + processStream(); + }).catch(error => { + console.error('Read error:', error); + // Try to reconnect + setTimeout(() => connectToEventSourceWithApiKey(apiKey), 5000); + }); + } + + processStream(); + }).catch(error => { + console.error('Connection error:', error); + // Try to reconnect + setTimeout(() => connectToEventSourceWithApiKey(apiKey), 5000); + }); +} +``` + +#### Benefits of SSE over Polling + +Using SSE for instance monitoring offers several advantages over traditional polling: + +1. **Reduced Latency**: Changes are delivered in real-time +2. **Reduced Server Load**: Eliminates unnecessary polling requests +3. **Bandwidth Efficiency**: Only sends data when changes occur +4. **Native Browser Support**: Built-in browser support without additional libraries +5. 
**Automatic Reconnection**: Browsers automatically reconnect if the connection is lost + +#### Best Practices for SSE Implementation + +When implementing SSE in your frontend: + +1. **Handle Reconnection**: While browsers attempt to reconnect automatically, implement custom logic for persistent connections +2. **Process Events Efficiently**: Keep event processing fast to avoid UI blocking +3. **Implement Fallback**: For environments where SSE is not supported, implement a polling fallback +4. **Handle Errors**: Properly handle connection errors and disconnects + +### Traffic Statistics + +The Master API provides traffic statistics, but there are important requirements to note: + +1. **Enable Debug Mode**: Traffic statistics are only available when debug mode is enabled. + + ```bash + # Master with debug mode enabled + nodepass master://0.0.0.0:10101?log=debug + ``` + + Without enabling debug mode, traffic statistics will not be collected or returned by the API. + +2. **Basic Traffic Metrics**: NodePass periodically provides cumulative TCP and UDP traffic values in both inbound and outbound directions. The frontend application needs to store and process these values to derive meaningful statistics. 
+ ```javascript + function processTrafficStats(instanceId, currentStats) { + // Store the current timestamp + const timestamp = Date.now(); + + // If we have previous stats for this instance, calculate the difference + if (previousStats[instanceId]) { + const timeDiff = timestamp - previousStats[instanceId].timestamp; + const tcpInDiff = currentStats.tcp_in - previousStats[instanceId].tcp_in; + const tcpOutDiff = currentStats.tcp_out - previousStats[instanceId].tcp_out; + const udpInDiff = currentStats.udp_in - previousStats[instanceId].udp_in; + const udpOutDiff = currentStats.udp_out - previousStats[instanceId].udp_out; + + // Store historical data for graphs + storeTrafficHistory(instanceId, { + timestamp, + tcp_in_rate: tcpInDiff / timeDiff * 1000, // bytes per second + tcp_out_rate: tcpOutDiff / timeDiff * 1000, + udp_in_rate: udpInDiff / timeDiff * 1000, + udp_out_rate: udpOutDiff / timeDiff * 1000 + }); + } + + // Update the previous stats for next calculation + previousStats[instanceId] = { + timestamp, + tcp_in: currentStats.tcp_in, + tcp_out: currentStats.tcp_out, + udp_in: currentStats.udp_in, + udp_out: currentStats.udp_out + }; + } + ``` + +3. 
**Data Persistence**: Since the API only provides cumulative values, the frontend must implement proper storage and calculation logic + ```javascript + // Example of frontend storage structure for traffic history + const trafficHistory = {}; + + function storeTrafficHistory(instanceId, metrics) { + if (!trafficHistory[instanceId]) { + trafficHistory[instanceId] = { + timestamps: [], + tcp_in_rates: [], + tcp_out_rates: [], + udp_in_rates: [], + udp_out_rates: [] + }; + } + + trafficHistory[instanceId].timestamps.push(metrics.timestamp); + trafficHistory[instanceId].tcp_in_rates.push(metrics.tcp_in_rate); + trafficHistory[instanceId].tcp_out_rates.push(metrics.tcp_out_rate); + trafficHistory[instanceId].udp_in_rates.push(metrics.udp_in_rate); + trafficHistory[instanceId].udp_out_rates.push(metrics.udp_out_rate); + + // Keep history size manageable + const MAX_HISTORY = 1000; + if (trafficHistory[instanceId].timestamps.length > MAX_HISTORY) { + trafficHistory[instanceId].timestamps.shift(); + trafficHistory[instanceId].tcp_in_rates.shift(); + trafficHistory[instanceId].tcp_out_rates.shift(); + trafficHistory[instanceId].udp_in_rates.shift(); + trafficHistory[instanceId].udp_out_rates.shift(); + } + } + ``` + +## Instance Data Structure + +Instance objects in API responses contain the following fields: + +```json +{ + "id": "a1b2c3d4", // Unique instance identifier + "alias": "web-server-01", // Instance alias (optional, for friendly display names) + "type": "server", // Instance type: server or client + "status": "running", // Instance status: running, stopped, or error + "url": "server://...", // Instance configuration URL + "restart": true, // Auto-start policy + "tcprx": 1024, // TCP bytes received + "tcptx": 2048, // TCP bytes transmitted + "udprx": 512, // UDP bytes received + "udptx": 256 // UDP bytes transmitted +} +``` + +**Notes:** +- `alias` field is optional and will be an empty string if not set +- Traffic statistics fields (tcprx, tcptx, udprx, udptx) 
are only valid when debug mode is enabled +- `restart` field controls the instance's auto-start behavior + +## System Information Endpoint + +The `/info` endpoint provides system information about the NodePass Master service. This endpoint is useful for monitoring, troubleshooting, and verifying system status. + +### Request + +``` +GET /info +``` + +API Key Authentication Required: Yes + +### Response + +The response contains the following system information fields: + +```json +{ + "os": "linux", // Operating system type + "arch": "amd64", // System architecture + "ver": "1.2.0", // NodePass version + "name": "example.com", // Tunnel hostname + "uptime": 11525, // API uptime in seconds + "log": "info", // Log level + "tls": "1", // TLS status + "crt": "/path/to/cert", // Certificate path + "key": "/path/to/key" // Key path +} +``` + +### Usage Example + +```javascript +// Get system information +async function getSystemInfo() { + const response = await fetch(`${API_URL}/info`, { + method: 'GET', + headers: { + 'X-API-Key': apiKey + } + }); + + return await response.json(); +} + +// Display service uptime +function displayServiceUptime() { + getSystemInfo().then(info => { + console.log(`Service uptime: ${info.uptime} seconds`); + // You can also format it for better readability + const hours = Math.floor(info.uptime / 3600); + const minutes = Math.floor((info.uptime % 3600) / 60); + const seconds = info.uptime % 60; + console.log(`Service uptime: ${hours}h ${minutes}m ${seconds}s`); + }); +} +``` + +### Monitoring Best Practices + +- **Regular Polling**: Periodically poll this endpoint to ensure service is running +- **Version Verification**: Check version number after deploying updates +- **Uptime Monitoring**: Monitor uptime to detect unexpected restarts +- **Log Level Verification**: Confirm that the current log level matches expectations + +## API Endpoint Documentation + +For detailed API documentation including request and response examples, please use the 
built-in Swagger UI documentation available at the `/docs` endpoint. This interactive documentation provides comprehensive information about: + +- Available endpoints +- Required parameters +- Response formats +- Example requests and responses +- Schema definitions + +### Accessing Swagger UI + +To access the Swagger UI documentation: + +``` +http(s)://[]/docs +``` + +For example: +``` +http://localhost:9090/api/docs +``` + +The Swagger UI provides a convenient way to explore and test the API directly in your browser. You can execute API calls against your running NodePass Master instance and see the actual responses. + +## Complete API Reference + +### Instance Management Endpoints Details + +#### GET /instances +- **Description**: Get list of all instances +- **Authentication**: API Key required +- **Response**: Array of instance objects +- **Example**: +```javascript +const instances = await fetch(`${API_URL}/instances`, { + headers: { 'X-API-Key': apiKey } +}); +``` + +#### POST /instances +- **Description**: Create new instance +- **Authentication**: API Key required +- **Request Body**: `{ "url": "client:// or server:// format URL" }` +- **Response**: Newly created instance object +- **Example**: +```javascript +const newInstance = await fetch(`${API_URL}/instances`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + 'X-API-Key': apiKey + }, + body: JSON.stringify({ url: 'server://0.0.0.0:8080/localhost:3000' }) +}); +``` + +#### GET /instances/{id} +- **Description**: Get specific instance details +- **Authentication**: API Key required +- **Response**: Instance object +- **Example**: +```javascript +const instance = await fetch(`${API_URL}/instances/abc123`, { + headers: { 'X-API-Key': apiKey } +}); +``` + +#### PATCH /instances/{id} +- **Description**: Update instance state, alias, or perform control actions +- **Authentication**: API Key required +- **Request Body**: `{ "alias": "new alias", "action": "start|stop|restart|reset", 
"restart": true|false }` +- **Note**: Only specified fields are updated without interrupting running instances. `action: "reset"` will clear the traffic statistics (tcprx, tcptx, udprx, udptx) for the instance. +- **Example**: +```javascript +// Update alias and auto-start policy +await fetch(`${API_URL}/instances/abc123`, { + method: 'PATCH', + headers: { + 'Content-Type': 'application/json', + 'X-API-Key': apiKey + }, + body: JSON.stringify({ + alias: 'Web Server', + restart: true + }) +}); + +// Control instance operations +await fetch(`${API_URL}/instances/abc123`, { + method: 'PATCH', + headers: { + 'Content-Type': 'application/json', + 'X-API-Key': apiKey + }, + body: JSON.stringify({ action: 'restart' }) +}); + +// Clear traffic statistics +await fetch(`${API_URL}/instances/abc123`, { + method: 'PATCH', + headers: { + 'Content-Type': 'application/json', + 'X-API-Key': apiKey + }, + body: JSON.stringify({ action: 'reset' }) +}); +``` + +#### PUT /instances/{id} +- **Description**: Fully update the instance URL configuration +- **Authentication**: API Key required +- **Request Body**: `{ "url": "new client:// or server:// style URL" }` +- **Note**: The instance will be restarted. 
+- **Restriction**: API Key instance (ID `********`) does not support this operation +- **Example**: +```javascript +// Update instance URL +await fetch(`${API_URL}/instances/abc123`, { + method: 'PUT', + headers: { + 'Content-Type': 'application/json', + 'X-API-Key': apiKey + }, + body: JSON.stringify({ + url: 'server://0.0.0.0:9090/localhost:8080?tls=1' + }) +}); +``` + +#### DELETE /instances/{id} +- **Description**: Delete instance +- **Authentication**: API Key required +- **Response**: 204 No Content +- **Restrictions**: API Key instance (ID `********`) cannot be deleted +- **Example**: +```javascript +await fetch(`${API_URL}/instances/abc123`, { + method: 'DELETE', + headers: { 'X-API-Key': apiKey } +}); +``` + +### Other Endpoints + +#### GET /events +- **Description**: Establish SSE connection to receive real-time events +- **Authentication**: API Key required +- **Response**: Server-Sent Events stream +- **Event Types**: `initial`, `create`, `update`, `delete`, `shutdown`, `log` + +#### GET /info +- **Description**: Get master service information +- **Authentication**: API Key required +- **Response**: Contains system info, version, uptime, etc. + +#### GET /openapi.json +- **Description**: Get OpenAPI 3.1.1 specification +- **Authentication**: No authentication required +- **Response**: JSON formatted API specification + +#### GET /docs +- **Description**: Swagger UI documentation interface +- **Authentication**: No authentication required +- **Response**: HTML formatted interactive documentation + +### Instance URL Format Specification + +Instance URLs must follow these formats: + +#### Server Mode +``` +server://:/:? +``` + +Examples: +- `server://0.0.0.0:8080/localhost:3000` - Listen on port 8080, forward to local port 3000 +- `server://0.0.0.0:9090/localhost:8080?tls=1` - Server with TLS enabled + +#### Client Mode +``` +client://:/:? 
+``` + +Examples: +- `client://example.com:8080/localhost:3000` - Connect to remote server, listen locally on port 3000 +- `client://vpn.example.com:443/localhost:22?tls=1` - Connect to VPN server via TLS + +#### Supported Parameters + +| Parameter | Description | Values | Default | +|-----------|-------------|---------|---------| +| `tls` | TLS encryption level | `0`(none), `1`(self-signed), `2`(certificate) | `0` | +| `log` | Log level | `trace`, `debug`, `info`, `warn`, `error` | `info` | +| `crt` | Certificate path | File path | None | +| `key` | Private key path | File path | None | diff --git a/nodepass/docs/en/configuration.md b/nodepass/docs/en/configuration.md new file mode 100644 index 0000000000..91b0f93e2f --- /dev/null +++ b/nodepass/docs/en/configuration.md @@ -0,0 +1,226 @@ +# Configuration Options + +NodePass uses a minimalist approach to configuration, with all settings specified via command-line parameters and environment variables. This guide explains all available configuration options and provides recommendations for various deployment scenarios. 
+ +## Log Levels + +NodePass provides five log verbosity levels that control the amount of information displayed: + +- `debug`: Verbose debugging information - shows all operations and connections +- `info`: General operational information (default) - shows startup, shutdown, and key events +- `warn`: Warning conditions - only shows potential issues that don't affect core functionality +- `error`: Error conditions - shows only problems that affect functionality +- `event`: Event recording - shows important operational events and traffic statistics + +You can set the log level in the command URL: + +```bash +nodepass server://0.0.0.0:10101/0.0.0.0:8080?log=debug +``` + +## TLS Encryption Modes + +For server and master modes, NodePass offers three TLS security levels for data channels: + +- **Mode 0**: No TLS encryption (plain TCP/UDP) + - Fastest performance, no overhead + - No security for data channel (only use in trusted networks) + +- **Mode 1**: Self-signed certificate (automatically generated) + - Good security with minimal setup + - Certificate is automatically generated and not verified + - Protects against passive eavesdropping + +- **Mode 2**: Custom certificate (requires `crt` and `key` parameters) + - Highest security with certificate validation + - Requires providing certificate and key files + - Suitable for production environments + +Example with TLS Mode 1 (self-signed): +```bash +nodepass server://0.0.0.0:10101/0.0.0.0:8080?tls=1 +``` + +Example with TLS Mode 2 (custom certificate): +```bash +nodepass "server://0.0.0.0:10101/0.0.0.0:8080?tls=2&crt=/path/to/cert.pem&key=/path/to/key.pem" +``` + +## Connection Pool Capacity Parameters + +Connection pool capacity can be configured via URL query parameters: + +- `min`: Minimum connection pool capacity (default: 64) +- `max`: Maximum connection pool capacity (default: 8192) + +Example: +```bash +# Set minimum pool to 32 and maximum to 4096 +nodepass 
"client://server.example.com:10101/127.0.0.1:8080?min=32&max=4096" +``` + +## Environment Variables + +NodePass behavior can be fine-tuned using environment variables. Below is the complete list of available variables with their descriptions, default values, and recommended settings for different scenarios. + +| Variable | Description | Default | Example | +|----------|-------------|---------|---------| +| `NP_SEMAPHORE_LIMIT` | Maximum number of concurrent connections | 1024 | `export NP_SEMAPHORE_LIMIT=2048` | +| `NP_UDP_DATA_BUF_SIZE` | Buffer size for UDP packets | 8192 | `export NP_UDP_DATA_BUF_SIZE=16384` | +| `NP_UDP_READ_TIMEOUT` | Timeout for UDP read operations | 20s | `export NP_UDP_READ_TIMEOUT=30s` | +| `NP_UDP_DIAL_TIMEOUT` | Timeout for establishing UDP connections | 20s | `export NP_UDP_DIAL_TIMEOUT=30s` | +| `NP_TCP_READ_TIMEOUT` | Timeout for TCP read operations | 20s | `export NP_TCP_READ_TIMEOUT=30s` | +| `NP_TCP_DIAL_TIMEOUT` | Timeout for establishing TCP connections | 20s | `export NP_TCP_DIAL_TIMEOUT=30s` | +| `NP_MIN_POOL_INTERVAL` | Minimum interval between connection creations | 1s | `export NP_MIN_POOL_INTERVAL=500ms` | +| `NP_MAX_POOL_INTERVAL` | Maximum interval between connection creations | 5s | `export NP_MAX_POOL_INTERVAL=3s` | +| `NP_REPORT_INTERVAL` | Interval for health check reports | 5s | `export NP_REPORT_INTERVAL=10s` | +| `NP_SERVICE_COOLDOWN` | Cooldown period before restart attempts | 3s | `export NP_SERVICE_COOLDOWN=5s` | +| `NP_SHUTDOWN_TIMEOUT` | Timeout for graceful shutdown | 5s | `export NP_SHUTDOWN_TIMEOUT=10s` | +| `NP_RELOAD_INTERVAL` | Interval for cert/pool reload | 1h | `export NP_RELOAD_INTERVAL=30m` | + +### Connection Pool Tuning + +The connection pool parameters are important settings for performance tuning: + +#### Pool Capacity Settings + +- `min` (URL parameter): Ensures a minimum number of available connections + - Too low: Increased latency during traffic spikes as new connections must be established 
+ - Too high: Wasted resources maintaining idle connections + - Recommended starting point: 25-50% of your average concurrent connections + +- `max` (URL parameter): Prevents excessive resource consumption while handling peak loads + - Too low: Connection failures during traffic spikes + - Too high: Potential resource exhaustion affecting system stability + - Recommended starting point: 150-200% of your peak concurrent connections + +#### Pool Interval Settings + +- `NP_MIN_POOL_INTERVAL`: Controls the minimum time between connection creation attempts + - Too low: May overwhelm network with connection attempts + - Recommended range: 500ms-2s depending on network latency + +- `NP_MAX_POOL_INTERVAL`: Controls the maximum time between connection creation attempts + - Too high: May result in pool depletion during traffic spikes + - Recommended range: 3s-10s depending on expected traffic patterns + +#### Connection Management + +- `NP_SEMAPHORE_LIMIT`: Controls the maximum number of concurrent tunnel operations + - Too low: Rejected connections during traffic spikes + - Too high: Potential memory pressure from too many concurrent goroutines + - Recommended range: 1000-5000 for most applications, higher for high-throughput scenarios + +### UDP Settings + +For applications relying heavily on UDP traffic: + +- `NP_UDP_DATA_BUF_SIZE`: Buffer size for UDP packets + - Increase for applications sending large UDP packets + - Default (8192) works well for most cases + - Consider increasing to 16384 or higher for media streaming or game servers + +- `NP_UDP_READ_TIMEOUT`: Timeout for UDP read operations + - Increase for high-latency networks or applications with slow response times + - Decrease for low-latency applications requiring quick failover + +- `NP_UDP_DIAL_TIMEOUT`: Timeout for establishing UDP connections + - Increase for high-latency networks or applications with slow response times + - Decrease for low-latency applications requiring quick failover + +### TCP Settings 
+ +For optimizing TCP connections: + +- `NP_TCP_READ_TIMEOUT`: Timeout for TCP read operations + - Increase for high-latency networks or servers with slow response times + - Decrease for applications that need to detect disconnections quickly + - Affects wait time during data transfer phases + +- `NP_TCP_DIAL_TIMEOUT`: Timeout for establishing TCP connections + - Increase for unstable network conditions + - Decrease for applications that need quick connection success/failure determination + - Affects initial connection establishment phase + +### Service Management Settings + +- `NP_REPORT_INTERVAL`: Controls how frequently health status is reported + - Lower values provide more frequent updates but increase log volume + - Higher values reduce log output but provide less immediate visibility + +- `NP_RELOAD_INTERVAL`: Controls how frequently TLS certificates are checked for changes + - Lower values detect certificate changes faster but increase file system operations + - Higher values reduce overhead but delay detection of certificate updates + +- `NP_SERVICE_COOLDOWN`: Time to wait before attempting service restarts + - Lower values attempt recovery faster but might cause thrashing in case of persistent issues + - Higher values provide more stability but slower recovery from transient issues + +- `NP_SHUTDOWN_TIMEOUT`: Maximum time to wait for connections to close during shutdown + - Lower values ensure quicker shutdown but may interrupt active connections + - Higher values allow more time for connections to complete but delay shutdown + +## Configuration Profiles + +Here are some recommended environment variable configurations for common scenarios: + +### High-Throughput Configuration + +For applications requiring maximum throughput (e.g., media streaming, file transfers): + +URL parameters: +```bash +nodepass "client://server.example.com:10101/127.0.0.1:8080?min=128&max=8192" +``` + +Environment variables: +```bash +export NP_MIN_POOL_INTERVAL=500ms +export 
NP_MAX_POOL_INTERVAL=3s +export NP_SEMAPHORE_LIMIT=8192 +export NP_UDP_DATA_BUF_SIZE=32768 +export NP_REPORT_INTERVAL=10s +``` + +### Low-Latency Configuration + +For applications requiring minimal latency (e.g., gaming, financial trading): + +URL parameters: +```bash +nodepass "client://server.example.com:10101/127.0.0.1:8080?min=256&max=4096" +``` + +Environment variables: +```bash +export NP_MIN_POOL_INTERVAL=100ms +export NP_MAX_POOL_INTERVAL=1s +export NP_SEMAPHORE_LIMIT=4096 +export NP_UDP_READ_TIMEOUT=5s +export NP_REPORT_INTERVAL=1s +``` + +### Resource-Constrained Configuration + +For deployment on systems with limited resources (e.g., IoT devices, small VPS): + +URL parameters: +```bash +nodepass "client://server.example.com:10101/127.0.0.1:8080?min=16&max=512" +``` + +Environment variables: +```bash +export NP_MIN_POOL_INTERVAL=2s +export NP_MAX_POOL_INTERVAL=10s +export NP_SEMAPHORE_LIMIT=512 +export NP_REPORT_INTERVAL=30s +export NP_SHUTDOWN_TIMEOUT=3s +``` + +## Next Steps + +- See [usage instructions](/docs/en/usage.md) for basic operational commands +- Explore [examples](/docs/en/examples.md) to understand deployment patterns +- Learn about [how NodePass works](/docs/en/how-it-works.md) to optimize your configuration +- Check the [troubleshooting guide](/docs/en/troubleshooting.md) if you encounter issues \ No newline at end of file diff --git a/nodepass/docs/en/examples.md b/nodepass/docs/en/examples.md new file mode 100644 index 0000000000..2af5796ece --- /dev/null +++ b/nodepass/docs/en/examples.md @@ -0,0 +1,261 @@ +# Usage Examples + +This page provides practical examples of NodePass in various deployment scenarios. These examples cover common use cases and can be adapted to suit your specific requirements. 
+ +## Basic Server Setup with TLS Options + +### Example 1: No TLS Encryption + +When speed is more important than security (e.g., in trusted networks): + +```bash +nodepass "server://0.0.0.0:10101/127.0.0.1:8080?log=debug&tls=0" +``` + +This starts a NodePass server that: +- Listens for tunnel connections on all interfaces, port 10101 +- Forwards traffic to localhost:8080 +- Uses debug logging for detailed information +- Uses no encryption for data channels (fastest performance) + +### Example 2: Self-Signed Certificate + +For balanced security and ease of setup (recommended for most cases): + +```bash +nodepass "server://0.0.0.0:10101/127.0.0.1:8080?log=debug&tls=1" +``` + +This configuration: +- Automatically generates a self-signed certificate +- Provides encryption without requiring certificate management +- Protects data traffic from passive eavesdropping +- Works well for internal or testing environments + +### Example 3: Custom Domain Certificate + +For production environments requiring verified certificates: + +```bash +nodepass "server://0.0.0.0:10101/127.0.0.1:8080?log=debug&tls=2&crt=/path/to/cert.pem&key=/path/to/key.pem" +``` + +This setup: +- Uses your provided TLS certificate and private key +- Offers the highest security level with certificate validation +- Is ideal for production environments and public-facing services +- Allows clients to verify the server's identity + +## Connecting to a NodePass Server + +### Example 4: Basic Client Connection + +Connect to a NodePass server with default settings: + +```bash +nodepass client://server.example.com:10101/127.0.0.1:8080 +``` + +This client: +- Connects to the NodePass server at server.example.com:10101 +- Forwards received traffic to localhost:8080 +- Automatically adopts the server's TLS security policy +- Uses the default info log level + +### Example 5: Client with Debug Logging + +For troubleshooting connection issues: + +```bash +nodepass 
client://server.example.com:10101/127.0.0.1:8080?log=debug +``` + +This enables verbose output to help identify: +- Connection establishment issues +- Signal processing +- Data transfer details +- Error conditions + +## Database Access Through Firewall + +### Example 6: Database Tunneling + +Enable secure access to a database server behind a firewall: + +```bash +# Server side (outside secured network) with TLS encryption +nodepass server://:10101/127.0.0.1:5432?tls=1 + +# Client side (inside the firewall) +nodepass client://server.example.com:10101/127.0.0.1:5432 +``` + +This configuration: +- Creates an encrypted tunnel to a PostgreSQL database (port 5432) +- Allows secure access to the database without exposing it directly to the internet +- Encrypts all database traffic with a self-signed certificate +- Maps the remote database to appear as a local service on the client side + +## Secure Microservice Communication + +### Example 7: Service-to-Service Communication + +Enable secure communication between microservices: + +```bash +# Service A (consuming API) with custom certificate +nodepass "server://0.0.0.0:10101/127.0.0.1:8081?log=warn&tls=2&crt=/path/to/service-a.crt&key=/path/to/service-a.key" + +# Service B (providing API) +nodepass client://service-a:10101/127.0.0.1:8082 +``` + +This setup: +- Creates a secure channel between two microservices +- Uses a custom certificate for service identity verification +- Limits logging to warnings and errors only +- Maps service A's API to appear as a local service on service B + +## IoT Device Management + +### Example 8: IoT Gateway + +Create a central access point for IoT devices: + +```bash +# Central management server +nodepass "server://0.0.0.0:10101/127.0.0.1:8888?log=info&tls=1" + +# IoT device +nodepass client://mgmt.example.com:10101/127.0.0.1:80 +``` + +This configuration: +- Enables secure connections from distributed IoT devices to a central server +- Uses self-signed certificates for adequate security +- 
Allows embedded devices to expose their local web interfaces securely +- Centralizes device management through a single endpoint + +## Multi-environment Development + +### Example 9: Development Environment Access + +Access different development environments through tunnels: + +```bash +# Production API access tunnel +nodepass client://tunnel.example.com:10101/127.0.0.1:3443 + +# Development environment +nodepass server://tunnel.example.com:10101/127.0.0.1:3000 + +# Testing environment +nodepass "server://tunnel.example.com:10101/127.0.0.1:3001?log=warn&tls=1" +``` + +This setup: +- Creates secure access to multiple environments (production, development, testing) +- Uses different levels of logging based on environment sensitivity +- Enables developers to access environments without direct network exposure +- Maps remote services to different local ports for easy identification + +## Container Deployment + +### Example 10: Containerized NodePass + +Deploy NodePass in a Docker environment: + +```bash +# Create a network for the containers +docker network create nodepass-net + +# Deploy NodePass server with self-signed certificate +docker run -d --name nodepass-server \ + --network nodepass-net \ + -p 10101:10101 \ + ghcr.io/yosebyte/nodepass "server://0.0.0.0:10101/web-service:80?log=info&tls=1" + +# Deploy a web service as target +docker run -d --name web-service \ + --network nodepass-net \ + nginx:alpine + +# Deploy NodePass client +docker run -d --name nodepass-client \ + -p 8080:8080 \ + ghcr.io/yosebyte/nodepass client://nodepass-server:10101/127.0.0.1:8080?log=info + +# Access the web service via http://localhost:8080 +``` + +This configuration: +- Creates a containerized tunnel between services +- Uses Docker networking to connect containers +- Exposes only necessary ports to the host +- Provides secure access to an internal web service + +## Master API Management + +### Example 11: Centralized Management + +Set up a central controller for multiple NodePass 
instances: + +```bash +# Start the master API service with self-signed certificate +nodepass "master://0.0.0.0:9090?log=info&tls=1" +``` + +You can then manage instances via API calls: + +```bash +# Create a server instance +curl -X POST http://localhost:9090/api/v1/instances \ + -H "Content-Type: application/json" \ + -d '{"url":"server://0.0.0.0:10101/0.0.0.0:8080?tls=1"}' + +# Create a client instance +curl -X POST http://localhost:9090/api/v1/instances \ + -H "Content-Type: application/json" \ + -d '{"url":"client://localhost:10101/127.0.0.1:8081"}' + +# List all running instances +curl http://localhost:9090/api/v1/instances + +# Control an instance (replace {id} with actual instance ID) +curl -X PATCH http://localhost:9090/api/v1/instances/{id} \ + -H "Content-Type: application/json" \ + -d '{"action":"restart"}' +``` + +This setup: +- Provides a central management interface for all NodePass instances +- Allows dynamic creation and control of tunnels +- Offers a RESTful API for automation and integration +- Includes a built-in Swagger UI at http://localhost:9090/api/v1/docs + +### Example 12: Custom API Prefix + +Use a custom API prefix for the master mode: + +```bash +# Start with custom API prefix +nodepass "master://0.0.0.0:9090/admin?log=info&tls=1" + +# Create an instance using the custom prefix +curl -X POST http://localhost:9090/admin/v1/instances \ + -H "Content-Type: application/json" \ + -d '{"url":"server://0.0.0.0:10101/0.0.0.0:8080?tls=1"}' +``` + +This allows: +- Integration with existing API gateways +- Custom URL paths for security or organizational purposes +- Swagger UI access at http://localhost:9090/admin/v1/docs + +## Next Steps + +Now that you've seen various usage examples, you might want to: + +- Learn about [configuration options](/docs/en/configuration.md) for fine-tuning +- Understand [how NodePass works](/docs/en/how-it-works.md) under the hood +- Check the [troubleshooting guide](/docs/en/troubleshooting.md) for common issues \ No
newline at end of file diff --git a/nodepass/docs/en/how-it-works.md b/nodepass/docs/en/how-it-works.md new file mode 100644 index 0000000000..fe8f36b7b5 --- /dev/null +++ b/nodepass/docs/en/how-it-works.md @@ -0,0 +1,313 @@ +# How NodePass Works + +This page explains the internal architecture and data flow mechanisms of NodePass, providing insights into how the different components interact to create efficient and secure tunnels. + +## Architecture Overview + +NodePass creates a network architecture with separate channels for control and data: + +1. **Control Channel (Tunnel)**: + - Unencrypted TCP connection between client and server + - Used exclusively for signaling and coordination + - Maintains persistent connection for the lifetime of the tunnel + +2. **Data Channel (Target)**: + - Configurable TLS encryption options: + - **Mode 0**: Unencrypted data transfer (fastest, least secure) + - **Mode 1**: Self-signed certificate encryption (good security, no verification) + - **Mode 2**: Verified certificate encryption (highest security, requires valid certificates) + - Created on-demand for each connection or datagram + - Used for actual application data transfer + +3. **Server Mode Operation**: + - Listens for control connections on the tunnel endpoint + - When traffic arrives at the target endpoint, signals the client via the control channel + - Establishes data channels with the specified TLS mode when needed + - Supports bidirectional data flow: connections can be initiated from either server or client side + +4. **Client Mode Operation**: + - Connects to the server's control channel + - Listens for signals indicating incoming connections + - Creates data connections using the TLS security level specified by the server + - Forwards data between the secure channel and local target + - Supports bidirectional data flow: data flow direction is automatically selected based on target address + +5. 
**Client Single-End Forwarding Mode**: + - Automatically enabled when tunnel address is a local address (e.g., 127.0.0.1) + - Client directly listens on local port without server control channel coordination + - Uses connection pooling technology for TCP connections to significantly improve forwarding performance + - Suitable for pure local forwarding scenarios, reducing network overhead and latency + - Supports high-performance single-end forwarding for both TCP and UDP protocols + +6. **Protocol Support**: + - **TCP**: Full bidirectional streaming with persistent connections, supports connection pool optimization in client single-end forwarding mode + - **UDP**: Datagram forwarding with configurable buffer sizes and timeouts + +## Data Transmission Flow + +NodePass establishes a bidirectional data flow through its tunnel architecture, supporting both TCP and UDP protocols. The system supports three data flow modes: + +### Data Flow Mode Explanation +- **Server Receives Mode (dataFlow: "-")**: Server listens on target address, client listens locally, data flows from target address to client local +- **Server Sends Mode (dataFlow: "+")**: Server connects to remote target address, client listens locally, data flows from client local to remote target +- **Client Single-End Forwarding Mode**: Client directly listens locally and forwards to target address without server coordination, using connection pooling technology for high-performance forwarding + +The data flow mode is automatically determined based on tunnel address and target address: +- If tunnel address is a local address (localhost, 127.0.0.1, etc.), enables Client Single-End Forwarding Mode +- If target address is a local address, uses Server Receives Mode +- If target address is a remote address, uses Server Sends Mode + +### Server-Side Flow (Server Receives Mode) +1.
**Connection Initiation**: + ``` + [Target Client] → [Target Listener] → [Server: Target Connection Created] + ``` + - For TCP: Client establishes persistent connection to target listener + - For UDP: Server receives datagrams on UDP socket bound to target address + +2. **Signal Generation**: + ``` + [Server] → [Generate Unique Connection ID] → [Signal Client via Unencrypted TCP Tunnel] + ``` + - For TCP: Generates a `//#1` signal + - For UDP: Generates a `//#2` signal + +3. **Connection Preparation**: + ``` + [Server] → [Create Remote Connection in Pool with Configured TLS Mode] → [Wait for Client Connection] + ``` + - Both protocols use the same connection pool mechanism with unique connection IDs + - TLS configuration applied based on the specified mode (0, 1, or 2) + +4. **Data Exchange**: + ``` + [Target Connection] ⟷ [Exchange/Transfer] ⟷ [Remote Connection] + ``` + - For TCP: Uses `conn.DataExchange()` for continuous bidirectional data streaming + - For UDP: Individual datagrams are forwarded with configurable buffer sizes + +### Client-Side Flow +1. **Signal Reception**: + ``` + [Client] → [Read Signal from TCP Tunnel] → [Parse Connection ID] + ``` + - Client differentiates between TCP and UDP signals based on URL scheme + +2. **Connection Establishment**: + ``` + [Client] → [Retrieve Connection from Pool] → [Connect to Remote Endpoint] + ``` + - Connection management is protocol-agnostic at this stage + +3. **Local Connection**: + ``` + [Client] → [Connect to Local Target] → [Establish Local Connection] + ``` + - For TCP: Establishes persistent TCP connection to local target + - For UDP: Creates UDP socket for datagram exchange with local target + +4. 
**Data Exchange**: + ``` + [Remote Connection] ⟷ [Exchange/Transfer] ⟷ [Local Target Connection] + ``` + - For TCP: Uses `conn.DataExchange()` for continuous bidirectional data streaming + - For UDP: Reads single datagram, forwards it, waits for response with timeout, then returns response + +### Client Single-End Forwarding Flow +1. **Mode Detection**: + ``` + [Client] → [Detect Tunnel Address as Local Address] → [Enable Single-End Forwarding Mode] + ``` + - Automatically detects if tunnel address is localhost, 127.0.0.1, or other local addresses + - Enables single-end forwarding mode, skipping server control channel establishment + +2. **Local Listening**: + ``` + [Client] → [Start Listener on Tunnel Port] → [Wait for Local Connections] + ``` + - Directly starts TCP or UDP listener on specified tunnel port + - No need to connect to remote server, achieving zero-latency startup + +3. **Connection Pool Initialization** (TCP Only): + ``` + [Client] → [Initialize Target Connection Pool] → [Pre-establish Connections to Target Address] + ``` + - Creates high-performance connection pool for TCP forwarding + - Pre-establishes multiple connections to target address, significantly reducing connection establishment latency + - Connection pool size can be dynamically adjusted based on concurrent demand + +4. 
**High-Performance Forwarding**: + ``` + [Local Connection] → [Get Target Connection from Pool] → [Direct Data Exchange] → [Connection Reuse or Release] + ``` + - For TCP: Quickly gets pre-established target connection from pool for efficient data exchange + - For UDP: Directly forwards datagrams to target address without connection pool + - Optimized data path minimizing forwarding overhead and latency + +### Protocol-Specific Characteristics +- **TCP Exchange**: + - Persistent connections for full-duplex communication + - Continuous data streaming until connection termination + - Error handling with automatic reconnection + - **Client Single-End Forwarding Optimization**: Pre-established connections through connection pooling technology, significantly reducing connection establishment latency + +- **UDP Exchange**: + - One-time datagram forwarding with configurable buffer sizes (`UDP_DATA_BUF_SIZE`) + - Read timeout control for response waiting (`UDP_READ_TIMEOUT`) + - Optimized for low-latency, stateless communications + - **Client Single-End Forwarding Optimization**: Direct forwarding mechanism without connection pool, achieving minimal latency + +## Signal Communication Mechanism + +NodePass uses a sophisticated URL-based signaling protocol through the TCP tunnel: + +### Signal Types +1. **Tunnel Signal**: + - Format: `#` + - Purpose: Informs the client about the tls code + - Timing: Sent on tunnel handshake + +2. **TCP Launch Signal**: + - Format: `//#1` + - Purpose: Requests the client to establish a TCP connection for a specific ID + - Timing: Sent when a new TCP connection to the target service is received + +3. **UDP Launch Signal**: + - Format: `//#2` + - Purpose: Requests the client to handle UDP traffic for a specific ID + - Timing: Sent when UDP data is received on the target port + +### Signal Flow +1. 
**Signal Generation**: + - Server creates URL-formatted signals for specific events + - Signal is terminated with a newline character for proper parsing + +2. **Signal Transmission**: + - Server writes signals to the TCP tunnel connection + - Uses a mutex to prevent concurrent writes to the tunnel + +3. **Signal Reception**: + - Client uses a buffered reader to read signals from the tunnel + - Signals are trimmed and parsed into URL format + +4. **Signal Processing**: + - Client places valid signals in a buffered channel (signalChan) + - A dedicated goroutine processes signals from the channel + - Semaphore pattern prevents signal overflow + +5. **Signal Execution**: + - Remote signals update the client's remote address configuration + - Launch signals trigger the `clientOnce()` method to establish connections + +### Signal Resilience +- Buffered channel with configurable capacity prevents signal loss during high load +- Semaphore implementation ensures controlled concurrency +- Error handling for malformed or unexpected signals + +## Connection Pool Architecture + +NodePass implements an efficient connection pooling system for managing network connections: + +### Pool Design +1. **Pool Types**: + - **Client Pool**: Pre-establishes connections to the remote endpoint + - **Server Pool**: Manages incoming connections from clients + +2. **Pool Components**: + - **Connection Storage**: Thread-safe map of connection IDs to net.Conn objects + - **ID Channel**: Buffered channel for available connection IDs + - **Capacity Management**: Dynamic adjustment based on usage patterns + - **Interval Control**: Time-based throttling between connection creations + - **Connection Factory**: Customizable connection creation function + +### Connection Lifecycle +1. **Connection Creation**: + - Connections are created up to the configured capacity + - Each connection is assigned a unique ID + - IDs and connections are stored in the pool + +2. 
**Connection Acquisition**: + - Client retrieves connections using connection IDs + - Server retrieves the next available connection from the pool + - Connections are validated before being returned + +3. **Connection Usage**: + - Connection is removed from the pool when acquired + - Used for data exchange between endpoints + - No connection reuse (one-time use model) + +4. **Connection Termination**: + - Connections are closed after use + - Resources are properly released + - Error handling ensures clean termination + +### Pool Management +1. **Capacity Control**: + - `MIN_POOL_CAPACITY`: Ensures minimum available connections + - `MAX_POOL_CAPACITY`: Prevents excessive resource consumption + - Dynamic scaling based on demand patterns + +2. **Interval Control**: + - `MIN_POOL_INTERVAL`: Minimum time between connection creation attempts + - `MAX_POOL_INTERVAL`: Maximum time between connection creation attempts + - Adaptive time-based throttling to optimize resource usage + +3. **Dynamic Pool Adaptation**: + The connection pool employs a dual-adaptive mechanism to ensure optimal performance: + + **A. Capacity Adjustment** + - Pool capacity dynamically adjusts based on real-time usage patterns + - If connection creation success rate is low (<20%), capacity decreases to minimize resource waste + - If connection creation success rate is high (>80%), capacity increases to accommodate higher traffic + - Gradual scaling prevents oscillation and provides stability + - Respects configured minimum and maximum capacity boundaries + + **B. 
Interval Adjustment** + - Creation intervals adapt based on pool idle connection count + - When idle connections are low (<20% of capacity), intervals decrease toward min interval + - When idle connections are high (>80% of capacity), intervals increase toward max interval + - Prevents overwhelming network resources during periods of low demand + - Accelerates connection creation during high demand periods when pool is depleting + +## Master API Architecture + +In master mode, NodePass provides a RESTful API for centralized management: + +### API Components +1. **HTTP/HTTPS Server**: + - Listens on configured address and port + - Optional TLS encryption with same modes as tunnel server + - Configurable API prefix path + +2. **Instance Management**: + - In-memory registry of NodePass instances + - UID-based instance identification + - State tracking for each instance (running, stopped, etc.) + +3. **RESTful Endpoints**: + - Standard CRUD operations for instances + - Instance control actions (start, stop, restart) + - Health status reporting + - OpenAPI specification for API documentation + +### Instance Lifecycle Management +1. **Instance Creation**: + - URL-based configuration similar to command line + - Dynamic initialization based on instance type + - Parameter validation before instance creation + +2. **Instance Control**: + - Start/stop/restart capabilities + - Graceful shutdown with configurable timeout + - Resource cleanup on termination + +3. 
**API Security**: + - TLS encryption options for API connections + - Same security modes as tunnel server + - Certificate management for HTTPS + +## Next Steps + +- For practical examples of deploying NodePass, see the [examples page](/docs/en/examples.md) +- To fine-tune NodePass for your specific needs, explore the [configuration options](/docs/en/configuration.md) +- If you encounter any issues, check the [troubleshooting guide](/docs/en/troubleshooting.md) \ No newline at end of file diff --git a/nodepass/docs/en/installation.md b/nodepass/docs/en/installation.md new file mode 100644 index 0000000000..8b440d717b --- /dev/null +++ b/nodepass/docs/en/installation.md @@ -0,0 +1,111 @@ +# Installation Guide + +This guide provides detailed instructions for installing NodePass using different methods. Choose the option that best suits your environment and requirements. + +## System Requirements + +- Go 1.24 or higher (for building from source) +- Network connectivity between server and client endpoints +- Admin privileges may be required for binding to ports below 1024 + +## Installation Options + +### Option 1: Pre-built Binaries + +The easiest way to get started with NodePass is to download a pre-built binary for your platform. + +1. Visit the [releases page](https://github.com/yosebyte/nodepass/releases) on GitHub +2. Download the appropriate binary for your operating system (Windows, macOS, Linux) +3. Extract the archive if necessary +4. Make the binary executable (Linux/macOS): + ```bash + chmod +x nodepass + ``` +5. 
Move the binary to a location in your PATH: + - Linux/macOS: `sudo mv nodepass /usr/local/bin/` + - Windows: Add the location to your PATH environment variable + +### Option 2: Using Go Install + +If you have Go installed on your system, you can use the `go install` command: + +```bash +go install github.com/yosebyte/nodepass/cmd/nodepass@latest +``` + +This command downloads the source code, compiles it, and installs the binary in your Go bin directory (usually `$GOPATH/bin`). + +### Option 3: Building from Source + +For the latest development version or to customize the build: + +```bash +# Clone the repository +git clone https://github.com/yosebyte/nodepass.git + +# Navigate to the project directory +cd nodepass + +# Build the binary +go build -o nodepass ./cmd/nodepass + +# Optional: Install to your GOPATH/bin +go install ./cmd/nodepass +``` + +### Option 4: Using Container Image + +NodePass is available as a container image on GitHub Container Registry, perfect for containerized environments: + +```bash +# Pull the container image +docker pull ghcr.io/yosebyte/nodepass:latest + +# Run in server mode +docker run -d --name nodepass-server -p 10101:10101 -p 8080:8080 \ + ghcr.io/yosebyte/nodepass server://0.0.0.0:10101/0.0.0.0:8080 + +# Run in client mode +docker run -d --name nodepass-client \ + -e MIN_POOL_CAPACITY=32 \ + -e MAX_POOL_CAPACITY=512 \ + -p 8080:8080 \ + ghcr.io/yosebyte/nodepass client://nodepass-server:10101/127.0.0.1:8080 +``` + +### Option 5: Using Management Script (Linux Only) + +For Linux systems, we provide a one-click script: + +```bash +bash <(curl -sSL https://run.nodepass.eu/np.sh) +``` + +- This script provides easy-to-use master mode (API mode) installation, configuration, and management functions. 
+- For details, please refer to [https://github.com/NodePassProject/npsh](https://github.com/NodePassProject/npsh) + +## Verifying Installation + +After installation, verify that NodePass is correctly installed by checking the version: + +```bash +nodepass +``` + +## Next Steps + +Now that you have NodePass installed, you can: + +- Learn about its basic [usage](/docs/en/usage.md) +- Explore [configuration options](/docs/en/configuration.md) +- Try out some [examples](/docs/en/examples.md) + +## Troubleshooting Installation Issues + +If you encounter any issues during installation: + +- Ensure your system meets the minimum requirements +- Check that you have the correct permissions to install software +- For Go-related issues, verify your Go installation with `go version` +- For container-related issues, ensure Docker is properly installed and running +- See our [troubleshooting guide](/docs/en/troubleshooting.md) for more help diff --git a/nodepass/docs/en/troubleshooting.md b/nodepass/docs/en/troubleshooting.md new file mode 100644 index 0000000000..12b4f00499 --- /dev/null +++ b/nodepass/docs/en/troubleshooting.md @@ -0,0 +1,287 @@ +# Troubleshooting Guide + +This guide helps you diagnose and resolve common issues you might encounter when using NodePass. For each problem, we provide possible causes and step-by-step solutions. + +## Connection Issues + +### Unable to Establish Tunnel Connection + +**Symptoms**: Client cannot connect to the server's tunnel endpoint, or connection is immediately dropped. + +**Possible Causes and Solutions**: + +1. **Network Connectivity Issues** + - Verify basic connectivity with `ping` or `telnet` to the server address + - Check if the specified port is reachable: `telnet server.example.com 10101` + - Ensure no firewall is blocking the tunnel port (typically 10101) + +2. 
**Server Not Running** + - Verify the NodePass server is running with `ps aux | grep nodepass` on Linux/macOS + - Check server logs for any startup errors + - Try restarting the server process + +3. **Incorrect Addressing** + - Double-check the tunnel address format in your client command + - Ensure you're using the correct hostname/IP and port + - If using DNS names, verify they resolve to the correct IP addresses + +4. **TLS Configuration Mismatch** + - If server requires TLS but client doesn't support it, connection will fail + - Check server logs for TLS handshake errors + - Ensure certificates are correctly configured if using TLS mode 2 + +### Data Not Flowing Through the Tunnel + +**Symptoms**: Tunnel connection established, but application data isn't reaching the destination. + +**Possible Causes and Solutions**: + +1. **Target Service Not Running** + - Verify the target service is running on both server and client sides + - Check if you can connect directly to the service locally + +2. **Port Conflicts** + - Ensure the target port isn't already in use by another application + - Use `netstat -tuln` to check for port usage + +3. **Protocol Mismatch** + - Verify you're tunneling the correct protocol (TCP vs UDP) + - Some applications require specific protocol support + +4. **Incorrect Target Address** + - Double-check the target address in both server and client commands + - For server-side targets, ensure they're reachable from the server + - For client-side targets, ensure they're reachable from the client + +### Connection Stability Issues + +**Symptoms**: Tunnel works initially but disconnects frequently or becomes unresponsive. + +**Possible Causes and Solutions**: + +1. **Network Instability** + - Check for packet loss or high latency in your network + - Consider a more stable network connection for production deployments + +2. 
**Resource Constraints** + - Monitor CPU and memory usage on both client and server + - Adjust pool parameters if resources are being exhausted (see Performance section) + - Check file descriptor limits with `ulimit -n` on Linux/macOS + +3. **Timeout Configuration** + - Adjust `UDP_READ_TIMEOUT` if using UDP with slow response times + - Consider adjusting TCP keepalive settings at the OS level for long-lived connections + +4. **Overloaded Server** + - Check server logs for signs of connection overload + - Adjust `MAX_POOL_CAPACITY` and `SEMAPHORE_LIMIT` to handle the load + - Consider scaling horizontally with multiple NodePass instances + +## Certificate Issues + +### TLS Handshake Failures + +**Symptoms**: Connection attempts fail with TLS handshake errors. + +**Possible Causes and Solutions**: + +1. **Invalid Certificate** + - Verify certificate validity: `openssl x509 -in cert.pem -text -noout` + - Ensure the certificate hasn't expired + - Check that the certificate is issued for the correct domain/IP + +2. **Missing or Inaccessible Certificate Files** + - Confirm file paths to certificates and keys are correct + - Verify file permissions allow the NodePass process to read them + - Check for file corruption by opening certificates in a text editor + +3. **Certificate Trust Issues** + - If using custom CAs, ensure they are properly trusted + - For self-signed certificates, confirm TLS mode 1 is being used + - For verified certificates, ensure the CA chain is complete + +4. **Key Format Problems** + - Ensure private keys are in the correct format (usually PEM) + - Check for passphrase protection on private keys (not supported directly) + +### Certificate Renewal Issues + +**Symptoms**: After certificate renewal, secure connections start failing. + +**Possible Causes and Solutions**: + +1. **New Certificate Not Loaded** + - Restart NodePass to force loading of new certificates + - Check if `RELOAD_INTERVAL` is set correctly to automatically detect changes + +2. 
**Certificate Chain Incomplete** + - Ensure the full certificate chain is included in the certificate file + - Verify chain order: your certificate first, then intermediate certificates + +3. **Key Mismatch** + - Verify the new certificate matches the private key: + ```bash + openssl x509 -noout -modulus -in cert.pem | openssl md5 + openssl rsa -noout -modulus -in key.pem | openssl md5 + ``` + - If outputs differ, certificate and key don't match + +## Performance Optimization + +### High Latency + +**Symptoms**: Connections work but have noticeable delays. + +**Possible Causes and Solutions**: + +1. **Pool Configuration** + - Increase `MIN_POOL_CAPACITY` to have more connections ready + - Decrease `MIN_POOL_INTERVAL` to create connections faster + - Adjust `SEMAPHORE_LIMIT` if connection queue is backing up + +2. **Network Path** + - Check for network congestion or high-latency links + - Consider deploying NodePass closer to either the client or server + - Use a traceroute to identify potential bottlenecks + +3. **TLS Overhead** + - If extreme low latency is required and security is less critical, consider using TLS mode 0 + - For a balance, use TLS mode 1 with session resumption + +4. **Resource Contention** + - Ensure the host system has adequate CPU and memory + - Check for other processes competing for resources + - Consider dedicated hosts for high-traffic deployments + +### High CPU Usage + +**Symptoms**: NodePass process consuming excessive CPU resources. + +**Possible Causes and Solutions**: + +1. **Pool Thrashing** + - If pool is constantly creating and destroying connections, adjust timings + - Increase `MIN_POOL_INTERVAL` to reduce connection creation frequency + - Find a good balance for `MIN_POOL_CAPACITY` and `MAX_POOL_CAPACITY` + +2. **Excessive Logging** + - Reduce log level from debug to info or warn for production use + - Check if logs are being written to a slow device + +3. 
**TLS Overhead** + - TLS handshakes are CPU-intensive; consider session caching + - Use TLS mode 1 instead of mode 2 if certificate validation is less critical + +4. **Traffic Volume** + - High throughput can cause CPU saturation + - Consider distributing traffic across multiple NodePass instances + - Vertical scaling (more CPU cores) may be necessary for very high throughput + +### Memory Leaks + +**Symptoms**: NodePass memory usage grows continuously over time. + +**Possible Causes and Solutions**: + +1. **Connection Leaks** + - Ensure `SHUTDOWN_TIMEOUT` is sufficient to properly close connections + - Check for proper error handling in custom scripts or management code + - Monitor connection counts with system tools like `netstat` + +2. **Pool Size Issues** + - If `MAX_POOL_CAPACITY` is very large, memory usage will be higher + - Monitor actual pool usage vs. configured capacity + - Adjust capacity based on actual concurrent connection needs + +3. **Debug Logging** + - Extensive debug logging can consume memory in high-traffic scenarios + - Use appropriate log levels for production environments + +## UDP-Specific Issues + +### UDP Data Loss + +**Symptoms**: UDP packets are not reliably forwarded through the tunnel. + +**Possible Causes and Solutions**: + +1. **Buffer Size Limitations** + - If UDP packets are large, increase `UDP_DATA_BUF_SIZE` + - Default of 8192 bytes may be too small for some applications + +2. **Timeout Issues** + - If responses are slow, increase `UDP_READ_TIMEOUT` + - For applications with variable response times, find an optimal balance + +3. **High Packet Rate** + - UDP is handled one datagram at a time; very high rates may cause issues + - Consider increasing pool capacity for high-traffic UDP applications + +4. 
**Protocol Expectations** + - Some UDP applications expect specific behavior regarding packet order or timing + - NodePass provides best-effort forwarding but cannot guarantee UDP properties beyond what the network provides + +### UDP Connection Tracking + +**Symptoms**: UDP sessions disconnect prematurely or fail to establish. + +**Possible Causes and Solutions**: + +1. **Connection Mapping** + - Verify client configurations match server expectations + - Check for firewalls that may be timing out UDP session tracking + +2. **Application UDP Timeout** + - Some applications have built-in UDP session timeouts + - May need to adjust application-specific keepalive settings + +## Master API Issues + +### API Accessibility Problems + +**Symptoms**: Cannot connect to the master API endpoint. + +**Possible Causes and Solutions**: + +1. **Endpoint Configuration** + - Verify API address and port in the master command + - Check if the API server is bound to the correct network interface + +2. **TLS Configuration** + - If using HTTPS (TLS modes 1 or 2), ensure client tools support TLS + - For testing, use `curl -k` to skip certificate validation + +3. **Custom Prefix Issues** + - If using a custom API prefix, ensure it's included in all requests + - Check URL formatting in API clients and scripts + +### Instance Management Failures + +**Symptoms**: Cannot create, control, or delete instances through the API. + +**Possible Causes and Solutions**: + +1. **JSON Format Issues** + - Verify request body is valid JSON + - Check for required fields in API requests + +2. **URL Parsing Problems** + - Ensure instance URLs are properly formatted and URL-encoded if necessary + - Verify URL parameters use the correct format + +3. **Instance State Conflicts** + - Cannot delete running instances without stopping them first + - Check current instance state with GET before performing actions + +4. 
**Permission Issues** + - Ensure the NodePass master has sufficient permissions to create processes + - Check file system permissions for any referenced certificates or keys + +## Next Steps + +If you encounter issues not covered in this guide: + +- Check the [project repository](https://github.com/yosebyte/nodepass) for known issues +- Increase the log level to `debug` for more detailed information +- Review the [How It Works](/docs/en/how-it-works.md) section to better understand internal mechanisms +- Consider joining the community discussion for assistance from other users \ No newline at end of file diff --git a/nodepass/docs/en/usage.md b/nodepass/docs/en/usage.md new file mode 100644 index 0000000000..7ea07c8108 --- /dev/null +++ b/nodepass/docs/en/usage.md @@ -0,0 +1,283 @@ +# Usage Instructions + +NodePass creates tunnels with an unencrypted TCP control channel and configurable TLS encryption options for data exchange. This guide covers the three operating modes and explains how to use each effectively. 
+ +## Command Line Syntax + +The general syntax for NodePass commands is: + +```bash +nodepass "<core>://<tunnel_addr>/<target_addr>?log=<level>&tls=<mode>&crt=<cert_file>&key=<key_file>&min=<min_pool>&max=<max_pool>" +``` + +Where: +- `<core>`: Specifies the operating mode (`server`, `client`, or `master`) +- `<tunnel_addr>`: The tunnel endpoint address for control channel communications +- `<target_addr>`: The destination address for business data with bidirectional flow support (or API prefix in master mode) + +### Query Parameters + +Common query parameters: +- `log=<level>`: Log verbosity level (`debug`, `info`, `warn`, `error`, or `event`) +- `min=<min_pool>`: Minimum connection pool capacity (default: 64, client mode only) +- `max=<max_pool>`: Maximum connection pool capacity (default: 8192, client mode only) + +TLS-related parameters (server/master modes only): +- `tls=<mode>`: TLS security level for data channels (`0`, `1`, or `2`) +- `crt=<cert_file>`: Path to certificate file (when `tls=2`) +- `key=<key_file>`: Path to private key file (when `tls=2`) + +## Operating Modes + +NodePass offers three complementary operating modes to suit various deployment scenarios. + +### Server Mode + +Server mode establishes tunnel control channels and supports bidirectional data flow forwarding. 
+ +```bash +nodepass "server:///?log=&tls=&crt=&key=" +``` + +#### Parameters + +- `tunnel_addr`: Address for the TCP tunnel endpoint (control channel) that clients will connect to (e.g., 10.1.0.1:10101) +- `target_addr`: The destination address for business data with bidirectional flow support (e.g., 10.1.0.1:8080) +- `log`: Log level (debug, info, warn, error, event) +- `tls`: TLS encryption mode for the target data channel (0, 1, 2) + - `0`: No TLS encryption (plain TCP/UDP) + - `1`: Self-signed certificate (automatically generated) + - `2`: Custom certificate (requires `crt` and `key` parameters) +- `crt`: Path to certificate file (required when `tls=2`) +- `key`: Path to private key file (required when `tls=2`) + +#### How Server Mode Works + +In server mode, NodePass supports two data flow directions: + +**Mode 1: Server Receives Traffic** (target_addr is local address) +1. Listens for TCP tunnel connections (control channel) on `tunnel_addr` +2. Listens for incoming TCP and UDP traffic on `target_addr` +3. When a connection arrives at `target_addr`, it signals the connected client through the control channel +4. Creates a data channel for each connection with the specified TLS encryption level + +**Mode 2: Server Sends Traffic** (target_addr is remote address) +1. Listens for TCP tunnel connections (control channel) on `tunnel_addr` +2. Waits for clients to listen locally and receive connections through the tunnel +3. 
Establishes connections to remote `target_addr` and forwards data + +#### Examples + +```bash +# No TLS encryption for data channel - Server receives mode +nodepass "server://10.1.0.1:10101/10.1.0.1:8080?log=debug&tls=0" + +# Self-signed certificate (auto-generated) - Server sends mode +nodepass "server://10.1.0.1:10101/192.168.1.100:8080?log=debug&tls=1" + +# Custom domain certificate - Server receives mode +nodepass "server://10.1.0.1:10101/10.1.0.1:8080?log=debug&tls=2&crt=/path/to/cert.pem&key=/path/to/key.pem" +``` + +### Client Mode + +Client mode connects to a NodePass server and supports bidirectional data flow forwarding. + +```bash +nodepass "client:///?log=&min=&max=" +``` + +#### Parameters + +- `tunnel_addr`: Address of the NodePass server's tunnel endpoint to connect to (e.g., 10.1.0.1:10101) +- `target_addr`: The destination address for business data with bidirectional flow support (e.g., 127.0.0.1:8080) +- `log`: Log level (debug, info, warn, error, event) +- `min`: Minimum connection pool capacity (default: 64) +- `max`: Maximum connection pool capacity (default: 8192) + +#### How Client Mode Works + +In client mode, NodePass supports three operating modes: + +**Mode 1: Client Single-End Forwarding** (when tunnel address is local) +1. Listens for TCP and UDP connections on the local tunnel address +2. Uses connection pooling technology to pre-establish TCP connections to target address, eliminating connection latency +3. Directly forwards received traffic to the target address with high performance +4. No handshake with server required, enables point-to-point direct forwarding +5. Suitable for local proxy and simple forwarding scenarios + +**Mode 2: Client Receives Traffic** (when server sends traffic) +1. Connects to the server's TCP tunnel endpoint (control channel) +2. Listens locally and waits for connections through the tunnel +3. 
Establishes connections to local `target_addr` and forwards data + +**Mode 3: Client Sends Traffic** (when server receives traffic) +1. Connects to the server's TCP tunnel endpoint (control channel) +2. Listens for signals from the server through this control channel +3. When a signal is received, establishes a data connection with the TLS security level specified by the server +4. Creates a connection to `target_addr` and forwards traffic + +#### Examples + +```bash +# Client single-end forwarding mode - Local proxy listening on port 1080, forwarding to target server +nodepass client://127.0.0.1:1080/target.example.com:8080?log=debug + +# Connect to a NodePass server and adopt its TLS security policy - Client sends mode +nodepass client://server.example.com:10101/127.0.0.1:8080 + +# Connect with debug logging - Client receives mode +nodepass client://server.example.com:10101/192.168.1.100:8080?log=debug + +# Custom connection pool capacity - High performance configuration +nodepass "client://server.example.com:10101/127.0.0.1:8080?min=128&max=4096" + +# Resource-constrained configuration - Small connection pool +nodepass "client://server.example.com:10101/127.0.0.1:8080?min=16&max=512&log=info" +``` + +### Master Mode (API) + +Master mode runs a RESTful API server for centralized management of NodePass instances. + +```bash +nodepass "master://[]?log=&tls=&crt=&key=" +``` + +#### Parameters + +- `api_addr`: Address where the API service will listen (e.g., 0.0.0.0:9090) +- `prefix`: Optional API prefix path (e.g., /management). 
Default is `/api` +- `log`: Log level (debug, info, warn, error, event) +- `tls`: TLS encryption mode for the API service (0, 1, 2) + - `0`: No TLS encryption (HTTP) + - `1`: Self-signed certificate (HTTPS with auto-generated cert) + - `2`: Custom certificate (HTTPS with provided cert) +- `crt`: Path to certificate file (required when `tls=2`) +- `key`: Path to private key file (required when `tls=2`) + +#### How Master Mode Works + +In master mode, NodePass: +1. Runs a RESTful API server that allows dynamic management of NodePass instances +2. Provides endpoints for creating, starting, stopping, and monitoring client and server instances +3. Includes Swagger UI for easy API exploration at `{prefix}/v1/docs` +4. Automatically inherits TLS and logging settings for instances created through the API + +#### API Endpoints + +All endpoints are relative to the configured prefix (default: `/api`): + +- `GET {prefix}/v1/instances` - List all instances +- `POST {prefix}/v1/instances` - Create a new instance with JSON body: `{"url": "server://0.0.0.0:10101/0.0.0.0:8080"}` +- `GET {prefix}/v1/instances/{id}` - Get instance details +- `PATCH {prefix}/v1/instances/{id}` - Update instance with JSON body: `{"action": "start|stop|restart"}` +- `DELETE {prefix}/v1/instances/{id}` - Delete instance +- `GET {prefix}/v1/openapi.json` - OpenAPI specification +- `GET {prefix}/v1/docs` - Swagger UI documentation + +#### Examples + +```bash +# Start master with HTTP using default API prefix (/api) +nodepass "master://0.0.0.0:9090?log=info" + +# Start master with custom API prefix (/management) +nodepass "master://0.0.0.0:9090/management?log=info" + +# Start master with HTTPS (self-signed certificate) +nodepass "master://0.0.0.0:9090/admin?log=info&tls=1" + +# Start master with HTTPS (custom certificate) +nodepass "master://0.0.0.0:9090?log=info&tls=2&crt=/path/to/cert.pem&key=/path/to/key.pem" +``` + +## Managing NodePass Instances + +### Creating and Managing via API + +You can use 
standard HTTP requests to manage NodePass instances through the master API: + +```bash +# Create and manage instances via API (using default prefix) +curl -X POST http://localhost:9090/api/v1/instances \ + -H "Content-Type: application/json" \ + -d '{"url":"server://0.0.0.0:10101/0.0.0.0:8080?tls=1"}' + +# Using custom prefix +curl -X POST http://localhost:9090/admin/v1/instances \ + -H "Content-Type: application/json" \ + -d '{"url":"server://0.0.0.0:10101/0.0.0.0:8080?tls=1"}' + +# List all running instances +curl http://localhost:9090/api/v1/instances + +# Control an instance (replace {id} with actual instance ID) +curl -X PATCH http://localhost:9090/api/v1/instances/{id} \ + -H "Content-Type: application/json" \ + -d '{"action":"restart"}' +``` + +## Bidirectional Data Flow Explanation + +NodePass supports flexible bidirectional data flow configuration: + +### Client Single-End Forwarding Mode +- **Client**: Listens on local tunnel address, uses connection pooling technology to directly forward to target address +- **Connection Pool Optimization**: Pre-establishes TCP connections, eliminates connection latency, provides high-performance forwarding +- **No Server Required**: Operates independently without server handshake +- **Use Case**: Local proxy, simple port forwarding, testing environments, high-performance forwarding + +### Server Receives Mode (dataFlow: "-") +- **Server**: Listens for incoming connections on target_addr, forwards through tunnel to client +- **Client**: Connects to local target_addr to provide services +- **Use Case**: Expose internal services to external access + +### Server Sends Mode (dataFlow: "+") +- **Server**: Connects to remote target_addr to fetch data, sends through tunnel to client +- **Client**: Listens locally to receive connections from server +- **Use Case**: Access remote services through tunnel proxy + +The system automatically selects the appropriate operation mode based on tunnel and target addresses: +- If the client's 
tunnel address is a local address, enables single-end forwarding mode +- If target address is a local address, uses Server Receives Mode +- If target address is a remote address, uses Server Sends Mode + +## Tunnel Key + +NodePass uses tunnel keys to authenticate connections between clients and servers. The key can be specified in two ways: + +### Key Derivation Rules + +1. **Explicit Key**: Specify the username part in the URL as the key + ```bash + # Use "mypassword" as the tunnel key + nodepass server://mypassword@10.1.0.1:10101/10.1.0.1:8080 + nodepass client://mypassword@10.1.0.1:10101/127.0.0.1:8080 + ``` + +2. **Port-Derived Key**: If no username is specified, the system uses the hexadecimal value of the port number as the key + ```bash + # Port 10101's hexadecimal value "2775" will be used as the tunnel key + nodepass server://10.1.0.1:10101/10.1.0.1:8080 + nodepass client://10.1.0.1:10101/127.0.0.1:8080 + ``` + +### Handshake Process + +The handshake process between client and server is as follows: + +1. **Client Connection**: Client connects to the server's tunnel address +2. **Key Authentication**: Client sends XOR-encrypted tunnel key +3. **Server Verification**: Server decrypts and verifies if the key matches +4. **Configuration Sync**: Upon successful verification, server sends tunnel configuration (including TLS mode) +5. **Connection Established**: Handshake complete, data transmission begins + +This design ensures that only clients with the correct key can establish tunnel connections. 
+ +## Next Steps + +- Learn about [configuration options](/docs/en/configuration.md) to fine-tune NodePass +- Explore [examples](/docs/en/examples.md) of common deployment scenarios +- Understand [how NodePass works](/docs/en/how-it-works.md) under the hood +- Check the [troubleshooting guide](/docs/en/troubleshooting.md) if you encounter issues \ No newline at end of file diff --git a/nodepass/docs/zh/api.md b/nodepass/docs/zh/api.md new file mode 100644 index 0000000000..2ff83440a6 --- /dev/null +++ b/nodepass/docs/zh/api.md @@ -0,0 +1,1044 @@ +# NodePass API参考 + +## 概述 + +NodePass在主控模式(Master Mode)下提供了RESTful API,使前端应用能够以编程方式进行控制和集成。本节提供API端点、集成模式和最佳实践的全面文档。 + +## 主控模式API + +当NodePass以主控模式(`master://`)运行时,它会暴露REST API,允许前端应用: + +1. 创建和管理NodePass服务器和客户端实例 +2. 监控连接状态和统计信息 +3. 控制运行中的实例(启动、停止、重启) +4. 配置实例自启动策略以实现自动化管理 +5. 通过参数配置行为 + +### 基本URL + +``` +master:///?& +``` + +其中: +- ``是主控模式URL中指定的地址(例如`0.0.0.0:9090`) +- ``是可选的API前缀(如果未指定,则使用`/api`) + +### 启动主控模式 + +使用默认设置启动主控模式的NodePass: + +```bash +nodepass "master://0.0.0.0:9090?log=info" +``` + +使用自定义API前缀和启用TLS: + +```bash +nodepass "master://0.0.0.0:9090/admin?log=info&tls=1" +``` + +### 可用端点 + +| 端点 | 方法 | 描述 | +|----------|--------|-------------| +| `/instances` | GET | 列出所有NodePass实例 | +| `/instances` | POST | 创建新的NodePass实例 | +| `/instances/{id}` | GET | 获取特定实例的详细信息 | +| `/instances/{id}` | PATCH | 更新实例状态或控制操作 | +| `/instances/{id}` | PUT | 更新实例URL配置 | +| `/instances/{id}` | DELETE | 删除特定实例 | +| `/events` | GET | 使用SSE订阅实例事件通知 | +| `/info` | GET | 获取主控服务信息 | +| `/openapi.json` | GET | OpenAPI规范 | +| `/docs` | GET | Swagger UI文档界面 | + +### API认证 + +NodePass主控API现在支持API Key认证,可以防止未经授权的访问。系统会在首次启动时自动生成一个API Key。 + +#### API Key特点 + +1. **自动生成**:首次启动主控模式时自动创建 +2. **持久化存储**:API Key与其他实例配置一起保存在`nodepass.gob`文件中 +3. **重启后保留**:重启主控后API Key保持不变 +4. 
**选择性保护**:仅保护关键API端点,公共文档仍可访问 + +#### 受保护的端点 + +以下端点需要API Key认证: +- `/instances`(所有方法) +- `/instances/{id}`(所有方法:GET、PATCH、PUT、DELETE) +- `/events` +- `/info` + +以下端点可公开访问(无需API Key): +- `/openapi.json` +- `/docs` + +#### 如何使用API Key + +在API请求中包含API Key: + +```javascript +// 使用API Key进行实例管理请求 +async function getInstances() { + const response = await fetch(`${API_URL}/instances`, { + method: 'GET', + headers: { + 'X-API-Key': 'your-api-key-here' + } + }); + + return await response.json(); +} +``` + +#### 如何获取和重新生成API Key + +API Key可以在系统启动日志中找到,也可以通过以下方式重新生成: + +```javascript +// 重新生成API Key(需要知道当前的API Key) +async function regenerateApiKey() { + const response = await fetch(`${API_URL}/instances/${apiKeyID}`, { + method: 'PATCH', + headers: { + 'Content-Type': 'application/json', + 'X-API-Key': 'current-api-key' + }, + body: JSON.stringify({ action: 'restart' }) + }); + + const result = await response.json(); + return result.url; // 新的API Key +} +``` + +**注意**: API Key ID 固定为 `********`(八个星号)。在内部实现中,这是一个特殊的实例ID,用于存储和管理API Key。 + +### 使用SSE实时事件监控 + +NodePass现在支持服务器发送事件(SSE)功能,用于实时监控实例状态变化。这使前端应用能够即时接收实例创建、更新和删除的通知,无需轮询。 + +#### 使用SSE端点 + +SSE端点位于: +``` +GET /events +``` + +此端点建立持久连接,使用SSE协议格式实时传递事件。如果启用了API Key认证,需要在请求头中包含有效的API Key。 + +#### 事件类型 + +支持以下事件类型: + +1. `initial` - 连接建立时发送,包含所有实例的当前状态 +2. `create` - 创建新实例时发送 +3. `update` - 实例更新时发送(状态变更、启动/停止操作) +4. `delete` - 实例被删除时发送 +5. `shutdown` - 主控服务即将关闭时发送,通知前端应用关闭连接 +6. 
`log` - 实例产生新日志内容时发送,包含日志文本 + +#### JavaScript客户端实现 + +以下是JavaScript前端消费SSE端点的示例: + +```javascript +function connectToEventSource() { + const eventSource = new EventSource(`${API_URL}/events`, { + // 如果需要认证,原生EventSource不支持自定义请求头 + // 需要使用fetch API实现自定义SSE客户端 + }); + + // 如果使用API Key,需要使用自定义实现代替原生EventSource + // 下面是使用原生EventSource的示例 + eventSource.addEventListener('instance', (event) => { + const data = JSON.parse(event.data); + + switch (data.type) { + case 'initial': + console.log('初始实例状态:', data.instance); + updateInstanceUI(data.instance); + break; + case 'create': + console.log('实例已创建:', data.instance); + addInstanceToUI(data.instance); + break; + case 'update': + console.log('实例已更新:', data.instance); + updateInstanceUI(data.instance); + break; + case 'delete': + console.log('实例已删除:', data.instance); + removeInstanceFromUI(data.instance.id); + break; + case 'log': + console.log(`实例 ${data.instance.id} 日志:`, data.logs); + appendLogToInstanceUI(data.instance.id, data.logs); + break; + case 'shutdown': + console.log('主控服务即将关闭'); + // 关闭事件源并显示通知 + eventSource.close(); + showShutdownNotification(); + break; + } + }); + + eventSource.addEventListener('error', (error) => { + console.error('SSE连接错误:', error); + // 延迟后尝试重新连接 + setTimeout(() => { + eventSource.close(); + connectToEventSource(); + }, 5000); + }); + + return eventSource; +} + +// 使用API Key创建SSE连接的示例 +function connectToEventSourceWithApiKey(apiKey) { + // 原生EventSource不支持自定义请求头,需要使用fetch API + fetch(`${API_URL}/events`, { + method: 'GET', + headers: { + 'X-API-Key': apiKey, + 'Cache-Control': 'no-cache' + } + }).then(response => { + if (!response.ok) { + throw new Error(`HTTP错误: ${response.status}`); + } + + const reader = response.body.getReader(); + const decoder = new TextDecoder(); + let buffer = ''; + + function processStream() { + reader.read().then(({ value, done }) => { + if (done) { + console.log('连接已关闭'); + // 尝试重新连接 + setTimeout(() => connectToEventSourceWithApiKey(apiKey), 5000); + return; + } 
+ + buffer += decoder.decode(value, { stream: true }); + + const lines = buffer.split('\n\n'); + buffer = lines.pop() || ''; + + for (const line of lines) { + if (line.trim() === '') continue; + + const eventMatch = line.match(/^event: (.+)$/m); + const dataMatch = line.match(/^data: (.+)$/m); + + if (eventMatch && dataMatch) { + const data = JSON.parse(dataMatch[1]); + // 处理事件 - 见上面的switch代码 + } + } + + processStream(); + }).catch(error => { + console.error('读取错误:', error); + // 尝试重新连接 + setTimeout(() => connectToEventSourceWithApiKey(apiKey), 5000); + }); + } + + processStream(); + }).catch(error => { + console.error('连接错误:', error); + // 尝试重新连接 + setTimeout(() => connectToEventSourceWithApiKey(apiKey), 5000); + }); +} +``` + +#### 处理实例日志 + +新增的`log`事件类型允许实时接收和显示实例的日志输出。这对于监控和调试非常有用: + +```javascript +// 处理日志事件 +function appendLogToInstanceUI(instanceId, logText) { + // 找到或创建日志容器 + let logContainer = document.getElementById(`logs-${instanceId}`); + if (!logContainer) { + logContainer = document.createElement('div'); + logContainer.id = `logs-${instanceId}`; + document.getElementById('instance-container').appendChild(logContainer); + } + + // 创建新的日志条目 + const logEntry = document.createElement('div'); + logEntry.className = 'log-entry'; + + // 可以在这里解析ANSI颜色代码或格式化日志 + logEntry.textContent = logText; + + // 添加到容器 + logContainer.appendChild(logEntry); + + // 滚动到最新日志 + logContainer.scrollTop = logContainer.scrollHeight; +} +``` + +#### SSE相比轮询的优势 + +使用SSE监控实例状态比传统轮询提供多种优势: + +1. **减少延迟**:变更实时传递 +2. **减轻服务器负载**:消除不必要的轮询请求 +3. **带宽效率**:只在发生变更时发送数据 +4. **原生浏览器支持**:无需额外库的内置浏览器支持 +5. **自动重连**:浏览器在连接丢失时自动重连 + +#### SSE实现的最佳实践 + +在前端实现SSE时: + +1. **处理重连**:虽然浏览器会自动尝试重连,但应实现自定义逻辑以确保持久连接 +2. **高效处理事件**:保持事件处理快速,避免UI阻塞 +3. **实现回退机制**:在不支持SSE的环境中,实现轮询回退 +4. **处理错误**:正确处理连接错误和断开 +5. 
**日志管理**:为每个实例维护日志缓冲区,避免无限制增长 + +## 前端集成指南 + +在将NodePass与前端应用集成时,请考虑以下重要事项: + +### 实例持久化 + +NodePass主控模式现在支持使用gob序列化格式进行实例持久化。实例及其状态会保存到与可执行文件相同目录下的`nodepass.gob`文件中,并在主控重启时自动恢复。 + +主要持久化特性: +- 实例配置自动保存到磁盘 +- 实例状态(运行/停止)得到保留 +- 自启动策略在主控重启间保持不变 +- 流量统计数据在重启之间保持 +- 启用自启动策略的实例在主控重启时自动启动 +- 重启后无需手动重新注册 + +**注意:** 虽然实例配置现在已经持久化,前端应用仍应保留自己的实例配置记录作为备份策略。 + +### 实例生命周期管理 + +为了合理管理生命周期: + +1. **创建**:存储实例配置和URL + ```javascript + async function createNodePassInstance(config) { + const response = await fetch(`${API_URL}/instances`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + 'X-API-Key': apiKey // 如果启用了API Key + }, + body: JSON.stringify({ + url: `server://0.0.0.0:${config.port}/${config.target}?tls=${config.tls}` + }) + }); + + const data = await response.json(); + + // 根据类型为新实例配置自启动策略 + if (data.success) { + const shouldAutoRestart = config.type === 'server' || config.critical === true; + await setAutoStartPolicy(data.data.id, shouldAutoRestart); + } + + // 存储在前端持久化存储中 + saveInstanceConfig({ + id: data.data.id, + originalConfig: config, + url: data.data.url + }); + + return data; + } + ``` + +2. **状态监控**:监控实例状态变化 + + NodePass提供两种监控实例状态的方法: + + A. **使用SSE(推荐)**:通过持久连接接收实时事件 + ```javascript + function connectToEventSource() { + const eventSource = new EventSource(`${API_URL}/events`, { + // 如果需要认证,需要使用自定义实现 + }); + + // 或者使用带API Key的自定义实现 + // connectToEventSourceWithApiKey(apiKey); + + eventSource.addEventListener('instance', (event) => { + const data = JSON.parse(event.data); + // 处理不同类型的事件:initial, create, update, delete, log + // ...处理逻辑见前面的"使用SSE实时事件监控"部分 + }); + + // 错误处理和重连逻辑 + // ...详见前面的示例 + + return eventSource; + } + ``` + + B. 
**传统轮询(备选)**:在不支持SSE的环境中使用 + ```javascript + function startInstanceMonitoring(instanceId, interval = 5000) { + return setInterval(async () => { + try { + const response = await fetch(`${API_URL}/instances/${instanceId}`, { + headers: { + 'X-API-Key': apiKey // 如果启用了API Key + } + }); + const data = await response.json(); + + if (data.success) { + updateInstanceStatus(instanceId, data.data.status); + updateInstanceMetrics(instanceId, { + connections: data.data.connections, + pool_size: data.data.pool_size, + uptime: data.data.uptime + }); + } + } catch (error) { + markInstanceUnreachable(instanceId); + } + }, interval); + } + ``` + + **选择建议:** 优先使用SSE方式,它提供更高效的实时监控,减轻服务器负担。仅在客户端不支持SSE或需要特定环境兼容性时使用轮询方式。 + +3. **实例别名管理**:为实例设置易读的名称 + ```javascript + // 批量设置实例别名 + async function setInstanceAliases(instances) { + for (const instance of instances) { + // 根据实例类型和用途生成有意义的别名 + const alias = `${instance.type}-${instance.region || 'default'}-${instance.port || 'auto'}`; + await updateInstanceAlias(instance.id, alias); + } + } + + // 根据别名查找实例 + async function findInstanceByAlias(targetAlias) { + const response = await fetch(`${API_URL}/instances`, { + headers: { 'X-API-Key': apiKey } + }); + const data = await response.json(); + + if (data.success) { + return data.data.find(instance => instance.alias === targetAlias); + } + return null; + } + ``` + +4. 
**控制操作**:启动、停止、重启实例 + ```javascript + async function controlInstance(instanceId, action) { + // action可以是: start, stop, restart + const response = await fetch(`${API_URL}/instances/${instanceId}`, { + method: 'PATCH', // 注意:API已更新为使用PATCH方法而非PUT + headers: { + 'Content-Type': 'application/json', + 'X-API-Key': apiKey // 如果启用了API Key + }, + body: JSON.stringify({ action }) + }); + + const data = await response.json(); + return data.success; + } + + // 更新实例别名 + async function updateInstanceAlias(instanceId, alias) { + const response = await fetch(`${API_URL}/instances/${instanceId}`, { + method: 'PATCH', + headers: { + 'Content-Type': 'application/json', + 'X-API-Key': apiKey // 如果启用了API Key + }, + body: JSON.stringify({ alias }) + }); + + const data = await response.json(); + return data.success; + } + + // 更新实例URL配置 + async function updateInstanceURL(instanceId, newURL) { + const response = await fetch(`${API_URL}/instances/${instanceId}`, { + method: 'PUT', + headers: { + 'Content-Type': 'application/json', + 'X-API-Key': apiKey // 如果启用了API Key + }, + body: JSON.stringify({ url: newURL }) + }); + + const data = await response.json(); + return data.success; + } + ``` + +5. 
**自启动策略管理**:配置自动启动行为 + ```javascript + async function setAutoStartPolicy(instanceId, enableAutoStart) { + const response = await fetch(`${API_URL}/instances/${instanceId}`, { + method: 'PATCH', + headers: { + 'Content-Type': 'application/json', + 'X-API-Key': apiKey // 如果启用了API Key + }, + body: JSON.stringify({ restart: enableAutoStart }) + }); + + const data = await response.json(); + return data.success; + } + + // 组合操作:控制实例并更新自启动策略 + async function controlInstanceWithAutoStart(instanceId, action, enableAutoStart) { + const response = await fetch(`${API_URL}/instances/${instanceId}`, { + method: 'PATCH', + headers: { + 'Content-Type': 'application/json', + 'X-API-Key': apiKey // 如果启用了API Key + }, + body: JSON.stringify({ + action: action, + restart: enableAutoStart + }) + }); + + const data = await response.json(); + return data.success; + } + + // 组合操作:同时更新别名、控制实例和自启动策略 + async function updateInstanceComplete(instanceId, alias, action, enableAutoStart) { + const response = await fetch(`${API_URL}/instances/${instanceId}`, { + method: 'PATCH', + headers: { + 'Content-Type': 'application/json', + 'X-API-Key': apiKey // 如果启用了API Key + }, + body: JSON.stringify({ + alias: alias, + action: action, + restart: enableAutoStart + }) + }); + + const data = await response.json(); + return data.success; + } + ``` + +#### 自启动策略完整使用示例 + +以下是一个全面的示例,展示了如何在实际场景中实现自启动策略管理: + +```javascript +// 场景:建立带有自启动策略的负载均衡服务器集群 +async function setupServerCluster(serverConfigs) { + const clusterInstances = []; + + for (const config of serverConfigs) { + try { + // 创建服务器实例 + const instance = await createNodePassInstance({ + type: 'server', + port: config.port, + target: config.target, + critical: config.isPrimary, // 主服务器为关键实例 + tls: config.enableTLS + }); + + if (instance.success) { + // 设置有意义的实例别名 + const alias = `${config.role}-server-${config.port}`; + await updateInstanceAlias(instance.data.id, alias); + + // 根据服务器角色配置自启动策略 + const autoStartPolicy = config.isPrimary || config.role === 
'essential'; + await setAutoStartPolicy(instance.data.id, autoStartPolicy); + + // 启动实例 + await controlInstance(instance.data.id, 'start'); + + clusterInstances.push({ + id: instance.data.id, + alias: alias, + role: config.role, + autoStartEnabled: autoStartPolicy + }); + + console.log(`服务器 ${alias} 已创建,自启动策略: ${autoStartPolicy}`); + } + } catch (error) { + console.error(`创建服务器 ${config.role} 失败:`, error); + } + } + + return clusterInstances; +} + +// 监控集群健康状态并动态调整自启动策略 +async function monitorClusterHealth(clusterInstances) { + const healthyInstances = []; + + for (const cluster of clusterInstances) { + const instance = await fetch(`${API_URL}/instances/${cluster.id}`, { + headers: { 'X-API-Key': apiKey } + }); + const data = await instance.json(); + + if (data.success && data.data.status === 'running') { + healthyInstances.push(cluster); + } else { + // 如果关键实例宕机,为备份实例启用自启动策略 + if (cluster.role === 'primary') { + await enableBackupInstanceAutoStart(clusterInstances); + } + } + } + + return healthyInstances; +} + +async function enableBackupInstanceAutoStart(clusterInstances) { + const backupInstances = clusterInstances.filter(c => c.role === 'backup'); + for (const backup of backupInstances) { + await setAutoStartPolicy(backup.id, true); + console.log(`已为备份实例启用自启动策略: ${backup.id}`); + } +} +``` + +### 流量统计 + +主控API提供流量统计数据,但需要注意以下重要事项: + +1. **启用调试模式**:流量统计功能仅在启用调试模式时可用。 + + ```bash + # 启用调试模式的主控 + nodepass master://0.0.0.0:10101?log=debug + ``` + + 如果未启用调试模式,API将不会收集或返回流量统计数据。 + +2. 
**基本流量指标**:NodePass周期性地提供TCP和UDP流量在入站和出站方向上的累计值,前端应用需要存储和处理这些值以获得有意义的统计信息。 + ```javascript + function processTrafficStats(instanceId, currentStats) { + // 存储当前时间戳 + const timestamp = Date.now(); + + // 如果我们有该实例的前一个统计数据,计算差值 + if (previousStats[instanceId]) { + const timeDiff = timestamp - previousStats[instanceId].timestamp; + const tcpInDiff = currentStats.tcp_in - previousStats[instanceId].tcp_in; + const tcpOutDiff = currentStats.tcp_out - previousStats[instanceId].tcp_out; + const udpInDiff = currentStats.udp_in - previousStats[instanceId].udp_in; + const udpOutDiff = currentStats.udp_out - previousStats[instanceId].udp_out; + + // 存储历史数据用于图表展示 + storeTrafficHistory(instanceId, { + timestamp, + tcp_in_rate: tcpInDiff / timeDiff * 1000, // 每秒字节数 + tcp_out_rate: tcpOutDiff / timeDiff * 1000, + udp_in_rate: udpInDiff / timeDiff * 1000, + udp_out_rate: udpOutDiff / timeDiff * 1000 + }); + } + + // 更新前一个统计数据,用于下次计算 + previousStats[instanceId] = { + timestamp, + tcp_in: currentStats.tcp_in, + tcp_out: currentStats.tcp_out, + udp_in: currentStats.udp_in, + udp_out: currentStats.udp_out + }; + } + ``` + +3. 
**数据持久化**:由于API只提供累计值,前端必须实现适当的存储和计算逻辑 + ```javascript + // 前端流量历史存储结构示例 + const trafficHistory = {}; + + function storeTrafficHistory(instanceId, metrics) { + if (!trafficHistory[instanceId]) { + trafficHistory[instanceId] = { + timestamps: [], + tcp_in_rates: [], + tcp_out_rates: [], + udp_in_rates: [], + udp_out_rates: [] + }; + } + + trafficHistory[instanceId].timestamps.push(metrics.timestamp); + trafficHistory[instanceId].tcp_in_rates.push(metrics.tcp_in_rate); + trafficHistory[instanceId].tcp_out_rates.push(metrics.tcp_out_rate); + trafficHistory[instanceId].udp_in_rates.push(metrics.udp_in_rate); + trafficHistory[instanceId].udp_out_rates.push(metrics.udp_out_rate); + + // 保持历史数据量可管理 + const MAX_HISTORY = 1000; + if (trafficHistory[instanceId].timestamps.length > MAX_HISTORY) { + trafficHistory[instanceId].timestamps.shift(); + trafficHistory[instanceId].tcp_in_rates.shift(); + trafficHistory[instanceId].tcp_out_rates.shift(); + trafficHistory[instanceId].udp_in_rates.shift(); + trafficHistory[instanceId].udp_out_rates.shift(); + } + } + ``` + +### 实例ID持久化 + +由于NodePass现在使用gob格式持久化存储实例状态,实例ID在主控重启后**不再发生变化**。这意味着: + +1. 前端应用可以安全地使用实例ID作为唯一标识符 +2. 实例配置、状态和统计数据在重启后自动恢复 +3. 不再需要实现实例ID变化的处理逻辑 + +这极大简化了前端集成,消除了以前处理实例重新创建和ID映射的复杂性。 + +### 自启动策略管理 + +NodePass现在支持为实例配置自启动策略,实现自动化实例管理并提高可靠性。自启动策略功能具备以下特性: + +1. **自动实例恢复**:启用自启动策略的实例在主控服务重启时会自动启动 +2. **选择性自启动**:根据实例的重要性或角色配置哪些实例应该自动启动 +3. **持久化策略存储**:自启动策略在主控重启间保存和恢复 +4. **细粒度控制**:每个实例都可以有自己的自启动策略设置 + +#### 自启动策略工作原理 + +- **策略分配**:每个实例都有一个`restart`布尔字段,决定其自启动行为 +- **主控启动**:主控启动时,自动启动所有`restart: true`的实例 +- **策略持久化**:自启动策略与其他实例数据一起保存在`nodepass.gob`文件中 +- **运行时管理**:自启动策略可以在实例运行时修改 + +#### 自启动策略最佳实践 + +1. **为服务器实例启用**:服务器实例通常应启用自启动策略以确保高可用性 +2. **选择性客户端自启动**:仅为关键客户端连接启用自启动策略 +3. **测试场景**:为临时或测试实例禁用自启动策略 +4. 
**负载均衡**:使用自启动策略维持最小实例数量以分配负载 + +```javascript +// 示例:根据实例角色配置自启动策略 +async function configureAutoStartPolicies(instances) { + for (const instance of instances) { + // 为服务器和关键客户端启用自启动 + const shouldAutoStart = instance.type === 'server' || + instance.tags?.includes('critical'); + + await setAutoStartPolicy(instance.id, shouldAutoStart); + } +} +``` + +## 实例数据结构 + +API响应中的实例对象包含以下字段: + +```json +{ + "id": "a1b2c3d4", // 实例唯一标识符 + "alias": "web-server-01", // 实例别名(可选,用于显示友好名称) + "type": "server", // 实例类型:server 或 client + "status": "running", // 实例状态:running、stopped 或 error + "url": "server://...", // 实例配置URL + "restart": true, // 自启动策略 + "tcprx": 1024, // TCP接收字节数 + "tcptx": 2048, // TCP发送字节数 + "udprx": 512, // UDP接收字节数 + "udptx": 256 // UDP发送字节数 +} +``` + +**注意:** +- `alias` 字段为可选,如果未设置则为空字符串 +- 流量统计字段(tcprx、tcptx、udprx、udptx)仅在启用调试模式时有效 +- `restart` 字段控制实例的自启动行为 + +## 系统信息端点 + +`/info` 端点提供了关于NodePass主控服务的系统信息。这个端点对于监控、故障排除和系统状态验证非常有用。 + +### 请求 + +``` +GET /info +``` + +需要 API Key 认证:是 + +### 响应 + +响应包含以下系统信息字段: + +```json +{ + "os": "linux", // 操作系统类型 + "arch": "amd64", // 系统架构 + "ver": "1.2.0", // NodePass版本 + "name": "example.com", // 隧道主机名 + "uptime": 11525, // API运行时间(秒) + "log": "info", // 日志级别 + "tls": "1", // TLS启用状态 + "crt": "/path/to/cert", // 证书路径 + "key": "/path/to/key" // 密钥路径 +} +``` + +### 使用示例 + +```javascript +// 获取系统信息 +async function getSystemInfo() { + const response = await fetch(`${API_URL}/info`, { + method: 'GET', + headers: { + 'X-API-Key': apiKey + } + }); + + return await response.json(); +} + +// 显示服务运行时间 +function displayServiceUptime() { + getSystemInfo().then(info => { + console.log(`服务已运行: ${info.uptime} 秒`); + // 也可以格式化为更友好的显示 + const hours = Math.floor(info.uptime / 3600); + const minutes = Math.floor((info.uptime % 3600) / 60); + const seconds = info.uptime % 60; + console.log(`服务已运行: ${hours}小时${minutes}分${seconds}秒`); + }); +} +``` + +### 监控最佳实践 + +- **定期检查**:定期轮询此端点以确保服务正常运行 +- **版本验证**:在部署更新后检查版本号 +- **运行时间监控**:监控运行时间以检测意外重启 
+- **日志级别验证**:确认当前日志级别符合预期 + +## API端点文档 + +有关详细的API文档(包括请求和响应示例),请使用`/docs`端点提供的内置Swagger UI文档。这个交互式文档提供了以下全面信息: + +- 可用的端点 +- 必需的参数 +- 响应格式 +- 请求和响应示例 +- 架构定义 + +### 访问Swagger UI + +要访问Swagger UI文档: + +``` +http(s)://[API地址]/docs +``` + +例如: +``` +http://localhost:9090/api/docs +``` + +Swagger UI提供了一种方便的方式,直接在浏览器中探索和测试API。您可以针对运行中的NodePass主控实例执行API调用,并查看实际响应。 + +## 完整的API参考 + +### 实例管理端点详细说明 + +#### GET /instances +- **描述**:获取所有实例列表 +- **认证**:需要API Key +- **响应**:实例数组 +- **示例**: +```javascript +const instances = await fetch(`${API_URL}/instances`, { + headers: { 'X-API-Key': apiKey } +}); +``` + +#### POST /instances +- **描述**:创建新实例 +- **认证**:需要API Key +- **请求体**:`{ "url": "client://或server://格式的URL" }` +- **响应**:新创建的实例对象 +- **示例**: +```javascript +const newInstance = await fetch(`${API_URL}/instances`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + 'X-API-Key': apiKey + }, + body: JSON.stringify({ url: 'server://0.0.0.0:8080/localhost:3000' }) +}); +``` + +#### GET /instances/{id} +- **描述**:获取特定实例详情 +- **认证**:需要API Key +- **响应**:实例对象 +- **示例**: +```javascript +const instance = await fetch(`${API_URL}/instances/abc123`, { + headers: { 'X-API-Key': apiKey } +}); +``` + +#### PATCH /instances/{id} +- **描述**:更新实例状态、别名或执行控制操作 +- **认证**:需要API Key +- **请求体**:`{ "alias": "新别名", "action": "start|stop|restart|reset", "restart": true|false }` +- **特点**:不中断正在运行的实例,仅更新指定字段。`action: "reset"` 可将该实例的流量统计(tcprx、tcptx、udprx、udptx)清零。 +- **示例**: +```javascript +// 更新别名和自启动策略 +await fetch(`${API_URL}/instances/abc123`, { + method: 'PATCH', + headers: { + 'Content-Type': 'application/json', + 'X-API-Key': apiKey + }, + body: JSON.stringify({ + alias: 'Web服务器', + restart: true + }) +}); + +// 控制实例操作 +await fetch(`${API_URL}/instances/abc123`, { + method: 'PATCH', + headers: { + 'Content-Type': 'application/json', + 'X-API-Key': apiKey + }, + body: JSON.stringify({ action: 'restart' }) +}); + +// 清零流量统计 +await fetch(`${API_URL}/instances/abc123`, { + method: 
'PATCH', + headers: { + 'Content-Type': 'application/json', + 'X-API-Key': apiKey + }, + body: JSON.stringify({ action: 'reset' }) +}); +``` + +#### PUT /instances/{id} +- **描述**:完全更新实例URL配置 +- **认证**:需要API Key +- **请求体**:`{ "url": "新的client://或server://格式的URL" }` +- **特点**:会重启实例。 +- **限制**:API Key实例(ID为`********`)不支持此操作 +- **示例**: +```javascript +// 更新实例URL +await fetch(`${API_URL}/instances/abc123`, { + method: 'PUT', + headers: { + 'Content-Type': 'application/json', + 'X-API-Key': apiKey + }, + body: JSON.stringify({ + url: 'server://0.0.0.0:9090/localhost:8080?tls=1' + }) +}); +``` + +#### DELETE /instances/{id} +- **描述**:删除实例 +- **认证**:需要API Key +- **响应**:204 No Content +- **限制**:API Key实例(ID为`********`)不可删除 +- **示例**: +```javascript +await fetch(`${API_URL}/instances/abc123`, { + method: 'DELETE', + headers: { 'X-API-Key': apiKey } +}); +``` + +### 其他端点 + +#### GET /events +- **描述**:建立SSE连接以接收实时事件 +- **认证**:需要API Key +- **响应**:Server-Sent Events流 +- **事件类型**:`initial`, `create`, `update`, `delete`, `shutdown`, `log` + +#### GET /info +- **描述**:获取主控服务信息 +- **认证**:需要API Key +- **响应**:包含系统信息、版本、运行时间等 + +#### GET /openapi.json +- **描述**:获取OpenAPI 3.1.1规范 +- **认证**:无需认证 +- **响应**:JSON格式的API规范 + +#### GET /docs +- **描述**:Swagger UI文档界面 +- **认证**:无需认证 +- **响应**:HTML格式的交互式文档 + +### 实例URL格式规范 + +实例URL必须遵循以下格式: + +#### 服务器模式 (Server Mode) +``` +server://[监听地址]:[端口]/[目标地址]:[端口]?[参数] +``` + +示例: +- `server://0.0.0.0:8080/localhost:3000` - 在8080端口监听,转发到本地3000端口 +- `server://0.0.0.0:9090/localhost:8080?tls=1` - 启用TLS的服务器 + +#### 客户端模式 (Client Mode) +``` +client://[服务器地址]:[端口]/[本地地址]:[端口]?[参数] 
+``` + +示例: +- `client://example.com:8080/localhost:3000` - 连接到远程服务器,本地监听3000端口 +- `client://vpn.example.com:443/localhost:22?tls=1` - 通过TLS连接到VPN服务器 + +#### 支持的参数 + +| 参数 | 描述 | 值 | 默认值 | +|------|------|----|----| +| `tls` | TLS加密级别 | `0`(无), `1`(自签名), `2`(证书) | `0` | +| `log` | 日志级别 | `trace`, `debug`, `info`, `warn`, `error` | `info` | +| `crt` | 证书路径 | 文件路径 | 无 | +| `key` | 私钥路径 | 文件路径 | 无 | diff --git a/nodepass/docs/zh/configuration.md b/nodepass/docs/zh/configuration.md new file mode 100644 index 0000000000..47f2eea9d7 --- /dev/null +++ b/nodepass/docs/zh/configuration.md @@ -0,0 +1,226 @@ +# 配置选项 + +NodePass采用极简方法进行配置,所有设置都通过命令行参数和环境变量指定。本指南说明所有可用的配置选项,并为各种部署场景提供建议。 + +## 日志级别 + +NodePass提供五种日志详细级别,控制显示的信息量: + +- `debug`:详细调试信息 - 显示所有操作和连接 +- `info`:一般操作信息(默认) - 显示启动、关闭和关键事件 +- `warn`:警告条件 - 仅显示不影响核心功能的潜在问题 +- `error`:错误条件 - 仅显示影响功能的问题 +- `event`:事件记录 - 显示重要的操作事件和流量统计 + +您可以在命令URL中设置日志级别: + +```bash +nodepass server://0.0.0.0:10101/0.0.0.0:8080?log=debug +``` + +## TLS加密模式 + +对于服务器和主控模式,NodePass为数据通道提供三种TLS安全级别: + +- **模式0**:无TLS加密(明文TCP/UDP) + - 最快性能,无开销 + - 数据通道无安全保护(仅在受信任网络中使用) + +- **模式1**:自签名证书(自动生成) + - 设置简单的良好安全性 + - 证书自动生成且不验证 + - 防止被动窃听 + +- **模式2**:自定义证书(需要`crt`和`key`参数) + - 具有证书验证的最高安全性 + - 需要提供证书和密钥文件 + - 适用于生产环境 + +TLS模式1示例(自签名): +```bash +nodepass server://0.0.0.0:10101/0.0.0.0:8080?tls=1 +``` + +TLS模式2示例(自定义证书): +```bash +nodepass "server://0.0.0.0:10101/0.0.0.0:8080?tls=2&crt=/path/to/cert.pem&key=/path/to/key.pem" +``` + +## 连接池容量参数 + +连接池容量可以通过URL查询参数进行配置: + +- `min`: 最小连接池容量(默认: 64) +- `max`: 最大连接池容量(默认: 8192) + +示例: +```bash +# 设置最小连接池为32,最大为4096 +nodepass "client://server.example.com:10101/127.0.0.1:8080?min=32&max=4096" +``` + +## 环境变量 + +可以使用环境变量微调NodePass行为。以下是所有可用变量的完整列表,包括其描述、默认值以及不同场景的推荐设置。 + +| 变量 | 描述 | 默认值 | 示例 | +|----------|-------------|---------|---------| +| `NP_SEMAPHORE_LIMIT` | 最大并发连接数 | 1024 | `export NP_SEMAPHORE_LIMIT=2048` | +| `NP_UDP_DATA_BUF_SIZE` | UDP数据包缓冲区大小 | 8192 | `export NP_UDP_DATA_BUF_SIZE=16384` | +| 
`NP_UDP_READ_TIMEOUT` | UDP读取操作超时 | 20s | `export NP_UDP_READ_TIMEOUT=30s` | +| `NP_UDP_DIAL_TIMEOUT` | UDP连接建立超时 | 20s | `export NP_UDP_DIAL_TIMEOUT=30s` | +| `NP_TCP_READ_TIMEOUT` | TCP读取操作超时 | 20s | `export NP_TCP_READ_TIMEOUT=30s` | +| `NP_TCP_DIAL_TIMEOUT` | TCP连接建立超时 | 20s | `export NP_TCP_DIAL_TIMEOUT=30s` | +| `NP_MIN_POOL_INTERVAL` | 连接创建之间的最小间隔 | 1s | `export NP_MIN_POOL_INTERVAL=500ms` | +| `NP_MAX_POOL_INTERVAL` | 连接创建之间的最大间隔 | 5s | `export NP_MAX_POOL_INTERVAL=3s` | +| `NP_REPORT_INTERVAL` | 健康检查报告间隔 | 5s | `export NP_REPORT_INTERVAL=10s` | +| `NP_SERVICE_COOLDOWN` | 重启尝试前的冷却期 | 3s | `export NP_SERVICE_COOLDOWN=5s` | +| `NP_SHUTDOWN_TIMEOUT` | 优雅关闭超时 | 5s | `export NP_SHUTDOWN_TIMEOUT=10s` | +| `NP_RELOAD_INTERVAL` | 证书/连接池重载间隔 | 1h | `export NP_RELOAD_INTERVAL=30m` | + +### 连接池调优 + +连接池参数是性能调优中的重要设置: + +#### 池容量设置 + +- `min` (URL参数):确保最小可用连接数 + - 太低:流量高峰期延迟增加,因为必须建立新连接 + - 太高:维护空闲连接浪费资源 + - 推荐起点:平均并发连接的25-50% + +- `max` (URL参数):防止过度资源消耗,同时处理峰值负载 + - 太低:流量高峰期连接失败 + - 太高:潜在资源耗尽影响系统稳定性 + - 推荐起点:峰值并发连接的150-200% + +#### 池间隔设置 + +- `NP_MIN_POOL_INTERVAL`:控制连接创建尝试之间的最小时间 + - 太低:可能以连接尝试压垮网络 + - 推荐范围:根据网络延迟,500ms-2s + +- `NP_MAX_POOL_INTERVAL`:控制连接创建尝试之间的最大时间 + - 太高:流量高峰期可能导致池耗尽 + - 推荐范围:根据预期流量模式,3s-10s + +#### 连接管理 + +- `NP_SEMAPHORE_LIMIT`:控制最大并发隧道操作数 + - 太低:流量高峰期拒绝连接 + - 太高:太多并发goroutine可能导致内存压力 + - 推荐范围:大多数应用1000-5000,高吞吐量场景更高 + +### UDP设置 + +对于严重依赖UDP流量的应用: + +- `NP_UDP_DATA_BUF_SIZE`:UDP数据包缓冲区大小 + - 对于发送大UDP数据包的应用增加此值 + - 默认值(8192)适用于大多数情况 + - 考虑为媒体流或游戏服务器增加到16384或更高 + +- `NP_UDP_READ_TIMEOUT`:UDP读取操作超时 + - 对于高延迟网络或响应时间慢的应用增加此值 + - 对于需要快速故障转移的低延迟应用减少此值 + +- `NP_UDP_DIAL_TIMEOUT`:UDP拨号超时 + - 对于高延迟网络增加此值 + - 对于需要快速连接的应用减少此值 + +### TCP设置 + +对于TCP连接的优化: + +- `NP_TCP_READ_TIMEOUT`:TCP读取操作超时 + - 对于高延迟网络或响应慢的服务器增加此值 + - 对于需要快速检测断开连接的应用降低此值 + - 影响数据传输过程中的等待时间 + +- `NP_TCP_DIAL_TIMEOUT`:TCP连接建立超时 + - 对于网络条件不稳定的环境增加此值 + - 对于需要快速判断连接成功与否的应用减少此值 + - 影响初始连接建立阶段 + +### 服务管理设置 + +- `NP_REPORT_INTERVAL`:控制健康状态报告频率 + - 较低值提供更频繁的更新但增加日志量 + - 较高值减少日志输出但提供较少的即时可见性 + +- 
`NP_RELOAD_INTERVAL`:控制检查TLS证书变更的频率 + - 较低值更快检测证书变更但增加文件系统操作 + - 较高值减少开销但延迟检测证书更新 + +- `NP_SERVICE_COOLDOWN`:尝试服务重启前的等待时间 + - 较低值更快尝试恢复但可能在持续性问题情况下导致抖动 + - 较高值提供更多稳定性但从瞬态问题中恢复较慢 + +- `NP_SHUTDOWN_TIMEOUT`:关闭期间等待连接关闭的最长时间 + - 较低值确保更快关闭但可能中断活动连接 + - 较高值允许连接有更多时间完成但延迟关闭 + +## 推荐配置 + +以下是常见场景的推荐环境变量配置: + +### 高吞吐量配置 + +对于需要最大吞吐量的应用(如媒体流、文件传输): + +URL参数: +```bash +nodepass "client://server.example.com:10101/127.0.0.1:8080?min=128&max=8192" +``` + +环境变量: +```bash +export NP_MIN_POOL_INTERVAL=500ms +export NP_MAX_POOL_INTERVAL=3s +export NP_SEMAPHORE_LIMIT=8192 +export NP_UDP_DATA_BUF_SIZE=32768 +export NP_REPORT_INTERVAL=10s +``` + +### 低延迟配置 + +对于需要最小延迟的应用(如游戏、金融交易): + +URL参数: +```bash +nodepass "client://server.example.com:10101/127.0.0.1:8080?min=256&max=4096" +``` + +环境变量: +```bash +export NP_MIN_POOL_INTERVAL=100ms +export NP_MAX_POOL_INTERVAL=1s +export NP_SEMAPHORE_LIMIT=4096 +export NP_UDP_READ_TIMEOUT=5s +export NP_REPORT_INTERVAL=1s +``` + +### 资源受限配置 + +对于在资源有限系统上的部署(如IoT设备、小型VPS): + +URL参数: +```bash +nodepass "client://server.example.com:10101/127.0.0.1:8080?min=16&max=512" +``` + +环境变量: +```bash +export NP_MIN_POOL_INTERVAL=2s +export NP_MAX_POOL_INTERVAL=10s +export NP_SEMAPHORE_LIMIT=512 +export NP_REPORT_INTERVAL=30s +export NP_SHUTDOWN_TIMEOUT=3s +``` + +## 下一步 + +- 查看[使用说明](/docs/zh/usage.md)了解基本操作命令 +- 探索[使用示例](/docs/zh/examples.md)了解部署模式 +- 了解[NodePass工作原理](/docs/zh/how-it-works.md)以优化配置 +- 如果遇到问题,请查看[故障排除指南](/docs/zh/troubleshooting.md) \ No newline at end of file diff --git a/nodepass/docs/zh/examples.md b/nodepass/docs/zh/examples.md new file mode 100644 index 0000000000..b096665170 --- /dev/null +++ b/nodepass/docs/zh/examples.md @@ -0,0 +1,261 @@ +# 使用示例 + +本页提供了NodePass在各种部署场景中的实际示例。这些示例涵盖了常见用例,可以根据您的具体需求进行调整。 + +## 基本服务器设置与TLS选项 + +### 示例1:无TLS加密 + +当速度比安全性更重要时(例如,在受信任网络中): + +```bash +nodepass "server://0.0.0.0:10101/127.0.0.1:8080?log=debug&tls=0" +``` + +这会启动一个NodePass服务器,它: +- 在所有接口的10101端口上监听隧道连接 +- 将流量转发到localhost:8080 +- 使用debug日志记录详细信息 
+- 不对数据通道使用加密(最快性能) + +### 示例2:自签名证书 + +为了平衡安全性和易于设置(推荐大多数情况): + +```bash +nodepass "server://0.0.0.0:10101/127.0.0.1:8080?log=debug&tls=1" +``` + +此配置: +- 自动生成自签名证书 +- 提供加密而无需证书管理 +- 保护数据流量免受被动窃听 +- 适用于内部或测试环境 + +### 示例3:自定义域名证书 + +对于需要验证证书的生产环境: + +```bash +nodepass "server://0.0.0.0:10101/127.0.0.1:8080?log=debug&tls=2&crt=/path/to/cert.pem&key=/path/to/key.pem" +``` + +这一设置: +- 使用您提供的TLS证书和私钥 +- 提供具有证书验证的最高安全级别 +- 适合生产环境和面向公众的服务 +- 允许客户端验证服务器的身份 + +## 连接到NodePass服务器 + +### 示例4:基本客户端连接 + +使用默认设置连接到NodePass服务器: + +```bash +nodepass client://server.example.com:10101/127.0.0.1:8080 +``` + +此客户端: +- 连接到server.example.com:10101的NodePass服务器 +- 将接收到的流量转发到localhost:8080 +- 自动采用服务器的TLS安全策略 +- 使用默认的info日志级别 + +### 示例5:带调试日志的客户端 + +用于故障排除连接问题: + +```bash +nodepass client://server.example.com:10101/127.0.0.1:8080?log=debug +``` + +这启用了详细输出,有助于识别: +- 连接建立问题 +- 信号处理 +- 数据传输详情 +- 错误情况 + +## 通过防火墙访问数据库 + +### 示例6:数据库隧道 + +启用对防火墙后的数据库服务器的安全访问: + +```bash +# 服务器端(位于安全网络外部)使用TLS加密 +nodepass server://:10101/127.0.0.1:5432?tls=1 + +# 客户端(位于防火墙内部) +nodepass client://server.example.com:10101/127.0.0.1:5432 +``` + +此配置: +- 创建到PostgreSQL数据库(端口5432)的加密隧道 +- 允许安全访问数据库而不直接将其暴露于互联网 +- 使用自签名证书加密所有数据库流量 +- 使远程数据库在客户端上显示为本地服务 + +## 安全的微服务通信 + +### 示例7:服务间通信 + +启用微服务之间的安全通信: + +```bash +# 服务A(消费API)使用自定义证书 +nodepass "server://0.0.0.0:10101/127.0.0.1:8081?log=warn&tls=2&crt=/path/to/service-a.crt&key=/path/to/service-a.key" + +# 服务B(提供API) +nodepass client://service-a:10101/127.0.0.1:8082 +``` + +此设置: +- 在两个微服务之间创建安全通道 +- 使用自定义证书进行服务身份验证 +- 将日志限制为仅警告和错误 +- 使服务A的API在服务B上显示为本地服务 + +## 物联网设备管理 + +### 示例8:物联网网关 + +创建物联网设备的中央访问点: + +```bash +# 中央管理服务器 +nodepass "server://0.0.0.0:10101/127.0.0.1:8888?log=info&tls=1" + +# 物联网设备 +nodepass client://mgmt.example.com:10101/127.0.0.1:80 +``` + +此配置: +- 使分布式物联网设备能够安全连接到中央服务器 +- 使用自签名证书提供足够的安全性 +- 允许嵌入式设备安全地暴露其本地Web界面 +- 通过单一端点集中设备管理 + +## 多环境开发 + +### 示例9:开发环境访问 + +通过隧道访问不同的开发环境: + +```bash +# 生产API访问隧道 +nodepass 
client://tunnel.example.com:10101/127.0.0.1:3443 + +# 开发环境 +nodepass server://tunnel.example.com:10101/127.0.0.1:3000 + +# 测试环境 +nodepass "server://tunnel.example.com:10101/127.0.0.1:3001?log=warn&tls=1" +``` + +此设置: +- 创建对多个环境(生产、开发、测试)的安全访问 +- 根据环境敏感性使用不同级别的日志记录 +- 使开发人员能够访问环境而无需直接网络暴露 +- 将远程服务映射到不同的本地端口,便于识别 + +## 容器部署 + +### 示例10:容器化NodePass + +在Docker环境中部署NodePass: + +```bash +# 为容器创建网络 +docker network create nodepass-net + +# 部署使用自签名证书的NodePass服务器 +docker run -d --name nodepass-server \ + --network nodepass-net \ + -p 10101:10101 \ + ghcr.io/yosebyte/nodepass "server://0.0.0.0:10101/web-service:80?log=info&tls=1" + +# 部署Web服务作为目标 +docker run -d --name web-service \ + --network nodepass-net \ + nginx:alpine + +# 部署NodePass客户端 +docker run -d --name nodepass-client \ + -p 8080:8080 \ + ghcr.io/yosebyte/nodepass client://nodepass-server:10101/127.0.0.1:8080?log=info + +# 通过http://localhost:8080访问Web服务 +``` + +此配置: +- 在服务之间创建容器化隧道 +- 使用Docker网络连接容器 +- 仅向主机公开必要端口 +- 提供对内部Web服务的安全访问 + +## 主控API管理 + +### 示例11:集中管理 + +为多个NodePass实例设置中央控制器: + +```bash +# 使用自签名证书启动主控API服务 +nodepass "master://0.0.0.0:9090?log=info&tls=1" +``` + +然后您可以通过API调用管理实例: + +```bash +# 创建服务器实例 +curl -X POST http://localhost:9090/api/v1/instances \ + -H "Content-Type: application/json" \ + -d '{"url":"server://0.0.0.0:10101/0.0.0.0:8080?tls=1"}' + +# 创建客户端实例 +curl -X POST http://localhost:9090/api/v1/instances \ + -H "Content-Type: application/json" \ + -d '{"url":"client://localhost:10101/127.0.0.1:8081"}' + +# 列出所有运行实例 +curl http://localhost:9090/api/v1/instances + +# 控制实例(用实际实例ID替换{id}) +curl -X PUT http://localhost:9090/api/v1/instances/{id} \ + -H "Content-Type: application/json" \ + -d '{"action":"restart"}' +``` + +此设置: +- 为所有NodePass实例提供中央管理界面 +- 允许动态创建和控制隧道 +- 提供用于自动化和集成的RESTful API +- 包含内置的Swagger UI,位于http://localhost:9090/api/v1/docs + +### 示例12:自定义API前缀 + +为主控模式使用自定义API前缀: + +```bash +# 使用自定义API前缀启动 +nodepass "master://0.0.0.0:9090/admin?log=info&tls=1" + +# 使用自定义前缀创建实例 +curl -X POST 
http://localhost:9090/admin/v1/instances \ + -H "Content-Type: application/json" \ + -d '{"url":"server://0.0.0.0:10101/0.0.0.0:8080?tls=1"}' +``` + +这允许: +- 与现有API网关集成 +- 用于安全或组织目的的自定义URL路径 +- 在http://localhost:9090/admin/v1/docs访问Swagger UI + +## 下一步 + +现在您已经了解了各种使用示例,您可能想要: + +- 了解[配置选项](/docs/zh/configuration.md)以进行微调 +- 理解NodePass内部[工作原理](/docs/zh/how-it-works.md) +- 查看[故障排除指南](/docs/zh/troubleshooting.md)了解常见问题 \ No newline at end of file diff --git a/nodepass/docs/zh/how-it-works.md b/nodepass/docs/zh/how-it-works.md new file mode 100644 index 0000000000..8ee9e11292 --- /dev/null +++ b/nodepass/docs/zh/how-it-works.md @@ -0,0 +1,313 @@ +# NodePass 工作原理 + +本页解释了 NodePass 的内部架构和数据流机制,提供了不同组件如何交互以创建高效、安全的隧道的深入见解。 + +## 架构概述 + +NodePass 创建了一个具有独立控制和数据通道的网络架构: + +1. **控制通道(隧道)**: + - 客户端和服务器之间的未加密 TCP 连接 + - 专门用于信号传输和协调 + - 在隧道生命周期内维持持久连接 + +2. **数据通道(目标)**: + - 可配置的 TLS 加密选项: + - **模式 0**:未加密数据传输(最快,安全性最低) + - **模式 1**:自签名证书加密(良好安全性,无验证) + - **模式 2**:验证证书加密(最高安全性,需要有效证书) + - 按需为每个连接或数据报创建 + - 用于实际应用数据传输 + +3. **服务端模式操作**: + - 在隧道端点监听控制连接 + - 当流量到达目标端点时,通过控制通道向客户端发送信号 + - 在需要时使用指定的 TLS 模式建立数据通道 + - 支持双向数据流:可以从服务端或客户端发起连接 + +4. **客户端模式操作**: + - 连接到服务端的控制通道 + - 监听指示传入连接的信号 + - 使用服务端指定的 TLS 安全级别创建数据连接 + - 在安全通道和本地目标之间转发数据 + - 支持双向数据流:根据目标地址自动选择数据流方向 + +5. **客户端单端转发模式**: + - 当隧道地址为本地地址时(如127.0.0.1)自动启用 + - 客户端直接在本地监听端口,无需服务端的控制通道协调 + - 对于TCP连接使用连接池技术,显著提高转发性能 + - 适用于纯本地转发场景,减少网络开销和延迟 + - 支持TCP和UDP协议的高性能单端转发 + +5. **协议支持**: + - **TCP**:具有持久连接的全双工流式传输,在客户端单端转发模式下支持连接池优化 + - **UDP**:具有可配置缓冲区大小和超时的数据报转发 + +## 数据传输流 + +NodePass 通过其隧道架构建立双向数据流,支持 TCP 和 UDP 协议。系统支持三种数据流模式: + +### 数据流模式说明 +- **服务端接收模式(dataFlow: "-")**:服务端在目标地址监听,客户端在本地监听,数据从目标地址流向客户端本地 +- **服务端发送模式(dataFlow: "+")**:服务端连接到远程目标地址,客户端在本地监听,数据从客户端本地流向远程目标 +- **客户端单端转发模式**:客户端在本地直接监听并转发到目标地址,无需服务端协调,使用连接池技术实现高性能转发 + +数据流模式根据隧道地址和目标地址自动确定: +- 如果隧道地址是本地地址(localhost、127.0.0.1等),启用客户端单端转发模式 +- 如果目标地址是本地地址,使用服务端接收模式 +- 如果目标地址是远程地址,使用服务端发送模式 + +### 服务端流程(服务端接收模式) +1. 
**连接初始化**: + ``` + [目标客户端] → [目标监听器] → [服务器:目标连接已创建] + ``` + - 对于 TCP:客户端建立到目标监听器的持久连接 + - 对于 UDP:服务器在绑定到目标地址的 UDP 套接字上接收数据报 + +2. **信号生成**: + ``` + [服务端] → [生成唯一连接 ID] → [通过未加密的 TCP 隧道向客户端发送信号] + ``` + - 对于 TCP:生成 `//#1` 信号 + - 对于 UDP:生成 `//#2` 信号 + +3. **连接准备**: + ``` + [服务端] → [在池中创建具有配置的 TLS 模式的远程连接] → [等待客户端连接] + ``` + - 两种协议都使用相同的具有唯一连接 ID 的连接池机制 + - 根据指定模式(0、1 或 2)应用 TLS 配置 + +4. **数据交换**: + ``` + [目标连接] ⟷ [交换/传输] ⟷ [远程连接] + ``` + - 对于 TCP:使用 `conn.DataExchange()` 进行持续的双向数据流传输 + - 对于 UDP:使用可配置的缓冲区大小转发单个数据报 + +### 客户端流程 +1. **信号接收**: + ``` + [客户端] → [从 TCP 隧道读取信号] → [解析连接 ID] + ``` + - 客户端根据 URL 方案区分 TCP 和 UDP 信号 + +2. **连接建立**: + ``` + [客户端] → [从池中检索连接] → [连接到远程端点] + ``` + - 此阶段的连接管理与协议无关 + +3. **本地连接**: + ``` + [客户端] → [连接到本地目标] → [建立本地连接] + ``` + - 对于 TCP:建立到本地目标的持久 TCP 连接 + - 对于 UDP:创建用于与本地目标交换数据报的 UDP 套接字 + +4. **数据交换**: + ``` + [远程连接] ⟷ [交换/传输] ⟷ [本地目标连接] + ``` + - 对于 TCP:使用 `conn.DataExchange()` 进行持续的双向数据流传输 + - 对于 UDP:读取单个数据报,转发它,使用超时等待响应,然后返回响应 + +### 客户端单端转发流程 +1. **模式识别**: + ``` + [客户端] → [检测隧道地址为本地地址] → [启用单端转发模式] + ``` + - 自动检测隧道地址是否为localhost、127.0.0.1等本地地址 + - 启用单端转发模式,跳过服务端控制通道建立 + +2. **本地监听**: + ``` + [客户端] → [在隧道端口启动监听器] → [等待本地连接] + ``` + - 直接在指定的隧道端口启动TCP或UDP监听器 + - 无需连接到远程服务端,实现零延迟启动 + +3. **连接池初始化**(仅TCP): + ``` + [客户端] → [初始化目标连接池] → [预建立连接到目标地址] + ``` + - 为TCP转发创建高性能连接池 + - 预先建立多个到目标地址的连接,显著减少连接建立延迟 + - 连接池大小可根据并发需求动态调整 + +4. **高性能转发**: + ``` + [本地连接] → [从连接池获取目标连接] → [直接数据交换] → [连接复用或释放] + ``` + - 对于TCP:从连接池中快速获取预建立的目标连接,进行高效数据交换 + - 对于UDP:直接转发数据报到目标地址,无需连接池 + - 优化的数据路径,最小化转发开销和延迟 + +### 特定协议特性 +- **TCP 交换**: + - 用于全双工通信的持久连接 + - 连接终止前的持续数据流传输 + - 具有自动重连的错误处理 + - **客户端单端转发优化**:通过连接池技术预建立连接,显著减少连接建立延迟 + +- **UDP 交换**: + - 具有可配置缓冲区大小的一次性数据报转发 (`UDP_DATA_BUF_SIZE`) + - 响应等待的读取超时控制 (`UDP_READ_TIMEOUT`) + - 针对低延迟、无状态通信进行了优化 + - **客户端单端转发优化**:直接转发机制,无需连接池,实现最低延迟 + +## 信号通信机制 + +NodePass 通过 TCP 隧道使用复杂的基于 URL 的信号协议: + +### 信号类型 +1. **隧道信号**: + - 格式:`#` + - 目的:通知客户端 TLS 代号 + - 时机:在隧道握手时发送 + +2. 
**TCP 启动信号**: + - 格式:`//#1` + - 目的:请求客户端为特定 ID 建立 TCP 连接 + - 时机:当接收到目标服务的新 TCP 连接时发送 + +3. **UDP 启动信号**: + - 格式:`//#2` + - 目的:请求客户端处理特定 ID 的 UDP 流量 + - 时机:当在目标端口接收到 UDP 数据时发送 + +### 信号流程 +1. **信号生成**: + - 服务端为特定事件创建 URL 格式的信号 + - 信号以换行符终止,以便正确解析 + +2. **信号传输**: + - 服务端将信号写入 TCP 隧道连接 + - 使用互斥锁防止对隧道的并发写入 + +3. **信号接收**: + - 客户端使用缓冲读取器从隧道读取信号 + - 信号被修剪并解析为 URL 格式 + +4. **信号处理**: + - 客户端将有效信号放入缓冲通道 (signalChan) + - 专用 goroutine 处理来自通道的信号 + - 信号量模式防止信号溢出 + +5. **信号执行**: + - 远程信号更新客户端的远程地址配置 + - 启动信号触发 `clientOnce()` 方法建立连接 + +### 信号弹性 +- 具有可配置容量的缓冲通道防止在高负载下信号丢失 +- 信号量实现确保受控并发 +- 对格式错误或意外信号的错误处理 + +## 连接池架构 + +NodePass 实现了一个高效的连接池系统来管理网络连接: + +### 池设计 +1. **池类型**: + - **客户端池**:预先建立到远程端点的连接 + - **服务器池**:管理来自客户端的传入连接 + +2. **池组件**: + - **连接存储**:线程安全的连接 ID 到 net.Conn 对象的映射 + - **ID 通道**:用于可用连接 ID 的缓冲通道 + - **容量管理**:基于使用模式的动态调整 + - **间隔控制**:连接创建之间的基于时间的限流 + - **连接工厂**:可定制的连接创建函数 + +### 连接生命周期 +1. **连接创建**: + - 连接创建数量不超过配置的容量 + - 每个连接都分配一个唯一 ID + - ID 和连接存储在池中 + +2. **连接获取**: + - 客户端使用连接 ID 检索连接 + - 服务端从池中检索下一个可用连接 + - 在返回前验证连接 + +3. **连接使用**: + - 获取时从池中移除连接 + - 用于端点之间的数据交换 + - 无连接重用(一次性使用模型) + +4. **连接终止**: + - 使用后关闭连接 + - 正确释放资源 + - 错误处理确保干净终止 + +### 池管理 +1. **容量控制**: + - `MIN_POOL_CAPACITY`:确保最小可用连接数 + - `MAX_POOL_CAPACITY`:防止过度资源消耗 + - 基于需求模式的动态缩放 + +2. **间隔控制**: + - `MIN_POOL_INTERVAL`:连接创建尝试之间的最小时间 + - `MAX_POOL_INTERVAL`:连接创建尝试之间的最大时间 + - 自适应基于时间的限流以优化资源使用 + +3. **动态池适应**: + 连接池采用双重自适应机制以确保最佳性能: + + **A. 容量调整** + - 池容量根据实时使用模式动态调整 + - 如果连接创建成功率低(<20%),容量减少以最小化资源浪费 + - 如果连接创建成功率高(>80%),容量增加以适应更高的流量 + - 渐进缩放防止振荡并提供稳定性 + - 遵守配置的最小和最大容量边界 + + **B. 间隔调整** + - 创建间隔根据池空闲连接数调整 + - 当空闲连接较少(容量的<20%)时,间隔向最小间隔减少 + - 当空闲连接较多(容量的>80%)时,间隔向最大间隔增加 + - 防止在低需求期间压垮网络资源 + - 在池耗尽的高需求期间加速连接创建 + +## 主控API架构 + +在主控模式下,NodePass提供RESTful API进行集中管理: + +### API组件 +1. **HTTP/HTTPS服务器**: + - 在配置的地址和端口上监听 + - 可选的TLS加密,与隧道服务器使用相同模式 + - 可配置的API前缀路径 + +2. **实例管理**: + - NodePass实例的内存注册表 + - 基于UID的实例标识 + - 每个实例的状态跟踪(运行中、已停止等) + +3. 
**RESTful端点**: + - 实例的标准CRUD操作 + - 实例控制操作(启动、停止、重启) + - 健康状态报告 + - API文档的OpenAPI规范 + +### 实例生命周期管理 +1. **实例创建**: + - 基于URL的配置,类似于命令行 + - 基于实例类型的动态初始化 + - 实例创建前的参数验证 + +2. **实例控制**: + - 启动/停止/重启能力 + - 可配置超时的优雅关闭 + - 终止时的资源清理 + +3. **API安全**: + - API连接的TLS加密选项 + - 与隧道服务端相同的安全模式 + - HTTPS的证书管理 + +## 下一步 + +- 有关部署NodePass的实际示例,请参阅[示例页面](/docs/zh/examples.md) +- 要根据您的特定需求优化NodePass,请探索[配置选项](/docs/zh/configuration.md) +- 如果遇到任何问题,请查看[故障排除指南](/docs/zh/troubleshooting.md) \ No newline at end of file diff --git a/nodepass/docs/zh/installation.md b/nodepass/docs/zh/installation.md new file mode 100644 index 0000000000..3db8dff484 --- /dev/null +++ b/nodepass/docs/zh/installation.md @@ -0,0 +1,111 @@ +# 安装指南 + +本指南提供了使用不同方法安装 NodePass 的详细说明。选择最适合您环境和需求的安装方式。 + +## 系统要求 + +- Go 1.24或更高版本(从源代码构建时需要) +- 服务器和客户端端点之间的网络连接 +- 绑定1024以下端口可能需要管理员权限 + +## 安装方法 + +### 方式1:预编译二进制文件 + +开始使用 NodePass 的最简单方法是为您的平台下载预编译的二进制文件。 + +1. 访问 GitHub 上的[发布页面](https://github.com/yosebyte/nodepass/releases) +2. 下载适合您操作系统的二进制文件(Windows、macOS、Linux) +3. 如有必要,解压缩档案 +4. 使二进制文件可执行(Linux/macOS): + ```bash + chmod +x nodepass + ``` +5. 
将二进制文件移动到PATH中的位置: + - Linux/macOS:`sudo mv nodepass /usr/local/bin/` + - Windows:将位置添加到PATH环境变量 + +### 方式2:使用Go安装 + +如果您的系统上已安装Go,可以使用`go install`命令: + +```bash +go install github.com/yosebyte/nodepass/cmd/nodepass@latest +``` + +此命令下载源代码,编译它,并将二进制文件安装到您的Go bin目录中(通常是`$GOPATH/bin`)。 + +### 方式3:从源代码构建 + +对于最新的开发版本或自定义构建: + +```bash +# 克隆仓库 +git clone https://github.com/yosebyte/nodepass.git + +# 导航到项目目录 +cd nodepass + +# 构建二进制文件 +go build -o nodepass ./cmd/nodepass + +# 可选:安装到GOPATH/bin +go install ./cmd/nodepass +``` + +### 方式4:使用容器镜像 + +NodePass在GitHub容器注册表中提供容器镜像,非常适合容器化环境: + +```bash +# 拉取容器镜像 +docker pull ghcr.io/yosebyte/nodepass:latest + +# 服务器模式运行 +docker run -d --name nodepass-server -p 10101:10101 -p 8080:8080 \ + ghcr.io/yosebyte/nodepass server://0.0.0.0:10101/0.0.0.0:8080 + +# 客户端模式运行 +docker run -d --name nodepass-client \ + -e MIN_POOL_CAPACITY=32 \ + -e MAX_POOL_CAPACITY=512 \ + -p 8080:8080 \ + ghcr.io/yosebyte/nodepass client://nodepass-server:10101/127.0.0.1:8080 +``` + +### 方式5:使用管理脚本(仅限Linux) + +对于Linux系统,我们提供了一键脚本: + +```bash +bash <(curl -sSL https://run.nodepass.eu/np.sh) +``` + +- 本脚本提供了简单易用的 master 模式,即 API 模式的安装、配置和管理功能。 +- 详情请参阅[https://github.com/NodePassProject/npsh](https://github.com/NodePassProject/npsh) + +## 验证安装 + +安装后,通过检查版本来验证NodePass是否正确安装: + +```bash +nodepass +``` + +## 下一步 + +安装NodePass后,您可以: + +- 了解基本[使用方法](/docs/zh/usage.md) +- 探索[配置选项](/docs/zh/configuration.md) +- 尝试一些[使用示例](/docs/zh/examples.md) + +## 安装问题故障排除 + +如果在安装过程中遇到任何问题: + +- 确保您的系统满足最低要求 +- 检查是否具有安装软件的正确权限 +- 对于Go相关问题,使用`go version`验证您的Go安装 +- 对于容器相关问题,确保Docker正确安装并运行 +- 查看我们的[故障排除指南](/docs/zh/troubleshooting.md)获取更多帮助 diff --git a/nodepass/docs/zh/troubleshooting.md b/nodepass/docs/zh/troubleshooting.md new file mode 100644 index 0000000000..fe1e808368 --- /dev/null +++ b/nodepass/docs/zh/troubleshooting.md @@ -0,0 +1,287 @@ +# 故障排除指南 + +本指南帮助您诊断并解决使用NodePass时可能遇到的常见问题。对于每个问题,我们提供可能的原因和逐步解决方案。 + +## 连接问题 + +### 无法建立隧道连接 + +**症状**:客户端无法连接到服务器的隧道端点,或连接立即断开。 + 
+**可能的原因和解决方案**: + +1. **网络连接问题** + - 使用`ping`或`telnet`验证与服务器地址的基本连接 + - 检查指定的端口是否可达:`telnet server.example.com 10101` + - 确保没有防火墙阻止隧道端口(通常为10101) + +2. **服务器未运行** + - 在Linux/macOS上使用`ps aux | grep nodepass`验证NodePass服务器是否运行 + - 检查服务器日志中的任何启动错误 + - 尝试重启服务器进程 + +3. **地址错误** + - 仔细检查客户端命令中的隧道地址格式 + - 确保使用了正确的主机名/IP和端口 + - 如果使用DNS名称,验证它们是否解析为正确的IP地址 + +4. **TLS配置不匹配** + - 如果服务器需要TLS但客户端不支持,连接将失败 + - 检查服务器日志中的TLS握手错误 + - 如果使用TLS模式2,确保证书配置正确 + +### 数据未通过隧道流动 + +**症状**:隧道连接已建立,但应用程序数据未到达目的地。 + +**可能的原因和解决方案**: + +1. **目标服务未运行** + - 验证目标服务在服务器和客户端两侧是否运行 + - 检查是否可以在本地直接连接到该服务 + +2. **端口冲突** + - 确保目标端口没有被其他应用程序占用 + - 使用`netstat -tuln`检查端口使用情况 + +3. **协议不匹配** + - 验证您是否在隧道传输正确的协议(TCP与UDP) + - 某些应用程序需要特定的协议支持 + +4. **目标地址错误** + - 仔细检查服务器和客户端命令中的目标地址 + - 对于服务器端目标,确保它们可从服务器访问 + - 对于客户端目标,确保它们可从客户端访问 + +### 连接稳定性问题 + +**症状**:隧道最初工作但频繁断开或变得无响应。 + +**可能的原因和解决方案**: + +1. **网络不稳定** + - 检查您的网络中是否有数据包丢失或高延迟 + - 考虑为生产部署使用更稳定的网络连接 + +2. **资源限制** + - 监控客户端和服务器的CPU和内存使用情况 + - 如果资源耗尽,调整池参数(参见性能部分) + - 在Linux/macOS上使用`ulimit -n`检查文件描述符限制 + +3. **超时配置** + - 如果使用具有慢响应时间的UDP,调整`UDP_READ_TIMEOUT` + - 考虑在操作系统级别调整TCP keepalive设置以支持长寿命连接 + +4. **服务器过载** + - 检查服务器日志中的连接过载迹象 + - 调整`MAX_POOL_CAPACITY`和`SEMAPHORE_LIMIT`以处理负载 + - 考虑用多个NodePass实例水平扩展 + +## 证书问题 + +### TLS握手失败 + +**症状**:连接尝试因TLS握手错误而失败。 + +**可能的原因和解决方案**: + +1. **无效证书** + - 验证证书有效性:`openssl x509 -in cert.pem -text -noout` + - 确保证书没有过期 + - 检查证书是否针对正确的域名/IP颁发 + +2. **证书文件丢失或无法访问** + - 确认证书和密钥的文件路径正确 + - 验证文件权限允许NodePass进程读取它们 + - 通过文本编辑器打开证书检查文件是否损坏 + +3. **证书信任问题** + - 如果使用自定义CA,确保它们被正确信任 + - 对于自签名证书,确认使用TLS模式1 + - 对于验证证书,确保CA链完整 + +4. **密钥格式问题** + - 确保私钥格式正确(通常为PEM) + - 检查私钥是否有密码保护(不直接支持) + +### 证书更新问题 + +**症状**:证书更新后,安全连接开始失败。 + +**可能的原因和解决方案**: + +1. **新证书未加载** + - 重启NodePass强制加载新证书 + - 检查`RELOAD_INTERVAL`是否设置正确以自动检测变更 + +2. **证书链不完整** + - 确保证书文件中包含完整的证书链 + - 验证链顺序:首先是您的证书,然后是中间证书 + +3. 
**密钥不匹配** + - 验证新证书是否与私钥匹配: + ```bash + openssl x509 -noout -modulus -in cert.pem | openssl md5 + openssl rsa -noout -modulus -in key.pem | openssl md5 + ``` + - 如果输出不同,证书和密钥不匹配 + +## 性能优化 + +### 高延迟 + +**症状**:连接工作但有明显延迟。 + +**可能的原因和解决方案**: + +1. **池配置** + - 增加`MIN_POOL_CAPACITY`以准备更多连接 + - 减少`MIN_POOL_INTERVAL`以更快创建连接 + - 如果连接队列堆积,调整`SEMAPHORE_LIMIT` + +2. **网络路径** + - 检查网络拥塞或高延迟链路 + - 考虑将NodePass部署在更靠近客户端或服务器的位置 + - 使用traceroute识别潜在瓶颈 + +3. **TLS开销** + - 如果需要极低延迟且安全性不太重要,考虑使用TLS模式0 + - 为了平衡,使用带会话恢复的TLS模式1 + +4. **资源竞争** + - 确保主机系统有足够的CPU和内存 + - 检查是否有其他进程竞争资源 + - 考虑为高流量部署使用专用主机 + +### CPU使用率高 + +**症状**:NodePass进程消耗过多CPU资源。 + +**可能的原因和解决方案**: + +1. **池抖动** + - 如果池不断创建和销毁连接,调整时间 + - 增加`MIN_POOL_INTERVAL`以减少连接创建频率 + - 为`MIN_POOL_CAPACITY`和`MAX_POOL_CAPACITY`找到良好平衡 + +2. **过度日志记录** + - 在生产环境中将日志级别从debug降低到info或warn + - 检查日志是否写入缓慢设备 + +3. **TLS开销** + - TLS握手需要大量CPU;考虑会话缓存 + - 如果证书验证不太重要,使用TLS模式1而不是模式2 + +4. **流量体积** + - 高吞吐量可能导致CPU饱和 + - 考虑跨多个NodePass实例分配流量 + - 对于非常高的吞吐量,可能需要垂直扩展(更多CPU核心) + +### 内存泄漏 + +**症状**:NodePass内存使用随时间持续增长。 + +**可能的原因和解决方案**: + +1. **连接泄漏** + - 确保`SHUTDOWN_TIMEOUT`足够长以正确关闭连接 + - 检查自定义脚本或管理代码中的错误处理 + - 使用系统工具如`netstat`监控连接数量 + +2. **池大小问题** + - 如果`MAX_POOL_CAPACITY`非常大,内存使用会更高 + - 监控实际池使用情况与配置容量 + - 根据实际并发连接需求调整容量 + +3. **调试日志** + - 在高流量场景中,大量调试日志可能消耗内存 + - 在生产环境中使用适当的日志级别 + +## UDP特定问题 + +### UDP数据丢失 + +**症状**:UDP数据包无法通过隧道可靠转发。 + +**可能的原因和解决方案**: + +1. **缓冲区大小限制** + - 如果UDP数据包较大,增加`UDP_DATA_BUF_SIZE` + - 默认8192字节对某些应用程序可能太小 + +2. **超时问题** + - 如果响应较慢,增加`UDP_READ_TIMEOUT` + - 对于响应时间变化的应用程序,找到最佳平衡点 + +3. **高数据包率** + - UDP一次处理一个数据报;非常高的速率可能导致问题 + - 考虑为高流量UDP应用增加池容量 + +4. **协议期望** + - 一些UDP应用期望特定的数据包顺序或时序行为 + - NodePass提供尽力转发,但无法保证超出网络提供的UDP属性 + +### UDP连接跟踪 + +**症状**:UDP会话过早断开或无法建立。 + +**可能的原因和解决方案**: + +1. **连接映射** + - 验证客户端配置是否符合服务器期望 + - 检查防火墙是否超时UDP会话跟踪 + +2. **应用UDP超时** + - 一些应用有内置UDP会话超时 + - 可能需要调整应用特定的keepalive设置 + +## 主控API问题 + +### API可访问性问题 + +**症状**:无法连接到主控API端点。 + +**可能的原因和解决方案**: + +1. 
**端点配置** + - 验证主控命令中的API地址和端口 + - 检查API服务器是否绑定到正确的网络接口 + +2. **TLS配置** + - 如果使用HTTPS(TLS模式1或2),确保客户端工具支持TLS + - 对于测试,使用`curl -k`跳过证书验证 + +3. **自定义前缀问题** + - 如果使用自定义API前缀,确保所有请求中都包含它 + - 检查API客户端和脚本中的URL格式 + +### 实例管理失败 + +**症状**:无法通过API创建、控制或删除实例。 + +**可能的原因和解决方案**: + +1. **JSON格式问题** + - 验证请求体是有效的JSON + - 检查API请求中的必填字段 + +2. **URL解析问题** + - 确保实例URL格式正确,必要时进行URL编码 + - 验证URL参数使用正确格式 + +3. **实例状态冲突** + - 无法删除运行中的实例,必须先停止 + - 在执行操作前先用GET检查当前实例状态 + +4. **权限问题** + - 确保NodePass主控具有创建进程的足够权限 + - 检查任何引用的证书或密钥的文件系统权限 + +## 下一步 + +如果您遇到本指南未涵盖的问题: + +- 查看[项目仓库](https://github.com/yosebyte/nodepass)中的已知问题 +- 将日志级别增加到`debug`以获取更详细信息 +- 查看[工作原理](/docs/zh/how-it-works.md)部分以更好地理解内部机制 +- 考虑加入社区讨论,从其他用户处获取帮助 \ No newline at end of file diff --git a/nodepass/docs/zh/usage.md b/nodepass/docs/zh/usage.md new file mode 100644 index 0000000000..f732051ecb --- /dev/null +++ b/nodepass/docs/zh/usage.md @@ -0,0 +1,283 @@ +# 使用说明 + +NodePass创建一个带有未加密TCP控制通道的隧道,并为数据交换提供可配置的TLS加密选项。本指南涵盖三种操作模式并说明如何有效地使用每种模式。 + +## 命令行语法 + +NodePass命令的一般语法是: + +```bash +nodepass ":///?log=&tls=&crt=&key=&min=&max=" +``` + +其中: +- ``:指定操作模式(`server`、`client`或`master`) +- ``:控制通道通信的隧道端点地址 +- ``:业务数据的目标地址,支持双向模式(或在master模式下的API前缀) + +### 查询参数说明 + +通用查询参数: +- `log=`:日志详细级别(`debug`、`info`、`warn`、`error`或`event`) +- `min=`:最小连接池容量(默认:64,仅适用于client模式) +- `max=`:最大连接池容量(默认:8192,仅适用于client模式) + +TLS相关参数(仅适用于server/master模式): +- `tls=`:数据通道的TLS安全级别(`0`、`1`或`2`) +- `crt=`:证书文件路径(当`tls=2`时) +- `key=`:私钥文件路径(当`tls=2`时) + +## 运行模式 + +NodePass提供三种互补的运行模式,以适应各种部署场景。 + +### 服务端模式 + +服务端模式建立隧道控制通道,并支持双向数据流转发。 + +```bash +nodepass "server:///?log=&tls=&crt=&key=" +``` + +#### 参数 + +- `tunnel_addr`:TCP隧道端点地址(控制通道),客户端将连接到此处(例如, 10.1.0.1:10101) +- `target_addr`:业务数据的目标地址,支持双向数据流模式(例如, 10.1.0.1:8080) +- `log`:日志级别(debug, info, warn, error, event) +- `tls`:目标数据通道的TLS加密模式 (0, 1, 2) + - `0`:无TLS加密(明文TCP/UDP) + - `1`:自签名证书(自动生成) + - `2`:自定义证书(需要`crt`和`key`参数) +- `crt`:证书文件路径(当`tls=2`时必需) +- `key`:私钥文件路径(当`tls=2`时必需) + +#### 服务端模式工作原理 + 
+在服务端模式下,NodePass支持两种数据流方向: + +**模式一:服务端接收流量**(target_addr为本地地址) +1. 在`tunnel_addr`上监听TCP隧道连接(控制通道) +2. 在`target_addr`上监听传入的TCP和UDP流量 +3. 当`target_addr`收到连接时,通过控制通道向客户端发送信号 +4. 为每个连接创建具有指定TLS加密级别的数据通道 + +**模式二:服务端发送流量**(target_addr为远程地址) +1. 在`tunnel_addr`上监听TCP隧道连接(控制通道) +2. 等待客户端在其本地监听,并通过隧道接收连接 +3. 建立到远程`target_addr`的连接并转发数据 + +#### 示例 + +```bash +# 数据通道无TLS加密 - 服务端接收模式 +nodepass "server://10.1.0.1:10101/10.1.0.1:8080?log=debug&tls=0" + +# 自签名证书(自动生成) - 服务端发送模式 +nodepass "server://10.1.0.1:10101/192.168.1.100:8080?log=debug&tls=1" + +# 自定义域名证书 - 服务端接收模式 +nodepass "server://10.1.0.1:10101/10.1.0.1:8080?log=debug&tls=2&crt=/path/to/cert.pem&key=/path/to/key.pem" +``` + +### 客户端模式 + +客户端模式连接到NodePass服务端并支持双向数据流转发。 + +```bash +nodepass "client:///?log=&min=&max=" +``` + +#### 参数 + +- `tunnel_addr`:要连接的NodePass服务端隧道端点地址(例如, 10.1.0.1:10101) +- `target_addr`:业务数据的目标地址,支持双向数据流模式(例如, 127.0.0.1:8080) +- `log`:日志级别(debug, info, warn, error, event) +- `min`:最小连接池容量(默认:64) +- `max`:最大连接池容量(默认:8192) + +#### 客户端模式工作原理 + +在客户端模式下,NodePass支持三种操作模式: + +**模式一:客户端单端转发**(当隧道地址为本地地址时) +1. 在本地隧道地址上监听TCP和UDP连接 +2. 使用连接池技术预建立到目标地址的TCP连接,消除连接延迟 +3. 直接将接收到的流量转发到目标地址,实现高性能转发 +4. 无需与服务端握手,实现点对点的直接转发 +5. 适用于本地代理和简单转发场景 + +**模式二:客户端接收流量**(当服务端发送流量时) +1. 连接到服务端的TCP隧道端点(控制通道) +2. 在本地监听端口,等待通过隧道传入的连接 +3. 建立到本地`target_addr`的连接并转发数据 + +**模式三:客户端发送流量**(当服务端接收流量时) +1. 连接到服务端的TCP隧道端点(控制通道) +2. 通过控制通道监听来自服务端的信号 +3. 当收到信号时,使用服务端指定的TLS安全级别建立数据连接 +4. 
建立到`target_addr`的本地连接并转发流量 + +#### 示例 + +```bash +# 客户端单端转发模式 - 本地代理监听1080端口,转发到目标服务器 +nodepass client://127.0.0.1:1080/target.example.com:8080?log=debug + +# 连接到NodePass服务端并采用其TLS安全策略 - 客户端发送模式 +nodepass client://server.example.com:10101/127.0.0.1:8080 + +# 使用调试日志连接 - 客户端接收模式 +nodepass client://server.example.com:10101/192.168.1.100:8080?log=debug + +# 自定义连接池容量 - 高性能配置 +nodepass "client://server.example.com:10101/127.0.0.1:8080?min=128&max=4096" + +# 资源受限配置 - 小型连接池 +nodepass "client://server.example.com:10101/127.0.0.1:8080?min=16&max=512&log=info" +``` + +### 主控模式 (API) + +主控模式运行RESTful API服务器,用于集中管理NodePass实例。 + +```bash +nodepass "master://[]?log=&tls=&crt=&key=" +``` + +#### 参数 + +- `api_addr`:API服务监听的地址(例如,0.0.0.0:9090) +- `prefix`:可选的API前缀路径(例如,/management)。默认为`/api` +- `log`:日志级别(debug, info, warn, error, event) +- `tls`:API服务的TLS加密模式(0, 1, 2) + - `0`:无TLS加密(HTTP) + - `1`:自签名证书(带自动生成证书的HTTPS) + - `2`:自定义证书(带提供证书的HTTPS) +- `crt`:证书文件路径(当`tls=2`时必需) +- `key`:私钥文件路径(当`tls=2`时必需) + +#### 主控模式工作原理 + +在主控模式下,NodePass: +1. 运行一个RESTful API服务器,允许动态管理NodePass实例 +2. 提供用于创建、启动、停止和监控客户端和服务端实例的端点 +3. 包含用于轻松API探索的Swagger UI,位于`{prefix}/v1/docs` +4. 
自动继承通过API创建的实例的TLS和日志设置 + +#### API端点 + +所有端点都是相对于配置的前缀(默认:`/api`): + +- `GET {prefix}/v1/instances` - 列出所有实例 +- `POST {prefix}/v1/instances` - 创建新实例,JSON请求体: `{"url": "server://0.0.0.0:10101/0.0.0.0:8080"}` +- `GET {prefix}/v1/instances/{id}` - 获取实例详情 +- `PATCH {prefix}/v1/instances/{id}` - 更新实例,JSON请求体: `{"action": "start|stop|restart"}` +- `DELETE {prefix}/v1/instances/{id}` - 删除实例 +- `GET {prefix}/v1/openapi.json` - OpenAPI规范 +- `GET {prefix}/v1/docs` - Swagger UI文档 + +#### 示例 + +```bash +# 启动HTTP主控服务(使用默认API前缀/api) +nodepass "master://0.0.0.0:9090?log=info" + +# 启动带有自定义API前缀的主控服务(/management) +nodepass "master://0.0.0.0:9090/management?log=info" + +# 启动HTTPS主控服务(自签名证书) +nodepass "master://0.0.0.0:9090/admin?log=info&tls=1" + +# 启动HTTPS主控服务(自定义证书) +nodepass "master://0.0.0.0:9090?log=info&tls=2&crt=/path/to/cert.pem&key=/path/to/key.pem" +``` + +## 管理NodePass实例 + +### 通过API创建和管理 + +您可以使用标准HTTP请求通过主控API管理NodePass实例: + +```bash +# 通过API创建和管理实例(使用默认前缀) +curl -X POST http://localhost:9090/api/v1/instances \ + -H "Content-Type: application/json" \ + -d '{"url":"server://0.0.0.0:10101/0.0.0.0:8080?tls=1"}' + +# 使用自定义前缀 +curl -X POST http://localhost:9090/admin/v1/instances \ + -H "Content-Type: application/json" \ + -d '{"url":"server://0.0.0.0:10101/0.0.0.0:8080?tls=1"}' + +# 列出所有运行实例 +curl http://localhost:9090/api/v1/instances + +# 控制实例(用实际实例ID替换{id}) +curl -X PUT http://localhost:9090/api/v1/instances/{id} \ + -H "Content-Type: application/json" \ + -d '{"action":"restart"}' +``` + +## 双向数据流说明 + +NodePass支持灵活的双向数据流配置: + +### 客户端单端转发模式 +- **客户端**:在本地隧道地址监听,使用连接池技术直接转发到目标地址 +- **连接池优化**:预建立TCP连接,消除连接延迟,提供高性能转发 +- **无需服务端**:独立运行,不依赖服务端握手 +- **使用场景**:本地代理、简单端口转发、测试环境、高性能转发 + +### 服务端接收模式 (dataFlow: "-") +- **服务端**:在target_addr监听传入连接,通过隧道转发到客户端 +- **客户端**:连接到本地target_addr提供服务 +- **使用场景**:将内网服务暴露给外网访问 + +### 服务端发送模式 (dataFlow: "+") +- **服务端**:连接到远程target_addr获取数据,通过隧道发送到客户端 +- **客户端**:在本地监听,接收来自服务端的连接 +- **使用场景**:通过隧道代理访问远程服务 + +系统会根据隧道地址和目标地址自动选择合适的操作模式: +- 
如果客户端的隧道地址为本地地址,启用单端转发模式 +- 如果目标地址是本地地址,使用服务端接收模式 +- 如果目标地址是远程地址,使用服务端发送模式 + +## 隧道密钥(Tunnel Key) + +NodePass使用隧道密钥来验证客户端和服务端之间的连接。密钥可以通过两种方式指定: + +### 密钥获取规则 + +1. **显式密钥**:在URL中指定用户名部分作为密钥 + ```bash + # 使用"mypassword"作为隧道密钥 + nodepass server://mypassword@10.1.0.1:10101/10.1.0.1:8080 + nodepass client://mypassword@10.1.0.1:10101/127.0.0.1:8080 + ``` + +2. **端口派生密钥**:如果未指定用户名,系统将使用端口号的十六进制值作为密钥 + ```bash + # 端口10101的十六进制值为"2775",将作为隧道密钥 + nodepass server://10.1.0.1:10101/10.1.0.1:8080 + nodepass client://10.1.0.1:10101/127.0.0.1:8080 + ``` + +### 握手流程 + +客户端与服务端的握手过程如下: + +1. **客户端连接**:客户端连接到服务端的隧道地址 +2. **密钥验证**:客户端发送XOR加密的隧道密钥 +3. **服务端验证**:服务端解密并验证密钥是否匹配 +4. **配置同步**:验证成功后,服务端发送隧道配置信息(包括TLS模式) +5. **连接确立**:握手完成,开始数据传输 + +这种设计确保了只有拥有正确密钥的客户端才能建立隧道连接。 + +## 下一步 + +- 了解[配置选项](/docs/zh/configuration.md)以微调NodePass +- 探索常见部署场景的[使用示例](/docs/zh/examples.md) +- 理解NodePass内部[工作原理](/docs/zh/how-it-works.md) +- 如果遇到问题,请查看[故障排除指南](/docs/zh/troubleshooting.md) \ No newline at end of file diff --git a/nodepass/go.mod b/nodepass/go.mod new file mode 100644 index 0000000000..57a7199dc3 --- /dev/null +++ b/nodepass/go.mod @@ -0,0 +1,10 @@ +module github.com/yosebyte/nodepass + +go 1.24.3 + +require ( + github.com/NodePassProject/cert v1.0.0 + github.com/NodePassProject/conn v1.0.1 + github.com/NodePassProject/logs v1.0.1 + github.com/NodePassProject/pool v1.0.8 +) diff --git a/nodepass/go.sum b/nodepass/go.sum new file mode 100644 index 0000000000..36b649b851 --- /dev/null +++ b/nodepass/go.sum @@ -0,0 +1,8 @@ +github.com/NodePassProject/cert v1.0.0 h1:cBNNvR+ja22AgNlUmeGWLcCM1vmnLTqpbCQ4Hdn5was= +github.com/NodePassProject/cert v1.0.0/go.mod h1:4EJDS3GozJ74dtICJ/xcq42WKKvF0tiTM9/M7Q9NF9c= +github.com/NodePassProject/conn v1.0.1 h1:vuzcQQj+cqENagzEYPwse9Vvlj/8vfkyNZCp5RvQMKk= +github.com/NodePassProject/conn v1.0.1/go.mod h1:mWe3Rylunp6Sx4v6pkSGgYZe2R+I/O+7nZ2od0yJ3aQ= +github.com/NodePassProject/logs v1.0.1 h1:WDHY1DcTO+7NydBzuRpxhEw6pWYayBdDjjZzU1uDKac= 
+github.com/NodePassProject/logs v1.0.1/go.mod h1:ocFTMNXBTnQFJFAhF+qobAzu7+y+wYPik7D+a1jPfis= +github.com/NodePassProject/pool v1.0.8 h1:zuqVdQj0OBarIo/P/BdpTxXk8kbjU2GYJJaVA5T+LwQ= +github.com/NodePassProject/pool v1.0.8/go.mod h1:kdRAEDK45j/+iHH4kRTpXt/wI28NIguJ13n/5NDXxkw= diff --git a/nodepass/internal/client.go b/nodepass/internal/client.go new file mode 100644 index 0000000000..1645695648 --- /dev/null +++ b/nodepass/internal/client.go @@ -0,0 +1,171 @@ +// 内部包,实现客户端模式功能 +package internal + +import ( + "bufio" + "bytes" + "context" + "net" + "net/url" + "os" + "os/signal" + "syscall" + "time" + + "github.com/NodePassProject/logs" + "github.com/NodePassProject/pool" +) + +// Client 实现客户端模式功能 +type Client struct { + Common // 继承共享功能 + tunnelName string // 隧道名称 +} + +// NewClient 创建新的客户端实例 +func NewClient(parsedURL *url.URL, logger *logs.Logger) *Client { + client := &Client{ + Common: Common{ + logger: logger, + semaphore: make(chan struct{}, semaphoreLimit), + errChan: make(chan error, 2), + signalChan: make(chan string, semaphoreLimit), + }, + tunnelName: parsedURL.Hostname(), + } + // 初始化公共字段 + client.getTunnelKey(parsedURL) + client.getPoolCapacity(parsedURL) + client.getAddress(parsedURL) + return client +} + +// Run 管理客户端生命周期 +func (c *Client) Run() { + c.logger.Info("Client started: %v@%v/%v", c.tunnelKey, c.tunnelAddr, c.targetTCPAddr) + + // 启动客户端服务并处理重启 + go func() { + for { + time.Sleep(serviceCooldown) + if err := c.start(); err != nil { + c.logger.Error("Client error: %v", err) + c.stop() + c.logger.Info("Client restarted: %v@%v/%v", c.tunnelKey, c.tunnelAddr, c.targetTCPAddr) + } + } + }() + + // 监听系统信号以优雅关闭 + ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM) + <-ctx.Done() + stop() + + // 执行关闭过程 + shutdownCtx, cancel := context.WithTimeout(context.Background(), shutdownTimeout) + defer cancel() + if err := c.shutdown(shutdownCtx, c.stop); err != nil { + c.logger.Error("Client shutdown error: %v", err) + } 
else { + c.logger.Info("Client shutdown complete") + } +} + +// start 启动客户端服务 +func (c *Client) start() error { + c.initContext() + + // 通过是否监听成功判断单端转发或双端握手 + if err := c.initTunnelListener(); err == nil { + // 初始化连接池 + c.tunnelPool = pool.NewClientPool( + c.minPoolCapacity, + c.maxPoolCapacity, + minPoolInterval, + maxPoolInterval, + reportInterval, + c.tlsCode, + true, + c.tunnelName, + func() (net.Conn, error) { + return net.DialTCP("tcp", nil, c.targetTCPAddr) + }) + + go c.tunnelPool.ClientManager() + + return c.singleLoop() + } else { + if err := c.tunnelHandshake(); err != nil { + return err + } + + // 初始化连接池 + c.tunnelPool = pool.NewClientPool( + c.minPoolCapacity, + c.maxPoolCapacity, + minPoolInterval, + maxPoolInterval, + reportInterval, + c.tlsCode, + false, + c.tunnelName, + func() (net.Conn, error) { + return net.DialTCP("tcp", nil, c.tunnelTCPAddr) + }) + + go c.tunnelPool.ClientManager() + + switch c.dataFlow { + case "-": + go c.commonOnce() + go c.commonQueue() + case "+": + // 初始化目标监听器 + if err := c.initTargetListener(); err != nil { + return err + } + go c.commonLoop() + } + return c.healthCheck() + } +} + +// tunnelHandshake 与隧道服务端进行握手 +func (c *Client) tunnelHandshake() error { + // 建立隧道TCP连接 + tunnelTCPConn, err := net.DialTimeout("tcp", c.tunnelTCPAddr.String(), tcpDialTimeout) + if err != nil { + return err + } + + c.tunnelTCPConn = tunnelTCPConn.(*net.TCPConn) + c.bufReader = bufio.NewReader(c.tunnelTCPConn) + c.tunnelTCPConn.SetKeepAlive(true) + c.tunnelTCPConn.SetKeepAlivePeriod(reportInterval) + + // 发送隧道密钥 + _, err = c.tunnelTCPConn.Write(append(c.xor([]byte(c.tunnelKey)), '\n')) + if err != nil { + return err + } + + // 读取隧道URL + rawTunnelURL, err := c.bufReader.ReadBytes('\n') + if err != nil { + return err + } + + tunnelSignal := string(c.xor(bytes.TrimSuffix(rawTunnelURL, []byte{'\n'}))) + + // 解析隧道URL + tunnelURL, err := url.Parse(tunnelSignal) + if err != nil { + return err + } + c.dataFlow = tunnelURL.Host + c.tlsCode = 
tunnelURL.Fragment + + c.logger.Info("Tunnel signal <- : %v <- %v", tunnelSignal, c.tunnelTCPConn.RemoteAddr()) + c.logger.Info("Tunnel handshaked: %v <-> %v", c.tunnelTCPConn.LocalAddr(), c.tunnelTCPConn.RemoteAddr()) + return nil +} diff --git a/nodepass/internal/common.go b/nodepass/internal/common.go new file mode 100644 index 0000000000..b4d2ab3e84 --- /dev/null +++ b/nodepass/internal/common.go @@ -0,0 +1,848 @@ +// 内部包,提供共享功能 +package internal + +import ( + "bufio" + "bytes" + "context" + "fmt" + "net" + "net/url" + "os" + "strconv" + "strings" + "sync" + "time" + + "github.com/NodePassProject/conn" + "github.com/NodePassProject/logs" + "github.com/NodePassProject/pool" +) + +// Common 包含所有模式共享的核心功能 +type Common struct { + mu sync.Mutex // 互斥锁 + logger *logs.Logger // 日志记录器 + tlsCode string // TLS模式代码 + dataFlow string // 数据流向 + tunnelKey string // 隧道密钥 + tunnelAddr string // 隧道地址字符串 + tunnelTCPAddr *net.TCPAddr // 隧道TCP地址 + tunnelUDPAddr *net.UDPAddr // 隧道UDP地址 + targetAddr string // 目标地址字符串 + targetTCPAddr *net.TCPAddr // 目标TCP地址 + targetUDPAddr *net.UDPAddr // 目标UDP地址 + targetListener *net.TCPListener // 目标监听器 + tunnelListener net.Listener // 隧道监听器 + tunnelTCPConn *net.TCPConn // 隧道TCP连接 + tunnelUDPConn *net.UDPConn // 隧道UDP连接 + targetTCPConn *net.TCPConn // 目标TCP连接 + targetUDPConn *net.UDPConn // 目标UDP连接 + targetUDPSession sync.Map // 目标UDP会话 + tunnelPool *pool.Pool // 隧道连接池 + minPoolCapacity int // 最小池容量 + maxPoolCapacity int // 最大池容量 + semaphore chan struct{} // 信号量通道 + bufReader *bufio.Reader // 缓冲读取器 + signalChan chan string // 信号通道 + errChan chan error // 错误通道 + ctx context.Context // 上下文 + cancel context.CancelFunc // 取消函数 +} + +// 配置变量,可通过环境变量调整 +var ( + semaphoreLimit = getEnvAsInt("NP_SEMAPHORE_LIMIT", 1024) // 信号量限制 + udpDataBufSize = getEnvAsInt("NP_UDP_DATA_BUF_SIZE", 8192) // UDP缓冲区大小 + udpReadTimeout = getEnvAsDuration("NP_UDP_READ_TIMEOUT", 20*time.Second) // UDP读取超时 + udpDialTimeout = getEnvAsDuration("NP_UDP_DIAL_TIMEOUT", 
20*time.Second) // UDP拨号超时 + tcpReadTimeout = getEnvAsDuration("NP_TCP_READ_TIMEOUT", 20*time.Second) // TCP读取超时 + tcpDialTimeout = getEnvAsDuration("NP_TCP_DIAL_TIMEOUT", 20*time.Second) // TCP拨号超时 + minPoolInterval = getEnvAsDuration("NP_MIN_POOL_INTERVAL", 1*time.Second) // 最小池间隔 + maxPoolInterval = getEnvAsDuration("NP_MAX_POOL_INTERVAL", 5*time.Second) // 最大池间隔 + reportInterval = getEnvAsDuration("NP_REPORT_INTERVAL", 5*time.Second) // 报告间隔 + serviceCooldown = getEnvAsDuration("NP_SERVICE_COOLDOWN", 3*time.Second) // 服务冷却时间 + shutdownTimeout = getEnvAsDuration("NP_SHUTDOWN_TIMEOUT", 5*time.Second) // 关闭超时 + ReloadInterval = getEnvAsDuration("NP_RELOAD_INTERVAL", 1*time.Hour) // 重载间隔 +) + +// getEnvAsInt 从环境变量获取整数值,如果不存在则使用默认值 +func getEnvAsInt(name string, defaultValue int) int { + if valueStr, exists := os.LookupEnv(name); exists { + if value, err := strconv.Atoi(valueStr); err == nil && value >= 0 { + return value + } + } + return defaultValue +} + +// getEnvAsDuration 从环境变量获取时间间隔,如果不存在则使用默认值 +func getEnvAsDuration(name string, defaultValue time.Duration) time.Duration { + if valueStr, exists := os.LookupEnv(name); exists { + if value, err := time.ParseDuration(valueStr); err == nil && value >= 0 { + return value + } + } + return defaultValue +} + +// xor 对数据进行异或处理 +func (c *Common) xor(data []byte) []byte { + for i := range data { + data[i] ^= byte(len(c.tunnelKey) % 256) + } + return data +} + +// getTunnelKey 从URL中获取隧道密钥 +func (c *Common) getTunnelKey(parsedURL *url.URL) { + if key := parsedURL.User.Username(); key != "" { + c.tunnelKey = key + } else { + portStr := parsedURL.Port() + if portNum, err := strconv.Atoi(portStr); err == nil { + c.tunnelKey = fmt.Sprintf("%x", portNum) + } else { + c.tunnelKey = fmt.Sprintf("%x", portStr) + } + } +} + +// getPoolCapacity 获取连接池容量设置 +func (c *Common) getPoolCapacity(parsedURL *url.URL) { + if min := parsedURL.Query().Get("min"); min != "" { + if value, err := strconv.Atoi(min); err == nil && value > 0 { + 
c.minPoolCapacity = value + } + } else { + c.minPoolCapacity = 64 + } + + if max := parsedURL.Query().Get("max"); max != "" { + if value, err := strconv.Atoi(max); err == nil && value > 0 { + c.maxPoolCapacity = value + } + } else { + c.maxPoolCapacity = 8192 + } +} + +// getAddress 解析和设置地址信息 +func (c *Common) getAddress(parsedURL *url.URL) { + // 解析隧道地址 + c.tunnelAddr = parsedURL.Host + + // 解析隧道TCP地址 + if tunnelTCPAddr, err := net.ResolveTCPAddr("tcp", c.tunnelAddr); err == nil { + c.tunnelTCPAddr = tunnelTCPAddr + } else { + c.logger.Error("Resolve failed: %v", err) + } + + // 解析隧道UDP地址 + if tunnelUDPAddr, err := net.ResolveUDPAddr("udp", c.tunnelAddr); err == nil { + c.tunnelUDPAddr = tunnelUDPAddr + } else { + c.logger.Error("Resolve failed: %v", err) + } + + // 处理目标地址 + targetAddr := strings.TrimPrefix(parsedURL.Path, "/") + c.targetAddr = targetAddr + + // 解析目标TCP地址 + if targetTCPAddr, err := net.ResolveTCPAddr("tcp", targetAddr); err == nil { + c.targetTCPAddr = targetTCPAddr + } else { + c.logger.Error("Resolve failed: %v", err) + } + + // 解析目标UDP地址 + if targetUDPAddr, err := net.ResolveUDPAddr("udp", targetAddr); err == nil { + c.targetUDPAddr = targetUDPAddr + } else { + c.logger.Error("Resolve failed: %v", err) + } +} + +// initContext 初始化上下文 +func (c *Common) initContext() { + if c.cancel != nil { + c.cancel() + } + c.ctx, c.cancel = context.WithCancel(context.Background()) +} + +// initTargetListener 初始化目标监听器 +func (c *Common) initTargetListener() error { + // 初始化目标TCP监听器 + targetListener, err := net.ListenTCP("tcp", c.targetTCPAddr) + if err != nil { + if targetListener != nil { + targetListener.Close() + } + return err + } + c.targetListener = targetListener + + // 初始化目标UDP监听器 + targetUDPConn, err := net.ListenUDP("udp", c.targetUDPAddr) + if err != nil { + if targetUDPConn != nil { + targetUDPConn.Close() + } + return err + } + c.targetUDPConn = targetUDPConn + + return nil +} + +// initTunnelListener 初始化隧道监听器 +func (c *Common) initTunnelListener() 
error { + // 初始化隧道TCP监听器 + tunnelListener, err := net.ListenTCP("tcp", c.tunnelTCPAddr) + if err != nil { + if tunnelListener != nil { + tunnelListener.Close() + } + return err + } + c.tunnelListener = tunnelListener + + // 初始化隧道UDP监听器 + tunnelUDPConn, err := net.ListenUDP("udp", c.tunnelUDPAddr) + if err != nil { + if tunnelUDPConn != nil { + tunnelUDPConn.Close() + } + return err + } + c.tunnelUDPConn = tunnelUDPConn + + return nil +} + +// stop 共用停止服务 +func (c *Common) stop() { + // 取消上下文 + if c.cancel != nil { + c.cancel() + } + + // 关闭隧道连接池 + if c.tunnelPool != nil { + active := c.tunnelPool.Active() + c.tunnelPool.Close() + c.logger.Debug("Tunnel connection closed: pool active %v", active) + } + + // 清理目标UDP会话 + c.targetUDPSession.Range(func(key, value any) bool { + if conn, ok := value.(*net.UDPConn); ok { + conn.Close() + } + c.targetUDPSession.Delete(key) + return true + }) + + // 关闭目标UDP连接 + if c.targetUDPConn != nil { + c.targetUDPConn.Close() + c.logger.Debug("Target connection closed: %v", c.targetUDPConn.LocalAddr()) + } + + // 关闭目标TCP连接 + if c.targetTCPConn != nil { + c.targetTCPConn.Close() + c.logger.Debug("Target connection closed: %v", c.targetTCPConn.LocalAddr()) + } + + // 关闭隧道UDP连接 + if c.tunnelUDPConn != nil { + c.tunnelUDPConn.Close() + c.logger.Debug("Tunnel connection closed: %v", c.tunnelUDPConn.LocalAddr()) + } + + // 关闭隧道TCP连接 + if c.tunnelTCPConn != nil { + c.tunnelTCPConn.Close() + c.logger.Debug("Tunnel connection closed: %v", c.tunnelTCPConn.LocalAddr()) + } + + // 关闭目标监听器 + if c.targetListener != nil { + c.targetListener.Close() + c.logger.Debug("Target listener closed: %v", c.targetListener.Addr()) + } + + // 关闭隧道监听器 + if c.tunnelListener != nil { + c.tunnelListener.Close() + c.logger.Debug("Tunnel listener closed: %v", c.tunnelListener.Addr()) + } + + // 清空信号通道 + for { + select { + case <-c.signalChan: + default: + return + } + } +} + +// shutdown 共用优雅关闭 +func (c *Common) shutdown(ctx context.Context, stopFunc func()) error { + 
done := make(chan struct{}) + go func() { + defer close(done) + stopFunc() + }() + + select { + case <-ctx.Done(): + return ctx.Err() + case <-done: + return nil + } +} + +// commonQueue 共用信号队列 +func (c *Common) commonQueue() error { + for { + select { + case <-c.ctx.Done(): + return c.ctx.Err() + default: + // 读取原始信号 + rawSignal, err := c.bufReader.ReadBytes('\n') + if err != nil { + return err + } + signal := string(c.xor(bytes.TrimSuffix(rawSignal, []byte{'\n'}))) + + // 将信号发送到通道 + select { + case c.signalChan <- signal: + default: + c.logger.Debug("Queue limit reached: %v", semaphoreLimit) + } + } + } +} + +// healthCheck 共用健康度检查 +func (c *Common) healthCheck() error { + flushURL := &url.URL{Fragment: "0"} // 连接池刷新信号 + for { + select { + case <-c.ctx.Done(): + return c.ctx.Err() + default: + if !c.mu.TryLock() { + continue + } + + // 连接池健康度检查 + if c.tunnelPool.ErrorCount() > c.tunnelPool.Active()/2 { + // 发送刷新信号到对端 + _, err := c.tunnelTCPConn.Write(append(c.xor([]byte(flushURL.String())), '\n')) + if err != nil { + c.mu.Unlock() + return err + } + c.tunnelPool.Flush() + time.Sleep(reportInterval) // 等待连接池刷新完成 + c.logger.Debug("Tunnel pool reset: %v active connections", c.tunnelPool.Active()) + } else { + // 发送普通心跳包 + _, err := c.tunnelTCPConn.Write([]byte("\n")) + if err != nil { + c.mu.Unlock() + return err + } + } + + c.mu.Unlock() + time.Sleep(reportInterval) + } + } +} + +// commonLoop 共用处理循环 +func (c *Common) commonLoop() { + for { + select { + case <-c.ctx.Done(): + return + default: + // 等待连接池准备就绪 + if c.tunnelPool.Ready() { + go c.commonTCPLoop() + go c.commonUDPLoop() + return + } + time.Sleep(time.Millisecond) + } + } +} + +// commonTCPLoop 共用TCP请求处理循环 +func (c *Common) commonTCPLoop() { + for { + select { + case <-c.ctx.Done(): + return + default: + // 接受来自目标的TCP连接 + targetConn, err := c.targetListener.Accept() + if err != nil { + continue + } + + defer func() { + if targetConn != nil { + targetConn.Close() + } + }() + + c.targetTCPConn = 
targetConn.(*net.TCPConn) + c.logger.Debug("Target connection: %v <-> %v", targetConn.LocalAddr(), targetConn.RemoteAddr()) + + // 使用信号量限制并发数 + c.semaphore <- struct{}{} + + go func(targetConn net.Conn) { + defer func() { <-c.semaphore }() + + // 从连接池获取连接 + id, remoteConn := c.tunnelPool.ServerGet() + if remoteConn == nil { + c.logger.Error("Get failed: %v not found", id) + c.tunnelPool.AddError() + return + } + + c.logger.Debug("Tunnel connection: get %v <- pool active %v", id, c.tunnelPool.Active()) + + defer func() { + c.tunnelPool.Put(id, remoteConn) + c.logger.Debug("Tunnel connection: put %v -> pool active %v", id, c.tunnelPool.Active()) + }() + + c.logger.Debug("Tunnel connection: %v <-> %v", remoteConn.LocalAddr(), remoteConn.RemoteAddr()) + + // 构建并发送启动URL到客户端 + launchURL := &url.URL{ + Host: id, + Fragment: "1", // TCP模式 + } + + c.mu.Lock() + _, err = c.tunnelTCPConn.Write(append(c.xor([]byte(launchURL.String())), '\n')) + c.mu.Unlock() + + if err != nil { + c.logger.Error("Write failed: %v", err) + return + } + + c.logger.Debug("TCP launch signal: %v -> %v", id, c.tunnelTCPConn.RemoteAddr()) + c.logger.Debug("Starting exchange: %v <-> %v", remoteConn.LocalAddr(), targetConn.LocalAddr()) + + // 交换数据 + rx, tx, _ := conn.DataExchange(remoteConn, targetConn, tcpReadTimeout) + + // 交换完成,广播统计信息 + c.logger.Event("Exchange complete: TRAFFIC_STATS|TCP_RX=%v|TCP_TX=%v|UDP_RX=0|UDP_TX=0", rx, tx) + }(targetConn) + } + } +} + +// commonUDPLoop 共用UDP请求处理循环 +func (c *Common) commonUDPLoop() { + for { + select { + case <-c.ctx.Done(): + return + default: + // 读取来自目标的UDP数据 + buffer := make([]byte, udpDataBufSize) + n, clientAddr, err := c.targetUDPConn.ReadFromUDP(buffer) + if err != nil { + continue + } + + c.logger.Debug("Target connection: %v <-> %v", c.targetUDPConn.LocalAddr(), clientAddr) + + // 从连接池获取连接 + id, remoteConn := c.tunnelPool.ServerGet() + if remoteConn == nil { + c.logger.Error("Get failed: %v not found", id) + c.tunnelPool.AddError() + continue + } + 
+ c.logger.Debug("Tunnel connection: get %v <- pool active %v", id, c.tunnelPool.Active()) + + defer func() { + c.tunnelPool.Put(id, remoteConn) + c.logger.Debug("Tunnel connection: put %v -> pool active %v", id, c.tunnelPool.Active()) + }() + + c.logger.Debug("Tunnel connection: %v <-> %v", remoteConn.LocalAddr(), remoteConn.RemoteAddr()) + + // 使用信号量限制并发数 + c.semaphore <- struct{}{} + + go func(buffer []byte, n int, clientAddr *net.UDPAddr, remoteConn net.Conn) { + defer func() { <-c.semaphore }() + + // 构建并发送启动URL到客户端 + launchURL := &url.URL{ + Host: id, + Fragment: "2", // UDP模式 + } + + c.mu.Lock() + _, err = c.tunnelTCPConn.Write(append(c.xor([]byte(launchURL.String())), '\n')) + c.mu.Unlock() + + if err != nil { + c.logger.Error("Write failed: %v", err) + return + } + + c.logger.Debug("UDP launch signal: %v -> %v", id, c.tunnelTCPConn.RemoteAddr()) + c.logger.Debug("Starting transfer: %v <-> %v", remoteConn.LocalAddr(), c.targetUDPConn.LocalAddr()) + + // 处理UDP/TCP数据传输 + rx, tx, _ := conn.DataTransfer( + c.targetUDPConn, + remoteConn, + clientAddr, + buffer[:n], + udpDataBufSize, + tcpReadTimeout, + ) + + // 传输完成,广播统计信息 + c.logger.Event("Transfer complete: TRAFFIC_STATS|TCP_RX=0|TCP_TX=0|UDP_RX=%v|UDP_TX=%v", rx, tx) + }(buffer, n, clientAddr, remoteConn) + } + } +} + +// commonOnce 共用处理单个请求 +func (c *Common) commonOnce() { + for { + // 等待连接池准备就绪 + if !c.tunnelPool.Ready() { + time.Sleep(time.Millisecond) + continue + } + + select { + case <-c.ctx.Done(): + return + case signal := <-c.signalChan: + // 解析信号URL + signalURL, err := url.Parse(signal) + if err != nil { + c.logger.Error("Parse failed: %v", err) + continue + } + + // 处理信号 + switch signalURL.Fragment { + case "0": // 连接池刷新 + go func() { + c.tunnelPool.Flush() + time.Sleep(reportInterval) // 等待连接池刷新完成 + c.logger.Debug("Tunnel pool reset: %v active connections", c.tunnelPool.Active()) + }() + case "1": // TCP + go c.commonTCPOnce(signalURL.Host) + case "2": // UDP + go c.commonUDPOnce(signalURL.Host) + 
default: + // 健康检查或无效信号 + } + } + } +} + +// commonTCPOnce 共用处理单个TCP请求 +func (c *Common) commonTCPOnce(id string) { + c.logger.Debug("TCP launch signal: %v <- %v", id, c.tunnelTCPConn.RemoteAddr()) + + // 从连接池获取连接 + remoteConn := c.tunnelPool.ClientGet(id) + if remoteConn == nil { + c.logger.Error("Get failed: %v not found", id) + return + } + + c.logger.Debug("Tunnel connection: get %v <- pool active %v", id, c.tunnelPool.Active()) + + defer func() { + c.tunnelPool.Put(id, remoteConn) + c.logger.Debug("Tunnel connection: put %v -> pool active %v", id, c.tunnelPool.Active()) + }() + + c.logger.Debug("Tunnel connection: %v <-> %v", remoteConn.LocalAddr(), remoteConn.RemoteAddr()) + + // 连接到目标TCP地址 + targetConn, err := net.DialTimeout("tcp", c.targetTCPAddr.String(), tcpDialTimeout) + if err != nil { + c.logger.Error("Dial failed: %v", err) + return + } + + defer func() { + if targetConn != nil { + targetConn.Close() + } + }() + + c.targetTCPConn = targetConn.(*net.TCPConn) + c.logger.Debug("Target connection: %v <-> %v", targetConn.LocalAddr(), targetConn.RemoteAddr()) + c.logger.Debug("Starting exchange: %v <-> %v", remoteConn.LocalAddr(), targetConn.LocalAddr()) + + // 交换数据 + rx, tx, _ := conn.DataExchange(remoteConn, targetConn, tcpReadTimeout) + + // 交换完成,广播统计信息 + c.logger.Event("Exchange complete: TRAFFIC_STATS|TCP_RX=%v|TCP_TX=%v|UDP_RX=0|UDP_TX=0", rx, tx) +} + +// commonUDPOnce 共用处理单个UDP请求 +func (c *Common) commonUDPOnce(id string) { + c.logger.Debug("UDP launch signal: %v <- %v", id, c.tunnelTCPConn.RemoteAddr()) + + // 从连接池获取连接 + remoteConn := c.tunnelPool.ClientGet(id) + if remoteConn == nil { + c.logger.Error("Get failed: %v not found", id) + return + } + + c.logger.Debug("Tunnel connection: get %v <- pool active %v", id, c.tunnelPool.Active()) + + defer func() { + c.tunnelPool.Put(id, remoteConn) + c.logger.Debug("Tunnel connection: put %v -> pool active %v", id, c.tunnelPool.Active()) + }() + + c.logger.Debug("Tunnel connection: %v <-> %v", 
remoteConn.LocalAddr(), remoteConn.RemoteAddr()) + + // 连接到目标UDP地址 + targetConn, err := net.DialTimeout("udp", c.targetUDPAddr.String(), udpDialTimeout) + if err != nil { + c.logger.Error("Dial failed: %v", err) + return + } + + defer func() { + if targetConn != nil { + targetConn.Close() + } + }() + + c.targetUDPConn = targetConn.(*net.UDPConn) + c.logger.Debug("Target connection: %v <-> %v", targetConn.LocalAddr(), targetConn.RemoteAddr()) + c.logger.Debug("Starting transfer: %v <-> %v", remoteConn.LocalAddr(), targetConn.LocalAddr()) + + // 处理UDP/TCP数据传输 + rx, tx, _ := conn.DataTransfer( + c.targetUDPConn, + remoteConn, + nil, + nil, + udpDataBufSize, + udpReadTimeout, + ) + + // 传输完成,广播统计信息 + c.logger.Event("Transfer complete: TRAFFIC_STATS|TCP_RX=0|TCP_TX=0|UDP_RX=%v|UDP_TX=%v", rx, tx) +} + +// singleLoop 单端转发处理循环 +func (c *Common) singleLoop() error { + for { + select { + case <-c.ctx.Done(): + return context.Canceled + default: + go func() { + c.errChan <- c.singleTCPLoop() + }() + go func() { + c.errChan <- c.singleUDPLoop() + }() + return <-c.errChan + } + } +} + +// singleTCPLoop 单端转发TCP处理循环 +func (c *Common) singleTCPLoop() error { + for { + select { + case <-c.ctx.Done(): + return context.Canceled + default: + // 接受来自隧道的TCP连接 + tunnelConn, err := c.tunnelListener.Accept() + if err != nil { + continue + } + + defer func() { + if tunnelConn != nil { + tunnelConn.Close() + } + }() + + c.tunnelTCPConn = tunnelConn.(*net.TCPConn) + c.logger.Debug("Tunnel connection: %v <-> %v", tunnelConn.LocalAddr(), tunnelConn.RemoteAddr()) + + // 使用信号量限制并发数 + c.semaphore <- struct{}{} + + go func(tunnelConn net.Conn) { + defer func() { <-c.semaphore }() + + // 从连接池中获取连接 + targetConn := c.tunnelPool.ClientGet("") + if targetConn == nil { + c.logger.Error("Get failed: no target connection available") + return + } + + c.logger.Debug("Target connection: pool active %v / %v per %v", c.tunnelPool.Active(), c.tunnelPool.Capacity(), c.tunnelPool.Interval()) + + defer func() { + 
if targetConn != nil { + targetConn.Close() + } + }() + + c.targetTCPConn = targetConn.(*net.TCPConn) + c.logger.Debug("Target connection: %v <-> %v", targetConn.LocalAddr(), targetConn.RemoteAddr()) + c.logger.Debug("Starting exchange: %v <-> %v", tunnelConn.LocalAddr(), targetConn.LocalAddr()) + + // 交换数据 + rx, tx, _ := conn.DataExchange(tunnelConn, targetConn, tcpReadTimeout) + + // 交换完成,广播统计信息 + c.logger.Event("Exchange complete: TRAFFIC_STATS|TCP_RX=%v|TCP_TX=%v|UDP_RX=0|UDP_TX=0", rx, tx) + }(tunnelConn) + } + } +} + +// singleUDPLoop 单端转发UDP处理循环 +func (c *Common) singleUDPLoop() error { + for { + select { + case <-c.ctx.Done(): + return context.Canceled + default: + buffer := make([]byte, udpDataBufSize) + + // 读取来自隧道的UDP数据 + rx, clientAddr, err := c.tunnelUDPConn.ReadFromUDP(buffer) + if err != nil { + continue + } + + c.logger.Debug("Tunnel connection: %v <-> %v", c.tunnelUDPConn.LocalAddr(), clientAddr) + + var targetConn *net.UDPConn + sessionKey := clientAddr.String() + + // 获取或创建目标UDP会话 + if session, ok := c.targetUDPSession.Load(sessionKey); ok { + // 复用现有会话 + targetConn = session.(*net.UDPConn) + c.logger.Debug("Using UDP session: %v <-> %v", targetConn.LocalAddr(), targetConn.RemoteAddr()) + } else { + // 创建新的会话 + session, err := net.DialTimeout("udp", c.targetUDPAddr.String(), udpDialTimeout) + if err != nil { + c.logger.Error("Dial failed: %v", err) + continue + } + c.targetUDPSession.Store(sessionKey, session) + + targetConn = session.(*net.UDPConn) + c.logger.Debug("Target connection: %v <-> %v", targetConn.LocalAddr(), targetConn.RemoteAddr()) + + // 使用信号量限制并发数 + c.semaphore <- struct{}{} + + go func(targetConn *net.UDPConn, clientAddr *net.UDPAddr, sessionKey string) { + defer func() { <-c.semaphore }() + + buffer := make([]byte, udpDataBufSize) + + for { + select { + case <-c.ctx.Done(): + return + default: + // 设置UDP读取超时 + if err := targetConn.SetReadDeadline(time.Now().Add(udpReadTimeout)); err != nil { + c.logger.Error("SetReadDeadline 
failed: %v", err) + c.targetUDPSession.Delete(sessionKey) + targetConn.Close() + return + } + + // 从UDP读取响应 + n, err := targetConn.Read(buffer) + if err != nil { + // 检查是否为超时错误 + if netErr, ok := err.(net.Error); ok && netErr.Timeout() { + c.logger.Debug("UDP session abort: %v", err) + } else { + c.logger.Error("Read failed: %v", err) + } + c.targetUDPSession.Delete(sessionKey) + targetConn.Close() + return + } + + // 将响应写回隧道UDP连接 + tx, err := c.tunnelUDPConn.WriteToUDP(buffer[:n], clientAddr) + if err != nil { + c.logger.Error("WriteToUDP failed: %v", err) + c.targetUDPSession.Delete(sessionKey) + targetConn.Close() + return + } + // 传输完成,广播统计信息 + c.logger.Event("Transfer complete: TRAFFIC_STATS|TCP_RX=0|TCP_TX=0|UDP_RX=0|UDP_TX=%v", tx) + } + } + }(targetConn, clientAddr, sessionKey) + } + + // 将初始数据发送到目标UDP连接 + c.logger.Debug("Starting transfer: %v <-> %v", targetConn.LocalAddr(), c.tunnelUDPConn.LocalAddr()) + _, err = targetConn.Write(buffer[:rx]) + if err != nil { + c.logger.Error("Write failed: %v", err) + c.targetUDPSession.Delete(sessionKey) + targetConn.Close() + return err + } + + // 传输完成,广播统计信息 + c.logger.Event("Transfer complete: TRAFFIC_STATS|TCP_RX=0|TCP_TX=0|UDP_RX=%v|UDP_TX=0", rx) + } + } +} diff --git a/nodepass/internal/master.go b/nodepass/internal/master.go new file mode 100644 index 0000000000..1de13c3256 --- /dev/null +++ b/nodepass/internal/master.go @@ -0,0 +1,1384 @@ +// 内部包,实现主控模式功能 +package internal + +import ( + "bufio" + "context" + "crypto/rand" + "crypto/tls" + "encoding/gob" + "encoding/hex" + "encoding/json" + "fmt" + "io" + "net" + "net/http" + "net/url" + "os" + "os/exec" + "os/signal" + "path/filepath" + "regexp" + "runtime" + "strconv" + "strings" + "sync" + "syscall" + "time" + + "github.com/NodePassProject/logs" +) + +// 常量定义 +const ( + openAPIVersion = "v1" // OpenAPI版本 + stateFilePath = "gob" // 实例状态持久化文件路径 + stateFileName = "nodepass.gob" // 实例状态持久化文件名 + sseRetryTime = 3000 // 重试间隔时间(毫秒) + apiKeyID = "********" // API 
Key的特殊ID +) + +// Swagger UI HTML模板 +const swaggerUIHTML = ` + + + NodePass API + + + +
+ + + +` + +// Master 实现主控模式功能 +type Master struct { + Common // 继承通用功能 + prefix string // API前缀 + version string // NP版本 + hostname string // 隧道名称 + logLevel string // 日志级别 + crtPath string // 证书路径 + keyPath string // 密钥路径 + instances sync.Map // 实例映射表 + server *http.Server // HTTP服务器 + tlsConfig *tls.Config // TLS配置 + masterURL *url.URL // 主控URL + statePath string // 实例状态持久化文件路径 + subscribers sync.Map // SSE订阅者映射表 + notifyChannel chan *InstanceEvent // 事件通知通道 + startTime time.Time // 启动时间 +} + +// Instance 实例信息 +type Instance struct { + ID string `json:"id"` // 实例ID + Alias string `json:"alias"` // 实例别名 + Type string `json:"type"` // 实例类型 + Status string `json:"status"` // 实例状态 + URL string `json:"url"` // 实例URL + Restart bool `json:"restart"` // 是否自启动 + TCPRX uint64 `json:"tcprx"` // TCP接收字节数 + TCPTX uint64 `json:"tcptx"` // TCP发送字节数 + UDPRX uint64 `json:"udprx"` // UDP接收字节数 + UDPTX uint64 `json:"udptx"` // UDP发送字节数 + cmd *exec.Cmd `json:"-" gob:"-"` // 命令对象(不序列化) + stopped chan struct{} `json:"-" gob:"-"` // 停止信号通道(不序列化) + cancelFunc context.CancelFunc `json:"-" gob:"-"` // 取消函数(不序列化) +} + +// InstanceEvent 实例事件信息 +type InstanceEvent struct { + Type string `json:"type"` // 事件类型:initial, create, update, delete, shutdown, log + Time time.Time `json:"time"` // 事件时间 + Instance *Instance `json:"instance"` // 关联的实例 + Logs string `json:"logs,omitempty"` // 日志内容,仅当Type为log时有效 +} + +// InstanceLogWriter 实例日志写入器 +type InstanceLogWriter struct { + instanceID string // 实例ID + instance *Instance // 实例对象 + target io.Writer // 目标写入器 + master *Master // 主控对象 + statRegex *regexp.Regexp // 统计信息正则表达式 +} + +// NewInstanceLogWriter 创建新的实例日志写入器 +func NewInstanceLogWriter(instanceID string, instance *Instance, target io.Writer, master *Master) *InstanceLogWriter { + return &InstanceLogWriter{ + instanceID: instanceID, + instance: instance, + target: target, + master: master, + statRegex: regexp.MustCompile(`TRAFFIC_STATS\|TCP_RX=(\d+)\|TCP_TX=(\d+)\|UDP_RX=(\d+)\|UDP_TX=(\d+)`), + } 
+} + +// Write 实现io.Writer接口,处理日志输出并解析统计信息 +func (w *InstanceLogWriter) Write(p []byte) (n int, err error) { + s := string(p) + scanner := bufio.NewScanner(strings.NewReader(s)) + + for scanner.Scan() { + line := scanner.Text() + // 解析并处理统计信息 + if matches := w.statRegex.FindStringSubmatch(line); len(matches) == 5 { + stats := []*uint64{&w.instance.TCPRX, &w.instance.TCPTX, &w.instance.UDPRX, &w.instance.UDPTX} + for i, stat := range stats { + if v, err := strconv.ParseUint(matches[i+1], 10, 64); err == nil { + // 累加新的统计数据 + *stat += v + } + } + w.master.instances.Store(w.instanceID, w.instance) + + // 发送流量更新事件 + w.master.sendSSEEvent("update", w.instance) + } + // 输出日志加实例ID + fmt.Fprintf(w.target, "%s [%s]\n", line, w.instanceID) + + // 发送日志事件 + w.master.sendSSEEvent("log", w.instance, line) + } + + if err := scanner.Err(); err != nil { + fmt.Fprintf(w.target, "%s [%s]", s, w.instanceID) + } + return len(p), nil +} + +// setCorsHeaders 设置跨域响应头 +func setCorsHeaders(w http.ResponseWriter) { + w.Header().Set("Access-Control-Allow-Origin", "*") + w.Header().Set("Access-Control-Allow-Methods", "GET, PATCH, POST, PUT, DELETE, OPTIONS") + w.Header().Set("Access-Control-Allow-Headers", "Content-Type, Authorization, X-API-Key, Cache-Control") +} + +// NewMaster 创建新的主控实例 +func NewMaster(parsedURL *url.URL, tlsCode string, tlsConfig *tls.Config, logger *logs.Logger, version string) *Master { + // 解析主机地址 + host, err := net.ResolveTCPAddr("tcp", parsedURL.Host) + if err != nil { + logger.Error("Resolve failed: %v", err) + return nil + } + + // 获取隧道名称 + var hostname string + if tlsConfig != nil && tlsConfig.ServerName != "" { + hostname = tlsConfig.ServerName + } else { + hostname = parsedURL.Hostname() + } + + // 设置API前缀 + prefix := parsedURL.Path + if prefix == "" || prefix == "/" { + prefix = "/api" + } else { + prefix = strings.TrimRight(prefix, "/") + } + + // 获取应用程序目录作为状态文件存储位置 + execPath, _ := os.Executable() + baseDir := filepath.Dir(execPath) + + master := &Master{ + 
Common: Common{ + tlsCode: tlsCode, + logger: logger, + }, + prefix: fmt.Sprintf("%s/%s", prefix, openAPIVersion), + version: version, + logLevel: parsedURL.Query().Get("log"), + crtPath: parsedURL.Query().Get("crt"), + keyPath: parsedURL.Query().Get("key"), + hostname: hostname, + tlsConfig: tlsConfig, + masterURL: parsedURL, + statePath: filepath.Join(baseDir, stateFilePath, stateFileName), + notifyChannel: make(chan *InstanceEvent, 1024), + startTime: time.Now(), + } + master.tunnelTCPAddr = host + + // 加载持久化的实例状态 + master.loadState() + + // 启动事件分发器 + go master.startEventDispatcher() + + return master +} + +// Run 管理主控生命周期 +func (m *Master) Run() { + m.logger.Info("Master started: %v%v", m.tunnelAddr, m.prefix) + + // 初始化API Key + apiKey, ok := m.findInstance(apiKeyID) + if !ok { + // 如果不存在API Key实例,则创建一个 + apiKey = &Instance{ + ID: apiKeyID, + URL: generateAPIKey(), + } + m.instances.Store(apiKeyID, apiKey) + m.saveState() + m.logger.Info("API Key created: %v", apiKey.URL) + } else { + m.logger.Info("API Key loaded: %v", apiKey.URL) + } + + // 设置HTTP路由 + mux := http.NewServeMux() + + // 创建需要API Key认证的端点 + protectedEndpoints := map[string]http.HandlerFunc{ + fmt.Sprintf("%s/instances", m.prefix): m.handleInstances, + fmt.Sprintf("%s/instances/", m.prefix): m.handleInstanceDetail, + fmt.Sprintf("%s/events", m.prefix): m.handleSSE, + fmt.Sprintf("%s/info", m.prefix): m.handleInfo, + } + + // 创建不需要API Key认证的端点 + publicEndpoints := map[string]http.HandlerFunc{ + fmt.Sprintf("%s/openapi.json", m.prefix): m.handleOpenAPISpec, + fmt.Sprintf("%s/docs", m.prefix): m.handleSwaggerUI, + } + + // API Key 认证中间件 + apiKeyMiddleware := func(next http.HandlerFunc) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + // 设置跨域响应头 + setCorsHeaders(w) + if r.Method == "OPTIONS" { + w.WriteHeader(http.StatusOK) + return + } + + // 读取API Key,如果存在的话 + apiKeyInstance, keyExists := m.findInstance(apiKeyID) + if keyExists && apiKeyInstance.URL != "" { + // 
检查请求头中的API Key + reqAPIKey := r.Header.Get("X-API-Key") + if reqAPIKey == "" { + // API Key不存在,返回未授权错误 + httpError(w, "Unauthorized: API key required", http.StatusUnauthorized) + return + } + + // 验证API Key + if reqAPIKey != apiKeyInstance.URL { + httpError(w, "Unauthorized: Invalid API key", http.StatusUnauthorized) + return + } + } + + // 调用原始处理器 + next(w, r) + } + } + + // CORS 中间件 + corsMiddleware := func(next http.HandlerFunc) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + // 设置跨域响应头 + setCorsHeaders(w) + if r.Method == "OPTIONS" { + w.WriteHeader(http.StatusOK) + return + } + next(w, r) + } + } + + // 注册受保护的端点 + for path, handler := range protectedEndpoints { + mux.HandleFunc(path, apiKeyMiddleware(handler)) + } + + // 注册公共端点 + for path, handler := range publicEndpoints { + mux.HandleFunc(path, corsMiddleware(handler)) + } + + // 创建HTTP服务器 + m.server = &http.Server{ + Addr: m.tunnelTCPAddr.String(), + ErrorLog: m.logger.StdLogger(), + Handler: mux, + TLSConfig: m.tlsConfig, + } + + // 启动HTTP服务器 + go func() { + var err error + if m.tlsConfig != nil { + err = m.server.ListenAndServeTLS("", "") + } else { + err = m.server.ListenAndServe() + } + if err != nil && err != http.ErrServerClosed { + m.logger.Error("Listen failed: %v", err) + } + }() + + // 处理系统信号 + ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM) + <-ctx.Done() + stop() + + // 优雅关闭 + shutdownCtx, cancel := context.WithTimeout(context.Background(), shutdownTimeout) + defer cancel() + if err := m.Shutdown(shutdownCtx); err != nil { + m.logger.Error("Master shutdown error: %v", err) + } else { + m.logger.Info("Master shutdown complete") + } +} + +// Shutdown 关闭主控 +func (m *Master) Shutdown(ctx context.Context) error { + return m.shutdown(ctx, func() { + // 声明一个已关闭通道的集合,避免重复关闭 + var closedChannels sync.Map + + var wg sync.WaitGroup + + // 给所有订阅者一个关闭通知 + m.subscribers.Range(func(key, value any) bool { + subscriberChan := value.(chan 
*InstanceEvent) + wg.Add(1) + go func(ch chan *InstanceEvent) { + defer wg.Done() + // 非阻塞的方式发送关闭事件 + select { + case ch <- &InstanceEvent{ + Type: "shutdown", + Time: time.Now(), + }: + default: + // 不可用,忽略 + } + }(subscriberChan) + return true + }) + + // 等待所有订阅者处理完关闭事件 + time.Sleep(100 * time.Millisecond) + + // 关闭所有订阅者通道 + m.subscribers.Range(func(key, value any) bool { + subscriberChan := value.(chan *InstanceEvent) + // 检查通道是否已关闭,如果没有则关闭它 + if _, loaded := closedChannels.LoadOrStore(subscriberChan, true); !loaded { + wg.Add(1) + go func(k any, ch chan *InstanceEvent) { + defer wg.Done() + close(ch) + m.subscribers.Delete(k) + }(key, subscriberChan) + } + return true + }) + + // 停止所有运行中的实例 + m.instances.Range(func(key, value any) bool { + instance := value.(*Instance) + // 如果实例正在运行,则停止它 + if instance.Status == "running" && instance.cmd != nil && instance.cmd.Process != nil { + wg.Add(1) + go func(inst *Instance) { + defer wg.Done() + m.stopInstance(inst) + }(instance) + } + return true + }) + + wg.Wait() + + // 关闭事件通知通道,停止事件分发器 + close(m.notifyChannel) + + // 保存实例状态 + if err := m.saveState(); err != nil { + m.logger.Error("Save gob failed: %v", err) + } else { + m.logger.Info("Instances saved: %v", m.statePath) + } + + // 关闭HTTP服务器 + if err := m.server.Shutdown(ctx); err != nil { + m.logger.Error("ApiSvr shutdown error: %v", err) + } + }) +} + +// saveState 保存实例状态到文件 +func (m *Master) saveState() error { + // 创建持久化数据 + persistentData := make(map[string]*Instance) + + // 从sync.Map转换数据 + m.instances.Range(func(key, value any) bool { + instance := value.(*Instance) + persistentData[key.(string)] = instance + return true + }) + + // 如果没有实例,直接返回 + if len(persistentData) == 0 { + // 如果状态文件存在,删除它 + if _, err := os.Stat(m.statePath); err == nil { + return os.Remove(m.statePath) + } + return nil + } + + // 确保目录存在 + if err := os.MkdirAll(filepath.Dir(m.statePath), 0755); err != nil { + m.logger.Error("Create state dir failed: %v", err) + return err + } + + // 创建临时文件 + 
tempFile, err := os.CreateTemp(filepath.Dir(m.statePath), "np-*.tmp") + if err != nil { + m.logger.Error("Create temp failed: %v", err) + return err + } + tempPath := tempFile.Name() + + // 删除临时文件的函数,只在错误情况下使用 + removeTemp := func() { + if _, err := os.Stat(tempPath); err == nil { + os.Remove(tempPath) + } + } + + // 编码数据 + encoder := gob.NewEncoder(tempFile) + if err := encoder.Encode(persistentData); err != nil { + m.logger.Error("Encode instances failed: %v", err) + tempFile.Close() + removeTemp() + return err + } + + // 关闭文件 + if err := tempFile.Close(); err != nil { + m.logger.Error("Close temp failed: %v", err) + removeTemp() + return err + } + + // 原子地替换文件 + if err := os.Rename(tempPath, m.statePath); err != nil { + m.logger.Error("Rename temp failed: %v", err) + removeTemp() + return err + } + + return nil +} + +// loadState 从文件加载实例状态 +func (m *Master) loadState() { + // 检查文件是否存在 + if _, err := os.Stat(m.statePath); os.IsNotExist(err) { + return + } + + // 打开文件 + file, err := os.Open(m.statePath) + if err != nil { + m.logger.Error("Open file failed: %v", err) + return + } + defer file.Close() + + // 解码数据 + var persistentData map[string]*Instance + decoder := gob.NewDecoder(file) + if err := decoder.Decode(&persistentData); err != nil { + m.logger.Error("Decode file failed: %v", err) + return + } + + // 恢复实例 + for id, instance := range persistentData { + instance.stopped = make(chan struct{}) + m.instances.Store(id, instance) + + // 处理自启动 + if instance.Restart { + go m.startInstance(instance) + m.logger.Info("Auto-starting instance: %v [%v]", instance.URL, instance.ID) + } + } + + m.logger.Info("Loaded %v instances from %v", len(persistentData), m.statePath) +} + +// handleOpenAPISpec 处理OpenAPI规范请求 +func (m *Master) handleOpenAPISpec(w http.ResponseWriter, r *http.Request) { + setCorsHeaders(w) + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(generateOpenAPISpec())) +} + +// handleSwaggerUI 处理Swagger UI请求 +func (m *Master) 
handleSwaggerUI(w http.ResponseWriter, r *http.Request) { + setCorsHeaders(w) + w.Header().Set("Content-Type", "text/html") + fmt.Fprintf(w, swaggerUIHTML, generateOpenAPISpec()) +} + +// handleInfo 处理系统信息请求 +func (m *Master) handleInfo(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodGet { + httpError(w, "Method not allowed", http.StatusMethodNotAllowed) + return + } + + info := map[string]any{ + "os": runtime.GOOS, + "arch": runtime.GOARCH, + "ver": m.version, + "name": m.hostname, + "uptime": uint64(time.Since(m.startTime).Seconds()), + "log": m.logLevel, + "tls": m.tlsCode, + "crt": m.crtPath, + "key": m.keyPath, + } + + writeJSON(w, http.StatusOK, info) +} + +// handleInstances 处理实例集合请求 +func (m *Master) handleInstances(w http.ResponseWriter, r *http.Request) { + switch r.Method { + case http.MethodGet: + // 获取所有实例 + instances := []*Instance{} + m.instances.Range(func(_, value any) bool { + instances = append(instances, value.(*Instance)) + return true + }) + writeJSON(w, http.StatusOK, instances) + + case http.MethodPost: + // 创建新实例 + var reqData struct { + URL string `json:"url"` + } + if err := json.NewDecoder(r.Body).Decode(&reqData); err != nil || reqData.URL == "" { + httpError(w, "Invalid request body", http.StatusBadRequest) + return + } + + // 解析URL + parsedURL, err := url.Parse(reqData.URL) + if err != nil { + httpError(w, "Invalid URL format", http.StatusBadRequest) + return + } + + // 验证实例类型 + instanceType := parsedURL.Scheme + if instanceType != "client" && instanceType != "server" { + httpError(w, "Invalid URL scheme", http.StatusBadRequest) + return + } + + // 生成实例ID + id := generateID() + if _, exists := m.instances.Load(id); exists { + httpError(w, "Instance ID already exists", http.StatusConflict) + return + } + + // 创建实例 + instance := &Instance{ + ID: id, + Type: instanceType, + URL: m.enhanceURL(reqData.URL, instanceType), + Status: "stopped", + Restart: false, + stopped: make(chan struct{}), + } + m.instances.Store(id, 
instance) + + // 启动实例 + go m.startInstance(instance) + + // 保存实例状态 + go func() { + time.Sleep(100 * time.Millisecond) + m.saveState() + }() + writeJSON(w, http.StatusCreated, instance) + + // 发送创建事件 + m.sendSSEEvent("create", instance) + + default: + httpError(w, "Method not allowed", http.StatusMethodNotAllowed) + } +} + +// handleInstanceDetail 处理单个实例请求 +func (m *Master) handleInstanceDetail(w http.ResponseWriter, r *http.Request) { + // 获取实例ID + id := strings.TrimPrefix(r.URL.Path, fmt.Sprintf("%s/instances/", m.prefix)) + if id == "" || id == "/" { + httpError(w, "Instance ID is required", http.StatusBadRequest) + return + } + + // 查找实例 + instance, ok := m.findInstance(id) + if !ok { + httpError(w, "Instance not found", http.StatusNotFound) + return + } + + switch r.Method { + case http.MethodGet: + m.handleGetInstance(w, instance) + case http.MethodPatch: + m.handlePatchInstance(w, r, id, instance) + case http.MethodPut: + m.handlePutInstance(w, r, id, instance) + case http.MethodDelete: + m.handleDeleteInstance(w, id, instance) + default: + httpError(w, "Method not allowed", http.StatusMethodNotAllowed) + } +} + +// handleGetInstance 处理获取实例信息请求 +func (m *Master) handleGetInstance(w http.ResponseWriter, instance *Instance) { + writeJSON(w, http.StatusOK, instance) +} + +// handlePatchInstance 处理更新实例状态请求 +func (m *Master) handlePatchInstance(w http.ResponseWriter, r *http.Request, id string, instance *Instance) { + var reqData struct { + Alias string `json:"alias,omitempty"` + Action string `json:"action,omitempty"` + Restart *bool `json:"restart,omitempty"` + } + if err := json.NewDecoder(r.Body).Decode(&reqData); err == nil { + if id == apiKeyID { + // API Key实例只允许restart操作 + if reqData.Action == "restart" { + m.regenerateAPIKey(instance) + // 只有API Key需要在这里发送事件 + m.sendSSEEvent("update", instance) + } + } else { + // 重置流量统计 + if reqData.Action == "reset" { + instance.TCPRX = 0 + instance.TCPTX = 0 + instance.UDPRX = 0 + instance.UDPTX = 0 + 
m.instances.Store(id, instance) + m.saveState() + m.logger.Info("Traffic stats reset: [%v]", instance.ID) + + // 发送流量统计重置事件 + m.sendSSEEvent("update", instance) + } + + // 更新自启动设置 + if reqData.Restart != nil && instance.Restart != *reqData.Restart { + instance.Restart = *reqData.Restart + m.instances.Store(id, instance) + m.saveState() + m.logger.Info("Restart policy updated: %v [%v]", *reqData.Restart, instance.ID) + + // 发送restart策略变更事件 + m.sendSSEEvent("update", instance) + } + + // 更新实例别名 + if reqData.Alias != "" && instance.Alias != reqData.Alias { + instance.Alias = reqData.Alias + m.instances.Store(id, instance) + m.saveState() + m.logger.Info("Alias updated: %v [%v]", reqData.Alias, instance.ID) + + // 发送别名变更事件 + m.sendSSEEvent("update", instance) + } + + // 处理当前实例操作 + if reqData.Action != "" && reqData.Action != "reset" { + m.processInstanceAction(instance, reqData.Action) + } + } + } + writeJSON(w, http.StatusOK, instance) +} + +// handlePutInstance 处理更新实例URL请求 +func (m *Master) handlePutInstance(w http.ResponseWriter, r *http.Request, id string, instance *Instance) { + // API Key实例不允许修改URL + if id == apiKeyID { + httpError(w, "Forbidden: API Key", http.StatusForbidden) + return + } + + var reqData struct { + URL string `json:"url"` + } + if err := json.NewDecoder(r.Body).Decode(&reqData); err != nil || reqData.URL == "" { + httpError(w, "Invalid request body", http.StatusBadRequest) + return + } + + // 解析URL + parsedURL, err := url.Parse(reqData.URL) + if err != nil { + httpError(w, "Invalid URL format", http.StatusBadRequest) + return + } + + // 验证实例类型 + instanceType := parsedURL.Scheme + if instanceType != "client" && instanceType != "server" { + httpError(w, "Invalid URL scheme", http.StatusBadRequest) + return + } + + // 增强URL以便进行重复检测 + enhancedURL := m.enhanceURL(reqData.URL, instanceType) + + // 检查是否与当前实例的URL相同 + if instance.URL == enhancedURL { + httpError(w, "Instance URL conflict", http.StatusConflict) + return + } + + // 如果实例正在运行,先停止它 + if 
instance.Status == "running" { + m.stopInstance(instance) + time.Sleep(100 * time.Millisecond) + } + + // 更新实例URL和类型 + instance.URL = enhancedURL + instance.Type = instanceType + + // 更新实例状态 + instance.Status = "stopped" + m.instances.Store(id, instance) + + // 启动实例 + go m.startInstance(instance) + + // 保存实例状态 + go func() { + time.Sleep(100 * time.Millisecond) + m.saveState() + }() + writeJSON(w, http.StatusOK, instance) + + m.logger.Info("Instance URL updated: %v [%v]", instance.URL, instance.ID) +} + +// regenerateAPIKey 重新生成API Key +func (m *Master) regenerateAPIKey(instance *Instance) { + instance.URL = generateAPIKey() + m.instances.Store(apiKeyID, instance) + m.saveState() + m.logger.Info("API Key regenerated: %v", instance.URL) +} + +// processInstanceAction 处理实例操作 +func (m *Master) processInstanceAction(instance *Instance, action string) { + switch action { + case "start": + if instance.Status != "running" { + go m.startInstance(instance) + } + case "stop": + if instance.Status == "running" { + go m.stopInstance(instance) + } + case "restart": + if instance.Status == "running" { + go func() { + m.stopInstance(instance) + time.Sleep(100 * time.Millisecond) + m.startInstance(instance) + }() + } else { + go m.startInstance(instance) + } + } +} + +// handleDeleteInstance 处理删除实例请求 +func (m *Master) handleDeleteInstance(w http.ResponseWriter, id string, instance *Instance) { + // API Key实例不允许删除 + if id == apiKeyID { + httpError(w, "Forbidden: API Key", http.StatusForbidden) + return + } + + if instance.Status == "running" { + m.stopInstance(instance) + } + m.instances.Delete(id) + // 删除实例后保存状态 + m.saveState() + w.WriteHeader(http.StatusNoContent) + + // 发送删除事件 + m.sendSSEEvent("delete", instance) +} + +// handleSSE 处理SSE连接请求 +func (m *Master) handleSSE(w http.ResponseWriter, r *http.Request) { + // 验证是否为GET请求 + if r.Method != http.MethodGet { + httpError(w, "Method not allowed", http.StatusMethodNotAllowed) + return + } + + // 设置SSE相关响应头 + 
w.Header().Set("Content-Type", "text/event-stream") + w.Header().Set("Cache-Control", "no-cache") + w.Header().Set("Connection", "keep-alive") + w.Header().Set("Access-Control-Allow-Origin", "*") + + // 创建唯一的订阅者ID + subscriberID := generateID() + + // 创建一个通道用于接收事件 + events := make(chan *InstanceEvent, 10) + + // 注册订阅者 + m.subscribers.Store(subscriberID, events) + defer m.subscribers.Delete(subscriberID) + + // 发送初始重试间隔 + fmt.Fprintf(w, "retry: %d\n\n", sseRetryTime) + + // 获取当前所有实例并发送初始状态 + m.instances.Range(func(_, value any) bool { + instance := value.(*Instance) + event := &InstanceEvent{ + Type: "initial", + Time: time.Now(), + Instance: instance, + } + + data, err := json.Marshal(event) + if err == nil { + fmt.Fprintf(w, "event: instance\ndata: %s\n\n", data) + w.(http.Flusher).Flush() + } + return true + }) + + // 设置客户端连接超时 + ctx, cancel := context.WithCancel(r.Context()) + defer cancel() + + // 客户端连接关闭标志 + connectionClosed := make(chan struct{}) + + // 监听客户端连接是否关闭,但不关闭通道,留给Shutdown处理 + go func() { + <-ctx.Done() + close(connectionClosed) + // 只从映射表中移除,但不关闭通道 + m.subscribers.Delete(subscriberID) + }() + + // 持续发送事件到客户端 + for { + select { + case <-connectionClosed: + return + case event, ok := <-events: + if !ok { + return + } + + // 序列化事件数据 + data, err := json.Marshal(event) + if err != nil { + m.logger.Error("Event marshal error: %v", err) + continue + } + + // 发送事件 + fmt.Fprintf(w, "event: instance\ndata: %s\n\n", data) + w.(http.Flusher).Flush() + } + } +} + +// sendSSEEvent 发送SSE事件的通用函数 +func (m *Master) sendSSEEvent(eventType string, instance *Instance, logs ...string) { + event := &InstanceEvent{ + Type: eventType, + Time: time.Now(), + Instance: instance, + } + + // 如果有日志内容,添加到事件中 + if len(logs) > 0 { + event.Logs = logs[0] + } + + // 非阻塞方式发送事件 + select { + case m.notifyChannel <- event: + default: + // 通道已满或关闭,忽略 + } +} + +// startEventDispatcher 启动事件分发器 +func (m *Master) startEventDispatcher() { + for event := range m.notifyChannel { + // 向所有订阅者分发事件 
+ m.subscribers.Range(func(_, value any) bool { + eventChan := value.(chan *InstanceEvent) + // 非阻塞方式发送事件 + select { + case eventChan <- event: + default: + // 不可用,忽略 + } + return true + }) + } +} + +// findInstance 查找实例 +func (m *Master) findInstance(id string) (*Instance, bool) { + value, exists := m.instances.Load(id) + if !exists { + return nil, false + } + return value.(*Instance), true +} + +// startInstance 启动实例 +func (m *Master) startInstance(instance *Instance) { + // 获取最新实例状态 + if value, exists := m.instances.Load(instance.ID); exists { + instance = value.(*Instance) + if instance.Status == "running" { + return + } + } + + // 保存原始流量统计 + originalTCPRX := instance.TCPRX + originalTCPTX := instance.TCPTX + originalUDPRX := instance.UDPRX + originalUDPTX := instance.UDPTX + + // 获取可执行文件路径 + execPath, err := os.Executable() + if err != nil { + m.logger.Error("Get path failed: %v [%v]", err, instance.ID) + instance.Status = "error" + m.instances.Store(instance.ID, instance) + return + } + + // 创建上下文和命令 + ctx, cancel := context.WithCancel(context.Background()) + cmd := exec.CommandContext(ctx, execPath, instance.URL) + instance.cancelFunc = cancel + + // 设置日志输出 + writer := NewInstanceLogWriter(instance.ID, instance, os.Stdout, m) + cmd.Stdout, cmd.Stderr = writer, writer + + m.logger.Info("Instance starting: %v [%v]", instance.URL, instance.ID) + + // 启动实例 + if err := cmd.Start(); err != nil { + m.logger.Error("Instance error: %v [%v]", err, instance.ID) + instance.Status = "error" + cancel() + } else { + instance.cmd = cmd + instance.Status = "running" + + // 恢复原始流量统计 + instance.TCPRX = originalTCPRX + instance.TCPTX = originalTCPTX + instance.UDPRX = originalUDPRX + instance.UDPTX = originalUDPTX + + go m.monitorInstance(instance, cmd) + } + + m.instances.Store(instance.ID, instance) + + // 发送启动事件 + m.sendSSEEvent("update", instance) +} + +// monitorInstance 监控实例状态 +func (m *Master) monitorInstance(instance *Instance, cmd *exec.Cmd) { + select { + case 
<-instance.stopped: + // 实例被显式停止 + return + default: + // 等待进程完成 + err := cmd.Wait() + + // 获取最新的实例状态 + if value, exists := m.instances.Load(instance.ID); exists { + instance = value.(*Instance) + + // 仅在实例状态为running时才发送事件 + if instance.Status == "running" { + if err != nil { + m.logger.Error("Instance error: %v [%v]", err, instance.ID) + instance.Status = "error" + } else { + instance.Status = "stopped" + } + m.instances.Store(instance.ID, instance) + + // 安全地发送停止事件,避免向已关闭的通道发送 + m.sendSSEEvent("update", instance) + } + } + } +} + +// stopInstance 停止实例 +func (m *Master) stopInstance(instance *Instance) { + // 如果已经是停止状态,不重复操作 + if instance.Status == "stopped" { + return + } + + // 如果没有命令或进程,直接设为已停止 + if instance.cmd == nil || instance.cmd.Process == nil { + instance.Status = "stopped" + m.instances.Store(instance.ID, instance) + m.sendSSEEvent("update", instance) + return + } + + // 发送终止信号 + if instance.cmd.Process != nil { + if runtime.GOOS == "windows" { + instance.cmd.Process.Signal(os.Interrupt) + } else { + instance.cmd.Process.Signal(syscall.SIGTERM) + } + time.Sleep(100 * time.Millisecond) + } + + // 关闭停止通道 + close(instance.stopped) + + // 取消执行或强制终止 + if instance.cancelFunc != nil { + instance.cancelFunc() + } else { + err := instance.cmd.Process.Kill() + if err != nil { + m.logger.Error("Instance error: %v [%v]", err, instance.ID) + } + } + + m.logger.Info("Instance stopped [%v]", instance.ID) + + // 重置实例状态 + instance.Status = "stopped" + instance.stopped = make(chan struct{}) + instance.cancelFunc = nil + m.instances.Store(instance.ID, instance) + + // 保存状态变更 + m.saveState() + + // 发送停止事件 + m.sendSSEEvent("update", instance) +} + +// enhanceURL 增强URL,添加日志级别和TLS配置 +func (m *Master) enhanceURL(instanceURL string, instanceType string) string { + parsedURL, err := url.Parse(instanceURL) + if err != nil { + m.logger.Error("Invalid URL format: %v", err) + return instanceURL + } + + query := parsedURL.Query() + + // 设置日志级别 + if m.logLevel != "" && 
query.Get("log") == "" { + query.Set("log", m.logLevel) + } + + // 为服务端实例设置TLS配置 + if instanceType == "server" && m.tlsCode != "0" { + if query.Get("tls") == "" { + query.Set("tls", m.tlsCode) + } + + // 为TLS code-2设置证书和密钥 + if m.tlsCode == "2" { + if m.crtPath != "" && query.Get("crt") == "" { + query.Set("crt", m.crtPath) + } + if m.keyPath != "" && query.Get("key") == "" { + query.Set("key", m.keyPath) + } + } + } + + parsedURL.RawQuery = query.Encode() + return parsedURL.String() +} + +// generateID 生成随机ID +func generateID() string { + bytes := make([]byte, 4) + rand.Read(bytes) + return hex.EncodeToString(bytes) +} + +// generateAPIKey 生成API Key +func generateAPIKey() string { + bytes := make([]byte, 16) + rand.Read(bytes) + return hex.EncodeToString(bytes) +} + +// httpError 返回HTTP错误 +func httpError(w http.ResponseWriter, message string, statusCode int) { + setCorsHeaders(w) + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(statusCode) + json.NewEncoder(w).Encode(map[string]string{"error": message}) +} + +// writeJSON 写入JSON响应 +func writeJSON(w http.ResponseWriter, statusCode int, data any) { + setCorsHeaders(w) + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(statusCode) + json.NewEncoder(w).Encode(data) +} + +// generateOpenAPISpec 生成OpenAPI规范文档 +func generateOpenAPISpec() string { + return fmt.Sprintf(`{ + "openapi": "3.1.1", + "info": { + "title": "NodePass API", + "description": "API for managing NodePass server and client instances", + "version": "%s" + }, + "servers": [{"url": "/{prefix}/v1", "variables": {"prefix": {"default": "api", "description": "API prefix path"}}}], + "security": [{"ApiKeyAuth": []}], + "paths": { + "/instances": { + "get": { + "summary": "List all instances", + "security": [{"ApiKeyAuth": []}], + "responses": { + "200": {"description": "Success", "content": {"application/json": {"schema": {"type": "array", "items": {"$ref": "#/components/schemas/Instance"}}}}}, + "401": {"description": 
"Unauthorized"}, + "405": {"description": "Method not allowed"} + } + }, + "post": { + "summary": "Create a new instance", + "security": [{"ApiKeyAuth": []}], + "requestBody": {"required": true, "content": {"application/json": {"schema": {"$ref": "#/components/schemas/CreateInstanceRequest"}}}}, + "responses": { + "201": {"description": "Created", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/Instance"}}}}, + "400": {"description": "Invalid input"}, + "401": {"description": "Unauthorized"}, + "405": {"description": "Method not allowed"}, + "409": {"description": "Instance ID already exists"} + } + } + }, + "/instances/{id}": { + "parameters": [{"name": "id", "in": "path", "required": true, "schema": {"type": "string"}}], + "get": { + "summary": "Get instance details", + "security": [{"ApiKeyAuth": []}], + "responses": { + "200": {"description": "Success", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/Instance"}}}}, + "400": {"description": "Instance ID required"}, + "401": {"description": "Unauthorized"}, + "404": {"description": "Not found"}, + "405": {"description": "Method not allowed"} + } + }, + "patch": { + "summary": "Update instance", + "security": [{"ApiKeyAuth": []}], + "requestBody": {"required": true, "content": {"application/json": {"schema": {"$ref": "#/components/schemas/UpdateInstanceRequest"}}}}, + "responses": { + "200": {"description": "Success", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/Instance"}}}}, + "400": {"description": "Instance ID required or invalid input"}, + "401": {"description": "Unauthorized"}, + "404": {"description": "Not found"}, + "405": {"description": "Method not allowed"} + } + }, + "put": { + "summary": "Update instance URL", + "security": [{"ApiKeyAuth": []}], + "requestBody": {"required": true, "content": {"application/json": {"schema": {"$ref": "#/components/schemas/PutInstanceRequest"}}}}, + "responses": { + "200": {"description": 
"Success", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/Instance"}}}}, + "400": {"description": "Instance ID required or invalid input"}, + "401": {"description": "Unauthorized"}, + "403": {"description": "Forbidden"}, + "404": {"description": "Not found"}, + "405": {"description": "Method not allowed"}, + "409": {"description": "Instance URL conflict"} + } + }, + "delete": { + "summary": "Delete instance", + "security": [{"ApiKeyAuth": []}], + "responses": { + "204": {"description": "Deleted"}, + "400": {"description": "Instance ID required"}, + "401": {"description": "Unauthorized"}, + "403": {"description": "Forbidden"}, + "404": {"description": "Not found"}, + "405": {"description": "Method not allowed"} + } + } + }, + "/events": { + "get": { + "summary": "Subscribe to instance events", + "security": [{"ApiKeyAuth": []}], + "responses": { + "200": {"description": "Success", "content": {"text/event-stream": {}}}, + "401": {"description": "Unauthorized"}, + "405": {"description": "Method not allowed"} + } + } + }, + "/info": { + "get": { + "summary": "Get master information", + "security": [{"ApiKeyAuth": []}], + "responses": { + "200": {"description": "Success", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/MasterInfo"}}}}, + "401": {"description": "Unauthorized"}, + "405": {"description": "Method not allowed"} + } + } + }, + "/openapi.json": { + "get": { + "summary": "Get OpenAPI specification", + "responses": { + "200": {"description": "Success", "content": {"application/json": {}}} + } + } + }, + "/docs": { + "get": { + "summary": "Get Swagger UI", + "responses": { + "200": {"description": "Success", "content": {"text/html": {}}} + } + } + } + }, + "components": { + "securitySchemes": { + "ApiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "X-API-Key", + "description": "API Key for authentication" + } + }, + "schemas": { + "Instance": { + "type": "object", + "properties": { + "id": {"type": 
"string", "description": "Unique identifier"}, + "alias": {"type": "string", "description": "Instance alias"}, + "type": {"type": "string", "enum": ["client", "server"], "description": "Type of instance"}, + "status": {"type": "string", "enum": ["running", "stopped", "error"], "description": "Instance status"}, + "url": {"type": "string", "description": "Command string or API Key"}, + "restart": {"type": "boolean", "description": "Restart policy"}, + "tcprx": {"type": "integer", "description": "TCP received bytes"}, + "tcptx": {"type": "integer", "description": "TCP transmitted bytes"}, + "udprx": {"type": "integer", "description": "UDP received bytes"}, + "udptx": {"type": "integer", "description": "UDP transmitted bytes"} + } + }, + "CreateInstanceRequest": { + "type": "object", + "required": ["url"], + "properties": {"url": {"type": "string", "description": "Command string(scheme://host:port/host:port)"}} + }, + "UpdateInstanceRequest": { + "type": "object", + "properties": { + "alias": {"type": "string", "description": "Instance alias"}, + "action": {"type": "string", "enum": ["start", "stop", "restart", "reset"], "description": "Action for the instance"}, + "restart": {"type": "boolean", "description": "Instance restart policy"} + } + }, + "PutInstanceRequest": { + "type": "object", + "required": ["url"], + "properties": {"url": {"type": "string", "description": "New command string(scheme://host:port/host:port)"}} + }, + "MasterInfo": { + "type": "object", + "properties": { + "os": {"type": "string", "description": "Operating system"}, + "arch": {"type": "string", "description": "System architecture"}, + "ver": {"type": "string", "description": "NodePass version"}, + "name": {"type": "string", "description": "Hostname"}, + "uptime": {"type": "integer", "format": "int64", "description": "Uptime in seconds"}, + "log": {"type": "string", "description": "Log level"}, + "tls": {"type": "string", "description": "TLS code"}, + "crt": {"type": "string", "description": 
"Certificate path"}, + "key": {"type": "string", "description": "Private key path"} + } + } + } + } +}`, openAPIVersion) +} diff --git a/nodepass/internal/server.go b/nodepass/internal/server.go new file mode 100644 index 0000000000..12bebf27cd --- /dev/null +++ b/nodepass/internal/server.go @@ -0,0 +1,175 @@ +// 内部包,实现服务端模式功能 +package internal + +import ( + "bufio" + "bytes" + "context" + "crypto/tls" + "net" + "net/url" + "os" + "os/signal" + "syscall" + "time" + + "github.com/NodePassProject/logs" + "github.com/NodePassProject/pool" +) + +// Server 实现服务端模式功能 +type Server struct { + Common // 继承共享功能 + tlsConfig *tls.Config // TLS配置 + clientIP string // 客户端IP +} + +// NewServer 创建新的服务端实例 +func NewServer(parsedURL *url.URL, tlsCode string, tlsConfig *tls.Config, logger *logs.Logger) *Server { + server := &Server{ + Common: Common{ + tlsCode: tlsCode, + dataFlow: "+", + logger: logger, + semaphore: make(chan struct{}, semaphoreLimit), + signalChan: make(chan string, semaphoreLimit), + }, + tlsConfig: tlsConfig, + } + // 初始化公共字段 + server.getTunnelKey(parsedURL) + server.getAddress(parsedURL) + return server +} + +// Run 管理服务端生命周期 +func (s *Server) Run() { + s.logger.Info("Server started: %v@%v/%v", s.tunnelKey, s.tunnelAddr, s.targetTCPAddr) + + // 启动服务端并处理重启 + go func() { + for { + time.Sleep(serviceCooldown) + if err := s.start(); err != nil { + s.logger.Error("Server error: %v", err) + s.stop() + s.logger.Info("Server restarted: %v@%v/%v", s.tunnelKey, s.tunnelAddr, s.targetTCPAddr) + } + } + }() + + // 监听系统信号以优雅关闭 + ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM) + <-ctx.Done() + stop() + + // 执行关闭过程 + shutdownCtx, cancel := context.WithTimeout(context.Background(), shutdownTimeout) + defer cancel() + if err := s.shutdown(shutdownCtx, s.stop); err != nil { + s.logger.Error("Server shutdown error: %v", err) + } else { + s.logger.Info("Server shutdown complete") + } +} + +// start 启动服务端 +func (s *Server) start() error { + 
s.initContext() + + // 初始化隧道监听器 + if err := s.initTunnelListener(); err != nil { + return err + } + + // 通过是否监听成功判断数据流向 + if err := s.initTargetListener(); err == nil { + s.dataFlow = "-" + } + + // 与客户端进行握手 + if err := s.tunnelHandshake(); err != nil { + return err + } + + // 握手之后把UDP监听关掉 + if s.tunnelUDPConn != nil { + s.tunnelUDPConn.Close() + } + + // 初始化隧道连接池 + s.tunnelPool = pool.NewServerPool( + s.clientIP, + s.tlsConfig, + s.tunnelListener, + reportInterval) + + go s.tunnelPool.ServerManager() + + switch s.dataFlow { + case "-": + go s.commonLoop() + case "+": + go s.commonOnce() + go s.commonQueue() + } + return s.healthCheck() +} + +// tunnelHandshake 与客户端进行握手 +func (s *Server) tunnelHandshake() error { + // 接受隧道连接 + for { + tunnelTCPConn, err := s.tunnelListener.Accept() + if err != nil { + s.logger.Error("Accept error: %v", err) + time.Sleep(serviceCooldown) + continue + } + + tunnelTCPConn.SetReadDeadline(time.Now().Add(tcpReadTimeout)) + + bufReader := bufio.NewReader(tunnelTCPConn) + rawTunnelKey, err := bufReader.ReadString('\n') + if err != nil { + s.logger.Warn("Handshake timeout: %v", tunnelTCPConn.RemoteAddr()) + tunnelTCPConn.Close() + time.Sleep(serviceCooldown) + continue + } + + tunnelTCPConn.SetReadDeadline(time.Time{}) + tunnelKey := string(s.xor(bytes.TrimSuffix([]byte(rawTunnelKey), []byte{'\n'}))) + + if tunnelKey != s.tunnelKey { + s.logger.Warn("Access denied: %v", tunnelTCPConn.RemoteAddr()) + tunnelTCPConn.Close() + time.Sleep(serviceCooldown) + continue + } else { + s.tunnelTCPConn = tunnelTCPConn.(*net.TCPConn) + s.bufReader = bufio.NewReader(s.tunnelTCPConn) + s.tunnelTCPConn.SetKeepAlive(true) + s.tunnelTCPConn.SetKeepAlivePeriod(reportInterval) + + // 记录客户端IP + s.clientIP = s.tunnelTCPConn.RemoteAddr().(*net.TCPAddr).IP.String() + break + } + } + + // 构建并发送隧道URL到客户端 + tunnelURL := &url.URL{ + Host: s.dataFlow, + Fragment: s.tlsCode, + } + + _, err := s.tunnelTCPConn.Write(append(s.xor([]byte(tunnelURL.String())), '\n')) + if 
err != nil { + return err + } + + s.logger.Info("Tunnel signal -> : %v -> %v", tunnelURL.String(), s.tunnelTCPConn.RemoteAddr()) + s.logger.Info("Tunnel handshaked: %v <-> %v", s.tunnelTCPConn.LocalAddr(), s.tunnelTCPConn.RemoteAddr()) + return nil +} diff --git a/openwrt-packages/filebrowser/Makefile b/openwrt-packages/filebrowser/Makefile index 796f1006e0..c9dfae090b 100644 --- a/openwrt-packages/filebrowser/Makefile +++ b/openwrt-packages/filebrowser/Makefile @@ -5,12 +5,12 @@ include $(TOPDIR)/rules.mk PKG_NAME:=filebrowser -PKG_VERSION:=2.39.0 +PKG_VERSION:=2.40.0 PKG_RELEASE:=1 PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz PKG_SOURCE_URL:=https://codeload.github.com/filebrowser/filebrowser/tar.gz/v${PKG_VERSION}? -PKG_HASH:=50cd9f1ffacf49f35ab145966fb912731598d5c3d7975820388a20fa18cc08ae +PKG_HASH:=0219856deb87e5337e119ee1b8942089be147e25c313e6cc70d854e23e82dcdc PKG_LICENSE:=Apache-2.0 PKG_LICENSE_FILES:=LICENSE diff --git a/openwrt-passwall/luci-app-passwall/luasrc/model/cbi/passwall/client/type/ray.lua b/openwrt-passwall/luci-app-passwall/luasrc/model/cbi/passwall/client/type/ray.lua index 5d7a62a8b5..e943fc2678 100644 --- a/openwrt-passwall/luci-app-passwall/luasrc/model/cbi/passwall/client/type/ray.lua +++ b/openwrt-passwall/luci-app-passwall/luasrc/model/cbi/passwall/client/type/ray.lua @@ -24,7 +24,7 @@ local ss_method_list = { local security_list = { "none", "auto", "aes-128-gcm", "chacha20-poly1305", "zero" } local header_type_list = { - "none", "srtp", "utp", "wechat-video", "dtls", "wireguard" + "none", "srtp", "utp", "wechat-video", "dtls", "wireguard", "dns" } local xray_version = api.get_app_version("xray") @@ -462,10 +462,13 @@ o:depends({ [_n("tcp_guise")] = "http" }) -- [[ mKCP部分 ]]-- -o = s:option(ListValue, _n("mkcp_guise"), translate("Camouflage Type"), translate('
none: default, no masquerade, data sent is packets with no characteristics.
srtp: disguised as an SRTP packet, it will be recognized as video call data (such as FaceTime).
utp: packets disguised as uTP will be recognized as bittorrent downloaded data.
wechat-video: packets disguised as WeChat video calls.
dtls: disguised as DTLS 1.2 packet.
wireguard: disguised as a WireGuard packet. (not really WireGuard protocol)')) +o = s:option(ListValue, _n("mkcp_guise"), translate("Camouflage Type"), translate('
none: default, no masquerade, data sent is packets with no characteristics.
srtp: disguised as an SRTP packet, it will be recognized as video call data (such as FaceTime).
utp: packets disguised as uTP will be recognized as bittorrent downloaded data.
wechat-video: packets disguised as WeChat video calls.
dtls: disguised as DTLS 1.2 packet.
wireguard: disguised as a WireGuard packet. (not really WireGuard protocol)
dns: Disguising traffic as DNS requests.')) for a, t in ipairs(header_type_list) do o:value(t) end o:depends({ [_n("transport")] = "mkcp" }) +o = s:option(Value, _n("mkcp_domain"), translate("Camouflage Domain"), translate("Use it together with the DNS disguised type. You can fill in any domain.")) +o:depends({ [_n("mkcp_guise")] = "dns" }) + o = s:option(Value, _n("mkcp_mtu"), translate("KCP MTU")) o.default = "1350" o:depends({ [_n("transport")] = "mkcp" }) diff --git a/openwrt-passwall/luci-app-passwall/luasrc/model/cbi/passwall/server/type/ray.lua b/openwrt-passwall/luci-app-passwall/luasrc/model/cbi/passwall/server/type/ray.lua index bdd62575f3..4b532b8125 100644 --- a/openwrt-passwall/luci-app-passwall/luasrc/model/cbi/passwall/server/type/ray.lua +++ b/openwrt-passwall/luci-app-passwall/luasrc/model/cbi/passwall/server/type/ray.lua @@ -21,7 +21,7 @@ local x_ss_method_list = { } local header_type_list = { - "none", "srtp", "utp", "wechat-video", "dtls", "wireguard" + "none", "srtp", "utp", "wechat-video", "dtls", "wireguard", "dns" } -- [[ Xray ]] @@ -288,10 +288,13 @@ o:depends({ [_n("tcp_guise")] = "http" }) -- [[ mKCP部分 ]]-- -o = s:option(ListValue, _n("mkcp_guise"), translate("Camouflage Type"), translate('
none: default, no masquerade, data sent is packets with no characteristics.
srtp: disguised as an SRTP packet, it will be recognized as video call data (such as FaceTime).
utp: packets disguised as uTP will be recognized as bittorrent downloaded data.
wechat-video: packets disguised as WeChat video calls.
dtls: disguised as DTLS 1.2 packet.
wireguard: disguised as a WireGuard packet. (not really WireGuard protocol)')) +o = s:option(ListValue, _n("mkcp_guise"), translate("Camouflage Type"), translate('
none: default, no masquerade, data sent is packets with no characteristics.
srtp: disguised as an SRTP packet, it will be recognized as video call data (such as FaceTime).
utp: packets disguised as uTP will be recognized as bittorrent downloaded data.
wechat-video: packets disguised as WeChat video calls.
dtls: disguised as DTLS 1.2 packet.
wireguard: disguised as a WireGuard packet. (not really WireGuard protocol)
dns: Disguising traffic as DNS requests.')) for a, t in ipairs(header_type_list) do o:value(t) end o:depends({ [_n("transport")] = "mkcp" }) +o = s:option(Value, _n("mkcp_domain"), translate("Camouflage Domain"), translate("Use it together with the DNS disguised type. You can fill in any domain.")) +o:depends({ [_n("mkcp_guise")] = "dns" }) + o = s:option(Value, _n("mkcp_mtu"), translate("KCP MTU")) o.default = "1350" o:depends({ [_n("transport")] = "mkcp" }) diff --git a/openwrt-passwall/luci-app-passwall/luasrc/passwall/util_xray.lua b/openwrt-passwall/luci-app-passwall/luasrc/passwall/util_xray.lua index 4b5d5358f3..c9543bbf50 100644 --- a/openwrt-passwall/luci-app-passwall/luasrc/passwall/util_xray.lua +++ b/openwrt-passwall/luci-app-passwall/luasrc/passwall/util_xray.lua @@ -186,7 +186,10 @@ function gen_outbound(flag, node, tag, proxy_table) readBufferSize = tonumber(node.mkcp_readBufferSize), writeBufferSize = tonumber(node.mkcp_writeBufferSize), seed = (node.mkcp_seed and node.mkcp_seed ~= "") and node.mkcp_seed or nil, - header = {type = node.mkcp_guise} + header = { + type = node.mkcp_guise, + domain = node.mkcp_domain + } } or nil, wsSettings = (node.transport == "ws") and { path = node.ws_path or "/", @@ -482,7 +485,10 @@ function gen_config_server(node) readBufferSize = tonumber(node.mkcp_readBufferSize), writeBufferSize = tonumber(node.mkcp_writeBufferSize), seed = (node.mkcp_seed and node.mkcp_seed ~= "") and node.mkcp_seed or nil, - header = {type = node.mkcp_guise} + header = { + type = node.mkcp_guise, + domain = node.mkcp_domain + } } or nil, wsSettings = (node.transport == "ws") and { host = node.ws_host or nil, diff --git a/openwrt-passwall/luci-app-passwall/luasrc/view/passwall/haproxy/js.htm b/openwrt-passwall/luci-app-passwall/luasrc/view/passwall/haproxy/js.htm index 872b819f29..904909678c 100644 --- a/openwrt-passwall/luci-app-passwall/luasrc/view/passwall/haproxy/js.htm +++ 
b/openwrt-passwall/luci-app-passwall/luasrc/view/passwall/haproxy/js.htm @@ -37,8 +37,11 @@ const lastVal = lastInput.value.trim(); const secondLastVal = secondLastInput.value.trim(); - if (lastVal !== secondLastVal && secondLastVal !== "" && secondLastVal !== "0") { - lastInput.value = secondLastVal; + const lbssHiddenInput = lastRow.querySelector("div.cbi-dropdown > div > input[type='hidden'][name$='.lbss']"); + if (!lbssHiddenInput) { + if (lastVal !== secondLastVal && secondLastVal !== "" && secondLastVal !== "0") { + lastInput.value = secondLastVal; + } } }, 300); }); diff --git a/openwrt-passwall/luci-app-passwall/luasrc/view/passwall/node_list/link_share_man.htm b/openwrt-passwall/luci-app-passwall/luasrc/view/passwall/node_list/link_share_man.htm index df6c2f78de..17ce8f1fef 100644 --- a/openwrt-passwall/luci-app-passwall/luasrc/view/passwall/node_list/link_share_man.htm +++ b/openwrt-passwall/luci-app-passwall/luasrc/view/passwall/node_list/link_share_man.htm @@ -261,6 +261,7 @@ local hysteria2_type = map:get("@global_subscribe[0]", "hysteria2_type") or "sin } else if (v_transport === "mkcp") { v_transport = "kcp"; params += opt.query("headerType", dom_prefix + "mkcp_guise"); + params += opt.query("seed", dom_prefix + "mkcp_seed"); } else if (v_transport === "quic") { params += opt.query("headerType", dom_prefix + "quic_guise"); params += opt.query("key", dom_prefix + "quic_key"); @@ -372,6 +373,7 @@ local hysteria2_type = map:get("@global_subscribe[0]", "hysteria2_type") or "sin } else if (v_transport === "mkcp") { v_transport = "kcp"; info.type = opt.get(dom_prefix + "mkcp_guise").value; + info.seed = opt.get(dom_prefix + "mkcp_seed").value; } else if (v_transport === "quic") { info.type = opt.get(dom_prefix + "quic_guise")?.value; info.key = opt.get(dom_prefix + "quic_key")?.value; @@ -419,6 +421,7 @@ local hysteria2_type = map:get("@global_subscribe[0]", "hysteria2_type") or "sin } else if (v_transport === "mkcp") { v_transport = "kcp"; params += 
opt.query("headerType", dom_prefix + "mkcp_guise"); + params += opt.query("seed", dom_prefix + "mkcp_seed"); } else if (v_transport === "quic") { params += opt.query("headerType", dom_prefix + "quic_guise"); params += opt.query("key", dom_prefix + "quic_key"); @@ -496,6 +499,7 @@ local hysteria2_type = map:get("@global_subscribe[0]", "hysteria2_type") or "sin } else if (v_transport === "mkcp") { v_transport = "kcp"; params += opt.query("headerType", dom_prefix + "mkcp_guise"); + params += opt.query("seed", dom_prefix + "mkcp_seed"); } else if (v_transport === "quic") { params += opt.query("headerType", dom_prefix + "quic_guise"); params += opt.query("key", dom_prefix + "quic_key"); @@ -1035,6 +1039,7 @@ local hysteria2_type = map:get("@global_subscribe[0]", "hysteria2_type") or "sin opt.set(dom_prefix + 'quic_key', queryParam.key); } else if (queryParam.type === "mkcp") { opt.set(dom_prefix + 'mkcp_guise', queryParam.headerType || "none"); + opt.set(dom_prefix + 'mkcp_seed', queryParam.seed || ""); } else if (queryParam.type === "grpc") { opt.set(dom_prefix + 'grpc_serviceName', (queryParam.serviceName || queryParam.path) || ""); opt.set(dom_prefix + 'grpc_mode', queryParam.mode || "gun"); @@ -1174,6 +1179,7 @@ local hysteria2_type = map:get("@global_subscribe[0]", "hysteria2_type") or "sin opt.set(dom_prefix + 'quic_key', queryParam.key); } else if (queryParam.type === "mkcp") { opt.set(dom_prefix + 'mkcp_guise', queryParam.headerType || "none"); + opt.set(dom_prefix + 'mkcp_seed', queryParam.seed || ""); } else if (queryParam.type === "grpc") { opt.set(dom_prefix + 'grpc_serviceName', (queryParam.serviceName || queryParam.path) || ""); opt.set(dom_prefix + 'grpc_mode', queryParam.mode || "gun"); @@ -1283,7 +1289,8 @@ local hysteria2_type = map:get("@global_subscribe[0]", "hysteria2_type") or "sin opt.set(dom_prefix + 'quic_security', ssm.securty); opt.set(dom_prefix + 'quic_key', ssm.key); } else if (ssm.net === "kcp" || ssm.net === "mkcp") { - opt.set(dom_prefix 
+ 'mkcp_guise', ssm.type); + opt.set(dom_prefix + 'mkcp_guise', ssm.type || "none"); + opt.set(dom_prefix + 'mkcp_seed', ssm.seed || ""); } else if (ssm.net === "grpc") { opt.set(dom_prefix + 'grpc_serviceName', ssm.path); } @@ -1415,6 +1422,7 @@ local hysteria2_type = map:get("@global_subscribe[0]", "hysteria2_type") or "sin opt.set(dom_prefix + 'quic_key', queryParam.key); } else if (queryParam.type === "kcp" || queryParam.type === "mkcp") { opt.set(dom_prefix + 'mkcp_guise', queryParam.headerType || "none"); + opt.set(dom_prefix + 'mkcp_seed', queryParam.seed || ""); } else if (queryParam.type === "grpc") { opt.set(dom_prefix + 'grpc_serviceName', (queryParam.serviceName || queryParam.path) || ""); opt.set(dom_prefix + 'grpc_mode', queryParam.mode || "gun"); diff --git a/openwrt-passwall/luci-app-passwall/po/zh-cn/passwall.po b/openwrt-passwall/luci-app-passwall/po/zh-cn/passwall.po index 0ffa338b81..1f0d6b6d65 100644 --- a/openwrt-passwall/luci-app-passwall/po/zh-cn/passwall.po +++ b/openwrt-passwall/luci-app-passwall/po/zh-cn/passwall.po @@ -1315,6 +1315,9 @@ msgstr "本地地址" msgid "Decimal numbers separated by \",\" or Base64-encoded strings." msgstr "用“,”隔开的十进制数字或 Base64 编码字符串。" +msgid "Camouflage Domain" +msgstr "伪装域名" + msgid "Camouflage Type" msgstr "伪装类型" @@ -1393,8 +1396,11 @@ msgstr "TUIC socks5 服务器可以从外部接收的最大数据包大小(以 msgid "Set if the listening socket should be dual-stack" msgstr "设置监听套接字为双栈" -msgid "
none: default, no masquerade, data sent is packets with no characteristics.
srtp: disguised as an SRTP packet, it will be recognized as video call data (such as FaceTime).
utp: packets disguised as uTP will be recognized as bittorrent downloaded data.
wechat-video: packets disguised as WeChat video calls.
dtls: disguised as DTLS 1.2 packet.
wireguard: disguised as a WireGuard packet. (not really WireGuard protocol)" -msgstr "
none:默认值,不进行伪装,发送的数据是没有特征的数据包。
srtp:伪装成 SRTP 数据包,会被识别为视频通话数据(如 FaceTime)。
utp:伪装成 uTP 数据包,会被识别为 BT 下载数据。
wechat-video:伪装成微信视频通话的数据包。
dtls:伪装成 DTLS 1.2 数据包。
wireguard:伪装成 WireGuard 数据包。(并不是真正的 WireGuard 协议)" +msgid "
none: default, no masquerade, data sent is packets with no characteristics.
srtp: disguised as an SRTP packet, it will be recognized as video call data (such as FaceTime).
utp: packets disguised as uTP will be recognized as bittorrent downloaded data.
wechat-video: packets disguised as WeChat video calls.
dtls: disguised as DTLS 1.2 packet.
wireguard: disguised as a WireGuard packet. (not really WireGuard protocol)
dns: Disguising traffic as DNS requests." +msgstr "
none:默认值,不进行伪装,发送的数据是没有特征的数据包。
srtp:伪装成 SRTP 数据包,会被识别为视频通话数据(如 FaceTime)。
utp:伪装成 uTP 数据包,会被识别为 BT 下载数据。
wechat-video:伪装成微信视频通话的数据包。
dtls:伪装成 DTLS 1.2 数据包。
wireguard:伪装成 WireGuard 数据包。(并不是真正的 WireGuard 协议)
dns:把流量伪装成 DNS 请求。" + +msgid "Use it together with the DNS disguised type. You can fill in any domain." +msgstr "配合伪装类型 DNS 使用,可随便填一个域名。" msgid "A legal file path. This file must not exist before running." msgstr "一个合法的文件路径。在运行之前,这个文件必须不存在。" diff --git a/small/luci-app-passwall/luasrc/model/cbi/passwall/client/type/ray.lua b/small/luci-app-passwall/luasrc/model/cbi/passwall/client/type/ray.lua index 5d7a62a8b5..e943fc2678 100644 --- a/small/luci-app-passwall/luasrc/model/cbi/passwall/client/type/ray.lua +++ b/small/luci-app-passwall/luasrc/model/cbi/passwall/client/type/ray.lua @@ -24,7 +24,7 @@ local ss_method_list = { local security_list = { "none", "auto", "aes-128-gcm", "chacha20-poly1305", "zero" } local header_type_list = { - "none", "srtp", "utp", "wechat-video", "dtls", "wireguard" + "none", "srtp", "utp", "wechat-video", "dtls", "wireguard", "dns" } local xray_version = api.get_app_version("xray") @@ -462,10 +462,13 @@ o:depends({ [_n("tcp_guise")] = "http" }) -- [[ mKCP部分 ]]-- -o = s:option(ListValue, _n("mkcp_guise"), translate("Camouflage Type"), translate('
none: default, no masquerade, data sent is packets with no characteristics.
srtp: disguised as an SRTP packet, it will be recognized as video call data (such as FaceTime).
utp: packets disguised as uTP will be recognized as bittorrent downloaded data.
wechat-video: packets disguised as WeChat video calls.
dtls: disguised as DTLS 1.2 packet.
wireguard: disguised as a WireGuard packet. (not really WireGuard protocol)')) +o = s:option(ListValue, _n("mkcp_guise"), translate("Camouflage Type"), translate('
none: default, no masquerade, data sent is packets with no characteristics.
srtp: disguised as an SRTP packet, it will be recognized as video call data (such as FaceTime).
utp: packets disguised as uTP will be recognized as bittorrent downloaded data.
wechat-video: packets disguised as WeChat video calls.
dtls: disguised as DTLS 1.2 packet.
wireguard: disguised as a WireGuard packet. (not really WireGuard protocol)
dns: Disguising traffic as DNS requests.')) for a, t in ipairs(header_type_list) do o:value(t) end o:depends({ [_n("transport")] = "mkcp" }) +o = s:option(Value, _n("mkcp_domain"), translate("Camouflage Domain"), translate("Use it together with the DNS disguised type. You can fill in any domain.")) +o:depends({ [_n("mkcp_guise")] = "dns" }) + o = s:option(Value, _n("mkcp_mtu"), translate("KCP MTU")) o.default = "1350" o:depends({ [_n("transport")] = "mkcp" }) diff --git a/small/luci-app-passwall/luasrc/model/cbi/passwall/server/type/ray.lua b/small/luci-app-passwall/luasrc/model/cbi/passwall/server/type/ray.lua index bdd62575f3..4b532b8125 100644 --- a/small/luci-app-passwall/luasrc/model/cbi/passwall/server/type/ray.lua +++ b/small/luci-app-passwall/luasrc/model/cbi/passwall/server/type/ray.lua @@ -21,7 +21,7 @@ local x_ss_method_list = { } local header_type_list = { - "none", "srtp", "utp", "wechat-video", "dtls", "wireguard" + "none", "srtp", "utp", "wechat-video", "dtls", "wireguard", "dns" } -- [[ Xray ]] @@ -288,10 +288,13 @@ o:depends({ [_n("tcp_guise")] = "http" }) -- [[ mKCP部分 ]]-- -o = s:option(ListValue, _n("mkcp_guise"), translate("Camouflage Type"), translate('
none: default, no masquerade, data sent is packets with no characteristics.
srtp: disguised as an SRTP packet, it will be recognized as video call data (such as FaceTime).
utp: packets disguised as uTP will be recognized as bittorrent downloaded data.
wechat-video: packets disguised as WeChat video calls.
dtls: disguised as DTLS 1.2 packet.
wireguard: disguised as a WireGuard packet. (not really WireGuard protocol)')) +o = s:option(ListValue, _n("mkcp_guise"), translate("Camouflage Type"), translate('
none: default, no masquerade, data sent is packets with no characteristics.
srtp: disguised as an SRTP packet, it will be recognized as video call data (such as FaceTime).
utp: packets disguised as uTP will be recognized as bittorrent downloaded data.
wechat-video: packets disguised as WeChat video calls.
dtls: disguised as DTLS 1.2 packet.
wireguard: disguised as a WireGuard packet. (not really WireGuard protocol)
dns: Disguising traffic as DNS requests.')) for a, t in ipairs(header_type_list) do o:value(t) end o:depends({ [_n("transport")] = "mkcp" }) +o = s:option(Value, _n("mkcp_domain"), translate("Camouflage Domain"), translate("Use it together with the DNS disguised type. You can fill in any domain.")) +o:depends({ [_n("mkcp_guise")] = "dns" }) + o = s:option(Value, _n("mkcp_mtu"), translate("KCP MTU")) o.default = "1350" o:depends({ [_n("transport")] = "mkcp" }) diff --git a/small/luci-app-passwall/luasrc/passwall/util_xray.lua b/small/luci-app-passwall/luasrc/passwall/util_xray.lua index 4b5d5358f3..c9543bbf50 100644 --- a/small/luci-app-passwall/luasrc/passwall/util_xray.lua +++ b/small/luci-app-passwall/luasrc/passwall/util_xray.lua @@ -186,7 +186,10 @@ function gen_outbound(flag, node, tag, proxy_table) readBufferSize = tonumber(node.mkcp_readBufferSize), writeBufferSize = tonumber(node.mkcp_writeBufferSize), seed = (node.mkcp_seed and node.mkcp_seed ~= "") and node.mkcp_seed or nil, - header = {type = node.mkcp_guise} + header = { + type = node.mkcp_guise, + domain = node.mkcp_domain + } } or nil, wsSettings = (node.transport == "ws") and { path = node.ws_path or "/", @@ -482,7 +485,10 @@ function gen_config_server(node) readBufferSize = tonumber(node.mkcp_readBufferSize), writeBufferSize = tonumber(node.mkcp_writeBufferSize), seed = (node.mkcp_seed and node.mkcp_seed ~= "") and node.mkcp_seed or nil, - header = {type = node.mkcp_guise} + header = { + type = node.mkcp_guise, + domain = node.mkcp_domain + } } or nil, wsSettings = (node.transport == "ws") and { host = node.ws_host or nil, diff --git a/small/luci-app-passwall/luasrc/view/passwall/haproxy/js.htm b/small/luci-app-passwall/luasrc/view/passwall/haproxy/js.htm index 872b819f29..904909678c 100644 --- a/small/luci-app-passwall/luasrc/view/passwall/haproxy/js.htm +++ b/small/luci-app-passwall/luasrc/view/passwall/haproxy/js.htm @@ -37,8 +37,11 @@ const lastVal = lastInput.value.trim(); const secondLastVal = 
secondLastInput.value.trim(); - if (lastVal !== secondLastVal && secondLastVal !== "" && secondLastVal !== "0") { - lastInput.value = secondLastVal; + const lbssHiddenInput = lastRow.querySelector("div.cbi-dropdown > div > input[type='hidden'][name$='.lbss']"); + if (!lbssHiddenInput) { + if (lastVal !== secondLastVal && secondLastVal !== "" && secondLastVal !== "0") { + lastInput.value = secondLastVal; + } } }, 300); }); diff --git a/small/luci-app-passwall/luasrc/view/passwall/node_list/link_share_man.htm b/small/luci-app-passwall/luasrc/view/passwall/node_list/link_share_man.htm index df6c2f78de..17ce8f1fef 100644 --- a/small/luci-app-passwall/luasrc/view/passwall/node_list/link_share_man.htm +++ b/small/luci-app-passwall/luasrc/view/passwall/node_list/link_share_man.htm @@ -261,6 +261,7 @@ local hysteria2_type = map:get("@global_subscribe[0]", "hysteria2_type") or "sin } else if (v_transport === "mkcp") { v_transport = "kcp"; params += opt.query("headerType", dom_prefix + "mkcp_guise"); + params += opt.query("seed", dom_prefix + "mkcp_seed"); } else if (v_transport === "quic") { params += opt.query("headerType", dom_prefix + "quic_guise"); params += opt.query("key", dom_prefix + "quic_key"); @@ -372,6 +373,7 @@ local hysteria2_type = map:get("@global_subscribe[0]", "hysteria2_type") or "sin } else if (v_transport === "mkcp") { v_transport = "kcp"; info.type = opt.get(dom_prefix + "mkcp_guise").value; + info.seed = opt.get(dom_prefix + "mkcp_seed").value; } else if (v_transport === "quic") { info.type = opt.get(dom_prefix + "quic_guise")?.value; info.key = opt.get(dom_prefix + "quic_key")?.value; @@ -419,6 +421,7 @@ local hysteria2_type = map:get("@global_subscribe[0]", "hysteria2_type") or "sin } else if (v_transport === "mkcp") { v_transport = "kcp"; params += opt.query("headerType", dom_prefix + "mkcp_guise"); + params += opt.query("seed", dom_prefix + "mkcp_seed"); } else if (v_transport === "quic") { params += opt.query("headerType", dom_prefix + 
"quic_guise"); params += opt.query("key", dom_prefix + "quic_key"); @@ -496,6 +499,7 @@ local hysteria2_type = map:get("@global_subscribe[0]", "hysteria2_type") or "sin } else if (v_transport === "mkcp") { v_transport = "kcp"; params += opt.query("headerType", dom_prefix + "mkcp_guise"); + params += opt.query("seed", dom_prefix + "mkcp_seed"); } else if (v_transport === "quic") { params += opt.query("headerType", dom_prefix + "quic_guise"); params += opt.query("key", dom_prefix + "quic_key"); @@ -1035,6 +1039,7 @@ local hysteria2_type = map:get("@global_subscribe[0]", "hysteria2_type") or "sin opt.set(dom_prefix + 'quic_key', queryParam.key); } else if (queryParam.type === "mkcp") { opt.set(dom_prefix + 'mkcp_guise', queryParam.headerType || "none"); + opt.set(dom_prefix + 'mkcp_seed', queryParam.seed || ""); } else if (queryParam.type === "grpc") { opt.set(dom_prefix + 'grpc_serviceName', (queryParam.serviceName || queryParam.path) || ""); opt.set(dom_prefix + 'grpc_mode', queryParam.mode || "gun"); @@ -1174,6 +1179,7 @@ local hysteria2_type = map:get("@global_subscribe[0]", "hysteria2_type") or "sin opt.set(dom_prefix + 'quic_key', queryParam.key); } else if (queryParam.type === "mkcp") { opt.set(dom_prefix + 'mkcp_guise', queryParam.headerType || "none"); + opt.set(dom_prefix + 'mkcp_seed', queryParam.seed || ""); } else if (queryParam.type === "grpc") { opt.set(dom_prefix + 'grpc_serviceName', (queryParam.serviceName || queryParam.path) || ""); opt.set(dom_prefix + 'grpc_mode', queryParam.mode || "gun"); @@ -1283,7 +1289,8 @@ local hysteria2_type = map:get("@global_subscribe[0]", "hysteria2_type") or "sin opt.set(dom_prefix + 'quic_security', ssm.securty); opt.set(dom_prefix + 'quic_key', ssm.key); } else if (ssm.net === "kcp" || ssm.net === "mkcp") { - opt.set(dom_prefix + 'mkcp_guise', ssm.type); + opt.set(dom_prefix + 'mkcp_guise', ssm.type || "none"); + opt.set(dom_prefix + 'mkcp_seed', ssm.seed || ""); } else if (ssm.net === "grpc") { opt.set(dom_prefix + 
'grpc_serviceName', ssm.path); } @@ -1415,6 +1422,7 @@ local hysteria2_type = map:get("@global_subscribe[0]", "hysteria2_type") or "sin opt.set(dom_prefix + 'quic_key', queryParam.key); } else if (queryParam.type === "kcp" || queryParam.type === "mkcp") { opt.set(dom_prefix + 'mkcp_guise', queryParam.headerType || "none"); + opt.set(dom_prefix + 'mkcp_seed', queryParam.seed || ""); } else if (queryParam.type === "grpc") { opt.set(dom_prefix + 'grpc_serviceName', (queryParam.serviceName || queryParam.path) || ""); opt.set(dom_prefix + 'grpc_mode', queryParam.mode || "gun"); diff --git a/small/luci-app-passwall/po/zh-cn/passwall.po b/small/luci-app-passwall/po/zh-cn/passwall.po index 0ffa338b81..1f0d6b6d65 100644 --- a/small/luci-app-passwall/po/zh-cn/passwall.po +++ b/small/luci-app-passwall/po/zh-cn/passwall.po @@ -1315,6 +1315,9 @@ msgstr "本地地址" msgid "Decimal numbers separated by \",\" or Base64-encoded strings." msgstr "用“,”隔开的十进制数字或 Base64 编码字符串。" +msgid "Camouflage Domain" +msgstr "伪装域名" + msgid "Camouflage Type" msgstr "伪装类型" @@ -1393,8 +1396,11 @@ msgstr "TUIC socks5 服务器可以从外部接收的最大数据包大小(以 msgid "Set if the listening socket should be dual-stack" msgstr "设置监听套接字为双栈" -msgid "
none: default, no masquerade, data sent is packets with no characteristics.
srtp: disguised as an SRTP packet, it will be recognized as video call data (such as FaceTime).
utp: packets disguised as uTP will be recognized as bittorrent downloaded data.
wechat-video: packets disguised as WeChat video calls.
dtls: disguised as DTLS 1.2 packet.
wireguard: disguised as a WireGuard packet. (not really WireGuard protocol)" -msgstr "
none:默认值,不进行伪装,发送的数据是没有特征的数据包。
srtp:伪装成 SRTP 数据包,会被识别为视频通话数据(如 FaceTime)。
utp:伪装成 uTP 数据包,会被识别为 BT 下载数据。
wechat-video:伪装成微信视频通话的数据包。
dtls:伪装成 DTLS 1.2 数据包。
wireguard:伪装成 WireGuard 数据包。(并不是真正的 WireGuard 协议)" +msgid "
none: default, no masquerade, data sent is packets with no characteristics.
srtp: disguised as an SRTP packet, it will be recognized as video call data (such as FaceTime).
utp: packets disguised as uTP will be recognized as bittorrent downloaded data.
wechat-video: packets disguised as WeChat video calls.
dtls: disguised as DTLS 1.2 packet.
wireguard: disguised as a WireGuard packet. (not really WireGuard protocol)
dns: Disguising traffic as DNS requests." +msgstr "
none:默认值,不进行伪装,发送的数据是没有特征的数据包。
srtp:伪装成 SRTP 数据包,会被识别为视频通话数据(如 FaceTime)。
utp:伪装成 uTP 数据包,会被识别为 BT 下载数据。
wechat-video:伪装成微信视频通话的数据包。
dtls:伪装成 DTLS 1.2 数据包。
wireguard:伪装成 WireGuard 数据包。(并不是真正的 WireGuard 协议)
dns:把流量伪装成 DNS 请求。" + +msgid "Use it together with the DNS disguised type. You can fill in any domain." +msgstr "配合伪装类型 DNS 使用,可随便填一个域名。" msgid "A legal file path. This file must not exist before running." msgstr "一个合法的文件路径。在运行之前,这个文件必须不存在。" diff --git a/small/luci-app-ssr-plus/Makefile b/small/luci-app-ssr-plus/Makefile index cdda85709d..2aa3cf84cf 100644 --- a/small/luci-app-ssr-plus/Makefile +++ b/small/luci-app-ssr-plus/Makefile @@ -71,6 +71,7 @@ LUCI_DEPENDS:= \ define Package/$(PKG_NAME)/config select PACKAGE_luci-lib-ipkg if PACKAGE_$(PKG_NAME) +select PACKAGE_luci-lua-runtime if PACKAGE_$(PKG_NAME) choice prompt "Shadowsocks Client Selection" diff --git a/small/v2ray-geodata/Makefile b/small/v2ray-geodata/Makefile index 7402c18d9c..106286fe51 100644 --- a/small/v2ray-geodata/Makefile +++ b/small/v2ray-geodata/Makefile @@ -30,7 +30,7 @@ define Download/geosite HASH:=01dae2a9c31b5c74ba7e54d8d51e0060688ed22da493eaf09f6eeeec89db395e endef -GEOSITE_IRAN_VER:=202507070044 +GEOSITE_IRAN_VER:=202507140045 GEOSITE_IRAN_FILE:=iran.dat.$(GEOSITE_IRAN_VER) define Download/geosite-ir URL:=https://github.com/bootmortis/iran-hosted-domains/releases/download/$(GEOSITE_IRAN_VER)/ diff --git a/v2rayn/v2rayN/Directory.Build.props b/v2rayn/v2rayN/Directory.Build.props index 4e1f44067c..43a84d2d80 100644 --- a/v2rayn/v2rayN/Directory.Build.props +++ b/v2rayn/v2rayN/Directory.Build.props @@ -1,7 +1,7 @@ - 7.12.7 + 7.13.1 diff --git a/v2rayn/v2rayN/Directory.Packages.props b/v2rayn/v2rayN/Directory.Packages.props index 4747ed4617..e7523edac8 100644 --- a/v2rayn/v2rayN/Directory.Packages.props +++ b/v2rayn/v2rayN/Directory.Packages.props @@ -18,13 +18,13 @@ - - + + - + - \ No newline at end of file + diff --git a/v2rayn/v2rayN/ServiceLib/Services/CoreConfig/CoreConfigSingboxService.cs b/v2rayn/v2rayN/ServiceLib/Services/CoreConfig/CoreConfigSingboxService.cs index a48879681d..44d45ab21d 100644 --- a/v2rayn/v2rayN/ServiceLib/Services/CoreConfig/CoreConfigSingboxService.cs +++ 
b/v2rayn/v2rayN/ServiceLib/Services/CoreConfig/CoreConfigSingboxService.cs @@ -1369,7 +1369,7 @@ public class CoreConfigSingboxService private async Task GenRoutingUserRuleOutbound(string outboundTag, SingboxConfig singboxConfig) { - if (outboundTag is Global.ProxyTag or Global.ProxyTag or Global.BlockTag) + if (outboundTag is Global.ProxyTag or Global.DirectTag or Global.BlockTag) { return outboundTag; } diff --git a/v2rayn/v2rayN/ServiceLib/Services/CoreConfig/CoreConfigV2rayService.cs b/v2rayn/v2rayN/ServiceLib/Services/CoreConfig/CoreConfigV2rayService.cs index f008cc14f5..73e638d059 100644 --- a/v2rayn/v2rayN/ServiceLib/Services/CoreConfig/CoreConfigV2rayService.cs +++ b/v2rayn/v2rayN/ServiceLib/Services/CoreConfig/CoreConfigV2rayService.cs @@ -631,7 +631,7 @@ public class CoreConfigV2rayService private async Task GenRoutingUserRuleOutbound(string outboundTag, V2rayConfig v2rayConfig) { - if (outboundTag is Global.ProxyTag or Global.ProxyTag or Global.BlockTag) + if (outboundTag is Global.ProxyTag or Global.DirectTag or Global.BlockTag) { return outboundTag; } diff --git a/yt-dlp/yt_dlp/extractor/_extractors.py b/yt-dlp/yt_dlp/extractor/_extractors.py index 18a3cac54b..0a00db437e 100644 --- a/yt-dlp/yt_dlp/extractor/_extractors.py +++ b/yt-dlp/yt_dlp/extractor/_extractors.py @@ -201,7 +201,6 @@ from .banbye import ( BanByeChannelIE, BanByeIE, ) -from .bandaichannel import BandaiChannelIE from .bandcamp import ( BandcampAlbumIE, BandcampIE, @@ -229,7 +228,6 @@ from .beatbump import ( from .beatport import BeatportIE from .beeg import BeegIE from .behindkink import BehindKinkIE -from .bellmedia import BellMediaIE from .berufetv import BerufeTVIE from .bet import BetIE from .bfi import BFIPlayerIE @@ -447,7 +445,6 @@ from .cspan import ( CSpanIE, ) from .ctsnews import CtsNewsIE -from .ctv import CTVIE from .ctvnews import CTVNewsIE from .cultureunplugged import CultureUnpluggedIE from .curiositystream import ( @@ -929,7 +926,6 @@ from .jiosaavn import ( 
JioSaavnSongIE, ) from .joj import JojIE -from .joqrag import JoqrAgIE from .jove import JoveIE from .jstream import JStreamIE from .jtbc import ( @@ -1032,11 +1028,6 @@ from .likee import ( LikeeIE, LikeeUserIE, ) -from .limelight import ( - LimelightChannelIE, - LimelightChannelListIE, - LimelightMediaIE, -) from .linkedin import ( LinkedInEventsIE, LinkedInIE, @@ -1383,7 +1374,6 @@ from .nobelprize import NobelPrizeIE from .noice import NoicePodcastIE from .nonktube import NonkTubeIE from .noodlemagazine import NoodleMagazineIE -from .noovo import NoovoIE from .nosnl import NOSNLArticleIE from .nova import ( NovaEmbedIE, @@ -2290,6 +2280,7 @@ from .uliza import ( ) from .umg import UMGDeIE from .unistra import UnistraIE +from .unitednations import UnitedNationsWebTvIE from .unity import UnityIE from .unsupported import ( KnownDRMIE, diff --git a/yt-dlp/yt_dlp/extractor/bandaichannel.py b/yt-dlp/yt_dlp/extractor/bandaichannel.py deleted file mode 100644 index d7fcf44bd9..0000000000 --- a/yt-dlp/yt_dlp/extractor/bandaichannel.py +++ /dev/null @@ -1,33 +0,0 @@ -from .brightcove import BrightcoveNewBaseIE -from ..utils import extract_attributes - - -class BandaiChannelIE(BrightcoveNewBaseIE): - IE_NAME = 'bandaichannel' - _VALID_URL = r'https?://(?:www\.)?b-ch\.com/titles/(?P\d+/\d+)' - _TESTS = [{ - 'url': 'https://www.b-ch.com/titles/514/001', - 'md5': 'a0f2d787baa5729bed71108257f613a4', - 'info_dict': { - 'id': '6128044564001', - 'ext': 'mp4', - 'title': 'メタルファイターMIKU 第1話', - 'timestamp': 1580354056, - 'uploader_id': '5797077852001', - 'upload_date': '20200130', - 'duration': 1387.733, - }, - 'params': { - 'skip_download': True, - }, - }] - - def _real_extract(self, url): - video_id = self._match_id(url) - webpage = self._download_webpage(url, video_id) - attrs = extract_attributes(self._search_regex( - r'(]+\bid="bcplayer"[^>]*>)', webpage, 'player')) - bc = self._download_json( - 'https://pbifcd.b-ch.com/v1/playbackinfo/ST/70/' + attrs['data-info'], - video_id, 
headers={'X-API-KEY': attrs['data-auth'].strip()})['bc'] - return self._parse_brightcove_metadata(bc, bc['id']) diff --git a/yt-dlp/yt_dlp/extractor/bellmedia.py b/yt-dlp/yt_dlp/extractor/bellmedia.py deleted file mode 100644 index ac45dd4779..0000000000 --- a/yt-dlp/yt_dlp/extractor/bellmedia.py +++ /dev/null @@ -1,91 +0,0 @@ -from .common import InfoExtractor - - -class BellMediaIE(InfoExtractor): - _VALID_URL = r'''(?x)https?://(?:www\.)? - (?P - (?: - ctv| - tsn| - bnn(?:bloomberg)?| - thecomedynetwork| - discovery| - discoveryvelocity| - sciencechannel| - investigationdiscovery| - animalplanet| - bravo| - mtv| - space| - etalk| - marilyn - )\.ca| - (?:much|cp24)\.com - )/.*?(?:\b(?:vid(?:eoid)?|clipId)=|-vid|~|%7E|/(?:episode)?)(?P[0-9]{6,})''' - _TESTS = [{ - 'url': 'https://www.bnnbloomberg.ca/video/david-cockfield-s-top-picks~1403070', - 'md5': '3e5b8e38370741d5089da79161646635', - 'info_dict': { - 'id': '1403070', - 'ext': 'flv', - 'title': 'David Cockfield\'s Top Picks', - 'description': 'md5:810f7f8c6a83ad5b48677c3f8e5bb2c3', - 'upload_date': '20180525', - 'timestamp': 1527288600, - 'season_id': '73997', - 'season': '2018', - 'thumbnail': 'http://images2.9c9media.com/image_asset/2018_5_25_baf30cbd-b28d-4a18-9903-4bb8713b00f5_PNG_956x536.jpg', - 'tags': [], - 'categories': ['ETFs'], - 'season_number': 8, - 'duration': 272.038, - 'series': 'Market Call Tonight', - }, - }, { - 'url': 'http://www.thecomedynetwork.ca/video/player?vid=923582', - 'only_matching': True, - }, { - 'url': 'http://www.tsn.ca/video/expectations-high-for-milos-raonic-at-us-open~939549', - 'only_matching': True, - }, { - 'url': 'http://www.bnn.ca/video/berman-s-call-part-two-viewer-questions~939654', - 'only_matching': True, - }, { - 'url': 'http://www.ctv.ca/YourMorning/Video/S1E6-Monday-August-29-2016-vid938009', - 'only_matching': True, - }, { - 'url': 'http://www.much.com/shows/atmidnight/episode948007/tuesday-september-13-2016', - 'only_matching': True, - }, { - 'url': 
'http://www.much.com/shows/the-almost-impossible-gameshow/928979/episode-6', - 'only_matching': True, - }, { - 'url': 'http://www.ctv.ca/DCs-Legends-of-Tomorrow/Video/S2E11-Turncoat-vid1051430', - 'only_matching': True, - }, { - 'url': 'http://www.etalk.ca/video?videoid=663455', - 'only_matching': True, - }, { - 'url': 'https://www.cp24.com/video?clipId=1982548', - 'only_matching': True, - }] - _DOMAINS = { - 'thecomedynetwork': 'comedy', - 'discoveryvelocity': 'discvel', - 'sciencechannel': 'discsci', - 'investigationdiscovery': 'invdisc', - 'animalplanet': 'aniplan', - 'etalk': 'ctv', - 'bnnbloomberg': 'bnn', - 'marilyn': 'ctv_marilyn', - } - - def _real_extract(self, url): - domain, video_id = self._match_valid_url(url).groups() - domain = domain.split('.')[0] - return { - '_type': 'url_transparent', - 'id': video_id, - 'url': f'9c9media:{self._DOMAINS.get(domain, domain)}_web:{video_id}', - 'ie_key': 'NineCNineMedia', - } diff --git a/yt-dlp/yt_dlp/extractor/ctv.py b/yt-dlp/yt_dlp/extractor/ctv.py deleted file mode 100644 index a41dab11b1..0000000000 --- a/yt-dlp/yt_dlp/extractor/ctv.py +++ /dev/null @@ -1,49 +0,0 @@ -from .common import InfoExtractor - - -class CTVIE(InfoExtractor): - _VALID_URL = r'https?://(?:www\.)?ctv\.ca/(?P(?:show|movie)s/[^/]+/[^/?#&]+)' - _TESTS = [{ - 'url': 'https://www.ctv.ca/shows/your-morning/wednesday-december-23-2020-s5e88', - 'info_dict': { - 'id': '2102249', - 'ext': 'flv', - 'title': 'Wednesday, December 23, 2020', - 'thumbnail': r're:^https?://.*\.jpg$', - 'description': 'Your Morning delivers original perspectives and unique insights into the headlines of the day.', - 'timestamp': 1608732000, - 'upload_date': '20201223', - 'series': 'Your Morning', - 'season': '2020-2021', - 'season_number': 5, - 'episode_number': 88, - 'tags': ['Your Morning'], - 'categories': ['Talk Show'], - 'duration': 7467.126, - }, - }, { - 'url': 'https://www.ctv.ca/movies/adam-sandlers-eight-crazy-nights/adam-sandlers-eight-crazy-nights', - 
'only_matching': True, - }] - - def _real_extract(self, url): - display_id = self._match_id(url) - content = self._download_json( - 'https://www.ctv.ca/space-graphql/graphql', display_id, query={ - 'query': '''{ - resolvedPath(path: "/%s") { - lastSegment { - content { - ... on AxisContent { - axisId - videoPlayerDestCode - } - } - } - } -}''' % display_id, # noqa: UP031 - })['data']['resolvedPath']['lastSegment']['content'] - video_id = content['axisId'] - return self.url_result( - '9c9media:{}:{}'.format(content['videoPlayerDestCode'], video_id), - 'NineCNineMedia', video_id) diff --git a/yt-dlp/yt_dlp/extractor/generic.py b/yt-dlp/yt_dlp/extractor/generic.py index 721d04e317..d9a666f991 100644 --- a/yt-dlp/yt_dlp/extractor/generic.py +++ b/yt-dlp/yt_dlp/extractor/generic.py @@ -1481,30 +1481,6 @@ class GenericIE(InfoExtractor): }, 'add_ie': ['SenateISVP'], }, - { - # Limelight embeds (1 channel embed + 4 media embeds) - 'url': 'http://www.sedona.com/FacilitatorTraining2017', - 'info_dict': { - 'id': 'FacilitatorTraining2017', - 'title': 'Facilitator Training 2017', - }, - 'playlist_mincount': 5, - }, - { - # Limelight embed (LimelightPlayerUtil.embed) - 'url': 'https://tv5.ca/videos?v=xuu8qowr291ri', - 'info_dict': { - 'id': '95d035dc5c8a401588e9c0e6bd1e9c92', - 'ext': 'mp4', - 'title': '07448641', - 'timestamp': 1499890639, - 'upload_date': '20170712', - }, - 'params': { - 'skip_download': True, - }, - 'add_ie': ['LimelightMedia'], - }, { 'url': 'http://kron4.com/2017/04/28/standoff-with-walnut-creek-murder-suspect-ends-with-arrest/', 'info_dict': { diff --git a/yt-dlp/yt_dlp/extractor/hotstar.py b/yt-dlp/yt_dlp/extractor/hotstar.py index 891bcc8731..f10aab27a3 100644 --- a/yt-dlp/yt_dlp/extractor/hotstar.py +++ b/yt-dlp/yt_dlp/extractor/hotstar.py @@ -12,8 +12,11 @@ from ..utils import ( ExtractorError, OnDemandPagedList, determine_ext, + filter_dict, int_or_none, join_nonempty, + jwt_decode_hs256, + parse_iso8601, str_or_none, url_or_none, ) @@ -21,35 +24,48 
@@ from ..utils.traversal import require, traverse_obj class HotStarBaseIE(InfoExtractor): + _TOKEN_NAME = 'userUP' _BASE_URL = 'https://www.hotstar.com' _API_URL = 'https://api.hotstar.com' _API_URL_V2 = 'https://apix.hotstar.com/v2' _AKAMAI_ENCRYPTION_KEY = b'\x05\xfc\x1a\x01\xca\xc9\x4b\xc4\x12\xfc\x53\x12\x07\x75\xf9\xee' + _FREE_HEADERS = { + 'user-agent': 'Hotstar;in.startv.hotstar/25.06.30.0.11580 (Android/12)', + 'x-hs-client': 'platform:android;app_id:in.startv.hotstar;app_version:25.06.30.0;os:Android;os_version:12;schema_version:0.0.1523', + 'x-hs-platform': 'android', + } + _SUB_HEADERS = { + 'user-agent': 'Disney+;in.startv.hotstar.dplus.tv/23.08.14.4.2915 (Android/13)', + 'x-hs-client': 'platform:androidtv;app_id:in.startv.hotstar.dplus.tv;app_version:23.08.14.4;os:Android;os_version:13;schema_version:0.0.970', + 'x-hs-platform': 'androidtv', + } + + def _has_active_subscription(self, cookies, server_time): + expiry = traverse_obj(cookies, ( + self._TOKEN_NAME, 'value', {jwt_decode_hs256}, 'sub', {json.loads}, + 'subscriptions', 'in', ..., 'expiry', {parse_iso8601}, all, {max})) or 0 + return expiry > server_time + def _call_api_v1(self, path, *args, **kwargs): return self._download_json( f'{self._API_URL}/o/v1/{path}', *args, **kwargs, headers={'x-country-code': 'IN', 'x-platform-code': 'PCTV'}) - def _call_api_impl(self, path, video_id, query, st=None, cookies=None): - if not cookies or not cookies.get('userUP'): - self.raise_login_required() - + def _call_api_impl(self, path, video_id, query, cookies=None, st=None): st = int_or_none(st) or int(time.time()) exp = st + 6000 auth = f'st={st}~exp={exp}~acl=/*' auth += '~hmac=' + hmac.new(self._AKAMAI_ENCRYPTION_KEY, auth.encode(), hashlib.sha256).hexdigest() response = self._download_json( f'{self._API_URL_V2}/{path}', video_id, query=query, - headers={ - 'user-agent': 'Disney+;in.startv.hotstar.dplus.tv/23.08.14.4.2915 (Android/13)', + headers=filter_dict({ + **(self._SUB_HEADERS if 
self._has_active_subscription(cookies, st) else self._FREE_HEADERS), 'hotstarauth': auth, - 'x-hs-usertoken': cookies['userUP'].value, + 'x-hs-usertoken': traverse_obj(cookies, (self._TOKEN_NAME, 'value')), 'x-hs-device-id': traverse_obj(cookies, ('deviceId', 'value')) or str(uuid.uuid4()), - 'x-hs-client': 'platform:androidtv;app_id:in.startv.hotstar.dplus.tv;app_version:23.08.14.4;os:Android;os_version:13;schema_version:0.0.970', - 'x-hs-platform': 'androidtv', 'content-type': 'application/json', - }) + })) if not traverse_obj(response, ('success', {dict})): raise ExtractorError('API call was unsuccessful') @@ -61,21 +77,22 @@ class HotStarBaseIE(InfoExtractor): 'filters': f'content_type={content_type}', 'client_capabilities': json.dumps({ 'package': ['dash', 'hls'], - 'container': ['fmp4br', 'fmp4'], + 'container': ['fmp4', 'fmp4br', 'ts'], 'ads': ['non_ssai', 'ssai'], - 'audio_channel': ['atmos', 'dolby51', 'stereo'], + 'audio_channel': ['stereo', 'dolby51', 'atmos'], 'encryption': ['plain', 'widevine'], # wv only so we can raise appropriate error - 'video_codec': ['h265', 'h264'], - 'ladder': ['tv', 'full'], - 'resolution': ['4k', 'hd'], - 'true_resolution': ['4k', 'hd'], - 'dynamic_range': ['hdr', 'sdr'], + 'video_codec': ['h264', 'h265'], + 'video_codec_non_secure': ['h264', 'h265', 'vp9'], + 'ladder': ['phone', 'tv', 'full'], + 'resolution': ['hd', '4k'], + 'true_resolution': ['hd', '4k'], + 'dynamic_range': ['sdr', 'hdr'], }, separators=(',', ':')), 'drm_parameters': json.dumps({ 'widevine_security_level': ['SW_SECURE_DECODE', 'SW_SECURE_CRYPTO'], 'hdcp_version': ['HDCP_V2_2', 'HDCP_V2_1', 'HDCP_V2', 'HDCP_V1'], }, separators=(',', ':')), - }, st=st, cookies=cookies) + }, cookies=cookies, st=st) @staticmethod def _parse_metadata_v1(video_data): @@ -274,6 +291,8 @@ class HotStarIE(HotStarBaseIE): video_id, video_type = self._match_valid_url(url).group('id', 'type') video_type = self._TYPE[video_type] cookies = self._get_cookies(url) # Cookies before any 
request + if not cookies or not cookies.get(self._TOKEN_NAME): + self.raise_login_required() video_data = traverse_obj( self._call_api_v1(f'{video_type}/detail', video_id, fatal=False, query={ @@ -292,7 +311,7 @@ class HotStarIE(HotStarBaseIE): # See https://github.com/yt-dlp/yt-dlp/issues/396 st = self._request_webpage( f'{self._BASE_URL}/in', video_id, 'Fetching server time').get_header('x-origin-date') - watch = self._call_api_v2('pages/watch', video_id, content_type, cookies=cookies, st=st) + watch = self._call_api_v2('pages/watch', video_id, content_type, cookies, st) player_config = traverse_obj(watch, ( 'page', 'spaces', 'player', 'widget_wrappers', lambda _, v: v['template'] == 'PlayerWidget', 'widget', 'data', 'player_config', {dict}, any, {require('player config')})) diff --git a/yt-dlp/yt_dlp/extractor/joqrag.py b/yt-dlp/yt_dlp/extractor/joqrag.py deleted file mode 100644 index 7a91d4a235..0000000000 --- a/yt-dlp/yt_dlp/extractor/joqrag.py +++ /dev/null @@ -1,112 +0,0 @@ -import datetime as dt -import urllib.parse - -from .common import InfoExtractor -from ..utils import ( - clean_html, - datetime_from_str, - unified_timestamp, - urljoin, -) - - -class JoqrAgIE(InfoExtractor): - IE_DESC = '超!A&G+ 文化放送 (f.k.a. AGQR) Nippon Cultural Broadcasting, Inc. 
(JOQR)' - _VALID_URL = [r'https?://www\.uniqueradio\.jp/agplayer5/(?:player|inc-player-hls)\.php', - r'https?://(?:www\.)?joqr\.co\.jp/ag/', - r'https?://(?:www\.)?joqr\.co\.jp/qr/ag(?:daily|regular)program/?(?:$|[#?])'] - _TESTS = [{ - 'url': 'https://www.uniqueradio.jp/agplayer5/player.php', - 'info_dict': { - 'id': 'live', - 'title': str, - 'channel': '超!A&G+', - 'description': str, - 'live_status': 'is_live', - 'release_timestamp': int, - }, - 'params': { - 'skip_download': True, - 'ignore_no_formats_error': True, - }, - }, { - 'url': 'https://www.uniqueradio.jp/agplayer5/inc-player-hls.php', - 'only_matching': True, - }, { - 'url': 'https://www.joqr.co.jp/ag/article/103760/', - 'only_matching': True, - }, { - 'url': 'http://www.joqr.co.jp/qr/agdailyprogram/', - 'only_matching': True, - }, { - 'url': 'http://www.joqr.co.jp/qr/agregularprogram/', - 'only_matching': True, - }] - - def _extract_metadata(self, variable, html): - return clean_html(urllib.parse.unquote_plus(self._search_regex( - rf'var\s+{variable}\s*=\s*(["\'])(?P(?:(?!\1).)+)\1', - html, 'metadata', group='value', default=''))) or None - - def _extract_start_timestamp(self, video_id, is_live): - def extract_start_time_from(date_str): - dt_ = datetime_from_str(date_str) + dt.timedelta(hours=9) - date = dt_.strftime('%Y%m%d') - start_time = self._search_regex( - r']+\bclass="dailyProgram-itemHeaderTime"[^>]*>[\s\d:]+–\s*(\d{1,2}:\d{1,2})', - self._download_webpage( - f'https://www.joqr.co.jp/qr/agdailyprogram/?date={date}', video_id, - note=f'Downloading program list of {date}', fatal=False, - errnote=f'Failed to download program list of {date}') or '', - 'start time', default=None) - if start_time: - return unified_timestamp(f'{dt_.strftime("%Y/%m/%d")} {start_time} +09:00') - return None - - start_timestamp = extract_start_time_from('today') - if not start_timestamp: - return None - - if not is_live or start_timestamp < datetime_from_str('now').timestamp(): - return start_timestamp - else: - return 
extract_start_time_from('yesterday') - - def _real_extract(self, url): - video_id = 'live' - - metadata = self._download_webpage( - 'https://www.uniqueradio.jp/aandg', video_id, - note='Downloading metadata', errnote='Failed to download metadata') - title = self._extract_metadata('Program_name', metadata) - - if not title or title == '放送休止': - formats = [] - live_status = 'is_upcoming' - release_timestamp = self._extract_start_timestamp(video_id, False) - msg = 'This stream is not currently live' - if release_timestamp: - msg += (' and will start at ' - + dt.datetime.fromtimestamp(release_timestamp).strftime('%Y-%m-%d %H:%M:%S')) - self.raise_no_formats(msg, expected=True) - else: - m3u8_path = self._search_regex( - r']*\bsrc="([^"]+)"', - self._download_webpage( - 'https://www.uniqueradio.jp/agplayer5/inc-player-hls.php', video_id, - note='Downloading player data', errnote='Failed to download player data'), - 'm3u8 url') - formats = self._extract_m3u8_formats( - urljoin('https://www.uniqueradio.jp/', m3u8_path), video_id) - live_status = 'is_live' - release_timestamp = self._extract_start_timestamp(video_id, True) - - return { - 'id': video_id, - 'title': title, - 'channel': '超!A&G+', - 'description': self._extract_metadata('Program_text', metadata), - 'formats': formats, - 'live_status': live_status, - 'release_timestamp': release_timestamp, - } diff --git a/yt-dlp/yt_dlp/extractor/limelight.py b/yt-dlp/yt_dlp/extractor/limelight.py deleted file mode 100644 index 763a01448c..0000000000 --- a/yt-dlp/yt_dlp/extractor/limelight.py +++ /dev/null @@ -1,358 +0,0 @@ -import re - -from .common import InfoExtractor -from ..networking.exceptions import HTTPError -from ..utils import ( - ExtractorError, - determine_ext, - float_or_none, - int_or_none, - smuggle_url, - try_get, - unsmuggle_url, -) - - -class LimelightBaseIE(InfoExtractor): - _PLAYLIST_SERVICE_URL = 'http://production-ps.lvp.llnw.net/r/PlaylistService/%s/%s/%s' - - @classmethod - def _extract_embed_urls(cls, 
url, webpage): - lm = { - 'Media': 'media', - 'Channel': 'channel', - 'ChannelList': 'channel_list', - } - - def smuggle(url): - return smuggle_url(url, {'source_url': url}) - - entries = [] - for kind, video_id in re.findall( - r'LimelightPlayer\.doLoad(Media|Channel|ChannelList)\(["\'](?P[a-z0-9]{32})', - webpage): - entries.append(cls.url_result( - smuggle(f'limelight:{lm[kind]}:{video_id}'), - f'Limelight{kind}', video_id)) - for mobj in re.finditer( - # As per [1] class attribute should be exactly equal to - # LimelightEmbeddedPlayerFlash but numerous examples seen - # that don't exactly match it (e.g. [2]). - # 1. http://support.3playmedia.com/hc/en-us/articles/227732408-Limelight-Embedding-the-Captions-Plugin-with-the-Limelight-Player-on-Your-Webpage - # 2. http://www.sedona.com/FacilitatorTraining2017 - r'''(?sx) - ]+class=(["\'])(?:(?!\1).)*\bLimelightEmbeddedPlayerFlash\b(?:(?!\1).)*\1[^>]*>.*? - ]+ - name=(["\'])flashVars\2[^>]+ - value=(["\'])(?:(?!\3).)*(?Pmedia|channel(?:List)?)Id=(?P[a-z0-9]{32}) - ''', webpage): - kind, video_id = mobj.group('kind'), mobj.group('id') - entries.append(cls.url_result( - smuggle(f'limelight:{kind}:{video_id}'), - f'Limelight{kind.capitalize()}', video_id)) - # http://support.3playmedia.com/hc/en-us/articles/115009517327-Limelight-Embedding-the-Audio-Description-Plugin-with-the-Limelight-Player-on-Your-Web-Page) - for video_id in re.findall( - r'(?s)LimelightPlayerUtil\.embed\s*\(\s*{.*?\bmediaId["\']\s*:\s*["\'](?P[a-z0-9]{32})', - webpage): - entries.append(cls.url_result( - smuggle(f'limelight:media:{video_id}'), - LimelightMediaIE.ie_key(), video_id)) - return entries - - def _call_playlist_service(self, item_id, method, fatal=True, referer=None): - headers = {} - if referer: - headers['Referer'] = referer - try: - return self._download_json( - self._PLAYLIST_SERVICE_URL % (self._PLAYLIST_SERVICE_PATH, item_id, method), - item_id, f'Downloading PlaylistService {method} JSON', - fatal=fatal, headers=headers) - except 
ExtractorError as e: - if isinstance(e.cause, HTTPError) and e.cause.status == 403: - error = self._parse_json(e.cause.response.read().decode(), item_id)['detail']['contentAccessPermission'] - if error == 'CountryDisabled': - self.raise_geo_restricted() - raise ExtractorError(error, expected=True) - raise - - def _extract(self, item_id, pc_method, mobile_method, referer=None): - pc = self._call_playlist_service(item_id, pc_method, referer=referer) - mobile = self._call_playlist_service( - item_id, mobile_method, fatal=False, referer=referer) - return pc, mobile - - def _extract_info(self, pc, mobile, i, referer): - get_item = lambda x, y: try_get(x, lambda x: x[y][i], dict) or {} - pc_item = get_item(pc, 'playlistItems') - mobile_item = get_item(mobile, 'mediaList') - video_id = pc_item.get('mediaId') or mobile_item['mediaId'] - title = pc_item.get('title') or mobile_item['title'] - - formats = [] - urls = [] - for stream in pc_item.get('streams', []): - stream_url = stream.get('url') - if not stream_url or stream_url in urls: - continue - if not self.get_param('allow_unplayable_formats') and stream.get('drmProtected'): - continue - urls.append(stream_url) - ext = determine_ext(stream_url) - if ext == 'f4m': - formats.extend(self._extract_f4m_formats( - stream_url, video_id, f4m_id='hds', fatal=False)) - else: - fmt = { - 'url': stream_url, - 'abr': float_or_none(stream.get('audioBitRate')), - 'fps': float_or_none(stream.get('videoFrameRate')), - 'ext': ext, - } - width = int_or_none(stream.get('videoWidthInPixels')) - height = int_or_none(stream.get('videoHeightInPixels')) - vbr = float_or_none(stream.get('videoBitRate')) - if width or height or vbr: - fmt.update({ - 'width': width, - 'height': height, - 'vbr': vbr, - }) - else: - fmt['vcodec'] = 'none' - rtmp = re.search(r'^(?Prtmpe?://(?P[^/]+)/(?P.+))/(?Pmp[34]:.+)$', stream_url) - if rtmp: - format_id = 'rtmp' - if stream.get('videoBitRate'): - format_id += '-%d' % int_or_none(stream['videoBitRate']) - 
http_format_id = format_id.replace('rtmp', 'http') - - CDN_HOSTS = ( - ('delvenetworks.com', 'cpl.delvenetworks.com'), - ('video.llnw.net', 's2.content.video.llnw.net'), - ) - for cdn_host, http_host in CDN_HOSTS: - if cdn_host not in rtmp.group('host').lower(): - continue - http_url = 'http://{}/{}'.format(http_host, rtmp.group('playpath')[4:]) - urls.append(http_url) - if self._is_valid_url(http_url, video_id, http_format_id): - http_fmt = fmt.copy() - http_fmt.update({ - 'url': http_url, - 'format_id': http_format_id, - }) - formats.append(http_fmt) - break - - fmt.update({ - 'url': rtmp.group('url'), - 'play_path': rtmp.group('playpath'), - 'app': rtmp.group('app'), - 'ext': 'flv', - 'format_id': format_id, - }) - formats.append(fmt) - - for mobile_url in mobile_item.get('mobileUrls', []): - media_url = mobile_url.get('mobileUrl') - format_id = mobile_url.get('targetMediaPlatform') - if not media_url or media_url in urls: - continue - if (format_id in ('Widevine', 'SmoothStreaming') - and not self.get_param('allow_unplayable_formats', False)): - continue - urls.append(media_url) - ext = determine_ext(media_url) - if ext == 'm3u8': - formats.extend(self._extract_m3u8_formats( - media_url, video_id, 'mp4', 'm3u8_native', - m3u8_id=format_id, fatal=False)) - elif ext == 'f4m': - formats.extend(self._extract_f4m_formats( - stream_url, video_id, f4m_id=format_id, fatal=False)) - else: - formats.append({ - 'url': media_url, - 'format_id': format_id, - 'quality': -10, - 'ext': ext, - }) - - subtitles = {} - for flag in mobile_item.get('flags'): - if flag == 'ClosedCaptions': - closed_captions = self._call_playlist_service( - video_id, 'getClosedCaptionsDetailsByMediaId', - False, referer) or [] - for cc in closed_captions: - cc_url = cc.get('webvttFileUrl') - if not cc_url: - continue - lang = cc.get('languageCode') or self._search_regex(r'/([a-z]{2})\.vtt', cc_url, 'lang', default='en') - subtitles.setdefault(lang, []).append({ - 'url': cc_url, - }) - break - - 
get_meta = lambda x: pc_item.get(x) or mobile_item.get(x) - - return { - 'id': video_id, - 'title': title, - 'description': get_meta('description'), - 'formats': formats, - 'duration': float_or_none(get_meta('durationInMilliseconds'), 1000), - 'thumbnail': get_meta('previewImageUrl') or get_meta('thumbnailImageUrl'), - 'subtitles': subtitles, - } - - -class LimelightMediaIE(LimelightBaseIE): - IE_NAME = 'limelight' - _VALID_URL = r'''(?x) - (?: - limelight:media:| - https?:// - (?: - link\.videoplatform\.limelight\.com/media/| - assets\.delvenetworks\.com/player/loader\.swf - ) - \?.*?\bmediaId= - ) - (?P[a-z0-9]{32}) - ''' - _TESTS = [{ - 'url': 'http://link.videoplatform.limelight.com/media/?mediaId=3ffd040b522b4485b6d84effc750cd86', - 'info_dict': { - 'id': '3ffd040b522b4485b6d84effc750cd86', - 'ext': 'mp4', - 'title': 'HaP and the HB Prince Trailer', - 'description': 'md5:8005b944181778e313d95c1237ddb640', - 'thumbnail': r're:^https?://.*\.jpeg$', - 'duration': 144.23, - }, - 'params': { - # m3u8 download - 'skip_download': True, - }, - }, { - # video with subtitles - 'url': 'limelight:media:a3e00274d4564ec4a9b29b9466432335', - 'md5': '2fa3bad9ac321e23860ca23bc2c69e3d', - 'info_dict': { - 'id': 'a3e00274d4564ec4a9b29b9466432335', - 'ext': 'mp4', - 'title': '3Play Media Overview Video', - 'thumbnail': r're:^https?://.*\.jpeg$', - 'duration': 78.101, - # TODO: extract all languages that were accessible via API - # 'subtitles': 'mincount:9', - 'subtitles': 'mincount:1', - }, - }, { - 'url': 'https://assets.delvenetworks.com/player/loader.swf?mediaId=8018a574f08d416e95ceaccae4ba0452', - 'only_matching': True, - }] - _PLAYLIST_SERVICE_PATH = 'media' - - def _real_extract(self, url): - url, smuggled_data = unsmuggle_url(url, {}) - video_id = self._match_id(url) - source_url = smuggled_data.get('source_url') - self._initialize_geo_bypass({ - 'countries': smuggled_data.get('geo_countries'), - }) - - pc, mobile = self._extract( - video_id, 'getPlaylistByMediaId', - 
'getMobilePlaylistByMediaId', source_url) - - return self._extract_info(pc, mobile, 0, source_url) - - -class LimelightChannelIE(LimelightBaseIE): - IE_NAME = 'limelight:channel' - _VALID_URL = r'''(?x) - (?: - limelight:channel:| - https?:// - (?: - link\.videoplatform\.limelight\.com/media/| - assets\.delvenetworks\.com/player/loader\.swf - ) - \?.*?\bchannelId= - ) - (?P[a-z0-9]{32}) - ''' - _TESTS = [{ - 'url': 'http://link.videoplatform.limelight.com/media/?channelId=ab6a524c379342f9b23642917020c082', - 'info_dict': { - 'id': 'ab6a524c379342f9b23642917020c082', - 'title': 'Javascript Sample Code', - 'description': 'Javascript Sample Code - http://www.delvenetworks.com/sample-code/playerCode-demo.html', - }, - 'playlist_mincount': 3, - }, { - 'url': 'http://assets.delvenetworks.com/player/loader.swf?channelId=ab6a524c379342f9b23642917020c082', - 'only_matching': True, - }] - _PLAYLIST_SERVICE_PATH = 'channel' - - def _real_extract(self, url): - url, smuggled_data = unsmuggle_url(url, {}) - channel_id = self._match_id(url) - source_url = smuggled_data.get('source_url') - - pc, mobile = self._extract( - channel_id, 'getPlaylistByChannelId', - 'getMobilePlaylistWithNItemsByChannelId?begin=0&count=-1', - source_url) - - entries = [ - self._extract_info(pc, mobile, i, source_url) - for i in range(len(pc['playlistItems']))] - - return self.playlist_result( - entries, channel_id, pc.get('title'), mobile.get('description')) - - -class LimelightChannelListIE(LimelightBaseIE): - IE_NAME = 'limelight:channel_list' - _VALID_URL = r'''(?x) - (?: - limelight:channel_list:| - https?:// - (?: - link\.videoplatform\.limelight\.com/media/| - assets\.delvenetworks\.com/player/loader\.swf - ) - \?.*?\bchannelListId= - ) - (?P[a-z0-9]{32}) - ''' - _TESTS = [{ - 'url': 'http://link.videoplatform.limelight.com/media/?channelListId=301b117890c4465c8179ede21fd92e2b', - 'info_dict': { - 'id': '301b117890c4465c8179ede21fd92e2b', - 'title': 'Website - Hero Player', - }, - 
'playlist_mincount': 2, - }, { - 'url': 'https://assets.delvenetworks.com/player/loader.swf?channelListId=301b117890c4465c8179ede21fd92e2b', - 'only_matching': True, - }] - _PLAYLIST_SERVICE_PATH = 'channel_list' - - def _real_extract(self, url): - channel_list_id = self._match_id(url) - - channel_list = self._call_playlist_service( - channel_list_id, 'getMobileChannelListById') - - entries = [ - self.url_result('limelight:channel:{}'.format(channel['id']), 'LimelightChannel') - for channel in channel_list['channelList']] - - return self.playlist_result( - entries, channel_list_id, channel_list['title']) diff --git a/yt-dlp/yt_dlp/extractor/lrt.py b/yt-dlp/yt_dlp/extractor/lrt.py index caff9125e0..34c9ece2d1 100644 --- a/yt-dlp/yt_dlp/extractor/lrt.py +++ b/yt-dlp/yt_dlp/extractor/lrt.py @@ -134,7 +134,7 @@ class LRTRadioIE(LRTBaseIE): def _real_extract(self, url): video_id, path = self._match_valid_url(url).group('id', 'path') media = self._download_json( - 'https://www.lrt.lt/radioteka/api/media', video_id, + 'https://www.lrt.lt/rest-api/media', video_id, query={'url': f'/mediateka/irasas/{video_id}/{path}'}) return { diff --git a/yt-dlp/yt_dlp/extractor/noovo.py b/yt-dlp/yt_dlp/extractor/noovo.py deleted file mode 100644 index 772d4ed9e0..0000000000 --- a/yt-dlp/yt_dlp/extractor/noovo.py +++ /dev/null @@ -1,100 +0,0 @@ -from .brightcove import BrightcoveNewIE -from .common import InfoExtractor -from ..utils import ( - int_or_none, - js_to_json, - smuggle_url, - try_get, -) - - -class NoovoIE(InfoExtractor): - _VALID_URL = r'https?://(?:[^/]+\.)?noovo\.ca/videos/(?P[^/]+/[^/?#&]+)' - _TESTS = [{ - # clip - 'url': 'http://noovo.ca/videos/rpm-plus/chrysler-imperial', - 'info_dict': { - 'id': '5386045029001', - 'ext': 'mp4', - 'title': 'Chrysler Imperial', - 'description': 'md5:de3c898d1eb810f3e6243e08c8b4a056', - 'timestamp': 1491399228, - 'upload_date': '20170405', - 'uploader_id': '618566855001', - 'series': 'RPM+', - }, - 'params': { - 'skip_download': True, - 
}, - }, { - # episode - 'url': 'http://noovo.ca/videos/l-amour-est-dans-le-pre/episode-13-8', - 'info_dict': { - 'id': '5395865725001', - 'title': 'Épisode 13 : Les retrouvailles', - 'description': 'md5:888c3330f0c1b4476c5bc99a1c040473', - 'ext': 'mp4', - 'timestamp': 1492019320, - 'upload_date': '20170412', - 'uploader_id': '618566855001', - 'series': "L'amour est dans le pré", - 'season_number': 5, - 'episode': 'Épisode 13', - 'episode_number': 13, - }, - 'params': { - 'skip_download': True, - }, - }] - BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/618566855001/default_default/index.html?videoId=%s' - - def _real_extract(self, url): - video_id = self._match_id(url) - - webpage = self._download_webpage(url, video_id) - - brightcove_id = self._search_regex( - r'data-video-id=["\'](\d+)', webpage, 'brightcove id') - - data = self._parse_json( - self._search_regex( - r'(?s)dataLayer\.push\(\s*({.+?})\s*\);', webpage, 'data', - default='{}'), - video_id, transform_source=js_to_json, fatal=False) - - title = try_get( - data, lambda x: x['video']['nom'], - str) or self._html_search_meta( - 'dcterms.Title', webpage, 'title', fatal=True) - - description = self._html_search_meta( - ('dcterms.Description', 'description'), webpage, 'description') - - series = try_get( - data, lambda x: x['emission']['nom']) or self._search_regex( - r']+class="banner-card__subtitle h4"[^>]*>([^<]+)', - webpage, 'series', default=None) - - season_el = try_get(data, lambda x: x['emission']['saison'], dict) or {} - season = try_get(season_el, lambda x: x['nom'], str) - season_number = int_or_none(try_get(season_el, lambda x: x['numero'])) - - episode_el = try_get(season_el, lambda x: x['episode'], dict) or {} - episode = try_get(episode_el, lambda x: x['nom'], str) - episode_number = int_or_none(try_get(episode_el, lambda x: x['numero'])) - - return { - '_type': 'url_transparent', - 'ie_key': BrightcoveNewIE.ie_key(), - 'url': smuggle_url( - self.BRIGHTCOVE_URL_TEMPLATE % 
brightcove_id, - {'geo_countries': ['CA']}), - 'id': brightcove_id, - 'title': title, - 'description': description, - 'series': series, - 'season': season, - 'season_number': season_number, - 'episode': episode, - 'episode_number': episode_number, - } diff --git a/yt-dlp/yt_dlp/extractor/rai.py b/yt-dlp/yt_dlp/extractor/rai.py index c489dc7312..027f7a7b6f 100644 --- a/yt-dlp/yt_dlp/extractor/rai.py +++ b/yt-dlp/yt_dlp/extractor/rai.py @@ -765,7 +765,7 @@ class RaiCulturaIE(RaiNewsIE): # XXX: Do not subclass from concrete IE class RaiSudtirolIE(RaiBaseIE): - _VALID_URL = r'https?://raisudtirol\.rai\.it/.+media=(?P\w+)' + _VALID_URL = r'https?://rai(?:bz|sudtirol)\.rai\.it/.+media=(?P\w+)' _TESTS = [{ # mp4 file 'url': 'https://raisudtirol.rai.it/la/index.php?media=Ptv1619729460', @@ -791,6 +791,9 @@ class RaiSudtirolIE(RaiBaseIE): 'formats': 'count:6', }, 'params': {'skip_download': True}, + }, { + 'url': 'https://raibz.rai.it/de/index.php?media=Ptv1751660400', + 'only_matching': True, }] def _real_extract(self, url): diff --git a/yt-dlp/yt_dlp/extractor/tfo.py b/yt-dlp/yt_dlp/extractor/tfo.py index 0d1b252175..1884ab2e8e 100644 --- a/yt-dlp/yt_dlp/extractor/tfo.py +++ b/yt-dlp/yt_dlp/extractor/tfo.py @@ -6,6 +6,7 @@ from ..utils import ExtractorError, clean_html, int_or_none class TFOIE(InfoExtractor): + _WORKING = False _GEO_COUNTRIES = ['CA'] _VALID_URL = r'https?://(?:www\.)?tfo\.org/(?:en|fr)/(?:[^/]+/){2}(?P\d+)' _TEST = { diff --git a/yt-dlp/yt_dlp/extractor/tv5unis.py b/yt-dlp/yt_dlp/extractor/tv5unis.py index 88fd334822..fe7fd0325b 100644 --- a/yt-dlp/yt_dlp/extractor/tv5unis.py +++ b/yt-dlp/yt_dlp/extractor/tv5unis.py @@ -51,6 +51,7 @@ class TV5UnisBaseIE(InfoExtractor): class TV5UnisVideoIE(TV5UnisBaseIE): + _WORKING = False IE_NAME = 'tv5unis:video' _VALID_URL = r'https?://(?:www\.)?tv5unis\.ca/videos/[^/]+/(?P\d+)' _TEST = { @@ -71,6 +72,7 @@ class TV5UnisVideoIE(TV5UnisBaseIE): class TV5UnisIE(TV5UnisBaseIE): + _WORKING = False IE_NAME = 'tv5unis' 
_VALID_URL = r'https?://(?:www\.)?tv5unis\.ca/videos/(?P[^/]+)(?:/saisons/(?P\d+)/episodes/(?P\d+))?/?(?:[?#&]|$)' _TESTS = [{ diff --git a/yt-dlp/yt_dlp/extractor/unitednations.py b/yt-dlp/yt_dlp/extractor/unitednations.py new file mode 100644 index 0000000000..f9283fd6c1 --- /dev/null +++ b/yt-dlp/yt_dlp/extractor/unitednations.py @@ -0,0 +1,32 @@ +from .common import InfoExtractor +from .kaltura import KalturaIE + + +class UnitedNationsWebTvIE(InfoExtractor): + _VALID_URL = r'https?://webtv\.un\.org/(?:ar|zh|en|fr|ru|es)/asset/\w+/(?P\w+)' + _TESTS = [{ + 'url': 'https://webtv.un.org/en/asset/k1o/k1o7stmi6p', + 'md5': 'b2f8b3030063298ae841b4b7ddc01477', + 'info_dict': { + 'id': '1_o7stmi6p', + 'ext': 'mp4', + 'title': 'António Guterres (Secretary-General) on Israel and Iran - Security Council, 9939th meeting', + 'thumbnail': 'http://cfvod.kaltura.com/p/2503451/sp/250345100/thumbnail/entry_id/1_o7stmi6p/version/100021', + 'uploader_id': 'evgeniia.alisova@un.org', + 'upload_date': '20250620', + 'timestamp': 1750430976, + 'duration': 234, + 'view_count': int, + }, + }] + + def _real_extract(self, url): + video_id = self._match_id(url) + webpage = self._download_webpage(url, video_id) + + partner_id = self._html_search_regex( + r'partnerId:\s*(\d+)', webpage, 'partner_id') + entry_id = self._html_search_regex( + r'const\s+kentryID\s*=\s*["\'](\w+)["\']', webpage, 'kentry_id') + + return self.url_result(f'kaltura:{partner_id}:{entry_id}', KalturaIE) diff --git a/yt-dlp/yt_dlp/extractor/unsupported.py b/yt-dlp/yt_dlp/extractor/unsupported.py index 31393b02a4..05ae4dd18a 100644 --- a/yt-dlp/yt_dlp/extractor/unsupported.py +++ b/yt-dlp/yt_dlp/extractor/unsupported.py @@ -53,6 +53,10 @@ class KnownDRMIE(UnsupportedInfoExtractor): r'(?:beta\.)?crunchyroll\.com', r'viki\.com', r'deezer\.com', + r'b-ch\.com', + r'ctv\.ca', + r'noovo\.ca', + r'tsn\.ca', ) _TESTS = [{ @@ -168,6 +172,18 @@ class KnownDRMIE(UnsupportedInfoExtractor): }, { 'url': 
'http://www.deezer.com/playlist/176747451', 'only_matching': True, + }, { + 'url': 'https://www.b-ch.com/titles/8203/001', + 'only_matching': True, + }, { + 'url': 'https://www.ctv.ca/shows/masterchef-53506/the-audition-battles-s15e1', + 'only_matching': True, + }, { + 'url': 'https://www.noovo.ca/emissions/lamour-est-dans-le-pre/prets-pour-lamour-s10e1', + 'only_matching': True, + }, { + 'url': 'https://www.tsn.ca/video/relaxed-oilers-look-to-put-emotional-game-2-loss-in-the-rearview%7E3148747', + 'only_matching': True, }] def _real_extract(self, url): diff --git a/yt-dlp/yt_dlp/extractor/vimeo.py b/yt-dlp/yt_dlp/extractor/vimeo.py index fdd42ec94f..7ffe89f227 100644 --- a/yt-dlp/yt_dlp/extractor/vimeo.py +++ b/yt-dlp/yt_dlp/extractor/vimeo.py @@ -50,6 +50,7 @@ class VimeoBaseInfoExtractor(InfoExtractor): 'with the URL of the page that embeds this video.') _DEFAULT_CLIENT = 'android' + _DEFAULT_AUTHED_CLIENT = 'web' _CLIENT_HEADERS = { 'Accept': 'application/vnd.vimeo.*+json; version=3.4.10', 'Accept-Language': 'en', @@ -125,7 +126,14 @@ class VimeoBaseInfoExtractor(InfoExtractor): return self._viewer_info + @property + def _is_logged_in(self): + return 'vimeo' in self._get_cookies('https://vimeo.com') + def _perform_login(self, username, password): + if self._is_logged_in: + return + viewer = self._fetch_viewer_info() data = { 'action': 'login', @@ -150,7 +158,7 @@ class VimeoBaseInfoExtractor(InfoExtractor): raise ExtractorError('Unable to log in') def _real_initialize(self): - if self._LOGIN_REQUIRED and not self._get_cookies('https://vimeo.com').get('vimeo'): + if self._LOGIN_REQUIRED and not self._is_logged_in: self.raise_login_required() def _get_video_password(self): @@ -354,15 +362,22 @@ class VimeoBaseInfoExtractor(InfoExtractor): return f'Bearer {self._oauth_tokens[cache_key]}' - def _call_videos_api(self, video_id, unlisted_hash=None, path=None, *, force_client=None, query=None, **kwargs): - client = force_client or self._configuration_arg('client', 
[self._DEFAULT_CLIENT], ie_key=VimeoIE)[0] + def _get_requested_client(self): + default_client = self._DEFAULT_AUTHED_CLIENT if self._is_logged_in else self._DEFAULT_CLIENT + + client = self._configuration_arg('client', [default_client], ie_key=VimeoIE)[0] if client not in self._CLIENT_CONFIGS: raise ExtractorError( f'Unsupported API client "{client}" requested. ' f'Supported clients are: {", ".join(self._CLIENT_CONFIGS)}', expected=True) + return client + + def _call_videos_api(self, video_id, unlisted_hash=None, path=None, *, force_client=None, query=None, **kwargs): + client = force_client or self._get_requested_client() + client_config = self._CLIENT_CONFIGS[client] - if client_config['REQUIRES_AUTH'] and not self._get_cookies('https://vimeo.com').get('vimeo'): + if client_config['REQUIRES_AUTH'] and not self._is_logged_in: self.raise_login_required(f'The {client} client requires authentication') return self._download_json( @@ -382,7 +397,7 @@ class VimeoBaseInfoExtractor(InfoExtractor): def _extract_original_format(self, url, video_id, unlisted_hash=None): # Original/source formats are only available when logged in - if not self._get_cookies('https://vimeo.com/').get('vimeo'): + if not self._is_logged_in: return None policy = self._configuration_arg('original_format_policy', ['auto'], ie_key=VimeoIE)[0] @@ -1111,14 +1126,25 @@ class VimeoIE(VimeoBaseInfoExtractor): video = self._call_videos_api(video_id, unlisted_hash) break except ExtractorError as e: - if (not retry and isinstance(e.cause, HTTPError) and e.cause.status == 400 - and 'password' in traverse_obj( - self._webpage_read_content(e.cause.response, e.cause.response.url, video_id, fatal=False), - ({json.loads}, 'invalid_parameters', ..., 'field'), - )): + if not isinstance(e.cause, HTTPError): + raise + response = traverse_obj( + self._webpage_read_content(e.cause.response, e.cause.response.url, video_id, fatal=False), + ({json.loads}, {dict})) or {} + if ( + not retry and e.cause.status == 400 + and 
'password' in traverse_obj(response, ('invalid_parameters', ..., 'field')) + ): self._verify_video_password(video_id) - continue - raise + elif e.cause.status == 404 and response.get('error_code') == 5460: + self.raise_login_required(join_nonempty( + traverse_obj(response, ('error', {str.strip})), + 'Authentication may be needed due to your location.', + 'If your IP address is located in Europe you could try using a VPN/proxy,', + f'or else u{self._login_hint()[1:]}', + delim=' '), method=None) + else: + raise if config_url := traverse_obj(video, ('config_url', {url_or_none})): info = self._parse_config(self._download_json(config_url, video_id), video_id)