Update On Sat Sep 6 20:35:54 CEST 2025

This commit is contained in:
github-action[bot]
2025-09-06 20:35:55 +02:00
parent 20396b5039
commit 93741b781f
164 changed files with 7460 additions and 3057 deletions

View File

@@ -19,20 +19,20 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@v4.2.2
uses: actions/checkout@v5.0.0
- name: Install cosign
if: github.event_name != 'pull_request'
uses: sigstore/cosign-installer@v3.8.2
uses: sigstore/cosign-installer@v3.9.2
with:
cosign-release: 'v2.2.4'
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3.10.0
uses: docker/setup-buildx-action@v3.11.1
- name: Log into registry ${{ env.REGISTRY }}
if: github.event_name != 'pull_request'
uses: docker/login-action@v3.4.0
uses: docker/login-action@v3.5.0
with:
registry: ${{ env.REGISTRY }}
username: ${{ github.actor }}
@@ -40,13 +40,13 @@ jobs:
- name: Extract Docker metadata
id: meta
uses: docker/metadata-action@v5.7.0
uses: docker/metadata-action@v5.8.0
with:
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
- name: Build and push Docker image
id: build-and-push
uses: docker/build-push-action@v6.16.0
uses: docker/build-push-action@v6.18.0
with:
context: .
push: ${{ github.event_name != 'pull_request' }}

View File

@@ -9,11 +9,11 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4.2.2
uses: actions/checkout@v5.0.0
with:
fetch-depth: 0
- name: Set up Go
uses: actions/setup-go@v5.4.0
uses: actions/setup-go@v5.5.0
- name: Run GoReleaser
uses: goreleaser/goreleaser-action@v6.3.0
with:

View File

@@ -40,7 +40,7 @@ English | [简体中文](README_zh.md)
- **⚙️ Minimal Configuration**
- No config files required, ready to use via CLI.
- Optimized for CI/CD and containers.
- Flexible tuning via environment variables.
- Advanced parameters like timeouts and rate limits.
- **📈 Performance**
- Intelligent scheduling, auto-tuning, ultra-low resource usage.
@@ -98,6 +98,8 @@ The [NodePassProject](https://github.com/NodePassProject) organization develops
- **[npsh](https://github.com/NodePassProject/npsh)**: A collection of one-click scripts that provide simple deployment for API or Dashboard with flexible configuration and management.
- **[nodepass-core](https://github.com/NodePassProject/nodepass-core)**: Development branch, featuring previews of new functionalities and performance optimizations, suitable for advanced users and developers.
## 💬 Discussion
- Follow our [Telegram Channel](https://t.me/NodePassChannel) for updates and community support.
@@ -116,19 +118,19 @@ This project is provided "as is" without any warranties. Users assume all risks
<table>
<tr>
<td width="220" align="center">
<td width="240" align="center">
<a href="https://whmcs.as211392.com"><img src="https://cdn.yobc.de/assets/dreamcloud.png"></a>
</td>
<td width="220" align="center">
<td width="240" align="center">
<a href="https://t.me/xiao_bai_xue_zhang"><img src="https://cdn.yobc.de/assets/xuezhang.png"></a>
</td>
</tr>
<tr>
<td width="220" align="center">
<td width="240" align="center">
<a href="https://sharon.io"><img src="https://cdn.yobc.de/assets/sharon.png"></a>
</td>
<td width="220" align="center">
<a href="https://zmto.com"><img src="https://cdn.yobc.de/assets/zmto.png"></a>
<td width="240" align="center">
<a href="https://vps.town"><img src="https://cdn.yobc.de/assets/vpstown.png"></a>
</td>
</tr>
</table>

View File

@@ -40,7 +40,7 @@
- **⚙️ 极简配置方式**
- 无需配置文件,仅命令行参数即可运行,适合自动化和快速迭代。
- 适配 CI/CD 流程与容器环境,极大提升部署和运维效率。
- 支持环境变量性能调优,灵活适应不同运行环境。
- 支持超时、限速等高级参数调优,灵活适应不同运行环境。
- **📈 高性能优化**
- 智能流量调度与自动连接调优,极低资源占用。
@@ -98,6 +98,8 @@ nodepass "master://:10101/api?log=debug&tls=1"
- **[npsh](https://github.com/NodePassProject/npsh)**: 简单易用的 NodePass 一键脚本合集,包括 API 主控、Dash 面板的安装部署、灵活配置和辅助管理。
- **[nodepass-core](https://github.com/NodePassProject/nodepass-core)**: 开发分支,包含新功能预览和性能优化测试,适合高级用户和开发者。
## 💬 讨论
- 关注我们的 [Telegram 频道](https://t.me/NodePassChannel) 获取最新更新和社区支持。
@@ -116,19 +118,19 @@ nodepass "master://:10101/api?log=debug&tls=1"
<table>
<tr>
<td width="220" align="center">
<td width="240" align="center">
<a href="https://whmcs.as211392.com"><img src="https://cdn.yobc.de/assets/dreamcloud.png"></a>
</td>
<td width="220" align="center">
<td width="240" align="center">
<a href="https://t.me/xiao_bai_xue_zhang"><img src="https://cdn.yobc.de/assets/xuezhang.png"></a>
</td>
</tr>
<tr>
<td width="220" align="center">
<td width="240" align="center">
<a href="https://sharon.io"><img src="https://cdn.yobc.de/assets/sharon.png"></a>
</td>
<td width="220" align="center">
<a href="https://zmto.com"><img src="https://cdn.yobc.de/assets/zmto.png"></a>
<td width="240" align="center">
<a href="https://vps.town"><img src="https://cdn.yobc.de/assets/vpstown.png"></a>
</td>
</tr>
</table>

View File

@@ -65,32 +65,20 @@ func getExitInfo() {
logger.SetLogLevel(logs.Info)
logger.Info(`Version: %v %v/%v
╭─────────────────────────────────────────────────────────
░░█▀█░█▀█░░▀█░█▀▀░█▀█░█▀█░█▀▀░█▀▀░░
░░█░█░█░█░█▀█░█▀▀░█▀▀░█▀█░▀▀█░▀▀█░░
░░▀░▀░▀▀▀░▀▀▀░▀▀▀░▀░░░▀░▀░▀▀▀░▀▀▀░░
├─────────────────────────────────────────────────────────
>Universal TCP/UDP Tunneling Solution
>https://github.com/yosebyte/nodepass
├─────────────────────────────────────────────────────────
│ Usage: nodepass "<your-unique-URL-syntax-command>"
├─────────────────────────────────────────────────────────
│ server://password@tunnel/target?log=X&tls=X&crt=X&key=X
│ client://password@tunnel/target?log=X&min=X&max=X
│ master://host:port/prefix?log=X&tls=X&crt=X&key=X
├──────────┬─────────────────────────┬────────────────────┤
│ Keys │ Values │ Description │
├──────────┼─────────────────────────┼────────────────────┤
│ tunnel │ host:port (IP | domain) │ Tunnel address │
│ target │ host:port (IP | domain) │ Target address │
│ log │ debug | warn | error │ Default level info │
│ tls │ 0 off | 1 on | 2 verify │ Default TLS code-0 │
│ crt │ <path/to/crt.pem> │ Custom certificate │
│ key │ <path/to/key.pem> │ Custom private key │
│ min │ <min> │ Min pool capacity │
│ max │ <max> │ Max pool capacity │
│ prefix │ <path/to/your/api> │ Master API prefix │
╰──────────┴─────────────────────────┴────────────────────╯
╭─────────────────────────────────────────────╮
│ ░░█▀█░█▀█░░▀█░█▀▀░█▀█░█▀█░█▀▀░█▀▀░░ │
│ ░░█░█░█░█░█▀█░█▀▀░█▀▀░█▀█░▀▀█░▀▀█░░ │
│ ░░▀░▀░▀▀▀░▀▀▀░▀▀▀░▀░░░▀░▀░▀▀▀░▀▀▀░░ │
├─────────────────────────────────────────────┤
│ >Universal TCP/UDP Tunneling Solution │
│ >https://github.com/yosebyte/nodepass │
├─────────────────────────────────────────────┤
│ Usage: nodepass "<your-unique-URL-command>" │
├─────────────────────────────────────────────┤
│ server://password@host/host?<query>&<query> │
│ client://password@host/host?<query>&<query> │
│ master://hostname:port/path?<query>&<query> │
╰─────────────────────────────────────────────╯
`, version, runtime.GOOS, runtime.GOARCH)
os.Exit(1)
}

File diff suppressed because it is too large Load Diff

View File

@@ -47,19 +47,169 @@ Example with TLS Mode 2 (custom certificate):
nodepass "server://0.0.0.0:10101/0.0.0.0:8080?tls=2&crt=/path/to/cert.pem&key=/path/to/key.pem"
```
## Connection Pool Capacity Parameters
## Run Mode Control
Connection pool capacity can be configured via URL query parameters:
NodePass supports configurable run modes via the `mode` query parameter to control the behavior of both client and server instances. This provides flexibility in deployment scenarios where automatic mode detection may not be suitable.
- `min`: Minimum connection pool capacity (default: 64)
- `max`: Maximum connection pool capacity (default: 1024)
### Client Mode Control
For client instances, the `mode` parameter controls the connection strategy:
- **Mode 0** (Default): Automatic mode detection
- Attempts to bind to tunnel address locally first
- If successful, operates in single-end forwarding mode
- If binding fails, operates in dual-end handshake mode
- **Mode 1**: Force single-end forwarding mode
- Binds to tunnel address locally and forwards traffic directly to target
- Uses direct connection establishment for high performance
- No handshake with server required
- **Mode 2**: Force dual-end handshake mode
- Always connects to remote server for tunnel establishment
- Requires handshake with server before data transfer
- Supports bidirectional data flow coordination
Example:
```bash
# Set minimum pool to 32 and maximum to 4096
nodepass "client://server.example.com:10101/127.0.0.1:8080?min=32&max=4096"
# Force client to operate in single-end forwarding mode
nodepass "client://127.0.0.1:1080/target.example.com:8080?mode=1"
# Force client to operate in dual-end handshake mode
nodepass "client://server.example.com:10101/127.0.0.1:8080?mode=2"
```
### Server Mode Control
For server instances, the `mode` parameter controls the data flow direction:
- **Mode 0** (Default): Automatic flow direction detection
- Attempts to bind to target address locally first
- If successful, operates in reverse mode (server receives traffic)
- If binding fails, operates in forward mode (server sends traffic)
- **Mode 1**: Force reverse mode
- Server binds to target address locally and receives traffic
- Incoming connections are forwarded to connected clients
- Data flow: External → Server → Client → Target
- **Mode 2**: Force forward mode
- Server connects to remote target address
- Client connections are forwarded to remote target
- Data flow: Client → Server → External Target
Example:
```bash
# Force server to operate in reverse mode
nodepass "server://0.0.0.0:10101/0.0.0.0:8080?mode=1"
# Force server to operate in forward mode
nodepass "server://0.0.0.0:10101/remote.example.com:8080?mode=2"
```
## Connection Pool Capacity Parameters
Connection pool capacity parameters only apply to dual-end handshake mode and are configured through different approaches:
- `min`: Minimum connection pool capacity (default: 64) - Set by client via URL query parameters
- `max`: Maximum connection pool capacity (default: 1024) - Determined by server and delivered to client during handshake
**Important Notes**:
- The `max` parameter set by client will be overridden by the value delivered from server during handshake
- The `min` parameter is fully controlled by client and will not be modified by server
- In client single-end forwarding mode, connection pools are not used and these parameters are ignored
Example:
```bash
# Client sets minimum pool to 32, maximum pool will be determined by server
nodepass "client://server.example.com:10101/127.0.0.1:8080?min=32"
```
## Data Read Timeout
Data read timeout can be set using the URL query parameter `read`, with units in seconds, minutes, or hours:
- `read`: Data read timeout (default: 1 hour)
- Value format: integer followed by optional unit (`s` for seconds, `m` for minutes, `h` for hours)
- Examples: `30s` (30 seconds), `5m` (5 minutes), `1h` (1 hour)
- Applies to both client and server modes
- If no data is received within the timeout period, the connection is closed
Example:
```bash
# Set data read timeout to 5 minutes
nodepass "client://server.example.com:10101/127.0.0.1:8080?read=5m"
# Set data read timeout to 30 seconds for fast-response applications
nodepass "client://server.example.com:10101/127.0.0.1:8080?read=30s"
# Set data read timeout to 30 minutes for long-running transfers
nodepass "client://server.example.com:10101/127.0.0.1:8080?read=30m"
```
## Rate Limiting
NodePass supports bandwidth rate limiting for traffic control through the `rate` parameter. This feature helps prevent network congestion and ensures fair resource allocation across multiple connections.
- `rate`: Maximum bandwidth limit in Mbps (Megabits per second)
- Value 0 or omitted: No rate limiting (unlimited bandwidth)
- Positive integer: Rate limit in Mbps (e.g., 10 means 10 Mbps)
- Applied to both upload and download traffic
- Uses token bucket algorithm for smooth traffic shaping
Example:
```bash
# Limit bandwidth to 50 Mbps
nodepass "server://0.0.0.0:10101/0.0.0.0:8080?rate=50"
# Client with 100 Mbps rate limit
nodepass "client://server.example.com:10101/127.0.0.1:8080?rate=100"
# Combined with other parameters
nodepass "server://0.0.0.0:10101/0.0.0.0:8080?log=error&tls=1&rate=50"
```
**Rate Limiting Use Cases:**
- **Bandwidth Control**: Prevent NodePass from consuming all available bandwidth
- **Fair Sharing**: Ensure multiple applications can share network resources
- **Cost Management**: Control data usage in metered network environments
- **QoS Compliance**: Meet service level agreements for bandwidth usage
- **Testing**: Simulate low-bandwidth environments for application testing
## PROXY Protocol Support
NodePass supports PROXY protocol v1 for preserving client connection information when forwarding traffic through load balancers, reverse proxies, or other intermediary services.
- `proxy`: PROXY protocol support (default: 0)
- Value 0: Disabled - no PROXY protocol header is sent
- Value 1: Enabled - sends PROXY protocol v1 header before data transfer
- Works with both TCP4 and TCP6 connections
- Compatible with HAProxy, Nginx, and other PROXY protocol aware services
The PROXY protocol header includes original client IP, server IP, and port information, allowing downstream services to identify the real client connection details even when traffic passes through NodePass tunnels.
Example:
```bash
# Enable PROXY protocol v1 for server mode
nodepass "server://0.0.0.0:10101/0.0.0.0:8080?proxy=1"
# Enable PROXY protocol v1 for client mode
nodepass "client://server.example.com:10101/127.0.0.1:8080?proxy=1"
# Combined with other parameters
nodepass "server://0.0.0.0:10101/0.0.0.0:8080?log=info&tls=1&proxy=1&rate=100"
```
**PROXY Protocol Use Cases:**
- **Load Balancer Integration**: Preserve client IP information when forwarding through load balancers
- **Reverse Proxy Support**: Enable backend services to see original client connections
- **Logging and Analytics**: Maintain accurate client connection logs for security and analysis
- **Access Control**: Allow downstream services to apply IP-based access controls
- **Compliance**: Meet regulatory requirements for connection logging and auditing
**Important Notes:**
- The target service must support PROXY protocol v1 to properly handle the header
- PROXY headers are only sent for TCP connections, not UDP
- The header format follows the HAProxy PROXY protocol v1 specification
- If the target service doesn't support PROXY protocol, connections may fail or behave unexpectedly
## URL Query Parameter Scope and Applicability
NodePass allows flexible configuration via URL query parameters. The following table shows which parameters are applicable in server, client, and master modes:
@@ -71,14 +221,21 @@ NodePass allows flexible configuration via URL query parameters. The following t
| `crt` | Custom certificate path| O | X | O |
| `key` | Custom key path | O | X | O |
| `min` | Minimum pool capacity | X | O | X |
| `max` | Maximum pool capacity | O | O | X |
| `max` | Maximum pool capacity | O | X | X |
| `mode` | Run mode control | O | O | X |
| `read` | Data read timeout | O | O | X |
| `rate` | Bandwidth rate limit | O | O | X |
| `slot` | Maximum connection limit | O | O | X |
| `proxy` | PROXY protocol support | O | O | X |
- O: Parameter is valid and recommended for configuration
- X: Parameter is not applicable and should be ignored
**Best Practices:**
- For server/master modes, configure security-related parameters (`tls`, `crt`, `key`) to enhance data channel security.
- For client/master modes, adjust connection pool capacity (`min`, `max`) based on traffic and resource constraints for optimal performance.
- For client/server dual-end handshake modes, adjust connection pool capacity (`min`, `max`) based on traffic and resource constraints for optimal performance.
- Use run mode control (`mode`) when automatic detection doesn't match your deployment requirements or for consistent behavior across environments.
- Configure rate limiting (`rate`) to control bandwidth usage and prevent network congestion in shared environments.
- Log level (`log`) can be set in all modes for easier operations and troubleshooting.
## Environment Variables
@@ -87,22 +244,22 @@ NodePass behavior can be fine-tuned using environment variables. Below is the co
| Variable | Description | Default | Example |
|----------|-------------|---------|---------|
| `NP_SEMAPHORE_LIMIT` | Maximum number of concurrent connections | 1024 | `export NP_SEMAPHORE_LIMIT=2048` |
| `NP_UDP_DATA_BUF_SIZE` | Buffer size for UDP packets | 8192 | `export NP_UDP_DATA_BUF_SIZE=16384` |
| `NP_UDP_READ_TIMEOUT` | Timeout for UDP read operations | 20s | `export NP_UDP_READ_TIMEOUT=30s` |
| `NP_UDP_DIAL_TIMEOUT` | Timeout for establishing UDP connections | 20s | `export NP_UDP_DIAL_TIMEOUT=30s` |
| `NP_TCP_READ_TIMEOUT` | Timeout for TCP read operations | 20s | `export NP_TCP_READ_TIMEOUT=30s` |
| `NP_TCP_DIAL_TIMEOUT` | Timeout for establishing TCP connections | 20s | `export NP_TCP_DIAL_TIMEOUT=30s` |
| `NP_MIN_POOL_INTERVAL` | Minimum interval between connection creations | 1s | `export NP_MIN_POOL_INTERVAL=500ms` |
| `NP_MAX_POOL_INTERVAL` | Maximum interval between connection creations | 5s | `export NP_MAX_POOL_INTERVAL=3s` |
| `NP_SEMAPHORE_LIMIT` | Signal channel buffer size | 65536 | `export NP_SEMAPHORE_LIMIT=2048` |
| `NP_UDP_DATA_BUF_SIZE` | Buffer size for UDP packets | 2048 | `export NP_UDP_DATA_BUF_SIZE=16384` |
| `NP_HANDSHAKE_TIMEOUT` | Timeout for handshake operations | 10s | `export NP_HANDSHAKE_TIMEOUT=30s` |
| `NP_TCP_DIAL_TIMEOUT` | Timeout for establishing TCP connections | 30s | `export NP_TCP_DIAL_TIMEOUT=60s` |
| `NP_UDP_DIAL_TIMEOUT` | Timeout for establishing UDP connections | 10s | `export NP_UDP_DIAL_TIMEOUT=30s` |
| `NP_POOL_GET_TIMEOUT` | Timeout for getting connections from pool | 30s | `export NP_POOL_GET_TIMEOUT=60s` |
| `NP_MIN_POOL_INTERVAL` | Minimum interval between connection creations | 100ms | `export NP_MIN_POOL_INTERVAL=200ms` |
| `NP_MAX_POOL_INTERVAL` | Maximum interval between connection creations | 1s | `export NP_MAX_POOL_INTERVAL=3s` |
| `NP_REPORT_INTERVAL` | Interval for health check reports | 5s | `export NP_REPORT_INTERVAL=10s` |
| `NP_SERVICE_COOLDOWN` | Cooldown period before restart attempts | 3s | `export NP_SERVICE_COOLDOWN=5s` |
| `NP_SHUTDOWN_TIMEOUT` | Timeout for graceful shutdown | 5s | `export NP_SHUTDOWN_TIMEOUT=10s` |
| `NP_RELOAD_INTERVAL` | Interval for cert/pool reload | 1h | `export NP_RELOAD_INTERVAL=30m` |
| `NP_RELOAD_INTERVAL` | Interval for cert reload/state backup | 1h | `export NP_RELOAD_INTERVAL=30m` |
### Connection Pool Tuning
The connection pool parameters are important settings for performance tuning:
The connection pool parameters are important settings for performance tuning in dual-end handshake mode and do not apply to client single-end forwarding mode:
#### Pool Capacity Settings
@@ -120,18 +277,18 @@ The connection pool parameters are important settings for performance tuning:
- `NP_MIN_POOL_INTERVAL`: Controls the minimum time between connection creation attempts
- Too low: May overwhelm network with connection attempts
- Recommended range: 500ms-2s depending on network latency
- Recommended range: 100ms-500ms depending on network latency and expected load
- `NP_MAX_POOL_INTERVAL`: Controls the maximum time between connection creation attempts
- Too high: May result in pool depletion during traffic spikes
- Recommended range: 3s-10s depending on expected traffic patterns
- Recommended range: 1s-5s depending on expected traffic patterns
#### Connection Management
- `NP_SEMAPHORE_LIMIT`: Controls the maximum number of concurrent tunnel operations
- Too low: Rejected connections during traffic spikes
- Too high: Potential memory pressure from too many concurrent goroutines
- Recommended range: 1000-5000 for most applications, higher for high-throughput scenarios
- `NP_SEMAPHORE_LIMIT`: Controls signal channel buffer size
- Too small: May cause signal loss
- Too large: Increased memory usage
- Recommended range: 1000-5000
### UDP Settings
@@ -142,11 +299,8 @@ For applications relying heavily on UDP traffic:
- Default (2048) works well for most cases
- Consider increasing to 16384 or higher for media streaming or game servers
- `NP_UDP_READ_TIMEOUT`: Timeout for UDP read operations
- Increase for high-latency networks or applications with slow response times
- Decrease for low-latency applications requiring quick failover
- `NP_UDP_DIAL_TIMEOUT`: Timeout for establishing UDP connections
- Default (10s) provides good balance for most applications
- Increase for high-latency networks or applications with slow response times
- Decrease for low-latency applications requiring quick failover
@@ -154,15 +308,18 @@ For applications relying heavily on UDP traffic:
For optimizing TCP connections:
- `NP_TCP_READ_TIMEOUT`: Timeout for TCP read operations
- Increase for high-latency networks or servers with slow response times
- Decrease for applications that need to detect disconnections quickly
- Affects wait time during data transfer phases
- `NP_TCP_DIAL_TIMEOUT`: Timeout for establishing TCP connections
- Default (30s) is suitable for most network conditions
- Increase for unstable network conditions
- Decrease for applications that need quick connection success/failure determination
- Affects initial connection establishment phase
### Pool Management Settings
- `NP_POOL_GET_TIMEOUT`: Maximum time to wait when getting connections from pool
- Default (30s) provides sufficient time for connection establishment
- Increase for high-latency environments or when using large pool sizes
- Decrease for applications requiring fast failure detection
- In client single-end forwarding mode, connection pools are not used and this parameter is ignored
### Service Management Settings
@@ -170,9 +327,9 @@ For optimizing TCP connections:
- Lower values provide more frequent updates but increase log volume
- Higher values reduce log output but provide less immediate visibility
- `NP_RELOAD_INTERVAL`: Controls how frequently TLS certificates are checked for changes
- Lower values detect certificate changes faster but increase file system operations
- Higher values reduce overhead but delay detection of certificate updates
- `NP_RELOAD_INTERVAL`: Controls how frequently TLS certificates are checked for changes and state backups are performed
- Lower values provide faster certificate change detection and more frequent backups but increase file system operations
- Higher values reduce overhead but delay certificate updates and backup frequency
- `NP_SERVICE_COOLDOWN`: Time to wait before attempting service restarts
- Lower values attempt recovery faster but might cause thrashing in case of persistent issues
@@ -192,15 +349,20 @@ For applications requiring maximum throughput (e.g., media streaming, file trans
URL parameters:
```bash
nodepass "client://server.example.com:10101/127.0.0.1:8080?min=128&max=8192"
# High-throughput server with 1 Gbps rate limit
nodepass "server://0.0.0.0:10101/0.0.0.0:8080?max=8192&rate=1000"
# High-throughput client with 500 Mbps rate limit
nodepass "client://server.example.com:10101/127.0.0.1:8080?min=128&rate=500"
```
Environment variables:
```bash
export NP_MIN_POOL_INTERVAL=500ms
export NP_MAX_POOL_INTERVAL=3s
export NP_MIN_POOL_INTERVAL=50ms
export NP_MAX_POOL_INTERVAL=500ms
export NP_SEMAPHORE_LIMIT=8192
export NP_UDP_DATA_BUF_SIZE=32768
export NP_POOL_GET_TIMEOUT=60s
export NP_REPORT_INTERVAL=10s
```
@@ -210,15 +372,21 @@ For applications requiring minimal latency (e.g., gaming, financial trading):
URL parameters:
```bash
nodepass "client://server.example.com:10101/127.0.0.1:8080?min=256&max=4096"
# Low-latency server with moderate rate limit
nodepass "server://0.0.0.0:10101/0.0.0.0:8080?max=4096&rate=200"
# Low-latency client with moderate rate limit
nodepass "client://server.example.com:10101/127.0.0.1:8080?min=256&rate=200"
```
Environment variables:
```bash
export NP_MIN_POOL_INTERVAL=100ms
export NP_MAX_POOL_INTERVAL=1s
export NP_MIN_POOL_INTERVAL=50ms
export NP_MAX_POOL_INTERVAL=500ms
export NP_SEMAPHORE_LIMIT=4096
export NP_UDP_READ_TIMEOUT=5s
export NP_TCP_DIAL_TIMEOUT=5s
export NP_UDP_DIAL_TIMEOUT=5s
export NP_POOL_GET_TIMEOUT=15s
export NP_REPORT_INTERVAL=1s
```
@@ -228,14 +396,21 @@ For deployment on systems with limited resources (e.g., IoT devices, small VPS):
URL parameters:
```bash
nodepass "client://server.example.com:10101/127.0.0.1:8080?min=16&max=512"
# Resource-constrained server with conservative rate limit
nodepass "server://0.0.0.0:10101/0.0.0.0:8080?max=512&rate=50"
# Resource-constrained client with conservative rate limit
nodepass "client://server.example.com:10101/127.0.0.1:8080?min=16&rate=50"
```
Environment variables:
```bash
export NP_MIN_POOL_INTERVAL=2s
export NP_MAX_POOL_INTERVAL=10s
export NP_MIN_POOL_INTERVAL=200ms
export NP_MAX_POOL_INTERVAL=2s
export NP_SEMAPHORE_LIMIT=512
export NP_TCP_DIAL_TIMEOUT=20s
export NP_UDP_DIAL_TIMEOUT=20s
export NP_POOL_GET_TIMEOUT=45s
export NP_REPORT_INTERVAL=30s
export NP_SHUTDOWN_TIMEOUT=3s
```

View File

@@ -76,9 +76,30 @@ This enables verbose output to help identify:
- Data transfer details
- Error conditions
### Example 6: Run Mode Control
Control the operational behavior with explicit mode settings:
```bash
# Force server to operate in reverse mode (server receives traffic)
nodepass "server://0.0.0.0:10101/0.0.0.0:8080?mode=1&tls=1"
# Force client to operate in single-end forwarding mode (high performance local proxy)
nodepass "client://127.0.0.1:1080/remote.example.com:8080?mode=1"
# Force client to operate in dual-end handshake mode (requires server coordination)
nodepass "client://server.example.com:10101/127.0.0.1:8080?mode=2&log=debug"
```
These configurations:
- **Server mode=1**: Forces reverse mode where server binds to target address locally
- **Client mode=1**: Forces single-end forwarding with direct connection establishment for high performance
- **Client mode=2**: Forces dual-end handshake mode for scenarios requiring server coordination
- Use mode control when automatic detection doesn't match your deployment requirements
## Database Access Through Firewall
### Example 6: Database Tunneling
### Example 7: Database Tunneling
Enable secure access to a database server behind a firewall:
@@ -98,7 +119,7 @@ This configuration:
## Secure Microservice Communication
### Example 7: Service-to-Service Communication
### Example 8: Service-to-Service Communication
Enable secure communication between microservices:
@@ -116,9 +137,64 @@ This setup:
- Limits logging to warnings and errors only
- Maps service A's API to appear as a local service on service B
## Bandwidth Rate Limiting
### Example 9: File Transfer Server with Rate Limit
Control bandwidth usage for file transfer services:
```bash
# Server side: Limit bandwidth to 100 Mbps for file transfers
nodepass "server://0.0.0.0:10101/127.0.0.1:8080?log=info&tls=1&rate=100"
# Client side: Connect with 50 Mbps rate limit
nodepass "client://fileserver.example.com:10101/127.0.0.1:3000?log=info&rate=50"
```
This configuration:
- Limits server bandwidth to 100 Mbps to prevent network congestion
- Client further limits download speed to 50 Mbps for fair sharing
- Allows file transfers while preserving bandwidth for other services
- Uses TLS encryption for secure file transfer
### Example 10: IoT Sensor Data Collection with Conservative Limits
For IoT devices with limited bandwidth or metered connections:
```bash
# Server: Accept IoT data with 5 Mbps limit
nodepass "server://0.0.0.0:10101/127.0.0.1:1883?log=warn&rate=5"
# IoT device client: Send sensor data with 2 Mbps limit
nodepass "client://iot-gateway.example.com:10101/127.0.0.1:1883?log=error&rate=2"
```
This setup:
- Limits server to 5 Mbps for collecting sensor data from multiple IoT devices
- Individual IoT clients limited to 2 Mbps to prevent single device consuming all bandwidth
- Minimal logging (warn/error) to reduce resource usage on IoT devices
- Efficient for MQTT or other IoT protocols
### Example 11: Development Environment Rate Control
Testing applications under bandwidth constraints:
```bash
# Simulate slow network conditions for testing
nodepass "client://api.example.com:443/127.0.0.1:8080?log=debug&rate=1"
# High-speed development server with monitoring
nodepass "server://0.0.0.0:10101/127.0.0.1:3000?log=debug&rate=500"
```
This configuration:
- Client simulation of 1 Mbps connection for testing slow network scenarios
- Development server with 500 Mbps limit and detailed logging for debugging
- Helps identify performance issues under different bandwidth constraints
## IoT Device Management
### Example 8: IoT Gateway
### Example 12: IoT Gateway
Create a central access point for IoT devices:
@@ -138,7 +214,7 @@ This configuration:
## Multi-environment Development
### Example 9: Development Environment Access
### Example 13: Development Environment Access
Access different development environments through tunnels:
@@ -159,9 +235,79 @@ This setup:
- Enables developers to access environments without direct network exposure
- Maps remote services to different local ports for easy identification
## PROXY Protocol Integration
### Example 14: Load Balancer Integration with PROXY Protocol
Enable PROXY protocol support for integration with load balancers and reverse proxies:
```bash
# Server side: Enable PROXY protocol v1 for HAProxy/Nginx integration
nodepass "server://0.0.0.0:10101/127.0.0.1:8080?log=info&tls=1&proxy=1"
# Client side: Enable PROXY protocol to preserve client connection information
nodepass "client://tunnel.example.com:10101/127.0.0.1:3000?log=info&proxy=1"
```
This configuration:
- Sends PROXY protocol v1 headers before data transfer begins
- Preserves original client IP and port information through the tunnel
- Enables backend services to see real client connection details
- Compatible with HAProxy, Nginx, and other PROXY protocol aware services
- Useful for maintaining accurate access logs and IP-based access controls
### Example 15: Reverse Proxy Support for Web Applications
Enable web applications behind NodePass to receive original client information:
```bash
# NodePass server with PROXY protocol for web application
nodepass "server://0.0.0.0:10101/127.0.0.1:8080?log=warn&tls=2&crt=/path/to/cert.pem&key=/path/to/key.pem&proxy=1"
# Backend web server (e.g., Nginx) configuration to handle PROXY protocol
# In nginx.conf:
# server {
# listen 8080 proxy_protocol;
# real_ip_header proxy_protocol;
# set_real_ip_from 127.0.0.1;
# ...
# }
```
This setup:
- Web applications receive original client IP addresses instead of NodePass tunnel IP
- Enables proper access logging, analytics, and security controls
- Supports compliance requirements for connection auditing
- Works with web servers that support PROXY protocol (Nginx, HAProxy, etc.)
### Example 16: Database Access with Client IP Preservation
Maintain client IP information for database access logging and security:
```bash
# Database proxy server with PROXY protocol
nodepass "server://0.0.0.0:10101/127.0.0.1:5432?log=error&proxy=1"
# Application client connecting through tunnel
nodepass "client://dbproxy.example.com:10101/127.0.0.1:5432?proxy=1"
```
Benefits:
- Database logs show original application server IPs instead of tunnel IPs
- Enables IP-based database access controls to work properly
- Maintains audit trails for security and compliance
- Compatible with databases that support PROXY protocol (PostgreSQL with appropriate configuration)
**Important Notes for PROXY Protocol:**
- Target services must support PROXY protocol v1 to handle the headers correctly
- PROXY headers are only sent for TCP connections, not UDP traffic
- The header includes: protocol (TCP4/TCP6), source IP, destination IP, source port, destination port
- If target service doesn't support PROXY protocol, connections may fail or behave unexpectedly
- Test thoroughly in non-production environments before deploying with PROXY protocol enabled
## Container Deployment
### Example 10: Containerized NodePass
### Example 17: Containerized NodePass
Deploy NodePass in a Docker environment:
@@ -196,7 +342,7 @@ This configuration:
## Master API Management
### Example 11: Centralized Management
### Example 18: Centralized Management
Set up a central controller for multiple NodePass instances:
@@ -233,7 +379,7 @@ This setup:
- Offers a RESTful API for automation and integration
- Includes a built-in Swagger UI at http://localhost:9090/api/v1/docs
### Example 12: Custom API Prefix
### Example 19: Custom API Prefix
Use a custom API prefix for the master mode:
@@ -252,6 +398,44 @@ This allows:
- Custom URL paths for security or organizational purposes
- Swagger UI access at http://localhost:9090/admin/v1/docs
### Example 20: Real-time Connection and Traffic Monitoring
Monitor instance connection counts and traffic statistics through the master API:
```bash
# Get detailed instance information including connection count statistics
curl -H "X-API-Key: your-api-key" http://localhost:9090/api/v1/instances/{id}
# Example response (including TCPS and UDPS fields)
{
"id": "a1b2c3d4",
"alias": "web-proxy",
"type": "server",
"status": "running",
"url": "server://0.0.0.0:10101/127.0.0.1:8080",
"restart": true,
"pool": 64,
"ping": 25,
"tcps": 12,
"udps": 5,
"tcprx": 1048576,
"tcptx": 2097152,
"udprx": 512000,
"udptx": 256000
}
# Use SSE to monitor real-time status changes for all instances
curl -H "X-API-Key: your-api-key" \
-H "Accept: text/event-stream" \
http://localhost:9090/api/v1/events
```
This monitoring setup provides:
- **Real-time connection tracking**: TCPS and UDPS fields show current active connection counts
- **Performance analysis**: Evaluate system load through connection and traffic data
- **Capacity planning**: Resource planning based on historical connection data
- **Troubleshooting**: Abnormal connection count changes may indicate network issues
## Next Steps
Now that you've seen various usage examples, you might want to:

View File

@@ -27,6 +27,10 @@ NodePass creates a network architecture with separate channels for control and d
4. **Client Mode Operation**:
- Connects to the server's control channel
- **Handshake Phase**: After server validates the tunnel key, it delivers configuration to client:
- Data flow direction mode (determines whether client receives or sends traffic)
- Maximum connection pool capacity (centrally managed and allocated by server)
- TLS security level (ensures client uses correct encryption mode)
- Listens for signals indicating incoming connections
- Creates data connections using the TLS security level specified by the server
- Forwards data between the secure channel and local target
@@ -35,12 +39,12 @@ NodePass creates a network architecture with separate channels for control and d
5. **Client Single-End Forwarding Mode**:
- Automatically enabled when tunnel address is a local address (e.g., 127.0.0.1)
- Client directly listens on local port without server control channel coordination
- Uses connection pooling technology for TCP connections to significantly improve forwarding performance
- Uses direct connection establishment for both TCP and UDP protocols
- Suitable for pure local forwarding scenarios, reducing network overhead and latency
- Supports high-performance single-end forwarding for both TCP and UDP protocols
- Supports high-performance single-end forwarding with optimized connection handling
6. **Protocol Support**:
- **TCP**: Full bidirectional streaming with persistent connections, supports connection pool optimization in client single-end forwarding mode
- **TCP**: Full bidirectional streaming with persistent connections, optimized for direct connection establishment in client single-end forwarding mode
- **UDP**: Datagram forwarding with configurable buffer sizes and timeouts
## Data Transmission Flow
@@ -48,9 +52,9 @@ NodePass creates a network architecture with separate channels for control and d
NodePass establishes a bidirectional data flow through its tunnel architecture, supporting both TCP and UDP protocols. The system supports three data flow modes:
### Data Flow Mode Explanation
- **Server Receives Mode (dataFlow: "-")**: Server listens on target address, client listens locally, data flows from target address to client local
- **Server Sends Mode (dataFlow: "+")**: Server connects to remote target address, client listens locally, data flows from client local to remote target
- **Client Single-End Forwarding Mode**: Client directly listens locally and forwards to target address without server coordination, using connection pooling technology for high-performance forwarding
- **Server Receives Mode**: Server listens on target address, client listens locally, data flows from target address to client local
- **Server Sends Mode**: Server connects to remote target address, client listens locally, data flows from client local to remote target
- **Client Single-End Forwarding Mode**: Client directly listens locally and forwards to target address without server coordination, using direct connection establishment for optimized forwarding
The data flow mode is automatically determined based on tunnel address and target address:
- If tunnel address is a local address (localhost, 127.0.0.1, etc.), enables Client Single-End Forwarding Mode
@@ -128,34 +132,33 @@ The data flow mode is automatically determined based on tunnel address and targe
- Directly starts TCP or UDP listener on specified tunnel port
- No need to connect to remote server, achieving zero-latency startup
3. **Connection Pool Initialization** (TCP Only):
3. **Direct Connection Establishment**:
```
[Client] → [Initialize Target Connection Pool] → [Pre-establish Connections to Target Address]
[Client] → [Create Direct Connection to Target Address] → [Establish Target Connection]
```
- Creates high-performance connection pool for TCP forwarding
- Pre-establishes multiple connections to target address, significantly reducing connection establishment latency
- Connection pool size can be dynamically adjusted based on concurrent demand
- For TCP: Directly establishes TCP connection to target address for each tunnel connection
- For UDP: Creates UDP socket for datagram exchange with target address
- Eliminates connection pool overhead, providing simpler and more direct forwarding path
4. **High-Performance Forwarding**:
4. **Optimized Forwarding**:
```
[Local Connection] → [Get Target Connection from Pool] → [Direct Data Exchange] → [Connection Reuse or Release]
[Local Connection] → [Direct Target Connection] → [Data Exchange] → [Connection Cleanup]
```
- For TCP: Quickly gets pre-established target connection from pool for efficient data exchange
- For UDP: Directly forwards datagrams to target address without connection pool
- Optimized data path minimizing forwarding overhead and latency
- For TCP: Direct connection establishment followed by efficient data exchange
- For UDP: Direct datagram forwarding to target address with minimal latency
- Simplified data path ensuring reliable and efficient forwarding
### Protocol-Specific Characteristics
- **TCP Exchange**:
- Persistent connections for full-duplex communication
- Continuous data streaming until connection termination
- Error handling with automatic reconnection
- **Client Single-End Forwarding Optimization**: Pre-established connections through connection pooling technology, significantly reducing connection establishment latency
- Automatic error handling with reconnection capability
- **Client Single-End Forwarding Optimization**: Direct connection establishment for each tunnel connection, ensuring reliable and efficient forwarding
- **UDP Exchange**:
- One-time datagram forwarding with configurable buffer sizes (`UDP_DATA_BUF_SIZE`)
- Read timeout control for response waiting (`UDP_READ_TIMEOUT`)
- Optimized for low-latency, stateless communications
- **Client Single-End Forwarding Optimization**: Direct forwarding mechanism without connection pool, achieving minimal latency
- One-shot datagram forwarding with configurable buffer sizes (`UDP_DATA_BUF_SIZE`)
- Read timeout control for response waiting (`read` parameter or default 10m)
- Optimized for low-latency, stateless communication
- **Client Single-End Forwarding Optimization**: Direct forwarding mechanism with minimal latency
## Signal Communication Mechanism
@@ -206,105 +209,308 @@ NodePass uses a sophisticated URL-based signaling protocol through the TCP tunne
## Connection Pool Architecture
NodePass implements an efficient connection pooling system for managing network connections:
NodePass implements an efficient connection pooling system for managing network connections, which forms the core of its performance advantages:
### Design Philosophy
The connection pool design follows the principle of "warm-up over cold start," eliminating network latency through pre-established connections. This design philosophy draws from modern high-performance server best practices, amortizing the cost of connection establishment to the system startup phase rather than bearing this overhead on the critical path.
### Pool Design
1. **Pool Types**:
- **Client Pool**: Pre-establishes connections to the remote endpoint
- **Server Pool**: Manages incoming connections from clients
- **Client Pool**: Pre-establishes connections to the remote endpoint with active connection management
- **Server Pool**: Manages incoming connections from clients with passive connection acceptance
2. **Pool Components**:
- **Connection Storage**: Thread-safe map of connection IDs to net.Conn objects
- **ID Channel**: Buffered channel for available connection IDs
- **Capacity Management**: Dynamic adjustment based on usage patterns
- **Interval Control**: Time-based throttling between connection creations
- **Connection Factory**: Customizable connection creation function
- **Connection Storage**: Thread-safe map of connection IDs to net.Conn objects, supporting high-concurrency access
- **ID Channel**: Buffered channel for available connection IDs, enabling lock-free rapid allocation
- **Capacity Management**: Dynamic adjustment based on usage patterns, implementing intelligent scaling
- Minimum capacity set by client, ensuring basic connection guarantee for client
- Maximum capacity delivered by server during handshake, enabling global resource coordination
- **Interval Control**: Time-based throttling between connection creations, preventing network resource overload
- **Connection Factory**: Customizable connection creation function, supporting different TLS modes and network configurations
### Advanced Design Features
1. **Zero-Latency Connections**:
- Pre-established connection pools eliminate TCP three-way handshake delays
- TLS handshakes complete during connection pool initialization, avoiding runtime encryption negotiation overhead
- Connection warm-up strategies ensure hot connections are always available in the pool
2. **Intelligent Load Awareness**:
- Dynamic pool management based on real-time connection utilization
- Predictive connection creation based on historical usage patterns
- Adaptive timeout and retry mechanisms responding to network fluctuations
### Connection Lifecycle
1. **Connection Creation**:
- Connections are created up to the configured capacity
- Each connection is assigned a unique ID
- IDs and connections are stored in the pool
- Connections are created up to the configured capacity, ensuring resource controllability
- Each connection is assigned a unique ID, supporting precise connection tracking and management
- IDs and connections are stored in the pool with copy-on-write and delayed deletion strategies
2. **Connection Acquisition**:
- Client retrieves connections using connection IDs
- Server retrieves the next available connection from the pool
- Connections are validated before being returned
- Client retrieves connections using connection IDs, supporting precise matching and fast lookups
- Server retrieves the next available connection from the pool using round-robin or least-used strategies
- Connections are validated before being returned, including network status and TLS session checks
3. **Connection Usage**:
- Connection is removed from the pool when acquired
- Used for data exchange between endpoints
- No connection reuse (one-time use model)
- Connection is removed from the pool when acquired, avoiding reuse conflicts
- Used for data exchange between endpoints with efficient zero-copy transmission
- One-time use model ensures connection state cleanliness
4. **Connection Termination**:
- Connections are closed after use
- Resources are properly released
- Error handling ensures clean termination
- Connections are closed immediately after use, preventing resource leaks
- Proper release of system resources including file descriptors and memory buffers
- Error handling ensures clean termination under exceptional conditions
### Session Management and State Maintenance
1. **Stateful UDP Processing**:
- Converts stateless UDP protocol into stateful session handling
- Intelligent session timeout management, balancing resource usage and responsiveness
- Session reuse mechanisms, reducing connection establishment overhead
2. **TCP Connection Reuse**:
- Long connection keep-alive technology, reducing connection establishment/closure overhead
- Intelligent connection reuse strategies, maximizing connection utilization
- Connection health checks, ensuring reliability of reused connections
3. **Cross-Protocol Unified Management**:
- Unified connection lifecycle management, simplifying system complexity
- Protocol-agnostic monitoring and statistics, providing consistent observability experience
- Flexible protocol conversion capabilities, supporting heterogeneous network environments
## Signal Communication and Coordination Mechanisms
NodePass's signaling system embodies the essence of distributed system design:
### Signal Design Principles
1. **Event-Driven Architecture**:
- Event-based asynchronous communication patterns, avoiding blocking waits
- Publish-subscribe pattern for signal distribution, supporting multiple subscribers
- Signal priority management, ensuring timely processing of critical events
2. **Reliability Guarantees**:
- Signal persistence mechanisms, preventing critical signal loss
- Retry and acknowledgment mechanisms, ensuring reliable signal delivery
- Idempotent signal design, avoiding side effects from repeated execution
3. **Performance Optimization**:
- Batch signal processing, reducing system call overhead
- Signal compression and merging, optimizing network bandwidth usage
- Asynchronous signal processing, avoiding blocking of main processing flows
### Distributed Coordination
1. **Consistency Guarantees**:
- Distributed locking mechanisms, ensuring atomicity of critical operations
- State synchronization protocols, maintaining data consistency across multiple nodes
- Conflict resolution strategies, handling race conditions in concurrent operations
2. **Fault Handling**:
- Node failure detection, timely discovery and isolation of failed nodes
- Automatic failover, ensuring service continuity
- State recovery mechanisms, supporting rapid recovery after failures
### Pool Management
1. **Capacity Control**:
- `MIN_POOL_CAPACITY`: Ensures minimum available connections
- `MAX_POOL_CAPACITY`: Prevents excessive resource consumption
- Dynamic scaling based on demand patterns
- Minimum capacity guarantee: Ensures sufficient warm connections are always available
- Maximum capacity limit: Prevents excessive resource consumption, protecting system stability
- Dynamic scaling based on demand patterns, responding to traffic changes
2. **Interval Control**:
- `MIN_POOL_INTERVAL`: Minimum time between connection creation attempts
- `MAX_POOL_INTERVAL`: Maximum time between connection creation attempts
- Minimum interval limit: Prevents connection creation storms, protecting network resources
- Maximum interval limit: Ensures timely response to connection demands
- Adaptive time-based throttling to optimize resource usage
3. **Dynamic Pool Adaptation**:
The connection pool employs a dual-adaptive mechanism to ensure optimal performance:
**A. Capacity Adjustment**
- Pool capacity dynamically adjusts based on real-time usage patterns
- If connection creation success rate is low (<20%), capacity decreases to minimize resource waste
- If connection creation success rate is high (>80%), capacity increases to accommodate higher traffic
- Gradual scaling prevents oscillation and provides stability
- Respects configured minimum and maximum capacity boundaries
- Pool capacity dynamically adjusts based on real-time usage patterns, implementing intelligent scaling
- Feedback adjustment based on connection creation success rate: contracts capacity during low success rates to reduce resource waste
- Expands capacity during high success rates to meet growing demands
- Gradual scaling prevents system oscillation, providing smooth performance transitions
- Strictly respects configured capacity boundaries, ensuring system controllability
**B. Interval Adjustment**
- Creation intervals adapt based on pool idle connection count
- When idle connections are low (<20% of capacity), intervals decrease toward min interval
- When idle connections are high (>80% of capacity), intervals increase toward max interval
- Prevents overwhelming network resources during periods of low demand
- Accelerates connection creation during high demand periods when pool is depleting
- Creation intervals adapt based on pool idle connection count in real-time
- Accelerates connection creation during low idle rates, ensuring adequate supply
- Slows creation pace during high idle rates, avoiding resource waste
- Prevents pressure on network resources during low-demand periods
- Accelerates connection creation during high-demand periods when pool is depleting, ensuring service quality
4. **Performance Optimization Strategies**:
- **Predictive Scaling**: Forecasts future demands based on historical usage patterns
- **Tiered Connection Management**: Different priority connections use different management strategies
- **Batch Operation Optimization**: Bulk creation and destruction of connections, reducing system call overhead
- **Connection Affinity**: Intelligent connection allocation based on geographic location or network topology
## Data Exchange Mechanisms
NodePass's data exchange mechanisms embody modern network programming best practices:
### High-Performance Data Transfer
1. **Zero-Copy Architecture**:
- Data transfers directly in kernel space, avoiding multiple copies in user space
- Reduces CPU overhead and memory bandwidth consumption
- Supports optimized transmission for large files and high-throughput scenarios
2. **Asynchronous I/O Model**:
- Non-blocking event-driven architecture maximizes concurrent processing capabilities
- Efficient event loops based on epoll/kqueue
- Intelligent read/write buffer management, balancing memory usage and performance
3. **Traffic Statistics and Monitoring**:
- Real-time byte-level traffic statistics, supporting precise bandwidth control
- Protocol-specific traffic analysis, facilitating performance tuning
- Connection-level performance metrics, supporting fine-grained monitoring
- Real-time tracking of active TCP and UDP connection counts for capacity planning and performance analysis
### Protocol Optimization
1. **TCP Optimization**:
- Intelligent TCP_NODELAY configuration, reducing small packet delays
- Keep-alive mechanisms ensure long connection reliability
- Adaptive selection of congestion control algorithms
2. **UDP Optimization**:
- Session-based UDP processing, supporting stateful datagram exchange
- Intelligent timeout management, balancing responsiveness and resource usage
- Datagram deduplication and out-of-order processing
## Master API Architecture
In master mode, NodePass provides a RESTful API for centralized management:
In master mode, NodePass provides a RESTful API for centralized management, embodying cloud-native architectural design principles:
### Architectural Design Philosophy
Master mode adopts a "unified management, distributed execution" architecture pattern, separating the control plane from the data plane. This design gives the system enterprise-grade manageability and observability while maintaining high-performance data transmission.
### API Components
1. **HTTP/HTTPS Server**:
- Listens on configured address and port
- Optional TLS encryption with same modes as tunnel server
- Configurable API prefix path
- Listens on configured address and port, supporting flexible network deployment
- Optional TLS encryption with same security modes as tunnel server, ensuring management channel security
- Configurable API prefix path, supporting reverse proxy and API gateway integration
2. **Instance Management**:
- In-memory registry of NodePass instances
- UID-based instance identification
- State tracking for each instance (running, stopped, etc.)
- High-performance memory-based instance registry, supporting fast queries and updates
- UID-based instance identification, ensuring global uniqueness
- State tracking for each instance (running, stopped, etc.), supporting real-time status monitoring
3. **RESTful Endpoints**:
- Standard CRUD operations for instances
- Instance control actions (start, stop, restart)
- Health status reporting
- OpenAPI specification for API documentation
- Standard CRUD operations following REST design principles
- Instance control actions (start, stop, restart), supporting remote lifecycle management
- Health status reporting, providing real-time system health information
- OpenAPI specification support, facilitating API documentation generation and client development
### Instance Lifecycle Management
1. **Instance Creation**:
- URL-based configuration similar to command line
- Dynamic initialization based on instance type
- Parameter validation before instance creation
- URL-based configuration similar to command line, reducing learning curve
- Dynamic initialization based on instance type, supporting multiple deployment modes
- Parameter validation before instance creation, ensuring configuration correctness
2. **Instance Control**:
- Start/stop/restart capabilities
- Graceful shutdown with configurable timeout
- Resource cleanup on termination
- Start/stop/restart capabilities, supporting remote operations
- Graceful shutdown with configurable timeout, ensuring data integrity
- Resource cleanup on termination, preventing resource leaks
3. **API Security**:
- TLS encryption options for API connections
- Same security modes as tunnel server
- Certificate management for HTTPS
- TLS encryption options for API connections, protecting management communication security
- Same security modes as tunnel server, unified security policies
- Certificate management support, simplifying HTTPS deployment
## System Architecture Advancement
### Layered Decoupling Design
NodePass adopts layered design principles of modern software architecture:
1. **Transport Layer Separation**:
- Complete separation of control and data channels, avoiding control information interference with data transmission
- Independent optimization for different protocols, TCP and UDP each using optimal strategies
- Multiplexing support, single tunnel carrying multiple application connections
2. **Pluggable Security Layer**:
- Modular TLS implementation, supporting flexible selection of different security levels
- Automated certificate management, reducing operational complexity
- Key rotation mechanisms, enhancing long-term security
3. **Cloud-Native Management Layer**:
- API-first design philosophy, all functions accessible through APIs
- Container-friendly configuration methods, supporting modern DevOps practices
- Stateless design, facilitating horizontal scaling
### Performance Optimization Philosophy
1. **Latency Optimization**:
- Pre-connection pools eliminate cold start latency
- Intelligent routing reduces network hops
- Batch processing reduces system call overhead
2. **Throughput Optimization**:
- Zero-copy data transmission maximizes bandwidth utilization
- Concurrent connection management supports high-concurrency scenarios
- Adaptive buffer sizing optimizes memory usage
3. **Resource Optimization**:
- Intelligent connection reuse reduces resource consumption
- Dynamic capacity adjustment adapts to load changes
- Garbage collection optimization reduces pause times
### Reliability Guarantees
1. **Fault Isolation**:
- Connection-level fault isolation, single point failures don't affect overall service
- Automatic reconnection mechanisms, transparently handling network fluctuations
- Graceful degradation strategies, ensuring core functionality under resource constraints
2. **State Management**:
- Distributed state synchronization, ensuring consistency across multiple instances
- Persistence of critical state, supporting failure recovery
- Versioned configuration management, supporting rollback operations
## NodePass Architecture Innovation Summary
### Technical Innovation Points
1. **Connection Pool Warm-up Technology**:
- Revolutionarily eliminates cold start latency in network tunnels
- Transforms traditional "connect-on-demand" to "pre-warm-and-ready"
- Significantly improves first connection response speed
2. **Separated Architecture Design**:
- Complete separation of control plane and data plane
- Independent optimization of signaling and data channels
- Achieves perfect combination of high performance and high manageability
3. **Adaptive Resource Management**:
- Intelligent scaling based on real-time load
- Predictive resource allocation strategies
- Self-healing resilient system design
### Industry-Leading Advantages
1. **Performance Advantages**:
- Zero-latency connection establishment, industry-leading response speed
- High concurrency processing capabilities, supporting enterprise-grade application scenarios
- Intelligent routing optimization, shortest path data transmission
2. **Reliability Advantages**:
- Multi-layer fault isolation and recovery mechanisms
- High availability guarantees of distributed architecture
- Graceful degradation service quality assurance
3. **Security Advantages**:
- End-to-end encryption protection
- Multi-layer security protection system
- Compliance with enterprise-grade security standards
### Applicable Scenarios and Value
1. **Enterprise Applications**:
- Service mesh for microservice architectures
- Network connections in hybrid cloud environments
- Cross-regional service access
2. **Development and Operations**:
- Rapid setup of local development environments
- Flexible configuration of test environments
- Traffic management in production environments
3. **Network Optimization**:
- Significant reduction in network latency
- Notable improvement in bandwidth utilization
- Reliable guarantee of connection stability
NodePass, through its innovative architectural design and technical implementation, provides a high-performance, high-reliability, high-security tunnel solution for modern network applications, representing the future direction of network tunnel technology.
## Next Steps

View File

@@ -67,10 +67,10 @@ docker run -d --name nodepass-server -p 10101:10101 -p 8080:8080 \
# Run in client mode
docker run -d --name nodepass-client \
-e MIN_POOL_CAPACITY=32 \
-e MAX_POOL_CAPACITY=512 \
-e NP_MIN_POOL_INTERVAL=200ms \
-e NP_SEMAPHORE_LIMIT=512 \
-p 8080:8080 \
ghcr.io/yosebyte/nodepass client://nodepass-server:10101/127.0.0.1:8080
ghcr.io/yosebyte/nodepass "client://nodepass-server:10101/127.0.0.1:8080?min=32&max=512"
```
### Option 5: Using Management Script (Linux Only)

View File

@@ -69,12 +69,13 @@ This guide helps you diagnose and resolve common issues you might encounter when
- Check file descriptor limits with `ulimit -n` on Linux/macOS
3. **Timeout Configuration**
- Adjust `UDP_READ_TIMEOUT` if using UDP with slow response times
- Consider adjusting TCP keepalive settings at the OS level for long-lived connections
- Adjust `NP_UDP_DIAL_TIMEOUT` if using UDP with slow response times
- Increase `read` parameter in URL for long-running transfers (default: 10m)
- Consider adjusting `NP_TCP_DIAL_TIMEOUT` for unstable network conditions
4. **Overloaded Server**
- Check server logs for signs of connection overload
- Adjust `MAX_POOL_CAPACITY` and `SEMAPHORE_LIMIT` to handle the load
- Adjust `max` parameter and `NP_SEMAPHORE_LIMIT` to handle the load
- Consider scaling horizontally with multiple NodePass instances
## Certificate Issues
@@ -135,9 +136,9 @@ This guide helps you diagnose and resolve common issues you might encounter when
**Possible Causes and Solutions**:
1. **Pool Configuration**
- Increase `MIN_POOL_CAPACITY` to have more connections ready
- Increase `min` parameter to have more connections ready
- Decrease `NP_MIN_POOL_INTERVAL` to create connections faster
- Adjust `SEMAPHORE_LIMIT` if connection queue is backing up
- Adjust `NP_SEMAPHORE_LIMIT` if connection queue is backing up
2. **Network Path**
- Check for network congestion or high-latency links
@@ -162,7 +163,7 @@ This guide helps you diagnose and resolve common issues you might encounter when
1. **Pool Thrashing**
- If pool is constantly creating and destroying connections, adjust timings
- Increase `NP_MIN_POOL_INTERVAL` to reduce connection creation frequency
- Find a good balance for `MIN_POOL_CAPACITY` and `MAX_POOL_CAPACITY`
- Find a good balance for `min` and `max` pool parameters
2. **Excessive Logging**
- Reduce log level from debug to info or warn for production use
@@ -184,12 +185,12 @@ This guide helps you diagnose and resolve common issues you might encounter when
**Possible Causes and Solutions**:
1. **Connection Leaks**
- Ensure `SHUTDOWN_TIMEOUT` is sufficient to properly close connections
- Ensure `NP_SHUTDOWN_TIMEOUT` is sufficient to properly close connections
- Check for proper error handling in custom scripts or management code
- Monitor connection counts with system tools like `netstat`
2. **Pool Size Issues**
- If `MAX_POOL_CAPACITY` is very large, memory usage will be higher
- If `max` parameter is very large, memory usage will be higher
- Monitor actual pool usage vs. configured capacity
- Adjust capacity based on actual concurrent connection needs
@@ -210,7 +211,8 @@ This guide helps you diagnose and resolve common issues you might encounter when
- Default of 8192 bytes may be too small for some applications
2. **Timeout Issues**
- If responses are slow, increase `UDP_READ_TIMEOUT`
- If responses are slow, increase `NP_UDP_DIAL_TIMEOUT`
- Adjust `read` parameter for longer session timeouts
- For applications with variable response times, find an optimal balance
3. **High Packet Rate**
@@ -277,6 +279,49 @@ This guide helps you diagnose and resolve common issues you might encounter when
- Ensure the NodePass master has sufficient permissions to create processes
- Check file system permissions for any referenced certificates or keys
## Data Recovery
### Master State File Corruption
**Symptoms**: Master mode fails to start, reporting state file corruption errors, or instance data is lost.
**Possible Causes and Solutions**:
1. **Recovery using automatic backup file**
- NodePass automatically creates backup file `nodepass.gob.backup` every hour
- Stop the NodePass master service
- Copy backup file as main file: `cp nodepass.gob.backup nodepass.gob`
- Restart the master service
2. **Manual state file recovery**
```bash
# Stop NodePass service
pkill nodepass
# Backup corrupted file (optional)
mv nodepass.gob nodepass.gob.corrupted
# Use backup file
cp nodepass.gob.backup nodepass.gob
# Restart service
nodepass "master://0.0.0.0:9090?log=info"
```
3. **When backup file is also corrupted**
- Remove corrupted state files: `rm nodepass.gob*`
- Restart master, which will create new state file
- Need to reconfigure all instances and settings
4. **Preventive backup recommendations**
- Regularly backup `nodepass.gob` to external storage
- Adjust backup frequency: set environment variable `export NP_RELOAD_INTERVAL=30m`
- Monitor state file size, abnormal growth may indicate issues
**Best Practices**:
- In production environments, recommend regularly backing up `nodepass.gob` to different storage locations
- Use configuration management tools to save text-form backups of instance configurations
## Next Steps
If you encounter issues not covered in this guide:

View File

@@ -7,7 +7,7 @@ NodePass creates tunnels with an unencrypted TCP control channel and configurabl
The general syntax for NodePass commands is:
```bash
nodepass "<core>://<tunnel_addr>/<target_addr>?log=<level>&tls=<mode>&crt=<cert_file>&key=<key_file>&min=<min_pool>&max=<max_pool>"
nodepass "<core>://<tunnel_addr>/<target_addr>?log=<level>&tls=<mode>&crt=<cert_file>&key=<key_file>&min=<min_pool>&max=<max_pool>&mode=<run_mode>&read=<timeout>&rate=<mbps>&proxy=<mode>"
```
Where:
@@ -19,8 +19,12 @@ Where:
Common query parameters:
- `log=<level>`: Log verbosity level (`none`, `debug`, `info`, `warn`, `error`, or `event`)
- `min=<min_pool>`: Minimum connection pool capacity (default: 64, client mode only)
- `max=<max_pool>`: Maximum connection pool capacity (default: 1024, client mode only)
- `min=<min_pool>`: Minimum connection pool capacity (default: 64, set by client)
- `max=<max_pool>`: Maximum connection pool capacity (default: 1024, set by server and delivered to client)
- `mode=<run_mode>`: Run mode control (`0`, `1`, or `2`) - controls operational behavior
- `read=<timeout>`: Data read timeout duration (default: 10m, supports time units like 30s, 5m, 30m, etc.)
- `rate=<mbps>`: Bandwidth rate limit in Mbps (default: 0 for unlimited)
- `proxy=<mode>`: PROXY protocol support (default: `0`, `1` enables PROXY protocol v1 header transmission)
TLS-related parameters (server/master modes only):
- `tls=<mode>`: TLS security level for data channels (`0`, `1`, or `2`)
@@ -36,7 +40,7 @@ NodePass offers three complementary operating modes to suit various deployment s
Server mode establishes tunnel control channels and supports bidirectional data flow forwarding.
```bash
nodepass "server://<tunnel_addr>/<target_addr>?log=<level>&tls=<mode>&crt=<cert_file>&key=<key_file>"
nodepass "server://<tunnel_addr>/<target_addr>?log=<level>&tls=<mode>&crt=<cert_file>&key=<key_file>&max=<max_pool>&mode=<run_mode>&read=<timeout>&rate=<mbps>&proxy=<mode>"
```
#### Parameters
@@ -50,18 +54,31 @@ nodepass "server://<tunnel_addr>/<target_addr>?log=<level>&tls=<mode>&crt=<cert_
- `2`: Custom certificate (requires `crt` and `key` parameters)
- `crt`: Path to certificate file (required when `tls=2`)
- `key`: Path to private key file (required when `tls=2`)
- `max`: Maximum connection pool capacity (default: 1024)
- `mode`: Run mode control for data flow direction
- `0`: Automatic detection (default) - attempts local binding first, falls back if unavailable
- `1`: Force reverse mode - server binds to target address locally and receives traffic
- `2`: Force forward mode - server connects to remote target address
- `read`: Data read timeout duration (default: 10m, supports time units like 30s, 5m, 30m, etc.)
- `rate`: Bandwidth rate limit (default: 0 means no limit)
- `proxy`: PROXY protocol support (default: `0`, `1` enables PROXY protocol v1 header before data transfer)
#### How Server Mode Works
In server mode, NodePass supports two data flow directions:
Server mode supports automatic mode detection or forced mode selection through the `mode` parameter:
**Mode 1: Server Receives Traffic** (target_addr is local address)
**Mode 0: Automatic Detection** (default)
- Attempts to bind to `target_addr` locally first
- If successful, operates in reverse mode (server receives traffic)
- If binding fails, operates in forward mode (server sends traffic)
**Mode 1: Reverse Mode** (server receives traffic)
1. Listens for TCP tunnel connections (control channel) on `tunnel_addr`
2. Listens for incoming TCP and UDP traffic on `target_addr`
2. Binds to and listens for incoming TCP and UDP traffic on `target_addr`
3. When a connection arrives at `target_addr`, it signals the connected client through the control channel
4. Creates a data channel for each connection with the specified TLS encryption level
**Mode 2: Server Sends Traffic** (target_addr is remote address)
**Mode 2: Forward Mode** (server sends traffic)
1. Listens for TCP tunnel connections (control channel) on `tunnel_addr`
2. Waits for clients to listen locally and receive connections through the tunnel
3. Establishes connections to remote `target_addr` and forwards data
@@ -69,14 +86,14 @@ In server mode, NodePass supports two data flow directions:
#### Examples
```bash
# No TLS encryption for data channel - Server receives mode
# Automatic mode detection with no TLS encryption
nodepass "server://10.1.0.1:10101/10.1.0.1:8080?log=debug&tls=0"
# Self-signed certificate (auto-generated) - Server sends mode
nodepass "server://10.1.0.1:10101/192.168.1.100:8080?log=debug&tls=1"
# Force reverse mode with self-signed certificate
nodepass "server://10.1.0.1:10101/10.1.0.1:8080?log=debug&tls=1&mode=1"
# Custom domain certificate - Server receives mode
nodepass "server://10.1.0.1:10101/10.1.0.1:8080?log=debug&tls=2&crt=/path/to/cert.pem&key=/path/to/key.pem"
# Force forward mode with custom certificate
nodepass "server://10.1.0.1:10101/192.168.1.100:8080?log=debug&tls=2&mode=2&crt=/path/to/cert.pem&key=/path/to/key.pem"
```
### Client Mode
@@ -84,7 +101,7 @@ nodepass "server://10.1.0.1:10101/10.1.0.1:8080?log=debug&tls=2&crt=/path/to/cer
Client mode connects to a NodePass server and supports bidirectional data flow forwarding.
```bash
nodepass "client://<tunnel_addr>/<target_addr>?log=<level>&min=<min_pool>&max=<max_pool>"
nodepass "client://<tunnel_addr>/<target_addr>?log=<level>&min=<min_pool>&mode=<run_mode>&read=<timeout>&rate=<mbps>&proxy=<mode>"
```
#### Parameters
@@ -93,47 +110,59 @@ nodepass "client://<tunnel_addr>/<target_addr>?log=<level>&min=<min_pool>&max=<m
- `target_addr`: The destination address for business data with bidirectional flow support (e.g., 127.0.0.1:8080)
- `log`: Log level (debug, info, warn, error, event)
- `min`: Minimum connection pool capacity (default: 64)
- `max`: Maximum connection pool capacity (default: 1024)
- `mode`: Run mode control for client behavior
- `0`: Automatic detection (default) - attempts local binding first, falls back to handshake mode
- `1`: Force single-end forwarding mode - local proxy with connection pooling
- `2`: Force dual-end handshake mode - requires server coordination
- `read`: Data read timeout duration (default: 10m, supports time units like 30s, 5m, 30m, etc.)
- `rate`: Bandwidth rate limit (default: 0 means no limit)
- `proxy`: PROXY protocol support (default: `0`, `1` enables PROXY protocol v1 header before data transfer)
#### How Client Mode Works
In client mode, NodePass supports three operating modes:
Client mode supports automatic mode detection or forced mode selection through the `mode` parameter:
**Mode 1: Client Single-End Forwarding** (when tunnel address is local)
**Mode 0: Automatic Detection** (default)
- Attempts to bind to `tunnel_addr` locally first
- If successful, operates in single-end forwarding mode
- If binding fails, operates in dual-end handshake mode
**Mode 1: Single-End Forwarding Mode**
1. Listens for TCP and UDP connections on the local tunnel address
2. Uses connection pooling technology to pre-establish TCP connections to target address, eliminating connection latency
3. Directly forwards received traffic to the target address with high performance
4. No handshake with server required, enables point-to-point direct forwarding
5. Suitable for local proxy and simple forwarding scenarios
**Mode 2: Client Receives Traffic** (when server sends traffic)
1. Connects to the server's TCP tunnel endpoint (control channel)
2. Listens locally and waits for connections through the tunnel
3. Establishes connections to local `target_addr` and forwards data
**Mode 2: Dual-End Handshake Mode**
- **Client Receives Traffic** (when server sends traffic)
1. Connects to the server's TCP tunnel endpoint (control channel)
2. Listens locally and waits for connections through the tunnel
3. Establishes connections to local `target_addr` and forwards data
**Mode 3: Client Sends Traffic** (when server receives traffic)
1. Connects to the server's TCP tunnel endpoint (control channel)
2. Listens for signals from the server through this control channel
3. When a signal is received, establishes a data connection with the TLS security level specified by the server
4. Creates a connection to `target_addr` and forwards traffic
- **Client Sends Traffic** (when server receives traffic)
1. Connects to the server's TCP tunnel endpoint (control channel)
2. Listens for signals from the server through this control channel
3. When a signal is received, establishes a data connection with the TLS security level specified by the server
4. Creates a connection to `target_addr` and forwards traffic
#### Examples
```bash
# Client single-end forwarding mode - Local proxy listening on port 1080, forwarding to target server
nodepass client://127.0.0.1:1080/target.example.com:8080?log=debug
# Automatic mode detection - Local proxy listening on port 1080, forwarding to target server
nodepass "client://127.0.0.1:1080/target.example.com:8080?log=debug"
# Connect to a NodePass server and adopt its TLS security policy - Client sends mode
nodepass client://server.example.com:10101/127.0.0.1:8080
# Force single-end forwarding mode - High performance local proxy
nodepass "client://127.0.0.1:1080/target.example.com:8080?mode=1&log=debug"
# Connect with debug logging - Client receives mode
nodepass client://server.example.com:10101/192.168.1.100:8080?log=debug
# Force dual-end handshake mode - Connect to NodePass server and adopt its TLS security policy
nodepass "client://server.example.com:10101/127.0.0.1:8080?mode=2"
# Custom connection pool capacity - High performance configuration
nodepass "client://server.example.com:10101/127.0.0.1:8080?min=128&max=4096"
# Connect with debug logging and custom connection pool capacity
nodepass "client://server.example.com:10101/192.168.1.100:8080?log=debug&min=128"
# Resource-constrained configuration - Small connection pool
nodepass "client://server.example.com:10101/127.0.0.1:8080?min=16&max=512&log=info"
# Resource-constrained configuration with forced mode
nodepass "client://server.example.com:10101/127.0.0.1:8080?mode=2&min=16&log=info"
```
### Master Mode (API)
@@ -168,11 +197,16 @@ In master mode, NodePass:
All endpoints are relative to the configured prefix (default: `/api`):
**Protected Endpoints (Require API Key):**
- `GET {prefix}/v1/instances` - List all instances
- `POST {prefix}/v1/instances` - Create a new instance with JSON body: `{"url": "server://0.0.0.0:10101/0.0.0.0:8080"}`
- `GET {prefix}/v1/instances/{id}` - Get instance details
- `PATCH {prefix}/v1/instances/{id}` - Update instance with JSON body: `{"action": "start|stop|restart"}`
- `DELETE {prefix}/v1/instances/{id}` - Delete instance
- `GET {prefix}/v1/events` - Server-Sent Events stream (SSE)
- `GET {prefix}/v1/info` - Get system information
**Public Endpoints (No API Key Required):**
- `GET {prefix}/v1/openapi.json` - OpenAPI specification
- `GET {prefix}/v1/docs` - Swagger UI documentation
@@ -196,28 +230,53 @@ nodepass "master://0.0.0.0:9090?log=info&tls=2&crt=/path/to/cert.pem&key=/path/t
### Creating and Managing via API
You can use standard HTTP requests to manage NodePass instances through the master API:
NodePass master mode provides RESTful API for instance management, and all API requests require authentication using an API Key.
#### API Key Retrieval
When starting master mode, the system automatically generates an API Key and displays it in the logs:
```bash
# Create and manage instances via API (using default prefix)
# Start master mode
nodepass "master://0.0.0.0:9090?log=info"
# The log output will show:
# INFO: API Key created: abc123def456...
```
#### API Request Examples
All protected API endpoints require the `X-API-Key` header:
```bash
# Get API Key (assume: abc123def456789)
# Create instance via API (using default prefix)
curl -X POST http://localhost:9090/api/v1/instances \
-H "Content-Type: application/json" \
-H "X-API-Key: abc123def456789" \
-d '{"url":"server://0.0.0.0:10101/0.0.0.0:8080?tls=1"}'
# Using custom prefix
curl -X POST http://localhost:9090/admin/v1/instances \
-H "Content-Type: application/json" \
-H "X-API-Key: abc123def456789" \
-d '{"url":"server://0.0.0.0:10101/0.0.0.0:8080?tls=1"}'
# List all running instances
curl http://localhost:9090/api/v1/instances
curl http://localhost:9090/api/v1/instances \
-H "X-API-Key: abc123def456789"
# Control an instance (replace {id} with actual instance ID)
curl -X PUT http://localhost:9090/api/v1/instances/{id} \
-H "Content-Type: application/json" \
curl -X PATCH http://localhost:9090/api/v1/instances/{id} \
-H "X-API-Key: abc123def456789" \
-d '{"action":"restart"}'
```
#### Public Endpoints
The following endpoints do not require API Key authentication:
- `GET {prefix}/v1/openapi.json` - OpenAPI specification
- `GET {prefix}/v1/docs` - Swagger UI documentation
## Bidirectional Data Flow Explanation
NodePass supports flexible bidirectional data flow configuration:
@@ -228,12 +287,12 @@ NodePass supports flexible bidirectional data flow configuration:
- **No Server Required**: Operates independently without server handshake
- **Use Case**: Local proxy, simple port forwarding, testing environments, high-performance forwarding
### Server Receives Mode (dataFlow: "-")
### Server Receives Mode
- **Server**: Listens for incoming connections on target_addr, forwards through tunnel to client
- **Client**: Connects to local target_addr to provide services
- **Use Case**: Expose internal services to external access
### Server Sends Mode (dataFlow: "+")
### Server Sends Mode
- **Server**: Connects to remote target_addr to fetch data, sends through tunnel to client
- **Client**: Listens locally to receive connections from server
- **Use Case**: Access remote services through tunnel proxy
@@ -270,10 +329,13 @@ The handshake process between client and server is as follows:
1. **Client Connection**: Client connects to the server's tunnel address
2. **Key Authentication**: Client sends XOR-encrypted tunnel key
3. **Server Verification**: Server decrypts and verifies if the key matches
4. **Configuration Sync**: Upon successful verification, server sends tunnel configuration (including TLS mode)
4. **Configuration Sync**: Upon successful verification, server sends tunnel configuration including:
- Data flow direction
- Maximum connection pool capacity
- TLS security mode
5. **Connection Established**: Handshake complete, data transmission begins
This design ensures that only clients with the correct key can establish tunnel connections.
This design ensures that only clients with the correct key can establish tunnel connections, while allowing the server to centrally manage connection pool capacity.
## Next Steps

View File

@@ -1,9 +1,3 @@
# NodePass API参考
## 概述
NodePass在主控模式Master Mode下提供了RESTful API使前端应用能够以编程方式进行控制和集成。本节提供API端点、集成模式和最佳实践的全面文档。
# NodePass API 参考
## 概述
@@ -48,6 +42,7 @@ nodepass "master://0.0.0.0:9090/admin?log=info&tls=1"
| `/instances/{id}` | DELETE | 删除实例 |
| `/events` | GET | SSE 实时事件流 |
| `/info` | GET | 获取主控服务信息 |
| `/tcping` | GET | TCP连接测试 |
| `/openapi.json` | GET | OpenAPI 规范 |
| `/docs` | GET | Swagger UI 文档 |
@@ -55,7 +50,7 @@ nodepass "master://0.0.0.0:9090/admin?log=info&tls=1"
API Key 认证默认启用,首次启动自动生成并保存在 `nodepass.gob`
- 受保护接口:`/instances``/instances/{id}``/events``/info`
- 受保护接口:`/instances``/instances/{id}``/events``/info``/tcping`
- 公共接口:`/openapi.json``/docs`
- 认证方式:请求头加 `X-API-Key: <key>`
- 重置 KeyPATCH `/instances/********`body `{ "action": "restart" }`
@@ -70,16 +65,21 @@ API Key 认证默认启用,首次启动自动生成并保存在 `nodepass.gob`
"status": "running|stopped|error",
"url": "...",
"restart": true,
"mode": 0,
"ping": 0,
"pool": 0,
"tcps": 0,
"udps": 0,
"tcprx": 0,
"tcptx": 0,
"udprx": 0,
"udptx": 0,
"pool": 0, // 健康检查池连接数
"ping": 0 // 健康检查延迟(ms)
"udptx": 0
}
```
- `pool`/`ping`:健康检查数据,仅 debug 模式下统计
- `mode`:实例运行模式
- `ping`/`pool`:健康检查数据
- `tcps`/`udps`:当前活动连接数统计
- `tcprx`/`tcptx`/`udprx`/`udptx`:累计流量统计
- `restart`:自启动策略
@@ -87,7 +87,20 @@ API Key 认证默认启用,首次启动自动生成并保存在 `nodepass.gob`
- 服务端:`server://<bind_addr>:<bind_port>/<target_host>:<target_port>?<参数>`
- 客户端:`client://<server_host>:<server_port>/<local_host>:<local_port>?<参数>`
- 支持参数:`tls``log``crt``key`
- 支持参数:`log``tls``crt``key``min``max``mode``read``rate`
### URL 查询参数
- `log`:日志级别(`none``debug``info``warn``error``event`
- `tls`TLS加密模式`0``1``2`- 仅服务端/主控模式
- `crt`/`key`:证书/密钥文件路径(当`tls=2`时)
- `min`/`max`:连接池容量(`min`由客户端设置,`max`由服务端设置并在握手时传递给客户端)
- `mode`:运行模式控制(`0``1``2`- 控制操作行为
- 对于服务端:`0`=自动,`1`=反向模式,`2`=正向模式
- 对于客户端:`0`=自动,`1`=单端转发,`2`=双端握手
- `read`数据读取超时时长如1h、30m、15s
- `rate`带宽速率限制单位Mbps0=无限制)
- `proxy`PROXY协议支持`0``1`- 启用后在数据传输前发送PROXY协议v1头部
### 实时事件流SSE
@@ -142,6 +155,44 @@ GET /events
5. `shutdown` - 主控服务即将关闭时发送,通知前端应用关闭连接
6. `log` - 实例产生新日志内容时发送,包含日志文本
#### 处理实例日志
在前端应用中,可以通过监听`log`事件来处理实例日志。以下是一个示例函数用于将日志追加到特定实例的UI中
```javascript
// 处理日志事件
function appendLogToInstanceUI(instanceId, logText) {
// 找到或创建日志容器
let logContainer = document.getElementById(`logs-${instanceId}`);
if (!logContainer) {
logContainer = document.createElement('div');
logContainer.id = `logs-${instanceId}`;
document.getElementById('instance-container').appendChild(logContainer);
}
// 创建新的日志条目
const logEntry = document.createElement('div');
logEntry.className = 'log-entry';
// 可以在这里解析ANSI颜色代码或格式化日志
logEntry.textContent = logText;
// 添加到容器
logContainer.appendChild(logEntry);
// 滚动到最新日志
logContainer.scrollTop = logContainer.scrollHeight;
}
```
日志集成最佳实践:
1. **缓冲管理**:限制日志条目的数量,以防止内存问题
2. **ANSI颜色解析**解析日志中的ANSI颜色代码以提高可读性
3. **过滤选项**:提供按严重性或内容过滤日志的选项
4. **搜索功能**:允许用户在实例日志中搜索
5. **日志持久化**:可选地将日志保存到本地存储,以便在页面刷新后查看
#### JavaScript客户端实现
以下是JavaScript前端消费SSE端点的示例
@@ -261,36 +312,6 @@ function connectToEventSourceWithApiKey(apiKey) {
}
```
#### 处理实例日志
新增的`log`事件类型允许实时接收和显示实例的日志输出。这对于监控和调试非常有用:
```javascript
// 处理日志事件
function appendLogToInstanceUI(instanceId, logText) {
// 找到或创建日志容器
let logContainer = document.getElementById(`logs-${instanceId}`);
if (!logContainer) {
logContainer = document.createElement('div');
logContainer.id = `logs-${instanceId}`;
document.getElementById('instance-container').appendChild(logContainer);
}
// 创建新的日志条目
const logEntry = document.createElement('div');
logEntry.className = 'log-entry';
// 可以在这里解析ANSI颜色代码或格式化日志
logEntry.textContent = logText;
// 添加到容器
logContainer.appendChild(logEntry);
// 滚动到最新日志
logContainer.scrollTop = logContainer.scrollHeight;
}
```
#### SSE相比轮询的优势
使用SSE监控实例状态比传统轮询提供多种优势
@@ -327,7 +348,20 @@ NodePass主控模式现在支持使用gob序列化格式进行实例持久化。
- 启用自启动策略的实例在主控重启时自动启动
- 重启后无需手动重新注册
**注意:** 虽然实例配置现在已经持久化,前端应用仍应保留自己的实例配置记录作为备份策略。
#### 自动备份功能
NodePass主控模式提供自动备份功能定期备份状态文件以防止数据丢失
- **备份文件**:自动创建 `nodepass.gob.backup` 备份文件
- **备份周期**每1小时自动备份一次可通过环境变量 `NP_RELOAD_INTERVAL` 配置)
- **备份策略**:使用单一备份文件,新备份会覆盖旧备份
- **备份内容**:包含所有实例配置、状态、自启动策略和统计数据
- **故障恢复**:当主文件损坏时,可手动使用备份文件恢复
- **自动启动**:备份功能随主控服务自动启动,无需额外配置
备份文件位置:与主状态文件 `nodepass.gob` 相同目录下的 `nodepass.gob.backup`
**注意:** 虽然实例配置现在已经持久化并自动备份,前端应用仍应保留自己的实例配置记录作为额外的备份策略。
### 实例生命周期管理
@@ -502,7 +536,7 @@ NodePass主控模式现在支持使用gob序列化格式进行实例持久化。
method: 'PATCH',
headers: {
'Content-Type': 'application/json',
'X-API-Key': apiKey // 如果启用了API Key
'X-API-Key': apiKey
},
body: JSON.stringify({ restart: enableAutoStart })
});
@@ -511,13 +545,12 @@ NodePass主控模式现在支持使用gob序列化格式进行实例持久化。
return data.success;
}
// 组合操作:控制实例并更新自启动策略
async function controlInstanceWithAutoStart(instanceId, action, enableAutoStart) {
const response = await fetch(`${API_URL}/instances/${instanceId}`, {
method: 'PATCH',
headers: {
'Content-Type': 'application/json',
'X-API-Key': apiKey // 如果启用了API Key
'X-API-Key': apiKey
},
body: JSON.stringify({
action: action,
@@ -529,13 +562,12 @@ NodePass主控模式现在支持使用gob序列化格式进行实例持久化。
return data.success;
}
// 组合操作:同时更新别名、控制实例和自启动策略
async function updateInstanceComplete(instanceId, alias, action, enableAutoStart) {
const response = await fetch(`${API_URL}/instances/${instanceId}`, {
method: 'PATCH',
headers: {
'Content-Type': 'application/json',
'X-API-Key': apiKey // 如果启用了API Key
'X-API-Key': apiKey
},
body: JSON.stringify({
alias: alias,
@@ -761,16 +793,17 @@ API响应中的实例对象包含以下字段
"status": "running", // 实例状态running、stopped 或 error
"url": "server://...", // 实例配置URL
"restart": true, // 自启动策略
"tcprx": 1024, // TCP接收字节数
"tcptx": 2048, // TCP发送字节数
"udprx": 512, // UDP接收字节数
"udptx": 256 // UDP发送字节数
"mode": 0, // 运行模式
"tcprx": 1024, // TCP接收字节数
"tcptx": 2048, // TCP发送字节数
"udprx": 512, // UDP接收字节数
"udptx": 256 // UDP发送字节数
}
```
**注意:**
- `alias` 字段为可选,如果未设置则为空字符串
- 流量统计字段tcprx、tcptx、udprx、udptx仅在启用调试模式时有效
- `mode` 字段表示实例当前的运行模式
- `restart` 字段控制实例的自启动行为
## 系统信息端点
@@ -791,15 +824,25 @@ GET /info
```json
{
"os": "linux", // 操作系统类型
"arch": "amd64", // 系统架构
"ver": "1.2.0", // NodePass版本
"name": "example.com", // 隧道主机名
"uptime": 11525, // API运行时间
"log": "info", // 日志级别
"tls": "1", // TLS启用状态
"crt": "/path/to/cert", // 证书路径
"key": "/path/to/key" // 密钥路径
"os": "linux", // 操作系统类型
"arch": "amd64", // 系统架构
"cpu": 45, // CPU使用率百分比仅Linux系统
"mem_total": 8589934592, // 内存容量字节仅Linux系统
"mem_free": 2684354560, // 内存可用字节仅Linux系统
"swap_total": 3555328000, // 交换区总量字节仅Linux系统
"swap_free": 3555328000, // 交换区可用字节仅Linux系统
"netrx": 1048576000, // 网络接收字节数累计值仅Linux
"nettx": 2097152000, // 网络发送字节数累计值仅Linux
"diskr": 4194304000, // 磁盘读取字节数累计值仅Linux
"diskw": 8388608000, // 磁盘写入字节数累计值仅Linux
"sysup": 86400, // 系统运行时间仅Linux
"ver": "1.2.0", // NodePass版本
"name": "example.com", // 隧道主机名
"uptime": 11525, // API运行时间
"log": "info", // 日志级别
"tls": "1", // TLS启用状态
"crt": "/path/to/cert", // 证书路径
"key": "/path/to/key" // 密钥路径
}
```
@@ -818,15 +861,42 @@ async function getSystemInfo() {
return await response.json();
}
// 显示服务运行时间
function displayServiceUptime() {
// 显示服务运行时间和系统资源使用情况
function displaySystemStatus() {
getSystemInfo().then(info => {
console.log(`服务已运行: ${info.uptime} 秒`);
// 也可以格式化为更友好的显示
// 格式化运行时间为更友好的显示
const hours = Math.floor(info.uptime / 3600);
const minutes = Math.floor((info.uptime % 3600) / 60);
const seconds = info.uptime % 60;
console.log(`服务已运行: ${hours}小时${minutes}分${seconds}秒`);
// 显示系统资源使用情况仅Linux系统
if (info.os === 'linux') {
if (info.cpu !== -1) {
console.log(`CPU使用率: ${info.cpu}%`);
}
if (info.mem_total > 0) {
const memUsagePercent = ((info.mem_total - info.mem_free) / info.mem_total * 100).toFixed(1);
console.log(`内存使用率: ${memUsagePercent}% (${(info.mem_free / 1024 / 1024 / 1024).toFixed(1)}GB 可用,共 ${(info.mem_total / 1024 / 1024 / 1024).toFixed(1)}GB)`);
}
if (info.swap_total > 0) {
const swapUsagePercent = ((info.swap_total - info.swap_free) / info.swap_total * 100).toFixed(1);
console.log(`交换区使用率: ${swapUsagePercent}% (${(info.swap_free / 1024 / 1024 / 1024).toFixed(1)}GB 可用,共 ${(info.swap_total / 1024 / 1024 / 1024).toFixed(1)}GB)`);
}
} else {
console.log('CPU、内存、交换区、网络I/O、磁盘I/O和系统运行时间监控功能仅在Linux系统上可用');
}
// 显示网络I/O统计累计值
if (info.os === 'linux') {
console.log(`网络接收: ${(info.netrx / 1024 / 1024).toFixed(2)} MB累计`);
console.log(`网络发送: ${(info.nettx / 1024 / 1024).toFixed(2)} MB累计`);
console.log(`磁盘读取: ${(info.diskr / 1024 / 1024).toFixed(2)} MB累计`);
console.log(`磁盘写入: ${(info.diskw / 1024 / 1024).toFixed(2)} MB累计`);
console.log(`系统运行时间: ${Math.floor(info.sysup / 3600)}小时`);
}
});
}
```
@@ -837,6 +907,15 @@ function displayServiceUptime() {
- **版本验证**:在部署更新后检查版本号
- **运行时间监控**:监控运行时间以检测意外重启
- **日志级别验证**:确认当前日志级别符合预期
- **资源监控**在Linux系统上监控CPU、内存、交换区、网络I/O、磁盘I/O使用情况以确保最佳性能
- CPU使用率通过解析`/proc/stat`计算(非空闲时间百分比)
- 内存信息通过解析`/proc/meminfo`获取(总量和可用量,单位为字节)
- 交换区信息通过解析`/proc/meminfo`获取(总量和可用量,单位为字节)
- 网络I/O通过解析`/proc/net/dev`计算(累计字节数,排除虚拟接口)
- 磁盘I/O通过解析`/proc/diskstats`计算(累计字节数,仅统计主设备)
- 系统运行时间通过解析`/proc/uptime`获取
- 值为-1或0表示系统信息不可用非Linux系统
- 网络和磁盘I/O字段提供的是累计值前端应用需要存储历史数据并计算差值来得到实时速率字节/秒)
## API端点文档
@@ -992,7 +1071,23 @@ await fetch(`${API_URL}/instances/abc123`, {
#### GET /info
- **描述**:获取主控服务信息
- **认证**需要API Key
- **响应**:包含系统信息、版本、运行时间等
- **响应**:包含系统信息、版本、运行时间、CPU和RAM使用率
#### GET /tcping
- **描述**TCP连接测试检测目标地址的连通性和延迟
- **认证**需要API Key
- **参数**
- `target`(必需):目标地址,格式为 `host:port`
- **响应**
```json
{
"target": "example.com:80",
"connected": true,
"latency": 45,
"error": null
}
```
- **示例**`GET /api/tcping?target=fast.com:443`
#### GET /openapi.json
- **描述**获取OpenAPI 3.1.1规范
@@ -1015,7 +1110,7 @@ server://<bind_address>:<bind_port>/<target_host>:<target_port>?<parameters>
示例:
- `server://0.0.0.0:8080/localhost:3000` - 在8080端口监听转发到本地3000端口
- `server://0.0.0.0:9090/localhost:8080?tls=1` - 启用TLS的服务器
- `server://0.0.0.0:9090/localhost:8080?tls=1&mode=1` - 启用TLS的服务器,强制反向模式
#### 客户端模式 (Client Mode)
```
@@ -1024,13 +1119,19 @@ client://<server_host>:<server_port>/<local_host>:<local_port>?<parameters>
示例:
- `client://example.com:8080/localhost:3000` - 连接到远程服务器本地监听3000端口
- `client://vpn.example.com:443/localhost:22?tls=1` - 通过TLS连接到VPN服务器
- `client://remote.example.com:443/localhost:22?mode=2&min=32` - 通过远程服务器,强制双端模式
#### 支持的参数
| 参数 | 描述 | 值 | 默认值 |
|------|------|----|----|
| `tls` | TLS加密级别 | `0`(无), `1`(自签名), `2`(证书) | `0` |
| `log` | 日志级别 | `trace`, `debug`, `info`, `warn`, `error` | `info` |
| `crt` | 证书路径 | 文件路径 | 无 |
| `key` | 私钥路径 | 文件路径 | 无 |
| 参数 | 描述 | 值 | 默认值 | 适用范围 |
|------|------|----|----|---------|
| `log` | 日志级别 | `none`, `debug`, `info`, `warn`, `error`, `event` | `info` | 两者 |
| `tls` | TLS加密级别 | `0`(无), `1`(自签名), `2`(证书) | `0` | 仅服务器 |
| `crt` | 证书路径 | 文件路径 | 无 | 仅服务器 |
| `key` | 私钥路径 | 文件路径 | 无 | 仅服务器 |
| `mode` | 运行模式控制 | `0`(自动), `1`(强制模式1), `2`(强制模式2) | `0` | 两者 |
| `min` | 最小连接池容量 | 整数 > 0 | `64` | 仅客户端双端握手模式 |
| `max` | 最大连接池容量 | 整数 > 0 | `1024` | 双端握手模式 |
| `read` | 读取超时时间 | 时间长度 (如 `10m`, `30s`, `1h`) | `10m` | 两者 |
| `rate` | 带宽速率限制 | 整数 (Mbps), 0=无限制 | `0` | 两者 |
| `proxy` | PROXY协议支持 | `0`(禁用), `1`(启用) | `0` | 两者 |

View File

@@ -47,19 +47,169 @@ TLS模式2示例自定义证书
nodepass "server://0.0.0.0:10101/0.0.0.0:8080?tls=2&crt=/path/to/cert.pem&key=/path/to/key.pem"
```
## 连接池容量参数
## 运行模式控制
连接池容量可以通过URL查询参数进行配置
NodePass支持通过`mode`查询参数配置运行模式,以控制客户端和服务端实例的行为。这在自动模式检测不适合的部署场景中提供了灵活性。
- `min`: 最小连接池容量(默认: 64
- `max`: 最大连接池容量(默认: 1024
### 客户端模式控制
对于客户端实例,`mode`参数控制连接策略:
- **模式0**(默认):自动模式检测
- 首先尝试本地绑定隧道地址
- 如果成功,以单端转发模式运行
- 如果绑定失败,以双端握手模式运行
- **模式1**:强制单端转发模式
- 本地绑定隧道地址并直接转发流量到目标
- 使用直接连接建立实现高性能
- 无需与服务器握手
- **模式2**:强制双端握手模式
- 始终连接到远程服务器建立隧道
- 数据传输前需要与服务器握手
- 支持双向数据流协调
示例:
```bash
# 设置最小连接池为32最大为4096
nodepass "client://server.example.com:10101/127.0.0.1:8080?min=32&max=4096"
# 强制客户端以单端转发模式运行
nodepass "client://127.0.0.1:1080/target.example.com:8080?mode=1"
# 强制客户端以双端握手模式运行
nodepass "client://server.example.com:10101/127.0.0.1:8080?mode=2"
```
### 服务端模式控制
对于服务端实例,`mode`参数控制数据流方向:
- **模式0**(默认):自动流向检测
- 首先尝试本地绑定目标地址
- 如果成功,以反向模式运行(服务器接收流量)
- 如果绑定失败,以正向模式运行(服务器发送流量)
- **模式1**:强制反向模式
- 服务器本地绑定目标地址并接收流量
- 入站连接转发到已连接的客户端
- 数据流:外部 → 服务器 → 客户端 → 目标
- **模式2**:强制正向模式
- 服务器连接到远程目标地址
- 客户端连接转发到远程目标
- 数据流:客户端 → 服务器 → 外部目标
示例:
```bash
# 强制服务器以反向模式运行
nodepass "server://0.0.0.0:10101/0.0.0.0:8080?mode=1"
# 强制服务器以正向模式运行
nodepass "server://0.0.0.0:10101/remote.example.com:8080?mode=2"
```
## 连接池容量参数
连接池容量参数仅适用于双端握手模式,通过不同方式进行配置:
- `min`: 最小连接池容量(默认: 64- 由客户端通过URL查询参数设置
- `max`: 最大连接池容量(默认: 1024- 由服务端确定,在握手过程中下发给客户端
**重要说明**
- 客户端设置的`max`参数会被服务端在握手时传递的值覆盖
- `min`参数由客户端完全控制,服务端不会修改此值
- 在客户端单端转发模式下,不使用连接池,这些参数被忽略
示例:
```bash
# 客户端设置最小连接池为32最大连接池将由服务端决定
nodepass "client://server.example.com:10101/127.0.0.1:8080?min=32"
```
## 数据读取超时
数据读取超时可以通过URL查询参数`read`设置,支持多种时间单位:
- `read`: 数据读取超时时间(默认: 10分钟)
- 值格式:整数后跟可选单位(`s`表示秒,`m`表示分钟,`h`表示小时)
- 示例:`30s`(30秒)、`5m`(5分钟)、`1h`(1小时)
- 适用于客户端和服务端模式
- 如果在超时时间内未接收到数据,连接将被关闭
示例:
```bash
# 设置数据读取超时为5分钟
nodepass "client://server.example.com:10101/127.0.0.1:8080?read=5m"
# 设置数据读取超时为30秒适用于快速响应应用
nodepass "client://server.example.com:10101/127.0.0.1:8080?read=30s"
# 设置数据读取超时为30分钟适用于长时间传输
nodepass "client://server.example.com:10101/127.0.0.1:8080?read=30m"
```
## 速率限制
NodePass支持通过`rate`参数进行带宽速率限制,用于流量控制。此功能有助于防止网络拥塞,确保多个连接间的公平资源分配。
- `rate`: 最大带宽限制单位为Mbps兆比特每秒
- 值为0或省略无速率限制无限带宽
- 正整数以Mbps为单位的速率限制例如10表示10 Mbps
- 同时应用于上传和下载流量
- 使用令牌桶算法进行平滑流量整形
示例:
```bash
# 限制带宽为50 Mbps
nodepass "server://0.0.0.0:10101/0.0.0.0:8080?rate=50"
# 客户端100 Mbps速率限制
nodepass "client://server.example.com:10101/127.0.0.1:8080?rate=100"
# 与其他参数组合使用
nodepass "server://0.0.0.0:10101/0.0.0.0:8080?log=error&tls=1&rate=50"
```
**速率限制使用场景:**
- **带宽控制**防止NodePass消耗所有可用带宽
- **公平共享**:确保多个应用程序可以共享网络资源
- **成本管理**:在按流量计费的网络环境中控制数据使用
- **QoS合规**:满足带宽使用的服务级别协议
- **测试**:模拟低带宽环境进行应用程序测试
## PROXY协议支持
NodePass支持PROXY协议v1用于在通过负载均衡器、反向代理或其他中介服务转发流量时保留客户端连接信息。
- `proxy`PROXY协议支持默认0
- 值0禁用 - 不发送PROXY协议头部
- 值1启用 - 在数据传输前发送PROXY协议v1头部
- 支持TCP4和TCP6连接
- 兼容HAProxy、Nginx和其他支持PROXY协议的服务
PROXY协议头部包含原始客户端IP、服务器IP和端口信息即使流量通过NodePass隧道也允许下游服务识别真实的客户端连接详情。
示例:
```bash
# 为服务端模式启用PROXY协议v1
nodepass "server://0.0.0.0:10101/0.0.0.0:8080?proxy=1"
# 为客户端模式启用PROXY协议v1
nodepass "client://server.example.com:10101/127.0.0.1:8080?proxy=1"
# 与其他参数组合使用
nodepass "server://0.0.0.0:10101/0.0.0.0:8080?log=info&tls=1&proxy=1&rate=100"
```
**PROXY协议使用场景**
- **负载均衡器集成**通过负载均衡器转发时保留客户端IP信息
- **反向代理支持**:使后端服务能够看到原始客户端连接
- **日志和分析**:维护准确的客户端连接日志用于安全和分析
- **访问控制**允许下游服务应用基于IP的访问控制
- **合规性**:满足连接日志记录和审计的监管要求
**重要说明:**
- 目标服务必须支持PROXY协议v1才能正确处理头部
- PROXY头部仅对TCP连接发送不支持UDP
- 头部格式遵循HAProxy PROXY协议v1规范
- 如果目标服务不支持PROXY协议将导致连接失败
## URL查询参数配置及作用范围
NodePass支持通过URL查询参数进行灵活配置不同参数在 server、client、master 模式下的适用性如下表:
@@ -71,7 +221,12 @@ NodePass支持通过URL查询参数进行灵活配置不同参数在 server
| `crt` | 自定义证书路径 | O | X | O |
| `key` | 自定义密钥路径 | O | X | O |
| `min` | 最小连接池容量 | X | O | X |
| `max` | 最大连接池容量 | O | O | X |
| `max` | 最大连接池容量 | O | X | X |
| `mode` | 运行模式控制 | O | O | X |
| `read` | 读取超时时间 | O | O | X |
| `rate` | 带宽速率限制 | O | O | X |
| `slot` | 最大连接数限制 | O | O | X |
| `proxy` | PROXY协议支持 | O | O | X |
- O参数有效推荐根据实际场景配置
@@ -79,7 +234,9 @@ NodePass支持通过URL查询参数进行灵活配置不同参数在 server
**最佳实践:**
- server/master 模式建议配置安全相关参数(如 tls、crt、key提升数据通道安全性。
- client 模式建议根据流量和资源情况调整连接池容量min/max优化性能。
- client/server 双端握手模式建议根据流量和资源情况调整连接池容量min/max优化性能。
- 当自动检测不符合部署需求时或需要跨环境一致行为时使用运行模式控制mode
- 配置速率限制rate以控制带宽使用防止共享环境中的网络拥塞。
- 日志级别log可在所有模式下灵活调整便于运维和排查。
## 环境变量
@@ -88,22 +245,22 @@ NodePass支持通过URL查询参数进行灵活配置不同参数在 server
| 变量 | 描述 | 默认值 | 示例 |
|----------|-------------|---------|---------|
| `NP_SEMAPHORE_LIMIT` | 最大并发连接数 | 1024 | `export NP_SEMAPHORE_LIMIT=2048` |
| `NP_UDP_DATA_BUF_SIZE` | UDP数据包缓冲区大小 | 8192 | `export NP_UDP_DATA_BUF_SIZE=16384` |
| `NP_UDP_READ_TIMEOUT` | UDP读取操作超时 | 20s | `export NP_UDP_READ_TIMEOUT=30s` |
| `NP_UDP_DIAL_TIMEOUT` | UDP连接建立超时 | 20s | `export NP_UDP_DIAL_TIMEOUT=30s` |
| `NP_TCP_READ_TIMEOUT` | TCP读取操作超时 | 20s | `export NP_TCP_READ_TIMEOUT=30s` |
| `NP_TCP_DIAL_TIMEOUT` | TCP连接建立超时 | 20s | `export NP_TCP_DIAL_TIMEOUT=30s` |
| `NP_MIN_POOL_INTERVAL` | 连接创建之间的最小间隔 | 1s | `export NP_MIN_POOL_INTERVAL=500ms` |
| `NP_MAX_POOL_INTERVAL` | 连接创建之间的最大间隔 | 5s | `export NP_MAX_POOL_INTERVAL=3s` |
| `NP_SEMAPHORE_LIMIT` | 信号缓冲区大小 | 65536 | `export NP_SEMAPHORE_LIMIT=2048` |
| `NP_UDP_DATA_BUF_SIZE` | UDP数据包缓冲区大小 | 2048 | `export NP_UDP_DATA_BUF_SIZE=16384` |
| `NP_HANDSHAKE_TIMEOUT` | 握手操作超时 | 10s | `export NP_HANDSHAKE_TIMEOUT=30s` |
| `NP_TCP_DIAL_TIMEOUT` | TCP连接建立超时 | 30s | `export NP_TCP_DIAL_TIMEOUT=60s` |
| `NP_UDP_DIAL_TIMEOUT` | UDP连接建立超时 | 10s | `export NP_UDP_DIAL_TIMEOUT=30s` |
| `NP_POOL_GET_TIMEOUT` | 从连接池获取连接的超时时间 | 30s | `export NP_POOL_GET_TIMEOUT=60s` |
| `NP_MIN_POOL_INTERVAL` | 连接创建之间的最小间隔 | 100ms | `export NP_MIN_POOL_INTERVAL=200ms` |
| `NP_MAX_POOL_INTERVAL` | 连接创建之间的最大间隔 | 1s | `export NP_MAX_POOL_INTERVAL=3s` |
| `NP_REPORT_INTERVAL` | 健康检查报告间隔 | 5s | `export NP_REPORT_INTERVAL=10s` |
| `NP_SERVICE_COOLDOWN` | 重启尝试前的冷却期 | 3s | `export NP_SERVICE_COOLDOWN=5s` |
| `NP_SHUTDOWN_TIMEOUT` | 优雅关闭超时 | 5s | `export NP_SHUTDOWN_TIMEOUT=10s` |
| `NP_RELOAD_INTERVAL` | 证书/连接池重载间隔 | 1h | `export NP_RELOAD_INTERVAL=30m` |
| `NP_RELOAD_INTERVAL` | 证书重载/状态备份间隔 | 1h | `export NP_RELOAD_INTERVAL=30m` |
### 连接池调优
连接池参数是性能调优中的重要设置:
连接池参数是双端握手模式下性能调优中的重要设置,在客户端单端转发模式下不适用
#### 池容量设置
@@ -121,18 +278,18 @@ NodePass支持通过URL查询参数进行灵活配置不同参数在 server
- `NP_MIN_POOL_INTERVAL`:控制连接创建尝试之间的最小时间
- 太低:可能以连接尝试压垮网络
- 推荐范围:根据网络延迟500ms-2s
- 推荐范围:根据网络延迟和预期负载100ms-500ms
- `NP_MAX_POOL_INTERVAL`:控制连接创建尝试之间的最大时间
- 太高:流量高峰期可能导致池耗尽
- 推荐范围:根据预期流量模式,3s-10s
- 推荐范围:根据预期流量模式,1s-5s
#### 连接管理
- `NP_SEMAPHORE_LIMIT`:控制最大并发隧道操作数
-低:流量高峰期拒绝连接
-太多并发goroutine可能导致内存压力
- 推荐范围:大多数应用1000-5000,高吞吐量场景更高
- `NP_SEMAPHORE_LIMIT`:控制信号缓冲区大小
-小:容易导致信号丢失
-大:内存使用增加
- 推荐范围1000-5000
### UDP设置
@@ -143,27 +300,27 @@ NodePass支持通过URL查询参数进行灵活配置不同参数在 server
- 默认值(2048)适用于大多数情况
- 考虑为媒体流或游戏服务器增加到16384或更高
- `NP_UDP_READ_TIMEOUT`UDP读取操作超时
- 对于高延迟网络或响应时间慢的应用增加此值
- 对于需要快速故障转移的低延迟应用减少此值
- `NP_UDP_DIAL_TIMEOUT`UDP拨号超时
- 对于高延迟网络增加此值
- 对于需要快速连接的应用减少此值
- `NP_UDP_DIAL_TIMEOUT`UDP连接建立超时
- 默认值(10s)为大多数应用提供良好平衡
- 对于高延迟网络或响应缓慢的应用增加此值
- 对于需要快速故障切换的低延迟应用减少此值
### TCP设置
对于TCP连接的优化
- `NP_TCP_READ_TIMEOUT`TCP读取操作超时
- 对于高延迟网络或响应慢的服务器增加此值
- 对于需要快速检测断开连接的应用降低此值
- 影响数据传输过程中的等待时间
- `NP_TCP_DIAL_TIMEOUT`TCP连接建立超时
- 默认值(30s)适用于大多数网络条件
- 对于网络条件不稳定的环境增加此值
- 对于需要快速判断连接成功与否的应用减少此值
- 影响初始连接建立阶段
### 连接池管理设置
- `NP_POOL_GET_TIMEOUT`:从连接池获取连接时的最大等待时间
- 默认值(30s)为连接建立提供充足时间
- 对于高延迟环境或使用大型连接池时增加此值
- 对于需要快速故障检测的应用减少此值
- 在客户端单端转发模式下不使用连接池,此参数被忽略
### 服务管理设置
@@ -193,15 +350,20 @@ NodePass支持通过URL查询参数进行灵活配置不同参数在 server
URL参数
```bash
nodepass "client://server.example.com:10101/127.0.0.1:8080?min=128&max=8192"
# 高吞吐量服务器1 Gbps速率限制
nodepass "server://0.0.0.0:10101/0.0.0.0:8080?max=8192&rate=1000"
# 高吞吐量客户端500 Mbps速率限制
nodepass "client://server.example.com:10101/127.0.0.1:8080?min=128&rate=500"
```
环境变量:
```bash
export NP_MIN_POOL_INTERVAL=500ms
export NP_MAX_POOL_INTERVAL=3s
export NP_MIN_POOL_INTERVAL=50ms
export NP_MAX_POOL_INTERVAL=500ms
export NP_SEMAPHORE_LIMIT=8192
export NP_UDP_DATA_BUF_SIZE=32768
export NP_POOL_GET_TIMEOUT=60s
export NP_REPORT_INTERVAL=10s
```
@@ -211,15 +373,21 @@ export NP_REPORT_INTERVAL=10s
URL参数
```bash
nodepass "client://server.example.com:10101/127.0.0.1:8080?min=256&max=4096"
# 低延迟服务器,适度速率限制
nodepass "server://0.0.0.0:10101/0.0.0.0:8080?max=4096&rate=200"
# 低延迟客户端,适度速率限制
nodepass "client://server.example.com:10101/127.0.0.1:8080?min=256&rate=200"
```
环境变量:
```bash
export NP_MIN_POOL_INTERVAL=100ms
export NP_MAX_POOL_INTERVAL=1s
export NP_MIN_POOL_INTERVAL=50ms
export NP_MAX_POOL_INTERVAL=500ms
export NP_SEMAPHORE_LIMIT=4096
export NP_UDP_READ_TIMEOUT=5s
export NP_TCP_DIAL_TIMEOUT=5s
export NP_UDP_DIAL_TIMEOUT=5s
export NP_POOL_GET_TIMEOUT=15s
export NP_REPORT_INTERVAL=1s
```
@@ -229,14 +397,21 @@ export NP_REPORT_INTERVAL=1s
URL参数
```bash
nodepass "client://server.example.com:10101/127.0.0.1:8080?min=16&max=512"
# 资源受限服务器,保守速率限制
nodepass "server://0.0.0.0:10101/0.0.0.0:8080?max=512&rate=50"
# 资源受限客户端,保守速率限制
nodepass "client://server.example.com:10101/127.0.0.1:8080?min=16&rate=50"
```
环境变量:
```bash
export NP_MIN_POOL_INTERVAL=2s
export NP_MAX_POOL_INTERVAL=10s
export NP_MIN_POOL_INTERVAL=200ms
export NP_MAX_POOL_INTERVAL=2s
export NP_SEMAPHORE_LIMIT=512
export NP_TCP_DIAL_TIMEOUT=20s
export NP_UDP_DIAL_TIMEOUT=20s
export NP_POOL_GET_TIMEOUT=45s
export NP_REPORT_INTERVAL=30s
export NP_SHUTDOWN_TIMEOUT=3s
```

View File

@@ -76,9 +76,30 @@ nodepass client://server.example.com:10101/127.0.0.1:8080?log=debug
- 数据传输详情
- 错误情况
### 示例6运行模式控制
通过明确的模式设置控制操作行为:
```bash
# 强制服务器以反向模式运行(服务器接收流量)
nodepass "server://0.0.0.0:10101/0.0.0.0:8080?mode=1&tls=1"
# 强制客户端以单端转发模式运行(高性能本地代理)
nodepass "client://127.0.0.1:1080/remote.example.com:8080?mode=1"
# 强制客户端以双端握手模式运行(需要服务器协调)
nodepass "client://server.example.com:10101/127.0.0.1:8080?mode=2&log=debug"
```
这些配置:
- **服务器 mode=1**:强制反向模式,服务器本地绑定目标地址
- **客户端 mode=1**:强制单端转发模式,使用直接连接实现高性能
- **客户端 mode=2**:强制双端握手模式,适用于需要服务器协调的场景
- 当自动检测不符合部署需求时使用模式控制
## 通过防火墙访问数据库
### 示例6:数据库隧道
### 示例7:数据库隧道
启用对防火墙后的数据库服务器的安全访问:
@@ -98,7 +119,7 @@ nodepass client://server.example.com:10101/127.0.0.1:5432
## 安全的微服务通信
### 示例7:服务间通信
### 示例8:服务间通信
启用微服务之间的安全通信:
@@ -116,9 +137,64 @@ nodepass client://service-a:10101/127.0.0.1:8082
- 将日志限制为仅警告和错误
- 使服务A的API在服务B上显示为本地服务
## 带宽速率限制
### 示例9带速率限制的文件传输服务器
控制文件传输服务的带宽使用:
```bash
# 服务端限制文件传输带宽为100 Mbps
nodepass "server://0.0.0.0:10101/127.0.0.1:8080?log=info&tls=1&rate=100"
# 客户端连接时限制为50 Mbps
nodepass "client://fileserver.example.com:10101/127.0.0.1:3000?log=info&rate=50"
```
此配置:
- 限制服务器带宽为100 Mbps以防止网络拥塞
- 客户端进一步限制下载速度为50 Mbps以实现公平共享
- 允许文件传输的同时为其他服务保留带宽
- 使用TLS加密确保文件传输安全
### 示例10物联网传感器数据收集的保守限制
对于带宽有限或按流量计费的物联网设备:
```bash
# 服务器接受物联网数据限制为5 Mbps
nodepass "server://0.0.0.0:10101/127.0.0.1:1883?log=warn&rate=5"
# 物联网设备客户端发送传感器数据限制为2 Mbps
nodepass "client://iot-gateway.example.com:10101/127.0.0.1:1883?log=error&rate=2"
```
此设置:
- 限制服务器为5 Mbps用于从多个物联网设备收集传感器数据
- 单个物联网客户端限制为2 Mbps以防止单一设备消耗所有带宽
- 最小日志记录warn/error以减少物联网设备的资源使用
- 高效适用于MQTT或其他物联网协议
### 示例11开发环境速率控制
在带宽约束下测试应用程序:
```bash
# 模拟慢速网络条件进行测试
nodepass "client://api.example.com:443/127.0.0.1:8080?log=debug&rate=1"
# 带监控的高速开发服务器
nodepass "server://0.0.0.0:10101/127.0.0.1:3000?log=debug&rate=500"
```
此配置:
- 客户端模拟1 Mbps连接用于测试慢速网络场景
- 开发服务器限制为500 Mbps并提供详细日志记录用于调试
- 帮助识别不同带宽约束下的性能问题
## 物联网设备管理
### 示例8:物联网网关
### 示例12:物联网网关
创建物联网设备的中央访问点:
@@ -138,7 +214,7 @@ nodepass client://mgmt.example.com:10101/127.0.0.1:80
## 多环境开发
### 示例9:开发环境访问
### 示例13:开发环境访问
通过隧道访问不同的开发环境:
@@ -159,9 +235,79 @@ nodepass "server://tunnel.example.com:10101/127.0.0.1:3001?log=warn&tls=1"
- 使开发人员能够访问环境而无需直接网络暴露
- 将远程服务映射到不同的本地端口,便于识别
## PROXY协议集成
### 示例14负载均衡器与PROXY协议集成
启用PROXY协议支持与负载均衡器和反向代理集成
```bash
# 服务端为HAProxy/Nginx集成启用PROXY协议v1
nodepass "server://0.0.0.0:10101/127.0.0.1:8080?log=info&tls=1&proxy=1"
# 客户端启用PROXY协议以保留客户端连接信息
nodepass "client://tunnel.example.com:10101/127.0.0.1:3000?log=info&proxy=1"
```
此配置:
- 在数据传输开始前发送PROXY协议v1头部
- 通过隧道保留原始客户端IP和端口信息
- 使后端服务能够看到真实的客户端连接详情
- 兼容HAProxy、Nginx和其他支持PROXY协议的服务
- 有助于维护准确的访问日志和基于IP的访问控制
### 示例15Web应用的反向代理支持
使NodePass后的Web应用能够接收原始客户端信息
```bash
# 为Web应用启用PROXY协议的NodePass服务器
nodepass "server://0.0.0.0:10101/127.0.0.1:8080?log=warn&tls=2&crt=/path/to/cert.pem&key=/path/to/key.pem&proxy=1"
# 后端Web服务器如Nginx配置以处理PROXY协议
# 在nginx.conf中
# server {
# listen 8080 proxy_protocol;
# real_ip_header proxy_protocol;
# set_real_ip_from 127.0.0.1;
# ...
# }
```
此设置:
- Web应用接收原始客户端IP地址而不是NodePass隧道IP
- 启用正确的访问日志记录、分析和安全控制
- 支持连接审计的合规性要求
- 适用于支持PROXY协议的Web服务器Nginx、HAProxy等
### 示例16数据库访问与客户端IP保留
为数据库访问日志记录和安全维护客户端IP信息
```bash
# 启用PROXY协议的数据库代理服务器
nodepass "server://0.0.0.0:10101/127.0.0.1:5432?log=error&proxy=1"
# 通过隧道连接的应用客户端
nodepass "client://dbproxy.example.com:10101/127.0.0.1:5432?proxy=1"
```
优势:
- 数据库日志显示原始应用服务器IP而不是隧道IP
- 启用基于IP的数据库访问控制正常工作
- 维护安全和合规的审计轨迹
- 兼容支持PROXY协议的数据库适当配置的PostgreSQL
**PROXY协议重要说明**
- 目标服务必须支持PROXY协议v1才能正确处理头部
- PROXY头部仅对TCP连接发送不支持UDP流量
- 头部包含协议TCP4/TCP6、源IP、目标IP、源端口、目标端口
- 如果目标服务不支持PROXY协议连接可能失败或行为异常
- 在生产环境部署前请在非生产环境中充分测试启用PROXY协议的配置
## 容器部署
### 示例10容器化NodePass
### 示例17容器化NodePass
在Docker环境中部署NodePass
@@ -196,7 +342,7 @@ docker run -d --name nodepass-client \
## 主控API管理
### 示例11:集中管理
### 示例18:集中管理
为多个NodePass实例设置中央控制器
@@ -233,7 +379,7 @@ curl -X PUT http://localhost:9090/api/v1/instances/{id} \
- 提供用于自动化和集成的RESTful API
- 包含内置的Swagger UI位于http://localhost:9090/api/v1/docs
### 示例12自定义API前缀
### 示例19自定义API前缀
为主控模式使用自定义API前缀
@@ -252,6 +398,44 @@ curl -X POST http://localhost:9090/admin/v1/instances \
- 用于安全或组织目的的自定义URL路径
- 在http://localhost:9090/admin/v1/docs访问Swagger UI
### 示例20实时连接和流量监控
通过主控API监控实例的连接数和流量统计
```bash
# 获取实例详细信息,包括连接数统计
curl -H "X-API-Key: your-api-key" http://localhost:9090/api/v1/instances/{id}
# 响应示例包含TCPS和UDPS字段
{
"id": "a1b2c3d4",
"alias": "网站代理",
"type": "server",
"status": "running",
"url": "server://0.0.0.0:10101/127.0.0.1:8080",
"restart": true,
"pool": 64,
"ping": 25,
"tcps": 12,
"udps": 5,
"tcprx": 1048576,
"tcptx": 2097152,
"udprx": 512000,
"udptx": 256000
}
# 使用SSE实时监控所有实例状态变化
curl -H "X-API-Key: your-api-key" \
-H "Accept: text/event-stream" \
http://localhost:9090/api/v1/events
```
此监控设置提供:
- **实时连接数跟踪**TCPS和UDPS字段显示当前活动连接数
- **性能分析**:通过连接数和流量数据评估系统负载
- **容量规划**:基于历史连接数据进行资源规划
- **故障诊断**:异常的连接数变化可能指示网络问题
## 下一步
现在您已经了解了各种使用示例,您可能想要:

View File

@@ -27,6 +27,10 @@ NodePass 创建了一个具有独立控制和数据通道的网络架构:
4. **客户端模式操作**
- 连接到服务端的控制通道
- **握手阶段**:服务端验证隧道密钥后,向客户端下发配置信息:
- 数据流向模式(决定客户端接收或发送流量)
- 最大连接池容量(由服务端统一管理和分配)
- TLS安全级别确保客户端使用正确的加密模式
- 监听指示传入连接的信号
- 使用服务端指定的 TLS 安全级别创建数据连接
- 在安全通道和本地目标之间转发数据
@@ -35,12 +39,12 @@ NodePass 创建了一个具有独立控制和数据通道的网络架构:
5. **客户端单端转发模式**
- 当隧道地址为本地地址时如127.0.0.1)自动启用
- 客户端直接在本地监听端口,无需服务端的控制通道协调
- 对于TCP连接使用连接池技术,显著提高转发性能
- 对于TCP和UDP协议使用直接连接建立方式
- 适用于纯本地转发场景,减少网络开销和延迟
- 支持TCP和UDP协议的高性能单端转发
- 支持TCP和UDP协议的高性能单端转发,采用优化的连接处理
5. **协议支持**
- **TCP**:具有持久连接的全双工流式传输,在客户端单端转发模式下支持连接池优化
- **TCP**:具有持久连接的全双工流式传输,在客户端单端转发模式下优化了直接连接建立
- **UDP**:具有可配置缓冲区大小和超时的数据报转发
## 数据传输流
@@ -48,9 +52,9 @@ NodePass 创建了一个具有独立控制和数据通道的网络架构:
NodePass 通过其隧道架构建立双向数据流,支持 TCP 和 UDP 协议。系统支持三种数据流模式:
### 数据流模式说明
- **服务端接收模式dataFlow: "-"**:服务端在目标地址监听,客户端在本地监听,数据从目标地址流向客户端本地
- **服务端发送模式dataFlow: "+"**:服务端连接到远程目标地址,客户端在本地监听,数据从客户端本地流向远程目标
- **客户端单端转发模式**:客户端在本地直接监听并转发到目标地址,无需服务端协调,使用连接池技术实现高性能转发
- **服务端接收模式**:服务端在目标地址监听,客户端在本地监听,数据从目标地址流向客户端本地
- **服务端发送模式**:服务端连接到远程目标地址,客户端在本地监听,数据从客户端本地流向远程目标
- **客户端单端转发模式**:客户端在本地直接监听并转发到目标地址,无需服务端协调,使用直接连接建立实现优化转发
数据流模式根据隧道地址和目标地址自动确定:
- 如果隧道地址是本地地址localhost、127.0.0.1等),启用客户端单端转发模式
@@ -128,34 +132,34 @@ NodePass 通过其隧道架构建立双向数据流,支持 TCP 和 UDP 协议
- 直接在指定的隧道端口启动TCP或UDP监听器
- 无需连接到远程服务端,实现零延迟启动
3. **连接池初始化**仅TCP
3. **直接连接建立**
```
[客户端] → [初始化目标连接] → [建立连接到目标地址]
[客户端] → [创建到目标地址的直接连接] → [建立目标连接]
```
- TCP转发创建高性能连接
- 预先建立多个到目标地址的连接,显著减少连接建立延迟
- 连接池大小可根据并发需求动态调整
- 对于TCP为每个隧道连接直接建立到目标地址的TCP连接
- 对于UDP创建用于与目标地址交换数据报的UDP套接字
- 消除连接池开销,提供更简单、更直接的转发路径
4. **高性能转发**
4. **优化转发**
```
[本地连接] → [从连接池获取目标连接] → [直接数据交换] → [连接复用或释放]
[本地连接] → [直接目标连接] → [数据交换] → [连接清理]
```
- 对于TCP从连接池中快速获取预建立的目标连接,进行高效数据交换
- 对于UDP直接转发数据报到目标地址无需连接池
- 化的数据路径,最小化转发开销和延迟
- 对于TCP直接连接建立后进行高效数据交换
- 对于UDP直接转发数据报到目标地址延迟最小
- 化的数据路径,确保可靠高效的转发
### 特定协议特性
- **TCP 交换**
- 用于全双工通信的持久连接
- 连接终止前的持续数据流传输
- 具有自动重连的错误处理
- **客户端单端转发优化**通过连接池技术预建立连接,显著减少连接建立延迟
- **客户端单端转发优化**为每个隧道连接直接建立连接,确保可靠高效的转发
- **UDP 交换**
- 具有可配置缓冲区大小的一次性数据报转发 (`UDP_DATA_BUF_SIZE`)
- 响应等待的读取超时控制 (`UDP_READ_TIMEOUT`)
- 响应等待的读取超时控制 (`read`参数或默认10m)
- 针对低延迟、无状态通信进行了优化
- **客户端单端转发优化**:直接转发机制,无需连接池,实现最低延迟
- **客户端单端转发优化**:直接转发机制,实现最低延迟
## 信号通信机制
@@ -206,105 +210,308 @@ NodePass 通过 TCP 隧道使用复杂的基于 URL 的信号协议:
## 连接池架构
NodePass 实现了一个高效的连接池系统来管理网络连接:
NodePass 实现了一个高效的连接池系统来管理网络连接,这是其性能优势的核心设计
### 设计哲学
连接池的设计遵循"预热优于冷启动"的原则,通过预先建立连接消除网络延迟。这种设计理念借鉴了现代高性能服务器的最佳实践,将连接建立的成本分摊到系统启动阶段,而非在关键路径上承担这一开销。
### 池设计
1. **池类型**
- **客户端池**:预先建立到远程端点的连接
- **服务器池**:管理来自客户端的传入连接
- **客户端池**:预先建立到远程端点的连接,采用主动式连接管理
- **服务器池**:管理来自客户端的传入连接,采用被动式连接接收
2. **池组件**
- **连接存储**:线程安全的连接 ID 到 net.Conn 对象的映射
- **ID 通道**:用于可用连接 ID 的缓冲通道
- **容量管理**:基于使用模式的动态调整
- **间隔控制**:连接创建之间的基于时间的限流
- **连接工厂**:可定制的连接创建函数
- **连接存储**:线程安全的连接 ID 到 net.Conn 对象的映射,支持高并发访问
- **ID 通道**:用于可用连接 ID 的缓冲通道,实现无锁的快速分配
- **容量管理**:基于使用模式的动态调整,实现智能扩缩容
- 最小容量由客户端设置,确保客户端具备基础连接保障
- 最大容量由服务端在握手时统一下发,实现全局资源协调
- **间隔控制**:连接创建之间的基于时间的限流,防止网络资源过载
- **连接工厂**可定制的连接创建函数支持不同的TLS模式和网络配置
### 先进性设计
1. **零延迟连接**
- 预建立的连接池消除了TCP三次握手的延迟
- TLS握手在连接池初始化时完成避免运行时加密协商开销
- 连接预热策略确保池中始终有可用的热连接
2. **智能负载感知**
- 基于实时连接使用率的动态池管理
- 预测性连接创建,根据历史使用模式提前准备连接
- 自适应超时和重试机制,应对网络波动
### 连接生命周期
1. **连接创建**
- 连接创建数量不超过配置的容量
- 每个连接都分配一个唯一 ID
- ID 和连接存储在池中
- 连接创建数量不超过配置的容量,确保资源可控性
- 每个连接都分配一个唯一 ID,支持精确的连接跟踪和管理
- ID 和连接存储在池中,采用写时复制和延迟删除策略
2. **连接获取**
- 客户端使用连接 ID 检索连接
- 服务端从池中检索下一个可用连接
- 在返回前验证连接
- 客户端使用连接 ID 检索连接,支持精确匹配和快速查找
- 服务端从池中检索下一个可用连接,采用轮询或最少使用策略
- 在返回前验证连接有效性包括网络状态和TLS会话检查
3. **连接使用**
- 获取时从池中移除连接
- 用于端点之间的数据交换
- 无连接重用(一次性使用模型)
- 获取时从池中移除连接,避免重复使用冲突
- 用于端点之间的数据交换,采用高效的零拷贝传输
- 采用一次性使用模型,确保连接状态的干净性
4. **连接终止**
- 使用后关闭连接
- 正确释放资源
- 错误处理确保干净终止
- 使用后立即关闭连接,避免资源泄漏
- 正确释放系统资源,包括文件描述符和内存缓冲区
- 错误处理确保异常情况下的干净终止
### 会话管理与状态维护
1. **有状态的UDP处理**
- 将无状态的UDP协议转换为有状态的会话处理
- 智能的会话超时管理,平衡资源使用和响应性
- 会话复用机制,减少连接建立开销
2. **TCP连接复用**
- 长连接保持技术,减少连接建立/关闭的开销
- 智能的连接复用策略,最大化连接利用率
- 连接健康检查,确保复用连接的可靠性
3. **跨协议统一管理**
- 统一的连接生命周期管理,简化系统复杂性
- 协议无关的监控和统计,提供一致的观测体验
- 灵活的协议转换能力,支持异构网络环境
## 信号通信与协调机制
NodePass 的信号系统体现了分布式系统设计的精髓:
### 信号设计原理
1. **事件驱动架构**
- 基于事件的异步通信模式,避免阻塞等待
- 发布-订阅模式的信号分发,支持多订阅者
- 信号的优先级管理,确保关键事件的及时处理
2. **可靠性保障**
- 信号的持久化机制,防止关键信号丢失
- 重试和确认机制,确保信号的可靠传递
- 信号的幂等性设计,避免重复执行的副作用
3. **性能优化**
- 批量信号处理,减少系统调用开销
- 信号压缩和合并,优化网络带宽使用
- 异步信号处理,避免阻塞主处理流程
### 分布式协调
1. **一致性保证**
- 分布式锁机制,确保关键操作的原子性
- 状态同步协议,保持多节点间的数据一致性
- 冲突解决策略,处理并发操作的竞争条件
2. **故障处理**
- 节点故障检测,及时发现和隔离故障节点
- 自动故障转移,保证服务的连续性
- 状态恢复机制,支持故障后的快速恢复
### 池管理
1. **容量控制**
- `MIN_POOL_CAPACITY`:确保最小可用连接数
- `MAX_POOL_CAPACITY`:防止过度资源消耗
- 基于需求模式的动态缩放
- 最小容量保证:确保始终有足够的预热连接可用
- 最大容量限制:防止过度资源消耗,保护系统稳定性
- 基于需求模式的动态缩放,响应流量变化
2. **间隔控制**
- `MIN_POOL_INTERVAL`:连接创建尝试之间的最小时间
- `MAX_POOL_INTERVAL`:连接创建尝试之间的最大时间
- 最小间隔限制:防止连接创建风暴,保护网络资源
- 最大间隔限制:确保及时响应连接需求
- 自适应基于时间的限流以优化资源使用
3. **动态池适应**
连接池采用双重自适应机制以确保最佳性能:
**A. 容量调整**
- 池容量根据实时使用模式动态调整
- 如果连接创建成功率低(<20%),容量减少以最小化资源浪费
- 如果连接创建成功率高(>80%),容量增加以适应更高的流量
- 渐进缩放防止振荡并提供稳定性
- 遵守配置的最小和最大容量边界
- 池容量根据实时使用模式动态调整,实现智能扩缩容
- 基于连接创建成功率的反馈调节:低成功率时收缩容量减少资源浪费
- 高成功率时扩展容量以满足增长需求
- 渐进缩放防止系统震荡,提供平滑的性能过渡
- 严格遵守配置的容量边界,确保系统可控性
**B. 间隔调整**
- 创建间隔根据池空闲连接数调整
- 空闲连接较少(容量的<20%)时,间隔向最小间隔减少
- 空闲连接较多(容量的>80%)时,间隔向最大间隔增加
- 防止在低需求期间压垮网络资源
- 在池耗尽的高需求期间加速连接创建
- 创建间隔根据池空闲连接数实时调整
- 空闲率时加速连接创建,确保供应充足
- 空闲率时放缓创建节奏,避免资源浪费
- 防止在低需求期间网络资源造成压力
- 在池耗尽的高需求期间加速连接创建,保证服务质量
4. **性能优化策略**
- **预测性扩容**:基于历史使用模式预测未来需求
- **分层连接管理**:不同优先级的连接采用不同的管理策略
- **批量操作优化**:连接的批量创建和销毁,减少系统调用开销
- **连接亲和性**:基于地理位置或网络拓扑的智能连接分配
## 数据交换机制
NodePass 的数据交换机制体现了现代网络编程的最佳实践:
### 高性能数据传输
1. **零拷贝架构**
- 数据在内核空间直接传输,避免用户空间的多次拷贝
- 减少CPU开销和内存带宽占用
- 支持大文件和高吞吐量场景的优化传输
2. **异步I/O模型**
- 非阻塞的事件驱动架构,最大化并发处理能力
- 基于epoll/kqueue的高效事件循环
- 智能的读写缓冲区管理,平衡内存使用和性能
3. **流量统计与监控**
- 实时的字节级流量统计,支持精确的带宽控制
- 分协议的流量分析,便于性能调优
- 连接级别的性能指标,支持细粒度监控
- 实时跟踪TCP和UDP活动连接数便于容量规划和性能分析
### 协议优化
1. **TCP优化**
- 智能的TCP_NODELAY配置减少小包延迟
- Keep-alive机制确保长连接的可靠性
- 拥塞控制算法的自适应选择
2. **UDP优化**
- 会话式UDP处理支持有状态的数据报交换
- 智能超时管理,平衡响应性和资源使用
- 数据报去重和乱序处理
## 主控API架构
在主控模式下NodePass提供RESTful API进行集中管理
在主控模式下NodePass提供RESTful API进行集中管理,体现了云原生架构的设计理念
### 架构设计哲学
主控模式采用"统一管理,分布式执行"的架构模式,将控制平面与数据平面分离。这种设计使得系统具备了企业级的可管理性和可观测性,同时保持了数据传输的高性能。
### API组件
1. **HTTP/HTTPS服务器**
- 在配置的地址和端口上监听
- 可选的TLS加密与隧道服务器使用相同模式
- 可配置的API前缀路径
- 在配置的地址和端口上监听,支持灵活的网络部署
- 可选的TLS加密与隧道服务器使用相同安全模式,确保管理通道的安全性
- 可配置的API前缀路径支持反向代理和API网关集成
2. **实例管理**
- NodePass实例的内存注册表
- 基于UID的实例标识
- 每个实例的状态跟踪(运行中、已停止等)
- 基于内存的高性能实例注册表,支持快速查询和更新
- 基于UID的实例标识,确保全局唯一性
- 每个实例的状态跟踪(运行中、已停止等),支持实时状态监控
3. **RESTful端点**
- 实例的标准CRUD操作
- 实例控制操作(启动、停止、重启)
- 健康状态报告
- API文档的OpenAPI规范
- 标准CRUD操作遵循REST设计原则
- 实例控制操作(启动、停止、重启),支持远程生命周期管理
- 健康状态报告,提供实时的系统健康信息
- OpenAPI规范支持便于API文档生成和客户端开发
### 实例生命周期管理
1. **实例创建**
- 基于URL的配置类似于命令行
- 基于实例类型的动态初始化
- 实例创建前的参数验证
- 基于URL的配置类似于命令行,降低学习成本
- 基于实例类型的动态初始化,支持多种部署模式
- 实例创建前的参数验证,确保配置正确性
2. **实例控制**
- 启动/停止/重启能力
- 可配置超时的优雅关闭
- 终止时的资源清理
- 启动/停止/重启能力,支持远程运维操作
- 可配置超时的优雅关闭,确保数据完整性
- 终止时的资源清理,防止资源泄漏
3. **API安全**
- API连接的TLS加密选项
- 与隧道服务端相同的安全模式
- HTTPS的证书管理
- API连接的TLS加密选项,保护管理通信安全
- 与隧道服务端相同的安全模式,统一安全策略
- 证书管理支持简化HTTPS部署
## 系统架构的先进性
### 分层解耦设计
NodePass 采用了现代软件架构的分层设计原则:
1. **传输层分离**
- 控制通道与数据通道的彻底分离,避免控制信息干扰数据传输
- 不同协议的独立优化TCP和UDP各自采用最优策略
- 多路复用支持,单一隧道承载多个应用连接
2. **安全层可插拔**
- 模块化的TLS实现支持不同安全级别的灵活选择
- 证书管理的自动化,减少运维复杂性
- 密钥轮换机制,增强长期安全性
3. **管理层云原生**
- API优先的设计理念所有功能均可通过API访问
- 容器化友好的配置方式支持现代DevOps实践
- 无状态设计,便于水平扩展
### 性能优化理念
1. **延迟优化**
- 预连接池消除冷启动延迟
- 智能路由减少网络跳数
- 批量处理减少系统调用开销
2. **吞吐量优化**
- 零拷贝数据传输最大化带宽利用
- 并发连接管理支持高并发场景
- 自适应缓冲区大小优化内存使用
3. **资源优化**
- 智能连接复用减少资源消耗
- 动态容量调整适应负载变化
- 垃圾回收优化减少暂停时间
### 可靠性保障
1. **故障隔离**
- 连接级别的故障隔离,单点故障不影响整体服务
- 自动重连机制,透明处理网络波动
- 优雅降级策略,在资源不足时保证核心功能
2. **状态管理**
- 分布式状态同步,确保多实例间的一致性
- 持久化关键状态,支持故障恢复
- 版本化配置管理,支持回滚操作
## NodePass 架构创新总结
### 技术创新点
1. **连接池预热技术**
- 革命性地消除了网络隧道的冷启动延迟
- 将传统的"按需建连"转变为"预热待用"
- 显著提升了首次连接的响应速度
2. **分离式架构设计**
- 控制平面与数据平面的彻底分离
- 信令通道与数据通道的独立优化
- 实现了高性能与高可管理性的完美结合
3. **自适应资源管理**
- 基于实时负载的智能扩缩容
- 预测性的资源分配策略
- 故障自愈的弹性系统设计
### 行业领先优势
1. **性能优势**
- 零延迟连接建立,业界领先的响应速度
- 高并发处理能力,支持企业级应用场景
- 智能路由优化,最短路径数据传输
2. **可靠性优势**
- 多层次的故障隔离和恢复机制
- 分布式架构的高可用保障
- 优雅降级的服务质量保证
3. **安全性优势**
- 端到端的加密保护
- 多层次的安全防护体系
- 符合企业级安全标准
### 适用场景与价值
1. **企业级应用**
- 微服务架构的服务网格
- 混合云环境的网络连接
- 跨地域的服务访问
2. **开发运维**
- 本地开发环境的快速搭建
- 测试环境的灵活配置
- 生产环境的流量管理
3. **网络优化**
- 网络延迟的大幅降低
- 带宽利用率的显著提升
- 连接稳定性的可靠保障
NodePass 通过其创新的架构设计和技术实现,为现代网络应用提供了一个高性能、高可靠、高安全的隧道解决方案,代表了网络隧道技术的发展方向。
## 下一步

View File

@@ -67,10 +67,10 @@ docker run -d --name nodepass-server -p 10101:10101 -p 8080:8080 \
# 客户端模式运行
docker run -d --name nodepass-client \
-e MIN_POOL_CAPACITY=32 \
-e MAX_POOL_CAPACITY=512 \
-e NP_MIN_POOL_INTERVAL=200ms \
-e NP_SEMAPHORE_LIMIT=512 \
-p 8080:8080 \
ghcr.io/yosebyte/nodepass client://nodepass-server:10101/127.0.0.1:8080
ghcr.io/yosebyte/nodepass "client://nodepass-server:10101/127.0.0.1:8080?min=32&max=512"
```
### 方式5使用管理脚本(仅限Linux)

View File

@@ -69,12 +69,13 @@
- 在Linux/macOS上使用`ulimit -n`检查文件描述符限制
3. **超时配置**
- 如果使用具有慢响应时间的UDP调整`UDP_READ_TIMEOUT`
- 考虑在操作系统级别调整TCP keepalive设置以支持长寿命连接
- 如果使用具有慢响应时间的UDP调整`NP_UDP_DIAL_TIMEOUT`
- 增加URL中的`read`参数用于长时间传输默认10m
- 考虑为不稳定网络条件调整`NP_TCP_DIAL_TIMEOUT`
4. **服务器过载**
- 检查服务器日志中的连接过载迹象
- 调整`MAX_POOL_CAPACITY``SEMAPHORE_LIMIT`以处理负载
- 调整`max`参数和`NP_SEMAPHORE_LIMIT`以处理负载
- 考虑用多个NodePass实例水平扩展
## 证书问题
@@ -135,9 +136,9 @@
**可能的原因和解决方案**
1. **池配置**
- 增加`MIN_POOL_CAPACITY`以准备更多连接
- 增加`min`参数以准备更多连接
- 减少`MIN_POOL_INTERVAL`以更快创建连接
- 如果连接队列堆积,调整`SEMAPHORE_LIMIT`
- 如果连接队列堆积,调整`NP_SEMAPHORE_LIMIT`
2. **网络路径**
- 检查网络拥塞或高延迟链路
@@ -162,7 +163,7 @@
1. **池抖动**
- 如果池不断创建和销毁连接,调整时间
- 增加`MIN_POOL_INTERVAL`以减少连接创建频率
- 为`MIN_POOL_CAPACITY`和`MAX_POOL_CAPACITY`找到良好平衡
- 为`min`和`max`连接池参数找到良好平衡
2. **过度日志记录**
- 在生产环境中将日志级别从debug降低到info或warn
@@ -184,12 +185,12 @@
**可能的原因和解决方案**
1. **连接泄漏**
- 确保`SHUTDOWN_TIMEOUT`足够长以正确关闭连接
- 确保`NP_SHUTDOWN_TIMEOUT`足够长以正确关闭连接
- 检查自定义脚本或管理代码中的错误处理
- 使用系统工具如`netstat`监控连接数量
2. **池大小问题**
- 如果`MAX_POOL_CAPACITY`非常大,内存使用会更高
- 如果`max`参数非常大,内存使用会更高
- 监控实际池使用情况与配置容量
- 根据实际并发连接需求调整容量
@@ -210,7 +211,8 @@
- 默认8192字节对某些应用程序可能太小
2. **超时问题**
- 如果响应较慢,增加`UDP_READ_TIMEOUT`
- 如果响应较慢,增加`NP_UDP_DIAL_TIMEOUT`
- 调整`read`参数以获得更长的会话超时
- 对于响应时间变化的应用程序,找到最佳平衡点
3. **高数据包率**
@@ -277,6 +279,49 @@
- 确保NodePass主控具有创建进程的足够权限
- 检查任何引用的证书或密钥的文件系统权限
## 数据恢复
### 主控状态文件损坏
**症状**:主控模式启动失败,显示状态文件损坏错误,或实例数据丢失。
**可能的原因和解决方案**
1. **使用自动备份文件恢复**
- NodePass每小时自动创建备份文件 `nodepass.gob.backup`
- 停止NodePass主控服务
- 将备份文件复制为主文件:`cp nodepass.gob.backup nodepass.gob`
- 重新启动主控服务
2. **手动状态文件恢复**
```bash
# 停止NodePass服务
pkill nodepass
# 备份损坏的文件(可选)
mv nodepass.gob nodepass.gob.corrupted
# 使用备份文件
cp nodepass.gob.backup nodepass.gob
# 重新启动服务
nodepass "master://0.0.0.0:9090?log=info"
```
3. **备份文件也损坏时**
- 删除损坏的状态文件:`rm nodepass.gob*`
- 重新启动主控,将创建新的状态文件
- 需要重新配置所有实例和设置
4. **预防性备份建议**
- 定期备份 `nodepass.gob` 到外部存储
- 调整备份频率:设置环境变量 `export NP_RELOAD_INTERVAL=30m`
- 监控状态文件大小,异常增长可能表示问题
**最佳实践**
- 在生产环境中,建议将 `nodepass.gob` 定期备份到不同的存储位置
- 使用配置管理工具保存实例配置的文本形式备份
## 下一步
如果您遇到本指南未涵盖的问题:

View File

@@ -7,7 +7,7 @@ NodePass创建一个带有未加密TCP控制通道的隧道并为数据交换
NodePass命令的一般语法是
```bash
nodepass "<core>://<tunnel_addr>/<target_addr>?log=<level>&tls=<mode>&crt=<cert_file>&key=<key_file>&min=<min_pool>&max=<max_pool>"
nodepass "<core>://<tunnel_addr>/<target_addr>?log=<level>&tls=<mode>&crt=<cert_file>&key=<key_file>&min=<min_pool>&max=<max_pool>&mode=<run_mode>&read=<timeout>&rate=<mbps>&proxy=<mode>"
```
其中:
@@ -19,8 +19,12 @@ nodepass "<core>://<tunnel_addr>/<target_addr>?log=<level>&tls=<mode>&crt=<cert_
通用查询参数:
- `log=<level>`:日志详细级别(`none``debug``info``warn``error``event`
- `min=<min_pool>`最小连接池容量默认64仅适用于client模式
- `max=<max_pool>`最大连接池容量默认1024仅适用于client模式
- `min=<min_pool>`最小连接池容量默认64由客户端设置
- `max=<max_pool>`最大连接池容量默认1024服务端设置并传递给客户端
- `mode=<run_mode>`:运行模式控制(`0``1``2`- 控制操作行为
- `read=<timeout>`数据读取超时时间默认10m支持时间单位如30s、5m、30m等
- `rate=<mbps>`带宽速率限制单位Mbps默认0表示无限制
- `proxy=<mode>`PROXY协议支持默认`0``1`启用PROXY协议v1头部传输
TLS相关参数仅适用于server/master模式
- `tls=<mode>`数据通道的TLS安全级别`0`、`1`或`2`
@@ -36,7 +40,7 @@ NodePass提供三种互补的运行模式以适应各种部署场景。
服务端模式建立隧道控制通道,并支持双向数据流转发。
```bash
nodepass "server://<tunnel_addr>/<target_addr>?log=<level>&tls=<mode>&crt=<cert_file>&key=<key_file>"
nodepass "server://<tunnel_addr>/<target_addr>?log=<level>&tls=<mode>&crt=<cert_file>&key=<key_file>&max=<max_pool>&mode=<run_mode>&read=<timeout>&rate=<mbps>&proxy=<mode>"
```
#### 参数
@@ -50,18 +54,31 @@ nodepass "server://<tunnel_addr>/<target_addr>?log=<level>&tls=<mode>&crt=<cert_
- `2`:自定义证书(需要`crt``key`参数)
- `crt`:证书文件路径(当`tls=2`时必需)
- `key`:私钥文件路径(当`tls=2`时必需)
- `max`最大连接池容量默认1024
- `mode`:数据流方向的运行模式控制
- `0`:自动检测(默认)- 首先尝试本地绑定,如果不可用则回退
- `1`:强制反向模式 - 服务器本地绑定目标地址并接收流量
- `2`:强制正向模式 - 服务器连接到远程目标地址
- `read`数据读取超时时间默认10m支持时间单位如30s、5m、30m等
- `rate`带宽速率限制单位Mbps默认0表示无限制
- `proxy`PROXY协议支持默认`0``1`在数据传输前启用PROXY协议v1头部
#### 服务端模式工作原理
服务端模式NodePass支持两种数据流方向
服务端模式通过`mode`参数支持自动模式检测或强制模式选择
**模式一:服务端接收流量**target_addr为本地地址
**模式0自动检测**(默认
- 首先尝试本地绑定`target_addr`
- 如果成功,以反向模式运行(服务端接收流量)
- 如果绑定失败,以正向模式运行(服务端发送流量)
**模式1反向模式**(服务端接收流量)
1.`tunnel_addr`上监听TCP隧道连接控制通道
2.`target_addr`上监听传入的TCP和UDP流量
3.`target_addr`收到连接时,通过控制通道向客户端发送信号
2. 绑定并`target_addr`上监听传入的TCP和UDP流量
3.`target_addr`收到连接时,通过控制通道向已连接的客户端发送信号
4. 为每个连接创建具有指定TLS加密级别的数据通道
**模式二:服务端发送流量**target_addr为远程地址
**模式2正向模式**(服务端发送流量
1.`tunnel_addr`上监听TCP隧道连接控制通道
2. 等待客户端在其本地监听,并通过隧道接收连接
3. 建立到远程`target_addr`的连接并转发数据
@@ -69,14 +86,14 @@ nodepass "server://<tunnel_addr>/<target_addr>?log=<level>&tls=<mode>&crt=<cert_
#### 示例
```bash
# 数据通道无TLS加密 - 服务端接收模式
# 自动模式检测无TLS加密
nodepass "server://10.1.0.1:10101/10.1.0.1:8080?log=debug&tls=0"
# 自签名证书(自动生成) - 服务端发送模式
nodepass "server://10.1.0.1:10101/192.168.1.100:8080?log=debug&tls=1"
# 强制反向模式,自签名证书
nodepass "server://10.1.0.1:10101/10.1.0.1:8080?log=debug&tls=1&mode=1"
# 自定义域名证书 - 服务端接收模式
nodepass "server://10.1.0.1:10101/10.1.0.1:8080?log=debug&tls=2&crt=/path/to/cert.pem&key=/path/to/key.pem"
# 强制正向模式,自定义证书
nodepass "server://10.1.0.1:10101/192.168.1.100:8080?log=debug&tls=2&mode=2&crt=/path/to/cert.pem&key=/path/to/key.pem"
```
### 客户端模式
@@ -84,7 +101,7 @@ nodepass "server://10.1.0.1:10101/10.1.0.1:8080?log=debug&tls=2&crt=/path/to/cer
客户端模式连接到NodePass服务端并支持双向数据流转发。
```bash
nodepass "client://<tunnel_addr>/<target_addr>?log=<level>&min=<min_pool>&max=<max_pool>"
nodepass "client://<tunnel_addr>/<target_addr>?log=<level>&min=<min_pool>&mode=<run_mode>&read=<timeout>&rate=<mbps>&proxy=<mode>"
```
#### 参数
@@ -93,47 +110,62 @@ nodepass "client://<tunnel_addr>/<target_addr>?log=<level>&min=<min_pool>&max=<m
- `target_addr`:业务数据的目标地址,支持双向数据流模式(例如, 127.0.0.1:8080)
- `log`:日志级别(debug, info, warn, error, event)
- `min`最小连接池容量默认64
- `max`最大连接池容量默认1024
- `mode`:客户端行为的运行模式控制
- `0`:自动检测(默认)- 首先尝试本地绑定,如果失败则回退到握手模式
- `1`:强制单端转发模式 - 带连接池的本地代理
- `2`:强制双端握手模式 - 需要服务器协调
- `read`数据读取超时时间默认10m支持时间单位如30s、5m、30m等
- `rate`带宽速率限制单位Mbps默认0表示无限制
- `proxy`PROXY协议支持默认`0``1`在数据传输前启用PROXY协议v1头部
#### 客户端模式工作原理
客户端模式NodePass支持三种操作模式
客户端模式通过`mode`参数支持自动模式检测或强制模式选择
**模式一:客户端单端转发**(当隧道地址为本地地址时
**模式0自动检测**(默认
- 首先尝试本地绑定`tunnel_addr`
- 如果成功,以单端转发模式运行
- 如果绑定失败,以双端握手模式运行
**模式1单端转发模式**
1. 在本地隧道地址上监听TCP和UDP连接
2. 使用连接池技术预建立到目标地址的TCP连接消除连接延迟
3. 直接将接收到的流量转发到目标地址,实现高性能转发
4. 无需与服务端握手,实现点对点的直接转发
5. 适用于本地代理和简单转发场景
**模式二:客户端接收流量**(当服务端发送流量时)
1. 连接到服务端的TCP隧道端点控制通道
2. 在本地监听端口,等待通过隧道传入的连接
3. 建立到本地`target_addr`的连接并转发数据
**模式2双端握手模式**
- **客户端接收流量**(当服务端发送流量时
1. 连接到服务端的TCP隧道端点控制通道
2. 在本地监听端口,等待通过隧道传入的连接
3. 建立到本地`target_addr`的连接并转发数据
**模式三:客户端发送流量**(当服务端接收流量时)
1. 连接到服务端的TCP隧道端点控制通道
2. 通过控制通道监听来自服务端的信号
3. 当收到信号时使用服务端指定的TLS安全级别建立数据连接
4. 建立到`target_addr`本地连接并转发流量
- **客户端发送流量**(当服务端接收流量时)
1. 连接到服务端的TCP隧道端点控制通道
2. 通过控制通道监听来自服务端的信号
3. 当收到信号时使用服务端指定的TLS安全级别建立数据连接
4. 建立到`target_addr`的连接并转发流量
#### 示例
```bash
# 客户端单端转发模式 - 本地代理监听1080端口转发到目标服务器
nodepass client://127.0.0.1:1080/target.example.com:8080?log=debug
# 自动模式检测 - 本地代理监听1080端口转发到目标服务器
nodepass "client://127.0.0.1:1080/target.example.com:8080?log=debug"
# 连接到NodePass服务端并采用其TLS安全策略 - 客户端发送模式
nodepass client://server.example.com:10101/127.0.0.1:8080
# 强制单端转发模式 - 高性能本地代理
nodepass "client://127.0.0.1:1080/target.example.com:8080?mode=1&log=debug"
# 使用调试日志连接 - 客户端接收模式
nodepass client://server.example.com:10101/192.168.1.100:8080?log=debug
# 强制双端握手模式 - 连接到NodePass服务端并采用其TLS安全策略
nodepass "client://server.example.com:10101/127.0.0.1:8080?mode=2"
# 自定义连接池容量 - 高性能配置
nodepass "client://server.example.com:10101/127.0.0.1:8080?min=128&max=4096"
# 使用调试日志和自定义连接池容量连接
nodepass "client://server.example.com:10101/192.168.1.100:8080?log=debug&min=128"
# 强制模式的资源受限配置
nodepass "client://server.example.com:10101/127.0.0.1:8080?mode=2&min=16&log=info"
# 资源受限配置 - 小型连接池
nodepass "client://server.example.com:10101/127.0.0.1:8080?min=16&max=512&log=info"
nodepass "client://server.example.com:10101/127.0.0.1:8080?min=16&log=info"
```
### 主控模式 (API)
@@ -168,11 +200,16 @@ nodepass "master://<api_addr>[<prefix>]?log=<level>&tls=<mode>&crt=<cert_file>&k
所有端点都是相对于配置的前缀(默认:`/api`
**受保护的端点需要API Key**
- `GET {prefix}/v1/instances` - 列出所有实例
- `POST {prefix}/v1/instances` - 创建新实例JSON请求体: `{"url": "server://0.0.0.0:10101/0.0.0.0:8080"}`
- `GET {prefix}/v1/instances/{id}` - 获取实例详情
- `PATCH {prefix}/v1/instances/{id}` - 更新实例JSON请求体: `{"action": "start|stop|restart"}`
- `DELETE {prefix}/v1/instances/{id}` - 删除实例
- `GET {prefix}/v1/events` - 服务端发送事件流SSE
- `GET {prefix}/v1/info` - 获取系统信息
**公共端点无需API Key**
- `GET {prefix}/v1/openapi.json` - OpenAPI规范
- `GET {prefix}/v1/docs` - Swagger UI文档
@@ -196,28 +233,53 @@ nodepass "master://0.0.0.0:9090?log=info&tls=2&crt=/path/to/cert.pem&key=/path/t
### 通过API创建和管理
您可以使用标准HTTP请求通过主控API管理NodePass实例
NodePass主控模式提供RESTful API来管理实例所有API请求都需要使用API Key进行身份验证。
#### API Key获取
启动主控模式后系统会自动生成API Key并在日志中显示
```bash
# 通过API创建和管理实例使用默认前缀
# 启动主控模式
nodepass "master://0.0.0.0:9090?log=info"
# 日志输出中会显示:
# INFO: API Key created: abc123def456...
```
#### API请求示例
所有受保护的API端点都需要在请求头中包含`X-API-Key`
```bash
# 获取API Key (假设为: abc123def456789)
# 通过API创建实例使用默认前缀
curl -X POST http://localhost:9090/api/v1/instances \
-H "Content-Type: application/json" \
-H "X-API-Key: abc123def456789" \
-d '{"url":"server://0.0.0.0:10101/0.0.0.0:8080?tls=1"}'
# 使用自定义前缀
curl -X POST http://localhost:9090/admin/v1/instances \
-H "Content-Type: application/json" \
-H "X-API-Key: abc123def456789" \
-d '{"url":"server://0.0.0.0:10101/0.0.0.0:8080?tls=1"}'
# 列出所有运行实例
curl http://localhost:9090/api/v1/instances
curl http://localhost:9090/api/v1/instances \
-H "X-API-Key: abc123def456789"
# 控制实例用实际实例ID替换{id}
curl -X PUT http://localhost:9090/api/v1/instances/{id} \
-H "Content-Type: application/json" \
curl -X PATCH http://localhost:9090/api/v1/instances/{id} \
-H "X-API-Key: abc123def456789" \
-d '{"action":"restart"}'
```
#### 公共端点
以下端点不需要API Key身份验证
- `GET {prefix}/v1/openapi.json` - OpenAPI规范
- `GET {prefix}/v1/docs` - Swagger UI文档
## 双向数据流说明
NodePass支持灵活的双向数据流配置
@@ -228,12 +290,12 @@ NodePass支持灵活的双向数据流配置
- **无需服务端**:独立运行,不依赖服务端握手
- **使用场景**:本地代理、简单端口转发、测试环境、高性能转发
### 服务端接收模式 (dataFlow: "-")
### 服务端接收模式
- **服务端**在target_addr监听传入连接通过隧道转发到客户端
- **客户端**连接到本地target_addr提供服务
- **使用场景**:将内网服务暴露给外网访问
### 服务端发送模式 (dataFlow: "+")
### 服务端发送模式
- **服务端**连接到远程target_addr获取数据通过隧道发送到客户端
- **客户端**:在本地监听,接收来自服务端的连接
- **使用场景**:通过隧道代理访问远程服务
@@ -270,10 +332,13 @@ NodePass使用隧道密钥来验证客户端和服务端之间的连接。密钥
1. **客户端连接**:客户端连接到服务端的隧道地址
2. **密钥验证**客户端发送XOR加密的隧道密钥
3. **服务端验证**:服务端解密并验证密钥是否匹配
4. **配置同步**:验证成功后,服务端发送隧道配置信息包括TLS模式
4. **配置同步**:验证成功后,服务端发送隧道配置信息包括
- 数据流向模式
- 最大连接池容量
- TLS安全模式
5. **连接确立**:握手完成,开始数据传输
这种设计确保了只有拥有正确密钥的客户端才能建立隧道连接。
这种设计确保了只有拥有正确密钥的客户端才能建立隧道连接,同时允许服务端统一管理连接池容量
## 下一步

View File

@@ -1,10 +1,10 @@
module github.com/yosebyte/nodepass
go 1.24.3
go 1.25.0
require (
github.com/NodePassProject/cert v1.0.0
github.com/NodePassProject/conn v1.0.3
github.com/NodePassProject/logs v1.0.2
github.com/NodePassProject/pool v1.0.18
github.com/NodePassProject/cert v1.0.1
github.com/NodePassProject/conn v1.0.10
github.com/NodePassProject/logs v1.0.3
github.com/NodePassProject/pool v1.0.24
)

View File

@@ -1,8 +1,8 @@
github.com/NodePassProject/cert v1.0.0 h1:cBNNvR+ja22AgNlUmeGWLcCM1vmnLTqpbCQ4Hdn5was=
github.com/NodePassProject/cert v1.0.0/go.mod h1:4EJDS3GozJ74dtICJ/xcq42WKKvF0tiTM9/M7Q9NF9c=
github.com/NodePassProject/conn v1.0.3 h1:yw9rimaOMvQYF2kzMD9a5MfvJ+US7AOFinyx+QbdX78=
github.com/NodePassProject/conn v1.0.3/go.mod h1:mWe3Rylunp6Sx4v6pkSGgYZe2R+I/O+7nZ2od0yJ3aQ=
github.com/NodePassProject/logs v1.0.2 h1:z4b+jAMHtVJoBb2tsD58gVa/9ftd1Dy6DXHrS4IgafM=
github.com/NodePassProject/logs v1.0.2/go.mod h1:ocFTMNXBTnQFJFAhF+qobAzu7+y+wYPik7D+a1jPfis=
github.com/NodePassProject/pool v1.0.18 h1:urZeotSjcVdzoZDBDPOGNt/NtH1Ngxlj60ByF2ZsvY0=
github.com/NodePassProject/pool v1.0.18/go.mod h1:kdRAEDK45j/+iHH4kRTpXt/wI28NIguJ13n/5NDXxkw=
github.com/NodePassProject/cert v1.0.1 h1:BDy2tTOudy6yk7hvcmScAJMw4NrpCdSCsbuu7hHsIuw=
github.com/NodePassProject/cert v1.0.1/go.mod h1:wP7joOJeQAIlIuOUmhHPwMExjuwGa4XApMWQYChGSrk=
github.com/NodePassProject/conn v1.0.10 h1:YVV/PG76k8axBZ2qEa7yOP9hrOmKGWuIDFYzCM/wMSE=
github.com/NodePassProject/conn v1.0.10/go.mod h1:xfQ7ZLUxrtdLsljGHYYCToW+Hdg6DAbmL1Cs94n5h6E=
github.com/NodePassProject/logs v1.0.3 h1:CDUZVQ477vmmFQHazrQCWM0gJPNINm0C2N3FzC4jVyw=
github.com/NodePassProject/logs v1.0.3/go.mod h1:TwtPXOzLtb8iH+fdduQjEEywICXivsM39cy9AinMSks=
github.com/NodePassProject/pool v1.0.24 h1:8DSgZ2dxnzYXgplp9ZwZlpwFiKIWU4JYW1glUm+hq/4=
github.com/NodePassProject/pool v1.0.24/go.mod h1:joQFk1oocg56QpJ1QK/2g5Jv/AyqYUQgPXMG1gWe8iA=

View File

@@ -5,10 +5,13 @@ import (
"bufio"
"bytes"
"context"
"io"
"net"
"net/url"
"os"
"os/signal"
"strconv"
"strings"
"syscall"
"time"
@@ -28,36 +31,48 @@ func NewClient(parsedURL *url.URL, logger *logs.Logger) *Client {
client := &Client{
Common: Common{
logger: logger,
semaphore: make(chan struct{}, semaphoreLimit),
signalChan: make(chan string, semaphoreLimit),
},
tunnelName: parsedURL.Hostname(),
}
// 初始化公共字段
client.getTunnelKey(parsedURL)
client.getPoolCapacity(parsedURL)
client.getAddress(parsedURL)
client.initConfig(parsedURL)
client.initRateLimiter()
return client
}
// Run 管理客户端生命周期
func (c *Client) Run() {
c.logger.Info("Client started: %v@%v/%v", c.tunnelKey, c.tunnelAddr, c.targetTCPAddr)
logInfo := func(prefix string) {
c.logger.Info("%v: %v@%v/%v?min=%v&mode=%v&read=%v&rate=%v&slot=%v",
prefix, c.tunnelKey, c.tunnelTCPAddr, c.targetTCPAddr,
c.minPoolCapacity, c.runMode, c.readTimeout, c.rateLimit/125000, c.slotLimit)
}
logInfo("Client started")
ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
// 启动客户端服务并处理重启
go func() {
for {
time.Sleep(serviceCooldown)
if err := c.start(); err != nil {
if ctx.Err() != nil {
return
}
// 启动客户端
if err := c.start(); err != nil && err != io.EOF {
c.logger.Error("Client error: %v", err)
// 重启客户端
c.stop()
c.logger.Info("Client restarted: %v@%v/%v", c.tunnelKey, c.tunnelAddr, c.targetTCPAddr)
select {
case <-ctx.Done():
return
case <-time.After(serviceCooldown):
}
logInfo("Client restarting")
}
}
}()
// 监听系统信号以优雅关闭
ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
<-ctx.Done()
stop()
@@ -76,58 +91,62 @@ func (c *Client) start() error {
// 初始化上下文
c.initContext()
// 通过是否监听成功判断单端转发或双端握手
if err := c.initTunnelListener(); err == nil {
// 初始化连接池
c.tunnelPool = pool.NewClientPool(
c.minPoolCapacity,
c.maxPoolCapacity,
minPoolInterval,
maxPoolInterval,
reportInterval,
c.tlsCode,
true,
c.tunnelName,
func() (net.Conn, error) {
return net.DialTCP("tcp", nil, c.targetTCPAddr)
})
go c.tunnelPool.ClientManager()
return c.singleLoop()
} else {
if err := c.tunnelHandshake(); err != nil {
// 运行模式判断
switch c.runMode {
case "1": // 单端模式
if err := c.initTunnelListener(); err != nil {
return err
}
// 初始化连接池
c.tunnelPool = pool.NewClientPool(
c.minPoolCapacity,
c.maxPoolCapacity,
minPoolInterval,
maxPoolInterval,
reportInterval,
c.tlsCode,
false,
c.tunnelName,
func() (net.Conn, error) {
return net.DialTCP("tcp", nil, c.tunnelTCPAddr)
})
go c.tunnelPool.ClientManager()
if c.dataFlow == "+" {
// 初始化目标监听器
if err := c.initTargetListener(); err != nil {
return err
}
go c.commonLoop()
return c.singleStart()
case "2": // 双端模式
return c.commonStart()
default: // 自动判断
if err := c.initTunnelListener(); err == nil {
c.runMode = "1"
return c.singleStart()
} else {
c.runMode = "2"
return c.commonStart()
}
return c.commonControl()
}
}
// singleStart launches the client in single-end forwarding mode: the client
// listens locally and forwards directly to the target, delegating the whole
// accept/forward lifecycle to singleControl (no server handshake is involved).
func (c *Client) singleStart() error {
	return c.singleControl()
}
// commonStart launches the client in dual-end handshake mode: it performs the
// handshake with the tunnel server, starts the client-side connection pool
// manager, optionally starts the local target listener (when the server is the
// traffic sender), and finally enters the common control loop.
func (c *Client) commonStart() error {
	// Handshake with the tunnel server; on success this populates the
	// server-assigned settings (data-flow direction, max pool capacity,
	// TLS code) used below.
	if err := c.tunnelHandshake(); err != nil {
		return err
	}
	// Initialize the connection pool that dials the tunnel endpoint; the
	// factory uses DialTimeout so pool creation cannot block indefinitely.
	c.tunnelPool = pool.NewClientPool(
		c.minPoolCapacity,
		c.maxPoolCapacity,
		minPoolInterval,
		maxPoolInterval,
		reportInterval,
		c.tlsCode,
		c.tunnelName,
		func() (net.Conn, error) {
			return net.DialTimeout("tcp", c.tunnelTCPAddr.String(), tcpDialTimeout)
		})
	go c.tunnelPool.ClientManager()
	if c.dataFlow == "+" {
		// "+" means the server sends traffic, so the client must
		// listen on the local target address to receive it.
		if err := c.initTargetListener(); err != nil {
			return err
		}
		go c.commonLoop()
	}
	return c.commonControl()
}
// tunnelHandshake 与隧道服务端进行握手
func (c *Client) tunnelHandshake() error {
// 建立隧道TCP连接
@@ -137,7 +156,7 @@ func (c *Client) tunnelHandshake() error {
}
c.tunnelTCPConn = tunnelTCPConn.(*net.TCPConn)
c.bufReader = bufio.NewReader(&conn.TimeoutReader{Conn: c.tunnelTCPConn, Timeout: tcpReadTimeout})
c.bufReader = bufio.NewReader(&conn.TimeoutReader{Conn: c.tunnelTCPConn, Timeout: 2 * reportInterval})
c.tunnelTCPConn.SetKeepAlive(true)
c.tunnelTCPConn.SetKeepAlivePeriod(reportInterval)
@@ -153,17 +172,25 @@ func (c *Client) tunnelHandshake() error {
return err
}
tunnelSignal := string(c.xor(bytes.TrimSuffix(rawTunnelURL, []byte{'\n'})))
// 解析隧道URL
tunnelURL, err := url.Parse(tunnelSignal)
tunnelURL, err := url.Parse(string(c.xor(bytes.TrimSuffix(rawTunnelURL, []byte{'\n'}))))
if err != nil {
return err
}
c.dataFlow = tunnelURL.Host
// 更新客户端配置
if tunnelURL.Host == "" || tunnelURL.Path == "" || tunnelURL.Fragment == "" {
return net.UnknownNetworkError(tunnelURL.String())
}
if max, err := strconv.Atoi(tunnelURL.Host); err != nil {
return err
} else {
c.maxPoolCapacity = max
}
c.dataFlow = strings.TrimPrefix(tunnelURL.Path, "/")
c.tlsCode = tunnelURL.Fragment
c.logger.Info("Tunnel signal <- : %v <- %v", tunnelSignal, c.tunnelTCPConn.RemoteAddr())
c.logger.Info("Tunnel signal <- : %v <- %v", tunnelURL.String(), c.tunnelTCPConn.RemoteAddr())
c.logger.Info("Tunnel handshaked: %v <-> %v", c.tunnelTCPConn.LocalAddr(), c.tunnelTCPConn.RemoteAddr())
return nil
}

File diff suppressed because it is too large Load Diff

View File

@@ -31,11 +31,13 @@ import (
// 常量定义
const (
openAPIVersion = "v1" // OpenAPI版本
stateFilePath = "gob" // 实例状态持久化文件路径
stateFileName = "nodepass.gob" // 实例状态持久化文件名
sseRetryTime = 3000 // 重试间隔时间(毫秒)
apiKeyID = "********" // API Key的特殊ID
openAPIVersion = "v1" // OpenAPI版本
stateFilePath = "gob" // 实例状态持久化文件路径
stateFileName = "nodepass.gob" // 实例状态持久化文件名
sseRetryTime = 3000 // 重试间隔时间(毫秒)
apiKeyID = "********" // API Key的特殊ID
tcpingSemLimit = 10 // TCPing最大并发数
baseDuration = 100 * time.Millisecond // 基准持续时间
)
// Swagger UI HTML模板
@@ -76,26 +78,36 @@ type Master struct {
stateMu sync.Mutex // 持久化文件写入互斥锁
subscribers sync.Map // SSE订阅者映射表
notifyChannel chan *InstanceEvent // 事件通知通道
tcpingSem chan struct{} // TCPing并发控制
startTime time.Time // 启动时间
backupDone chan struct{} // 备份停止信号
}
// Instance 实例信息
type Instance struct {
ID string `json:"id"` // 实例ID
Alias string `json:"alias"` // 实例别名
Type string `json:"type"` // 实例类型
Status string `json:"status"` // 实例状态
URL string `json:"url"` // 实例URL
Restart bool `json:"restart"` // 是否自启动
TCPRX uint64 `json:"tcprx"` // TCP接收字节数
TCPTX uint64 `json:"tcptx"` // TCP发送字节数
UDPRX uint64 `json:"udprx"` // UDP接收字节
UDPTX uint64 `json:"udptx"` // UDP发送字节
Pool int64 `json:"pool"` // 健康检查池连接数
Ping int64 `json:"ping"` // 健康检查端内延迟
cmd *exec.Cmd `json:"-" gob:"-"` // 命令对象(不序列化)
stopped chan struct{} `json:"-" gob:"-"` // 停止信号通道(不序列化)
cancelFunc context.CancelFunc `json:"-" gob:"-"` // 取消函数(不序列化)
ID string `json:"id"` // 实例ID
Alias string `json:"alias"` // 实例别名
Type string `json:"type"` // 实例类型
Status string `json:"status"` // 实例状态
URL string `json:"url"` // 实例URL
Restart bool `json:"restart"` // 是否自启动
Mode int32 `json:"mode"` // 实例模式
Ping int32 `json:"ping"` // 端内延迟
Pool int32 `json:"pool"` // 池连接
TCPS int32 `json:"tcps"` // TCP连接
UDPS int32 `json:"udps"` // UDP连接数
TCPRX uint64 `json:"tcprx"` // TCP接收字节数
TCPTX uint64 `json:"tcptx"` // TCP发送字节数
UDPRX uint64 `json:"udprx"` // UDP接收字节数
UDPTX uint64 `json:"udptx"` // UDP发送字节数
TCPRXBase uint64 `json:"-" gob:"-"` // TCP接收字节数基线不序列化
TCPTXBase uint64 `json:"-" gob:"-"` // TCP发送字节数基线不序列化
UDPRXBase uint64 `json:"-" gob:"-"` // UDP接收字节数基线不序列化
UDPTXBase uint64 `json:"-" gob:"-"` // UDP发送字节数基线不序列化
cmd *exec.Cmd `json:"-" gob:"-"` // 命令对象(不序列化)
stopped chan struct{} `json:"-" gob:"-"` // 停止信号通道(不序列化)
cancelFunc context.CancelFunc `json:"-" gob:"-"` // 取消函数(不序列化)
lastCheckPoint time.Time `json:"-" gob:"-"` // 上次检查点时间(不序列化)
}
// InstanceEvent 实例事件信息
@@ -106,25 +118,96 @@ type InstanceEvent struct {
Logs string `json:"logs,omitempty"` // 日志内容仅当Type为log时有效
}
// SystemInfo 系统信息结构体
type SystemInfo struct {
CPU int `json:"cpu"` // CPU使用率 (%)
MemTotal uint64 `json:"mem_total"` // 内存容量字节数
MemFree uint64 `json:"mem_free"` // 内存可用字节数
SwapTotal uint64 `json:"swap_total"` // 交换区容量字节数
SwapFree uint64 `json:"swap_free"` // 交换区可用字节数
NetRX uint64 `json:"netrx"` // 网络接收字节数
NetTX uint64 `json:"nettx"` // 网络发送字节数
DiskR uint64 `json:"diskr"` // 磁盘读取字节数
DiskW uint64 `json:"diskw"` // 磁盘写入字节数
SysUp uint64 `json:"sysup"` // 系统运行时间(秒)
}
// TCPingResult TCPing结果结构体
type TCPingResult struct {
Target string `json:"target"`
Connected bool `json:"connected"`
Latency int64 `json:"latency"`
Error *string `json:"error"`
}
// handleTCPing 处理TCPing请求
func (m *Master) handleTCPing(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodGet {
httpError(w, "Method not allowed", http.StatusMethodNotAllowed)
return
}
target := r.URL.Query().Get("target")
if target == "" {
httpError(w, "Target address required", http.StatusBadRequest)
return
}
// 执行TCPing
result := m.performTCPing(target)
writeJSON(w, http.StatusOK, result)
}
// performTCPing 执行单次TCPing
func (m *Master) performTCPing(target string) *TCPingResult {
result := &TCPingResult{
Target: target,
Connected: false,
Latency: 0,
Error: nil,
}
// 并发控制
select {
case m.tcpingSem <- struct{}{}:
defer func() { <-m.tcpingSem }()
case <-time.After(time.Second):
errMsg := "too many requests"
result.Error = &errMsg
return result
}
start := time.Now()
conn, err := net.DialTimeout("tcp", target, reportInterval)
if err != nil {
errMsg := err.Error()
result.Error = &errMsg
return result
}
result.Connected = true
result.Latency = time.Since(start).Milliseconds()
conn.Close()
return result
}
// InstanceLogWriter 实例日志写入器
type InstanceLogWriter struct {
instanceID string // 实例ID
instance *Instance // 实例对象
target io.Writer // 目标写入器
master *Master // 主控对象
statRegex *regexp.Regexp // 统计信息正则表达式
healthRegex *regexp.Regexp // 健康检查正则表达式
instanceID string // 实例ID
instance *Instance // 实例对象
target io.Writer // 目标写入器
master *Master // 主控对象
checkPoint *regexp.Regexp // 检查点正则表达式
}
// NewInstanceLogWriter 创建新的实例日志写入器
func NewInstanceLogWriter(instanceID string, instance *Instance, target io.Writer, master *Master) *InstanceLogWriter {
return &InstanceLogWriter{
instanceID: instanceID,
instance: instance,
target: target,
master: master,
statRegex: regexp.MustCompile(`TRAFFIC_STATS\|TCP_RX=(\d+)\|TCP_TX=(\d+)\|UDP_RX=(\d+)\|UDP_TX=(\d+)`),
healthRegex: regexp.MustCompile(`HEALTH_CHECKS\|POOL=(\d+)\|PING=(\d+)ms`),
instanceID: instanceID,
instance: instance,
target: target,
master: master,
checkPoint: regexp.MustCompile(`CHECK_POINT\|MODE=(\d+)\|PING=(\d+)ms\|POOL=(\d+)\|TCPS=(\d+)\|UDPS=(\d+)\|TCPRX=(\d+)\|TCPTX=(\d+)\|UDPRX=(\d+)\|UDPTX=(\d+)`),
}
}
@@ -135,33 +218,41 @@ func (w *InstanceLogWriter) Write(p []byte) (n int, err error) {
for scanner.Scan() {
line := scanner.Text()
// 解析并处理统计信息
if matches := w.statRegex.FindStringSubmatch(line); len(matches) == 5 {
// 解析并处理检查点信息
if matches := w.checkPoint.FindStringSubmatch(line); len(matches) == 10 {
// matches[1] = MODE, matches[2] = PING, matches[3] = POOL, matches[4] = TCPS, matches[5] = UDPS, matches[6] = TCPRX, matches[7] = TCPTX, matches[8] = UDPRX, matches[9] = UDPTX
if mode, err := strconv.ParseInt(matches[1], 10, 32); err == nil {
w.instance.Mode = int32(mode)
}
if ping, err := strconv.ParseInt(matches[2], 10, 32); err == nil {
w.instance.Ping = int32(ping)
}
if pool, err := strconv.ParseInt(matches[3], 10, 32); err == nil {
w.instance.Pool = int32(pool)
}
if tcps, err := strconv.ParseInt(matches[4], 10, 32); err == nil {
w.instance.TCPS = int32(tcps)
}
if udps, err := strconv.ParseInt(matches[5], 10, 32); err == nil {
w.instance.UDPS = int32(udps)
}
stats := []*uint64{&w.instance.TCPRX, &w.instance.TCPTX, &w.instance.UDPRX, &w.instance.UDPTX}
bases := []uint64{w.instance.TCPRXBase, w.instance.TCPTXBase, w.instance.UDPRXBase, w.instance.UDPTXBase}
for i, stat := range stats {
if v, err := strconv.ParseUint(matches[i+1], 10, 64); err == nil {
// 累加新的统计数据
*stat += v
if v, err := strconv.ParseUint(matches[i+6], 10, 64); err == nil {
*stat = bases[i] + v
}
}
w.instance.lastCheckPoint = time.Now()
w.master.instances.Store(w.instanceID, w.instance)
// 过滤统计日志
continue
}
// 解析并处理健康检查信息
if matches := w.healthRegex.FindStringSubmatch(line); len(matches) == 3 {
if v, err := strconv.ParseInt(matches[1], 10, 64); err == nil {
w.instance.Pool = v
}
if v, err := strconv.ParseInt(matches[2], 10, 64); err == nil {
w.instance.Ping = v
}
w.master.instances.Store(w.instanceID, w.instance)
// 发送健康检查更新事件
// 发送检查点更新事件
w.master.sendSSEEvent("update", w.instance)
// 过滤检查日志
// 过滤检查日志
continue
}
// 输出日志加实例ID
fmt.Fprintf(w.target, "%s [%s]\n", line, w.instanceID)
@@ -225,8 +316,10 @@ func NewMaster(parsedURL *url.URL, tlsCode string, tlsConfig *tls.Config, logger
tlsConfig: tlsConfig,
masterURL: parsedURL,
statePath: filepath.Join(baseDir, stateFilePath, stateFileName),
notifyChannel: make(chan *InstanceEvent, 1024),
notifyChannel: make(chan *InstanceEvent, semaphoreLimit),
tcpingSem: make(chan struct{}, tcpingSemLimit),
startTime: time.Now(),
backupDone: make(chan struct{}),
}
master.tunnelTCPAddr = host
@@ -236,12 +329,15 @@ func NewMaster(parsedURL *url.URL, tlsCode string, tlsConfig *tls.Config, logger
// 启动事件分发器
go master.startEventDispatcher()
// 启动定期备份
go master.startPeriodicBackup()
return master
}
// Run 管理主控生命周期
func (m *Master) Run() {
m.logger.Info("Master started: %v%v", m.tunnelAddr, m.prefix)
m.logger.Info("Master started: %v%v", m.tunnelTCPAddr, m.prefix)
// 初始化API Key
apiKey, ok := m.findInstance(apiKeyID)
@@ -267,6 +363,7 @@ func (m *Master) Run() {
fmt.Sprintf("%s/instances/", m.prefix): m.handleInstanceDetail,
fmt.Sprintf("%s/events", m.prefix): m.handleSSE,
fmt.Sprintf("%s/info", m.prefix): m.handleInfo,
fmt.Sprintf("%s/tcping", m.prefix): m.handleTCPing,
}
// 创建不需要API Key认证的端点
@@ -395,7 +492,7 @@ func (m *Master) Shutdown(ctx context.Context) error {
})
// 等待所有订阅者处理完关闭事件
time.Sleep(100 * time.Millisecond)
time.Sleep(baseDuration)
// 关闭所有订阅者通道
m.subscribers.Range(func(key, value any) bool {
@@ -428,6 +525,9 @@ func (m *Master) Shutdown(ctx context.Context) error {
wg.Wait()
// 关闭定期备份
close(m.backupDone)
// 关闭事件通知通道,停止事件分发器
close(m.notifyChannel)
@@ -447,7 +547,14 @@ func (m *Master) Shutdown(ctx context.Context) error {
// saveState 保存实例状态到文件
func (m *Master) saveState() error {
m.stateMu.Lock()
return m.saveStateToPath(m.statePath)
}
// saveStateToPath 保存实例状态到指定路径
func (m *Master) saveStateToPath(filePath string) error {
if !m.stateMu.TryLock() {
return nil
}
defer m.stateMu.Unlock()
// 创建持久化数据
@@ -463,20 +570,20 @@ func (m *Master) saveState() error {
// 如果没有实例,直接返回
if len(persistentData) == 0 {
// 如果状态文件存在,删除它
if _, err := os.Stat(m.statePath); err == nil {
return os.Remove(m.statePath)
if _, err := os.Stat(filePath); err == nil {
return os.Remove(filePath)
}
return nil
}
// 确保目录存在
if err := os.MkdirAll(filepath.Dir(m.statePath), 0755); err != nil {
if err := os.MkdirAll(filepath.Dir(filePath), 0755); err != nil {
m.logger.Error("Create state dir failed: %v", err)
return err
}
// 创建临时文件
tempFile, err := os.CreateTemp(filepath.Dir(m.statePath), "np-*.tmp")
tempFile, err := os.CreateTemp(filepath.Dir(filePath), "np-*.tmp")
if err != nil {
m.logger.Error("Create temp failed: %v", err)
return err
@@ -507,7 +614,7 @@ func (m *Master) saveState() error {
}
// 原子地替换文件
if err := os.Rename(tempPath, m.statePath); err != nil {
if err := os.Rename(tempPath, filePath); err != nil {
m.logger.Error("Rename temp failed: %v", err)
removeTemp()
return err
@@ -516,6 +623,25 @@ func (m *Master) saveState() error {
return nil
}
// startPeriodicBackup 启动定期备份
func (m *Master) startPeriodicBackup() {
for {
select {
case <-time.After(ReloadInterval):
// 固定备份文件名
backupPath := fmt.Sprintf("%s.backup", m.statePath)
if err := m.saveStateToPath(backupPath); err != nil {
m.logger.Error("Backup state failed: %v", err)
} else {
m.logger.Info("State backup saved: %v", backupPath)
}
case <-m.backupDone:
return
}
}
}
// loadState 从文件加载实例状态
func (m *Master) loadState() {
// 检查文件是否存在
@@ -546,8 +672,8 @@ func (m *Master) loadState() {
// 处理自启动
if instance.Restart {
go m.startInstance(instance)
m.logger.Info("Auto-starting instance: %v [%v]", instance.URL, instance.ID)
m.startInstance(instance)
}
}
@@ -576,20 +702,174 @@ func (m *Master) handleInfo(w http.ResponseWriter, r *http.Request) {
}
info := map[string]any{
"os": runtime.GOOS,
"arch": runtime.GOARCH,
"ver": m.version,
"name": m.hostname,
"uptime": uint64(time.Since(m.startTime).Seconds()),
"log": m.logLevel,
"tls": m.tlsCode,
"crt": m.crtPath,
"key": m.keyPath,
"os": runtime.GOOS,
"arch": runtime.GOARCH,
"cpu": -1,
"mem_total": uint64(0),
"mem_free": uint64(0),
"swap_total": uint64(0),
"swap_free": uint64(0),
"netrx": uint64(0),
"nettx": uint64(0),
"diskr": uint64(0),
"diskw": uint64(0),
"sysup": uint64(0),
"ver": m.version,
"name": m.hostname,
"uptime": uint64(time.Since(m.startTime).Seconds()),
"log": m.logLevel,
"tls": m.tlsCode,
"crt": m.crtPath,
"key": m.keyPath,
}
if runtime.GOOS == "linux" {
sysInfo := getLinuxSysInfo()
info["cpu"] = sysInfo.CPU
info["mem_total"] = sysInfo.MemTotal
info["mem_free"] = sysInfo.MemFree
info["swap_total"] = sysInfo.SwapTotal
info["swap_free"] = sysInfo.SwapFree
info["netrx"] = sysInfo.NetRX
info["nettx"] = sysInfo.NetTX
info["diskr"] = sysInfo.DiskR
info["diskw"] = sysInfo.DiskW
info["sysup"] = sysInfo.SysUp
}
writeJSON(w, http.StatusOK, info)
}
// getLinuxSysInfo 获取Linux系统信息
func getLinuxSysInfo() SystemInfo {
info := SystemInfo{
CPU: -1,
MemTotal: 0,
MemFree: 0,
SwapTotal: 0,
SwapFree: 0,
NetRX: 0,
NetTX: 0,
DiskR: 0,
DiskW: 0,
SysUp: 0,
}
if runtime.GOOS != "linux" {
return info
}
// CPU使用率解析/proc/stat
readStat := func() (idle, total uint64) {
data, err := os.ReadFile("/proc/stat")
if err != nil {
return
}
for line := range strings.SplitSeq(string(data), "\n") {
if strings.HasPrefix(line, "cpu ") {
fields := strings.Fields(line)
for i, v := range fields[1:] {
val, _ := strconv.ParseUint(v, 10, 64)
total += val
if i == 3 {
idle = val
}
}
break
}
}
return
}
idle1, total1 := readStat()
time.Sleep(baseDuration)
idle2, total2 := readStat()
numCPU := runtime.NumCPU()
if deltaIdle, deltaTotal := idle2-idle1, total2-total1; deltaTotal > 0 && numCPU > 0 {
info.CPU = min(int((deltaTotal-deltaIdle)*100/deltaTotal/uint64(numCPU)), 100)
}
// RAM使用率解析/proc/meminfo
if data, err := os.ReadFile("/proc/meminfo"); err == nil {
var memTotal, memFree, swapTotal, swapFree uint64
for line := range strings.SplitSeq(string(data), "\n") {
if fields := strings.Fields(line); len(fields) >= 2 {
if val, err := strconv.ParseUint(fields[1], 10, 64); err == nil {
val *= 1024
switch fields[0] {
case "MemTotal:":
memTotal = val
case "MemFree:":
memFree = val
case "SwapTotal:":
swapTotal = val
case "SwapFree:":
swapFree = val
}
}
}
}
info.MemTotal = memTotal
info.MemFree = memFree
info.SwapTotal = swapTotal
info.SwapFree = swapFree
}
// 网络I/O解析/proc/net/dev
if data, err := os.ReadFile("/proc/net/dev"); err == nil {
for _, line := range strings.Split(string(data), "\n")[2:] {
if fields := strings.Fields(line); len(fields) >= 10 {
ifname := strings.TrimSuffix(fields[0], ":")
// 排除项
if strings.HasPrefix(ifname, "lo") || strings.HasPrefix(ifname, "veth") ||
strings.HasPrefix(ifname, "docker") || strings.HasPrefix(ifname, "podman") ||
strings.HasPrefix(ifname, "br-") || strings.HasPrefix(ifname, "virbr") {
continue
}
if val, err := strconv.ParseUint(fields[1], 10, 64); err == nil {
info.NetRX += val
}
if val, err := strconv.ParseUint(fields[9], 10, 64); err == nil {
info.NetTX += val
}
}
}
}
// 磁盘I/O解析/proc/diskstats
if data, err := os.ReadFile("/proc/diskstats"); err == nil {
for line := range strings.SplitSeq(string(data), "\n") {
if fields := strings.Fields(line); len(fields) >= 14 {
deviceName := fields[2]
// 排除项
if strings.Contains(deviceName, "loop") || strings.Contains(deviceName, "ram") ||
strings.HasPrefix(deviceName, "dm-") || strings.HasPrefix(deviceName, "md") {
continue
}
if matched, _ := regexp.MatchString(`\d+$`, deviceName); matched {
continue
}
if val, err := strconv.ParseUint(fields[5], 10, 64); err == nil {
info.DiskR += val * 512
}
if val, err := strconv.ParseUint(fields[9], 10, 64); err == nil {
info.DiskW += val * 512
}
}
}
}
// 系统运行时间:解析/proc/uptime
if data, err := os.ReadFile("/proc/uptime"); err == nil {
if fields := strings.Fields(string(data)); len(fields) > 0 {
if uptime, err := strconv.ParseFloat(fields[0], 64); err == nil {
info.SysUp = uint64(uptime)
}
}
}
return info
}
// handleInstances 处理实例集合请求
func (m *Master) handleInstances(w http.ResponseWriter, r *http.Request) {
switch r.Method {
@@ -649,7 +929,7 @@ func (m *Master) handleInstances(w http.ResponseWriter, r *http.Request) {
// 保存实例状态
go func() {
time.Sleep(100 * time.Millisecond)
time.Sleep(baseDuration)
m.saveState()
}()
writeJSON(w, http.StatusCreated, instance)
@@ -800,7 +1080,7 @@ func (m *Master) handlePutInstance(w http.ResponseWriter, r *http.Request, id st
// 如果实例正在运行,先停止它
if instance.Status == "running" {
m.stopInstance(instance)
time.Sleep(100 * time.Millisecond)
time.Sleep(baseDuration)
}
// 更新实例URL和类型
@@ -816,7 +1096,7 @@ func (m *Master) handlePutInstance(w http.ResponseWriter, r *http.Request, id st
// 保存实例状态
go func() {
time.Sleep(100 * time.Millisecond)
time.Sleep(baseDuration)
m.saveState()
}()
writeJSON(w, http.StatusOK, instance)
@@ -847,7 +1127,7 @@ func (m *Master) processInstanceAction(instance *Instance, action string) {
if instance.Status == "running" {
go func() {
m.stopInstance(instance)
time.Sleep(100 * time.Millisecond)
time.Sleep(baseDuration)
m.startInstance(instance)
}()
} else {
@@ -1016,11 +1296,11 @@ func (m *Master) startInstance(instance *Instance) {
}
}
// 保存原始流量统计
originalTCPRX := instance.TCPRX
originalTCPTX := instance.TCPTX
originalUDPRX := instance.UDPRX
originalUDPTX := instance.UDPTX
// 启动前,记录基线
instance.TCPRXBase = instance.TCPRX
instance.TCPTXBase = instance.TCPTX
instance.UDPRXBase = instance.UDPRX
instance.UDPTXBase = instance.UDPTX
// 获取可执行文件路径
execPath, err := os.Executable()
@@ -1028,6 +1308,7 @@ func (m *Master) startInstance(instance *Instance) {
m.logger.Error("Get path failed: %v [%v]", err, instance.ID)
instance.Status = "error"
m.instances.Store(instance.ID, instance)
m.sendSSEEvent("update", instance)
return
}
@@ -1043,23 +1324,23 @@ func (m *Master) startInstance(instance *Instance) {
m.logger.Info("Instance starting: %v [%v]", instance.URL, instance.ID)
// 启动实例
if err := cmd.Start(); err != nil {
m.logger.Error("Instance error: %v [%v]", err, instance.ID)
if err := cmd.Start(); err != nil || cmd.Process == nil || cmd.Process.Pid <= 0 {
if err != nil {
m.logger.Error("Instance error: %v [%v]", err, instance.ID)
} else {
m.logger.Error("Instance start failed [%v]", instance.ID)
}
instance.Status = "error"
m.instances.Store(instance.ID, instance)
m.sendSSEEvent("update", instance)
cancel()
} else {
instance.cmd = cmd
instance.Status = "running"
// 恢复原始流量统计
instance.TCPRX = originalTCPRX
instance.TCPTX = originalTCPTX
instance.UDPRX = originalUDPRX
instance.UDPTX = originalUDPTX
go m.monitorInstance(instance, cmd)
return
}
instance.cmd = cmd
instance.Status = "running"
go m.monitorInstance(instance, cmd)
m.instances.Store(instance.ID, instance)
// 发送启动事件
@@ -1068,29 +1349,36 @@ func (m *Master) startInstance(instance *Instance) {
// monitorInstance 监控实例状态
func (m *Master) monitorInstance(instance *Instance, cmd *exec.Cmd) {
select {
case <-instance.stopped:
// 实例被显式停止
return
default:
// 等待进程完成
err := cmd.Wait()
done := make(chan error, 1)
go func() {
done <- cmd.Wait()
}()
// 获取最新的实例状态
if value, exists := m.instances.Load(instance.ID); exists {
instance = value.(*Instance)
// 仅在实例状态为running时才发送事件
if instance.Status == "running" {
if err != nil {
m.logger.Error("Instance error: %v [%v]", err, instance.ID)
instance.Status = "error"
} else {
instance.Status = "stopped"
for {
select {
case <-instance.stopped:
// 实例被显式停止
return
case err := <-done:
// 获取最新的实例状态
if value, exists := m.instances.Load(instance.ID); exists {
instance = value.(*Instance)
if instance.Status == "running" {
if err != nil {
m.logger.Error("Instance error: %v [%v]", err, instance.ID)
instance.Status = "error"
} else {
instance.Status = "stopped"
}
m.instances.Store(instance.ID, instance)
m.sendSSEEvent("update", instance)
}
}
return
case <-time.After(reportInterval):
if !instance.lastCheckPoint.IsZero() && time.Since(instance.lastCheckPoint) > 5*reportInterval {
instance.Status = "error"
m.instances.Store(instance.ID, instance)
// 安全地发送停止事件,避免向已关闭的通道发送
m.sendSSEEvent("update", instance)
}
}
@@ -1119,7 +1407,7 @@ func (m *Master) stopInstance(instance *Instance) {
} else {
instance.cmd.Process.Signal(syscall.SIGTERM)
}
time.Sleep(100 * time.Millisecond)
time.Sleep(baseDuration)
}
// 关闭停止通道
@@ -1325,6 +1613,27 @@ func generateOpenAPISpec() string {
}
}
},
"/tcping": {
"get": {
"summary": "TCP connectivity test",
"security": [{"ApiKeyAuth": []}],
"parameters": [
{
"name": "target",
"in": "query",
"required": true,
"schema": {"type": "string"},
"description": "Target address in format host:port"
}
],
"responses": {
"200": {"description": "Success", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/TCPingResult"}}}},
"400": {"description": "Target address required"},
"401": {"description": "Unauthorized"},
"405": {"description": "Method not allowed"}
}
}
},
"/openapi.json": {
"get": {
"summary": "Get OpenAPI specification",
@@ -1361,12 +1670,15 @@ func generateOpenAPISpec() string {
"status": {"type": "string", "enum": ["running", "stopped", "error"], "description": "Instance status"},
"url": {"type": "string", "description": "Command string or API Key"},
"restart": {"type": "boolean", "description": "Restart policy"},
"mode": {"type": "integer", "description": "Instance mode"},
"ping": {"type": "integer", "description": "TCPing latency"},
"pool": {"type": "integer", "description": "Pool active count"},
"tcps": {"type": "integer", "description": "TCP connection count"},
"udps": {"type": "integer", "description": "UDP connection count"},
"tcprx": {"type": "integer", "description": "TCP received bytes"},
"tcptx": {"type": "integer", "description": "TCP transmitted bytes"},
"udprx": {"type": "integer", "description": "UDP received bytes"},
"udptx": {"type": "integer", "description": "UDP transmitted bytes"},
"pool": {"type": "integer", "description": "Health check pool active"},
"ping": {"type": "integer", "description": "Health check one-way latency"},
"udptx": {"type": "integer", "description": "UDP transmitted bytes"}
}
},
"CreateInstanceRequest": {
@@ -1392,14 +1704,33 @@ func generateOpenAPISpec() string {
"properties": {
"os": {"type": "string", "description": "Operating system"},
"arch": {"type": "string", "description": "System architecture"},
"cpu": {"type": "integer", "description": "CPU usage percentage"},
"mem_total": {"type": "integer", "format": "int64", "description": "Total memory in bytes"},
"mem_free": {"type": "integer", "format": "int64", "description": "Free memory in bytes"},
"swap_total": {"type": "integer", "format": "int64", "description": "Total swap space in bytes"},
"swap_free": {"type": "integer", "format": "int64", "description": "Free swap space in bytes"},
"netrx": {"type": "integer", "format": "int64", "description": "Network received bytes"},
"nettx": {"type": "integer", "format": "int64", "description": "Network transmitted bytes"},
"diskr": {"type": "integer", "format": "int64", "description": "Disk read bytes"},
"diskw": {"type": "integer", "format": "int64", "description": "Disk write bytes"},
"sysup": {"type": "integer", "format": "int64", "description": "System uptime in seconds"},
"ver": {"type": "string", "description": "NodePass version"},
"name": {"type": "string", "description": "Hostname"},
"uptime": {"type": "integer", "format": "int64", "description": "Uptime in seconds"},
"uptime": {"type": "integer", "format": "int64", "description": "API uptime in seconds"},
"log": {"type": "string", "description": "Log level"},
"tls": {"type": "string", "description": "TLS code"},
"crt": {"type": "string", "description": "Certificate path"},
"key": {"type": "string", "description": "Private key path"}
}
},
"TCPingResult": {
"type": "object",
"properties": {
"target": {"type": "string", "description": "Target address"},
"connected": {"type": "boolean", "description": "Is connected"},
"latency": {"type": "integer", "format": "int64", "description": "Latency in milliseconds"},
"error": {"type": "string", "nullable": true, "description": "Error message"}
}
}
}
}

View File

@@ -6,10 +6,12 @@ import (
"bytes"
"context"
"crypto/tls"
"io"
"net"
"net/url"
"os"
"os/signal"
"strconv"
"syscall"
"time"
@@ -30,38 +32,49 @@ func NewServer(parsedURL *url.URL, tlsCode string, tlsConfig *tls.Config, logger
server := &Server{
Common: Common{
tlsCode: tlsCode,
dataFlow: "+",
logger: logger,
semaphore: make(chan struct{}, semaphoreLimit),
signalChan: make(chan string, semaphoreLimit),
},
tlsConfig: tlsConfig,
}
// 初始化公共字段
server.getTunnelKey(parsedURL)
server.getPoolCapacity(parsedURL)
server.getAddress(parsedURL)
server.initConfig(parsedURL)
server.initRateLimiter()
return server
}
// Run 管理服务端生命周期
func (s *Server) Run() {
s.logger.Info("Server started: %v@%v/%v", s.tunnelKey, s.tunnelAddr, s.targetTCPAddr)
logInfo := func(prefix string) {
s.logger.Info("%v: %v@%v/%v?max=%v&mode=%v&read=%v&rate=%v&slot=%v",
prefix, s.tunnelKey, s.tunnelTCPAddr, s.targetTCPAddr,
s.maxPoolCapacity, s.runMode, s.readTimeout, s.rateLimit/125000, s.slotLimit)
}
logInfo("Server started")
ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
// 启动服务端并处理重启
go func() {
for {
time.Sleep(serviceCooldown)
if err := s.start(); err != nil {
if ctx.Err() != nil {
return
}
// 启动服务端
if err := s.start(); err != nil && err != io.EOF {
s.logger.Error("Server error: %v", err)
// 重启服务端
s.stop()
s.logger.Info("Server restarted: %v@%v/%v", s.tunnelKey, s.tunnelAddr, s.targetTCPAddr)
select {
case <-ctx.Done():
return
case <-time.After(serviceCooldown):
}
logInfo("Server restarting")
}
}
}()
// 监听系统信号以优雅关闭
ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
<-ctx.Done()
stop()
@@ -85,9 +98,23 @@ func (s *Server) start() error {
return err
}
// 通过是否监听成功判断数据流向
if err := s.initTargetListener(); err == nil {
// 运行模式判断
switch s.runMode {
case "1": // 反向模式
if err := s.initTargetListener(); err != nil {
return err
}
s.dataFlow = "-"
case "2": // 正向模式
s.dataFlow = "+"
default: // 自动判断
if err := s.initTargetListener(); err == nil {
s.runMode = "1"
s.dataFlow = "-"
} else {
s.runMode = "2"
s.dataFlow = "+"
}
}
// 与客户端进行握手
@@ -107,13 +134,11 @@ func (s *Server) start() error {
s.tlsConfig,
s.tunnelListener,
reportInterval)
go s.tunnelPool.ServerManager()
if s.dataFlow == "-" {
go s.commonLoop()
}
return s.commonControl()
}
@@ -121,21 +146,33 @@ func (s *Server) start() error {
func (s *Server) tunnelHandshake() error {
// 接受隧道连接
for {
if s.ctx.Err() != nil {
return s.ctx.Err()
}
tunnelTCPConn, err := s.tunnelListener.Accept()
if err != nil {
s.logger.Error("Accept error: %v", err)
time.Sleep(serviceCooldown)
select {
case <-s.ctx.Done():
return s.ctx.Err()
case <-time.After(serviceCooldown):
}
continue
}
tunnelTCPConn.SetReadDeadline(time.Now().Add(tcpReadTimeout))
tunnelTCPConn.SetReadDeadline(time.Now().Add(handshakeTimeout))
bufReader := bufio.NewReader(tunnelTCPConn)
rawTunnelKey, err := bufReader.ReadString('\n')
if err != nil {
s.logger.Warn("Handshake timeout: %v", tunnelTCPConn.RemoteAddr())
tunnelTCPConn.Close()
time.Sleep(serviceCooldown)
select {
case <-s.ctx.Done():
return s.ctx.Err()
case <-time.After(serviceCooldown):
}
continue
}
@@ -145,23 +182,29 @@ func (s *Server) tunnelHandshake() error {
if tunnelKey != s.tunnelKey {
s.logger.Warn("Access denied: %v", tunnelTCPConn.RemoteAddr())
tunnelTCPConn.Close()
time.Sleep(serviceCooldown)
select {
case <-s.ctx.Done():
return s.ctx.Err()
case <-time.After(serviceCooldown):
}
continue
} else {
s.tunnelTCPConn = tunnelTCPConn.(*net.TCPConn)
s.bufReader = bufio.NewReader(&conn.TimeoutReader{Conn: s.tunnelTCPConn, Timeout: tcpReadTimeout})
s.tunnelTCPConn.SetKeepAlive(true)
s.tunnelTCPConn.SetKeepAlivePeriod(reportInterval)
// 记录客户端IP
s.clientIP = s.tunnelTCPConn.RemoteAddr().(*net.TCPAddr).IP.String()
break
}
s.tunnelTCPConn = tunnelTCPConn.(*net.TCPConn)
s.bufReader = bufio.NewReader(&conn.TimeoutReader{Conn: s.tunnelTCPConn, Timeout: 2 * reportInterval})
s.tunnelTCPConn.SetKeepAlive(true)
s.tunnelTCPConn.SetKeepAlivePeriod(reportInterval)
// 记录客户端IP
s.clientIP = s.tunnelTCPConn.RemoteAddr().(*net.TCPAddr).IP.String()
break
}
// 构建并发送隧道URL到客户端
// 发送客户端配置
tunnelURL := &url.URL{
Host: s.dataFlow,
Scheme: "np",
Host: strconv.Itoa(s.maxPoolCapacity),
Path: s.dataFlow,
Fragment: s.tlsCode,
}