Mirror of https://github.com/EasyTier/EasyTier.git, synced 2025-10-04 00:16:41 +08:00

Compare commits (22 commits)

Author | SHA1 | Date
---|---|---
 | 4dca25db86 |
 | d87a440c04 |
 | 55efd62798 |
 | 70a41275c1 |
 | dd941681ce |
 | 9824d0adaa |
 | d2291628e0 |
 | 7ab8cad1af |
 | 2c017e0fc5 |
 | d9453589ac |
 | e344372616 |
 | 63821e56bc |
 | 1be64223c8 |
 | a08a8e7f4c |
 | b31996230d |
 | 1e836501a8 |
 | d4e59ffc40 |
 | 37ceb77bf6 |
 | ba3da97ad4 |
 | 984ed8f6cf |
 | c7895963e4 |
 | a0ece6ad4d |
.github/workflows/release.yml (vendored): 2 changed lines
@@ -21,7 +21,7 @@ on:
    version:
      description: 'Version for this release'
      type: string
-     default: 'v2.0.0'
+     default: 'v2.0.3'
      required: true
    make_latest:
      description: 'Mark this release as latest'
Cargo.lock (generated): 4 changed lines
@@ -1539,7 +1539,7 @@ checksum = "0d6ef0072f8a535281e4876be788938b528e9a1d43900b82c2569af7da799125"

[[package]]
name = "easytier"
-version = "2.0.0"
+version = "2.0.3"
dependencies = [
 "aes-gcm",
 "anyhow",
@@ -1631,7 +1631,7 @@ dependencies = [

[[package]]
name = "easytier-gui"
-version = "2.0.0"
+version = "2.0.3"
dependencies = [
 "anyhow",
 "chrono",
@@ -4,84 +4,23 @@
      "path": "."
    },
    { "name": "gui", "path": "easytier-gui" },
    { "name": "core", "path": "easytier" },
    { "name": "vpnservice", "path": "tauri-plugin-vpnservice" }
  ],
  "settings": {
    "eslint.useFlatConfig": true,
    "i18n-ally.sourceLanguage": "cn",
    "i18n-ally.keystyle": "nested",
    "i18n-ally.sortKeys": true,
    // Disable the default formatter
    "prettier.enable": false,
    "editor.formatOnSave": false,
    "editor.codeActionsOnSave": {
      "source.fixAll.eslint": "explicit",
      "source.organizeImports": "never"
    },
    "eslint.rules.customizations": [
      { "rule": "style/*", "severity": "off" },
      { "rule": "style/eol-last", "severity": "error" },
      { "rule": "format/*", "severity": "off" },
      { "rule": "*-indent", "severity": "off" },
      { "rule": "*-spacing", "severity": "off" },
      { "rule": "*-spaces", "severity": "off" },
      { "rule": "*-order", "severity": "off" },
      { "rule": "*-dangle", "severity": "off" },
      { "rule": "*-newline", "severity": "off" },
      { "rule": "*quotes", "severity": "off" },
      { "rule": "*semi", "severity": "off" }
    ],
    "eslint.validate": ["code-workspace", "javascript", "javascriptreact", "typescript", "typescriptreact", "vue", "html", "markdown", "json", "jsonc", "yaml", "toml", "gql", "graphql"],
    "i18n-ally.localesPaths": ["easytier-gui/locales"]
  }
}
README.md: 17 changed lines
@@ -200,20 +200,20 @@ Subnet proxy information will automatically sync to each node in the virtual net

### Networking without Public IP

-EasyTier supports networking using shared public nodes. The currently deployed shared public node is ``tcp://easytier.public.kkrainbow.top:11010``.
+EasyTier supports networking using shared public nodes. The currently deployed shared public node is ``tcp://public.easytier.top:11010``.

When using shared nodes, each node entering the network needs to provide the same ``--network-name`` and ``--network-secret`` parameters as the unique identifier of the network.

Taking two nodes as an example, Node A executes:

```sh
-sudo easytier-core -i 10.144.144.1 --network-name abc --network-secret abc -e tcp://easytier.public.kkrainbow.top:11010
+sudo easytier-core -i 10.144.144.1 --network-name abc --network-secret abc -e tcp://public.easytier.top:11010
```

Node B executes:

```sh
-sudo easytier-core --ipv4 10.144.144.2 --network-name abc --network-secret abc -e tcp://easytier.public.kkrainbow.top:11010
+sudo easytier-core --ipv4 10.144.144.2 --network-name abc --network-secret abc -e tcp://public.easytier.top:11010
```

After the command is successfully executed, Node A can access Node B through the virtual IP 10.144.144.2.

@@ -279,7 +279,16 @@ Before using the Client Config, you need to modify the Interface Address and Peer

### Self-Hosted Public Server

-Each node can act as a relay node for other users' networks. Simply start EasyTier without any parameters.
+Every virtual network (with the same network name and secret) can act as a public server cluster. Nodes of other networks can connect to arbitrary nodes in the public server cluster to discover each other without a public IP.
+
+Running your own public server cluster is exactly the same as running a virtual network, except that you can skip configuring the IPv4 address.
+
+You can also join the official public server cluster with the following command:
+
+```
+sudo easytier-core --network-name easytier --network-secret easytier -p tcp://public.easytier.top:11010
+```

### Configurations
README_CN.md: 16 changed lines
@@ -199,20 +199,20 @@ sudo easytier-core --ipv4 10.144.144.2 -n 10.1.1.0/24

### 无公网IP组网

-EasyTier 支持共享公网节点进行组网。目前已部署共享的公网节点 ``tcp://easytier.public.kkrainbow.top:11010``。
+EasyTier 支持共享公网节点进行组网。目前已部署共享的公网节点 ``tcp://public.easytier.top:11010``。

使用共享节点时,需要每个入网节点提供相同的 ``--network-name`` 和 ``--network-secret`` 参数,作为网络的唯一标识。

以双节点为例,节点 A 执行:

```sh
-sudo easytier-core -i 10.144.144.1 --network-name abc --network-secret abc -e tcp://easytier.public.kkrainbow.top:11010
+sudo easytier-core -i 10.144.144.1 --network-name abc --network-secret abc -e tcp://public.easytier.top:11010
```

节点 B 执行

```sh
-sudo easytier-core --ipv4 10.144.144.2 --network-name abc --network-secret abc -e tcp://easytier.public.kkrainbow.top:11010
+sudo easytier-core --ipv4 10.144.144.2 --network-name abc --network-secret abc -e tcp://public.easytier.top:11010
```

命令执行成功后,节点 A 即可通过虚拟 IP 10.144.144.2 访问节点 B。

@@ -282,7 +282,15 @@ connected_clients:

### 自建公共中转服务器

-每个节点都可作为其他用户网络的中转节点。不带任何参数直接启动 EasyTier 即可。
+每个虚拟网络(通过相同的网络名称和密钥建链)都可以充当公共服务器集群。其他网络的节点可以连接到公共服务器集群中的任意节点,无需公共 IP 即可发现彼此。
+
+运行自建的公共服务器集群与运行虚拟网络完全相同,不过可以跳过配置 ipv4 地址。
+
+也可以使用以下命令加入官方公共服务器集群,后续将实现公共服务器集群的节点间负载均衡:
+
+```
+sudo easytier-core --network-name easytier --network-secret easytier -p tcp://public.easytier.top:11010
+```

### 其他配置
easytier-gui/.vscode/settings.json (vendored): 82 changed lines
@@ -1,5 +1,81 @@
{
- "i18n-ally.localesPaths": [
-   "locales"
  "cSpell.words": ["easytier", "Vite", "vueuse", "pinia", "demi", "antfu", "iconify", "intlify", "vitejs", "unplugin", "pnpm"],
  "i18n-ally.localesPaths": "locales",
  "editor.formatOnSave": false,
  // Auto fix
  "editor.codeActionsOnSave": {
    "source.fixAll.eslint": "explicit",
    "source.organizeImports": "never"
  },
  // Silent the stylistic rules in you IDE, but still auto fix them
  "eslint.rules.customizations": [
    { "rule": "style/*", "severity": "off" },
    { "rule": "format/*", "severity": "off" },
    { "rule": "*-indent", "severity": "off" },
    { "rule": "*-spacing", "severity": "off" },
    { "rule": "*-spaces", "severity": "off" },
    { "rule": "*-order", "severity": "off" },
    { "rule": "*-dangle", "severity": "off" },
    { "rule": "*-newline", "severity": "off" },
    { "rule": "*quotes", "severity": "off" },
    { "rule": "*semi", "severity": "off" }
  ],
  // The following is optional.
  // It's better to put under project setting `.vscode/settings.json`
  // to avoid conflicts with working with different eslint configs
  // that does not support all formats.
  "eslint.validate": ["javascript", "javascriptreact", "typescript", "typescriptreact", "vue", "html", "markdown", "json", "jsonc", "yaml"]
}
}
@@ -72,6 +72,8 @@ loss_rate: 丢包率

status:
  version: 内核版本
  local: 本机
+  server: 服务器
+  relay: 中继

run_network: 运行网络
stop_network: 停止网络
@@ -91,3 +93,23 @@ about:
  license: 许可证
  description: 一个简单、安全、去中心化的内网穿透 VPN 组网方案,使用 Rust 语言和 Tokio 框架实现。
  check_update: 检查更新
+
+event:
+  Unknown: 未知
+  TunDeviceReady: Tun设备就绪
+  TunDeviceError: Tun设备错误
+  PeerAdded: 对端添加
+  PeerRemoved: 对端移除
+  PeerConnAdded: 对端连接添加
+  PeerConnRemoved: 对端连接移除
+  ListenerAdded: 监听器添加
+  ListenerAddFailed: 监听器添加失败
+  ListenerAcceptFailed: 监听器接受连接失败
+  ConnectionAccepted: 连接已接受
+  ConnectionError: 连接错误
+  Connecting: 正在连接
+  ConnectError: 连接错误
+  VpnPortalClientConnected: VPN门户客户端已连接
+  VpnPortalClientDisconnected: VPN门户客户端已断开连接
+  DhcpIpv4Changed: DHCP IPv4地址更改
+  DhcpIpv4Conflicted: DHCP IPv4地址冲突
@@ -71,6 +71,8 @@ loss_rate: Loss Rate

status:
  version: Version
  local: Local
+  server: Server
+  relay: Relay

run_network: Run Network
stop_network: Stop Network
@@ -90,3 +92,23 @@ about:
  license: License
  description: 'EasyTier is a simple, safe and decentralized VPN networking solution implemented with the Rust language and Tokio framework.'
  check_update: Check Update
+
+event:
+  Unknown: Unknown
+  TunDeviceReady: TunDeviceReady
+  TunDeviceError: TunDeviceError
+  PeerAdded: PeerAdded
+  PeerRemoved: PeerRemoved
+  PeerConnAdded: PeerConnAdded
+  PeerConnRemoved: PeerConnRemoved
+  ListenerAdded: ListenerAdded
+  ListenerAddFailed: ListenerAddFailed
+  ListenerAcceptFailed: ListenerAcceptFailed
+  ConnectionAccepted: ConnectionAccepted
+  ConnectionError: ConnectionError
+  Connecting: Connecting
+  ConnectError: ConnectError
+  VpnPortalClientConnected: VpnPortalClientConnected
+  VpnPortalClientDisconnected: VpnPortalClientDisconnected
+  DhcpIpv4Changed: DhcpIpv4Changed
+  DhcpIpv4Conflicted: DhcpIpv4Conflicted
@@ -1,7 +1,7 @@
{
  "name": "easytier-gui",
  "type": "module",
- "version": "2.0.0",
+ "version": "2.0.3",
  "private": true,
  "scripts": {
    "dev": "vite",
@@ -12,50 +12,52 @@
    "lint:fix": "eslint . --ignore-pattern src-tauri --fix"
  },
  "dependencies": {
-   "@primevue/themes": "^4.0.5",
+   "@primevue/themes": "^4.1.0",
    "@tauri-apps/plugin-autostart": "2.0.0-rc.1",
    "@tauri-apps/plugin-clipboard-manager": "2.0.0-rc.1",
    "@tauri-apps/plugin-os": "2.0.0-rc.1",
    "@tauri-apps/plugin-process": "2.0.0-rc.1",
    "@tauri-apps/plugin-shell": "2.0.0-rc.1",
-   "aura": "link:@primevue/themes/aura",
+   "@vueuse/core": "^11.1.0",
+   "aura": "link:@primevue\\themes\\aura",
    "ip-num": "1.5.1",
-   "pinia": "^2.2.2",
+   "pinia": "^2.2.4",
    "primeflex": "^3.3.1",
    "primeicons": "^7.0.0",
-   "primevue": "^4.0.5",
-   "tauri-plugin-vpnservice-api": "link:../tauri-plugin-vpnservice",
-   "vue": "^3.5.3",
-   "vue-i18n": "^10.0.0",
-   "vue-router": "^4.4.3"
+   "primevue": "^4.1.0",
+   "tauri-plugin-vpnservice-api": "link:..\\tauri-plugin-vpnservice",
+   "vue": "^3.5.11",
+   "vue-i18n": "^10.0.4",
+   "vue-router": "^4.4.5"
  },
  "devDependencies": {
-   "@antfu/eslint-config": "^3.5.0",
-   "@intlify/unplugin-vue-i18n": "^5.0.0",
-   "@primevue/auto-import-resolver": "^4.0.5",
+   "@antfu/eslint-config": "^3.7.3",
+   "@intlify/unplugin-vue-i18n": "^5.2.0",
+   "@primevue/auto-import-resolver": "^4.1.0",
    "@tauri-apps/api": "2.0.0-rc.0",
    "@tauri-apps/cli": "2.0.0-rc.3",
-   "@types/node": "^22.5.4",
+   "@types/node": "^22.7.4",
    "@types/uuid": "^10.0.0",
-   "@vitejs/plugin-vue": "^5.1.3",
-   "@vue-macros/volar": "^0.29.1",
+   "@vitejs/plugin-vue": "^5.1.4",
+   "@vue-macros/volar": "0.30.3",
    "autoprefixer": "^10.4.20",
-   "eslint": "^9.10.0",
+   "eslint": "^9.12.0",
    "eslint-plugin-format": "^0.1.2",
    "internal-ip": "^8.0.0",
-   "postcss": "^8.4.45",
-   "tailwindcss": "^3.4.10",
+   "postcss": "^8.4.47",
+   "tailwindcss": "^3.4.13",
    "typescript": "^5.6.2",
-   "unplugin-auto-import": "^0.18.2",
+   "unplugin-auto-import": "^0.18.3",
    "unplugin-vue-components": "^0.27.4",
-   "unplugin-vue-macros": "^2.11.11",
+   "unplugin-vue-macros": "^2.12.3",
    "unplugin-vue-markdown": "^0.26.2",
    "unplugin-vue-router": "^0.10.8",
    "uuid": "^10.0.0",
-   "vite": "^5.4.3",
-   "vite-plugin-vue-devtools": "^7.4.4",
+   "vite": "^5.4.8",
+   "vite-plugin-vue-devtools": "^7.4.6",
    "vite-plugin-vue-layouts": "^0.11.0",
    "vue-i18n": "^10.0.0",
    "vue-tsc": "^2.1.6"
- }
+ },
+ "packageManager": "pnpm@9.12.1+sha512.e5a7e52a4183a02d5931057f7a0dbff9d5e9ce3161e33fa68ae392125b79282a8a8a470a51dfc8a0ed86221442eb2fb57019b0990ed24fab519bf0e1bc5ccfc4"
}
easytier-gui/pnpm-lock.yaml (generated): 3106 changed lines (file diff suppressed because it is too large)
@@ -1,4 +0,0 @@
-[build]
-target = "x86_64-unknown-linux-gnu"
-
-[target]
@@ -1,6 +1,6 @@
[package]
name = "easytier-gui"
-version = "2.0.0"
+version = "2.0.3"
description = "EasyTier GUI"
authors = ["you"]
edition = "2021"
@@ -1,4 +1,5 @@
{
+ "$schema": "../gen/schemas/desktop-schema.json",
  "identifier": "migrated",
  "description": "permissions that were migrated from v1",
  "local": true,
@@ -13,6 +14,7 @@
  "core:window:allow-show",
  "core:window:allow-hide",
  "core:window:allow-set-focus",
+ "core:window:allow-set-title",
  "core:app:default",
  "core:resources:default",
  "core:menu:default",
@@ -24,7 +26,6 @@
  "shell:default",
  "process:default",
  "clipboard-manager:default",
  "core:tray:default",
  "core:tray:allow-new",
  "core:tray:allow-set-menu",
  "core:tray:allow-set-title",
@@ -41,6 +41,7 @@ struct NetworkConfig {

    dhcp: bool,
    virtual_ipv4: String,
+   network_length: i32,
    hostname: Option<String>,
    network_name: String,
    network_secret: String,
@@ -83,9 +84,15 @@ impl NetworkConfig {

        if !self.dhcp {
            if self.virtual_ipv4.len() > 0 {
-               cfg.set_ipv4(Some(self.virtual_ipv4.parse().with_context(|| {
-                   format!("failed to parse ipv4 address: {}", self.virtual_ipv4)
-               })?))
+               let ip = format!("{}/{}", self.virtual_ipv4, self.network_length)
+                   .parse()
+                   .with_context(|| {
+                       format!(
+                           "failed to parse ipv4 inet address: {}, {}",
+                           self.virtual_ipv4, self.network_length
+                       )
+                   })?;
+               cfg.set_ipv4(Some(ip));
            }
        }
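The change above stops parsing the virtual address as a bare `Ipv4Addr` and instead builds an `<ip>/<prefix>` string from the new `network_length` field and parses it as a `cidr::Ipv4Inet`. A minimal, self-contained sketch of that parsing pattern, assuming only the `cidr` and `anyhow` crates (the helper name is illustrative, not part of the EasyTier codebase):

```rust
use anyhow::Context;
use cidr::Ipv4Inet;

// Hedged sketch: combine an address string and a prefix length the same way
// the GUI command handler does, and surface a descriptive error on failure.
fn parse_virtual_ipv4(virtual_ipv4: &str, network_length: i32) -> anyhow::Result<Ipv4Inet> {
    let ip: Ipv4Inet = format!("{}/{}", virtual_ipv4, network_length)
        .parse()
        .with_context(|| {
            format!(
                "failed to parse ipv4 inet address: {}, {}",
                virtual_ipv4, network_length
            )
        })?;
    Ok(ip)
}

fn main() -> anyhow::Result<()> {
    // An Ipv4Inet keeps both the host address and the prefix length.
    let ip = parse_virtual_ipv4("10.144.144.1", 24)?;
    assert_eq!(ip.to_string(), "10.144.144.1/24");
    Ok(())
}
```

Using `Ipv4Inet` rather than `Ipv4Cidr` allows the host bits to stay set, which is why the parsed value can carry both the node address and its network length.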
@@ -17,7 +17,7 @@
    "createUpdaterArtifacts": false
  },
  "productName": "easytier-gui",
- "version": "2.0.0",
+ "version": "2.0.3",
  "identifier": "com.kkrainbow.easytier",
  "plugins": {},
  "app": {
@@ -1,3 +1,12 @@
+<script setup lang="ts">
+import { getCurrentWindow } from '@tauri-apps/api/window'
+import pkg from '~/../package.json'
+
+onBeforeMount(async () => {
+  await getCurrentWindow().setTitle(`Easytier GUI: v${pkg.version}`)
+})
+</script>
+
<template>
  <RouterView />
</template>
17
easytier-gui/src/auto-imports.d.ts
vendored
17
easytier-gui/src/auto-imports.d.ts
vendored
@@ -3,6 +3,7 @@
|
||||
// @ts-nocheck
|
||||
// noinspection JSUnusedGlobalSymbols
|
||||
// Generated by unplugin-auto-import
|
||||
// biome-ignore lint: disable
|
||||
export {}
|
||||
declare global {
|
||||
const EffectScope: typeof import('vue')['EffectScope']
|
||||
@@ -20,6 +21,7 @@ declare global {
|
||||
const definePage: typeof import('unplugin-vue-router/runtime')['definePage']
|
||||
const defineStore: typeof import('pinia')['defineStore']
|
||||
const effectScope: typeof import('vue')['effectScope']
|
||||
const event2human: typeof import('./composables/utils')['event2human']
|
||||
const generateMenuItem: typeof import('./composables/tray')['generateMenuItem']
|
||||
const getActivePinia: typeof import('pinia')['getActivePinia']
|
||||
const getCurrentInstance: typeof import('vue')['getCurrentInstance']
|
||||
@@ -43,6 +45,8 @@ declare global {
|
||||
const mapWritableState: typeof import('pinia')['mapWritableState']
|
||||
const markRaw: typeof import('vue')['markRaw']
|
||||
const nextTick: typeof import('vue')['nextTick']
|
||||
const num2ipv4: typeof import('./composables/utils')['num2ipv4']
|
||||
const num2ipv6: typeof import('./composables/utils')['num2ipv6']
|
||||
const onActivated: typeof import('vue')['onActivated']
|
||||
const onBeforeMount: typeof import('vue')['onBeforeMount']
|
||||
const onBeforeRouteLeave: typeof import('vue-router')['onBeforeRouteLeave']
|
||||
@@ -58,6 +62,7 @@ declare global {
|
||||
const onServerPrefetch: typeof import('vue')['onServerPrefetch']
|
||||
const onUnmounted: typeof import('vue')['onUnmounted']
|
||||
const onUpdated: typeof import('vue')['onUpdated']
|
||||
const onWatcherCleanup: typeof import('vue')['onWatcherCleanup']
|
||||
const parseNetworkConfig: typeof import('./composables/network')['parseNetworkConfig']
|
||||
const prepareVpnService: typeof import('./composables/mobile_vpn')['prepareVpnService']
|
||||
const provide: typeof import('vue')['provide']
|
||||
@@ -79,6 +84,7 @@ declare global {
|
||||
const shallowReadonly: typeof import('vue')['shallowReadonly']
|
||||
const shallowRef: typeof import('vue')['shallowRef']
|
||||
const storeToRefs: typeof import('pinia')['storeToRefs']
|
||||
const timeAgoCn: typeof import('./composables/utils')['timeAgoCn']
|
||||
const toRaw: typeof import('vue')['toRaw']
|
||||
const toRef: typeof import('vue')['toRef']
|
||||
const toRefs: typeof import('vue')['toRefs']
|
||||
@@ -89,11 +95,14 @@ declare global {
|
||||
const useCssModule: typeof import('vue')['useCssModule']
|
||||
const useCssVars: typeof import('vue')['useCssVars']
|
||||
const useI18n: typeof import('vue-i18n')['useI18n']
|
||||
const useId: typeof import('vue')['useId']
|
||||
const useLink: typeof import('vue-router/auto')['useLink']
|
||||
const useModel: typeof import('vue')['useModel']
|
||||
const useNetworkStore: typeof import('./stores/network')['useNetworkStore']
|
||||
const useRoute: typeof import('vue-router')['useRoute']
|
||||
const useRouter: typeof import('vue-router')['useRouter']
|
||||
const useSlots: typeof import('vue')['useSlots']
|
||||
const useTemplateRef: typeof import('vue')['useTemplateRef']
|
||||
const useTray: typeof import('./composables/tray')['useTray']
|
||||
const watch: typeof import('vue')['watch']
|
||||
const watchEffect: typeof import('vue')['watchEffect']
|
||||
@@ -103,7 +112,7 @@ declare global {
|
||||
// for type re-export
|
||||
declare global {
|
||||
// @ts-ignore
|
||||
export type { Component, ComponentPublicInstance, ComputedRef, ExtractDefaultPropTypes, ExtractPropTypes, ExtractPublicPropTypes, InjectionKey, PropType, Ref, VNode, WritableComputedRef } from 'vue'
|
||||
export type { Component, ComponentPublicInstance, ComputedRef, DirectiveBinding, ExtractDefaultPropTypes, ExtractPropTypes, ExtractPublicPropTypes, InjectionKey, PropType, Ref, MaybeRef, MaybeRefOrGetter, VNode, WritableComputedRef } from 'vue'
|
||||
import('vue')
|
||||
}
|
||||
// for vue template auto import
|
||||
@@ -145,6 +154,8 @@ declare module 'vue' {
|
||||
readonly mapWritableState: UnwrapRef<typeof import('pinia')['mapWritableState']>
|
||||
readonly markRaw: UnwrapRef<typeof import('vue')['markRaw']>
|
||||
readonly nextTick: UnwrapRef<typeof import('vue')['nextTick']>
|
||||
readonly num2ipv4: UnwrapRef<typeof import('./composables/utils')['num2ipv4']>
|
||||
readonly num2ipv6: UnwrapRef<typeof import('./composables/utils')['num2ipv6']>
|
||||
readonly onActivated: UnwrapRef<typeof import('vue')['onActivated']>
|
||||
readonly onBeforeMount: UnwrapRef<typeof import('vue')['onBeforeMount']>
|
||||
readonly onBeforeRouteLeave: UnwrapRef<typeof import('vue-router')['onBeforeRouteLeave']>
|
||||
@@ -160,6 +171,7 @@ declare module 'vue' {
|
||||
readonly onServerPrefetch: UnwrapRef<typeof import('vue')['onServerPrefetch']>
|
||||
readonly onUnmounted: UnwrapRef<typeof import('vue')['onUnmounted']>
|
||||
readonly onUpdated: UnwrapRef<typeof import('vue')['onUpdated']>
|
||||
readonly onWatcherCleanup: UnwrapRef<typeof import('vue')['onWatcherCleanup']>
|
||||
readonly parseNetworkConfig: UnwrapRef<typeof import('./composables/network')['parseNetworkConfig']>
|
||||
readonly prepareVpnService: UnwrapRef<typeof import('./composables/mobile_vpn')['prepareVpnService']>
|
||||
readonly provide: UnwrapRef<typeof import('vue')['provide']>
|
||||
@@ -190,11 +202,14 @@ declare module 'vue' {
|
||||
readonly useCssModule: UnwrapRef<typeof import('vue')['useCssModule']>
|
||||
readonly useCssVars: UnwrapRef<typeof import('vue')['useCssVars']>
|
||||
readonly useI18n: UnwrapRef<typeof import('vue-i18n')['useI18n']>
|
||||
readonly useId: UnwrapRef<typeof import('vue')['useId']>
|
||||
readonly useLink: UnwrapRef<typeof import('vue-router/auto')['useLink']>
|
||||
readonly useModel: UnwrapRef<typeof import('vue')['useModel']>
|
||||
readonly useNetworkStore: UnwrapRef<typeof import('./stores/network')['useNetworkStore']>
|
||||
readonly useRoute: UnwrapRef<typeof import('vue-router')['useRoute']>
|
||||
readonly useRouter: UnwrapRef<typeof import('vue-router')['useRouter']>
|
||||
readonly useSlots: UnwrapRef<typeof import('vue')['useSlots']>
|
||||
readonly useTemplateRef: UnwrapRef<typeof import('vue')['useTemplateRef']>
|
||||
readonly useTray: UnwrapRef<typeof import('./composables/tray')['useTray']>
|
||||
readonly watch: UnwrapRef<typeof import('vue')['watch']>
|
||||
readonly watchEffect: UnwrapRef<typeof import('vue')['watchEffect']>
|
||||
|
@@ -1,7 +1,6 @@
|
||||
<script setup lang="ts">
|
||||
import InputGroup from 'primevue/inputgroup'
|
||||
import InputGroupAddon from 'primevue/inputgroupaddon'
|
||||
import { ping } from 'tauri-plugin-vpnservice-api'
|
||||
import { getOsHostname } from '~/composables/network'
|
||||
|
||||
import { NetworkingMethod } from '~/types/network'
|
||||
@@ -42,10 +41,11 @@ function searchUrlSuggestions(e: { query: string }): string[] {
|
||||
if (query.match(/^\w+:.*/)) {
|
||||
// if query is a valid url, then add to suggestions
|
||||
try {
|
||||
// eslint-disable-next-line no-new
|
||||
new URL(query)
|
||||
ret.push(query)
|
||||
}
|
||||
catch (e) {}
|
||||
catch {}
|
||||
}
|
||||
else {
|
||||
for (const proto in protos) {
|
||||
@@ -65,7 +65,7 @@ const publicServerSuggestions = ref([''])
|
||||
|
||||
function searchPresetPublicServers(e: { query: string }) {
|
||||
const presetPublicServers = [
|
||||
'tcp://easytier.public.kkrainbow.top:11010',
|
||||
'tcp://public.easytier.top:11010',
|
||||
]
|
||||
|
||||
const query = e.query
|
||||
@@ -85,6 +85,20 @@ function searchPeerSuggestions(e: { query: string }) {
|
||||
peerSuggestions.value = searchUrlSuggestions(e)
|
||||
}
|
||||
|
||||
const inetSuggestions = ref([''])
|
||||
|
||||
function searchInetSuggestions(e: { query: string }) {
|
||||
if (e.query.search('/') >= 0) {
|
||||
inetSuggestions.value = [e.query]
|
||||
} else {
|
||||
const ret = []
|
||||
for (let i = 0; i < 32; i++) {
|
||||
ret.push(`${e.query}/${i}`)
|
||||
}
|
||||
inetSuggestions.value = ret
|
||||
}
|
||||
}
|
||||
|
||||
const listenerSuggestions = ref([''])
|
||||
|
||||
function searchListenerSuggestiong(e: { query: string }) {
|
||||
@@ -128,18 +142,12 @@ const osHostname = ref<string>('')
|
||||
|
||||
onMounted(async () => {
|
||||
osHostname.value = await getOsHostname()
|
||||
osHostname.value = await ping('ffdklsajflkdsjl') || ''
|
||||
})
|
||||
</script>
|
||||
|
||||
<template>
|
||||
<div class="flex flex-column h-full">
|
||||
<div class="flex flex-column">
|
||||
<div class="w-10/12 self-center mb-3">
|
||||
<Message severity="warn">
|
||||
{{ t('dhcp_experimental_warning') }}
|
||||
</Message>
|
||||
</div>
|
||||
<div class="w-10/12 self-center ">
|
||||
<Panel :header="t('basic_settings')">
|
||||
<div class="flex flex-column gap-y-2">
|
||||
@@ -159,8 +167,9 @@ onMounted(async () => {
|
||||
aria-describedby="virtual_ipv4-help"
|
||||
/>
|
||||
<InputGroupAddon>
|
||||
<span>/24</span>
|
||||
<span>/</span>
|
||||
</InputGroupAddon>
|
||||
<InputNumber v-model="curNetwork.network_length" :disabled="curNetwork.dhcp" inputId="horizontal-buttons" showButtons :step="1" mode="decimal" :min="1" :max="32" fluid class="max-w-20"/>
|
||||
</InputGroup>
|
||||
</div>
|
||||
</div>
|
||||
@@ -227,9 +236,10 @@ onMounted(async () => {
|
||||
<div class="flex flex-row gap-x-9 flex-wrap w-full">
|
||||
<div class="flex flex-column gap-2 grow p-fluid">
|
||||
<label for="username">{{ t('proxy_cidrs') }}</label>
|
||||
<Chips
|
||||
id="chips" v-model="curNetwork.proxy_cidrs"
|
||||
:placeholder="t('chips_placeholder', ['10.0.0.0/24'])" separator=" " class="w-full"
|
||||
<AutoComplete
|
||||
id="subnet-proxy"
|
||||
v-model="curNetwork.proxy_cidrs" :placeholder="t('chips_placeholder', ['10.0.0.0/24'])"
|
||||
class="w-full" multiple fluid :suggestions="inetSuggestions" @complete="searchInetSuggestions"
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
|
easytier-gui/src/components/HumanEvent.vue (new file): 32 lines
@@ -0,0 +1,32 @@
+<script setup lang="ts">
+import { EventType } from '~/types/network'
+
+const props = defineProps<{
+  event: {
+    [key: string]: any
+  }
+}>()
+const { t } = useI18n()
+
+const eventKey = computed(() => {
+  const key = Object.keys(props.event)[0]
+  return Object.keys(EventType).includes(key) ? key : 'Unknown'
+})
+
+const eventValue = computed(() => {
+  const value = props.event[eventKey.value]
+  return typeof value === 'object' ? value : value
+})
+</script>
+
+<template>
+  <Fieldset :legend="t(`event.${eventKey}`)">
+    <template v-if="eventKey !== 'Unknown'">
+      <div v-if="event.DhcpIpv4Changed">
+        {{ `${eventValue[0]} -> ${eventValue[1]}` }}
+      </div>
+      <pre v-else>{{ eventValue }}</pre>
+    </template>
+    <pre v-else>{{ eventValue }}</pre>
+  </Fieldset>
+</template>
@@ -1,4 +1,5 @@
|
||||
<script setup lang="ts">
|
||||
import { useTimeAgo } from '@vueuse/core'
|
||||
import { IPv4, IPv6 } from 'ip-num/IPNumber'
|
||||
import type { NodeInfo, PeerRoutePair } from '~/types/network'
|
||||
|
||||
@@ -111,6 +112,13 @@ function version(info: PeerRoutePair) {
|
||||
return info.route.version === '' ? 'unknown' : info.route.version
|
||||
}
|
||||
|
||||
function ipFormat(info: PeerRoutePair) {
|
||||
const ip = info.route.ipv4_addr
|
||||
if (typeof ip === 'string')
|
||||
return ip
|
||||
return ip ? `${num2ipv4(ip.address)}/${ip.network_length}` : ''
|
||||
}
|
||||
|
||||
const myNodeInfo = computed(() => {
|
||||
if (!curNetworkInst.value)
|
||||
return {} as NodeInfo
|
||||
@@ -151,7 +159,7 @@ const myNodeInfoChips = computed(() => {
|
||||
const local_ipv4s = my_node_info.ips?.interface_ipv4s
|
||||
for (const [idx, ip] of local_ipv4s?.entries()) {
|
||||
chips.push({
|
||||
label: `Local IPv4 ${idx}: ${IPv4.fromNumber(ip.addr)}`,
|
||||
label: `Local IPv4 ${idx}: ${num2ipv4(ip)}`,
|
||||
icon: '',
|
||||
} as Chip)
|
||||
}
|
||||
@@ -160,11 +168,7 @@ const myNodeInfoChips = computed(() => {
|
||||
const local_ipv6s = my_node_info.ips?.interface_ipv6s
|
||||
for (const [idx, ip] of local_ipv6s?.entries()) {
|
||||
chips.push({
|
||||
label: `Local IPv6 ${idx}: ${IPv6.fromBigInt((BigInt(ip.part1) << BigInt(96))
|
||||
+ (BigInt(ip.part2) << BigInt(64))
|
||||
+ (BigInt(ip.part3) << BigInt(32))
|
||||
+ BigInt(ip.part4),
|
||||
)}`,
|
||||
label: `Local IPv6 ${idx}: ${num2ipv6(ip)}`,
|
||||
icon: '',
|
||||
} as Chip)
|
||||
}
|
||||
@@ -210,6 +214,8 @@ const myNodeInfoChips = computed(() => {
|
||||
PortRestricted = 5,
|
||||
Symmetric = 6,
|
||||
SymUdpFirewall = 7,
|
||||
SymmetricEasyInc = 8,
|
||||
SymmetricEasyDec = 9,
|
||||
};
|
||||
const udpNatType: NatType = my_node_info.stun_info?.udp_nat_type
|
||||
if (udpNatType !== undefined) {
|
||||
@@ -222,6 +228,8 @@ const myNodeInfoChips = computed(() => {
|
||||
[NatType.PortRestricted]: 'Port Restricted',
|
||||
[NatType.Symmetric]: 'Symmetric',
|
||||
[NatType.SymUdpFirewall]: 'Symmetric UDP Firewall',
|
||||
[NatType.SymmetricEasyInc]: 'Symmetric Easy Inc',
|
||||
[NatType.SymmetricEasyDec]: 'Symmetric Easy Dec',
|
||||
}
|
||||
|
||||
chips.push({
|
||||
@@ -312,16 +320,18 @@ function showEventLogs() {
|
||||
|
||||
<template>
|
||||
<div>
|
||||
<Dialog v-model:visible="dialogVisible" modal :header="t(dialogHeader)" :style="{ width: '70%' }">
|
||||
<Panel>
|
||||
<ScrollPanel style="width: 100%; height: 400px">
|
||||
<pre>{{ dialogContent }}</pre>
|
||||
</ScrollPanel>
|
||||
</Panel>
|
||||
<Divider />
|
||||
<div class="flex justify-content-end gap-2">
|
||||
<Button type="button" :label="t('close')" @click="dialogVisible = false" />
|
||||
</div>
|
||||
<Dialog v-model:visible="dialogVisible" modal :header="t(dialogHeader)" class="w-2/3 h-auto">
|
||||
<ScrollPanel v-if="dialogHeader === 'vpn_portal_config'">
|
||||
<pre>{{ dialogContent }}</pre>
|
||||
</ScrollPanel>
|
||||
<Timeline v-else :value="dialogContent">
|
||||
<template #opposite="slotProps">
|
||||
<small class="text-surface-500 dark:text-surface-400">{{ useTimeAgo(Date.parse(slotProps.item[0])) }}</small>
|
||||
</template>
|
||||
<template #content="slotProps">
|
||||
<HumanEvent :event="slotProps.item[1]" />
|
||||
</template>
|
||||
</Timeline>
|
||||
</Dialog>
|
||||
|
||||
<Card v-if="curNetworkInst?.error_msg">
|
||||
@@ -404,18 +414,46 @@ function showEventLogs() {
|
||||
{{ t('peer_info') }}
|
||||
</template>
|
||||
<template #content>
|
||||
<DataTable :value="peerRouteInfos" column-resize-mode="fit" table-style="width: 100%">
|
||||
<Column field="route.ipv4_addr" style="width: 100px;" :header="t('virtual_ipv4')" />
|
||||
<Column field="route.hostname" style="max-width: 250px;" :header="t('hostname')" />
|
||||
<Column :field="routeCost" style="width: 100px;" :header="t('route_cost')" />
|
||||
<Column :field="latencyMs" style="width: 80px;" :header="t('latency')" />
|
||||
<Column :field="txBytes" style="width: 80px;" :header="t('upload_bytes')" />
|
||||
<Column :field="rxBytes" style="width: 80px;" :header="t('download_bytes')" />
|
||||
<Column :field="lossRate" style="width: 100px;" :header="t('loss_rate')" />
|
||||
<Column :field="version" style="width: 100px;" :header="t('status.version')" />
|
||||
<DataTable :value="peerRouteInfos" column-resize-mode="fit" table-class="w-full">
|
||||
<Column :field="ipFormat" :header="t('virtual_ipv4')" />
|
||||
<Column :header="t('hostname')">
|
||||
<template #body="slotProps">
|
||||
<div
|
||||
v-if="!slotProps.data.route.cost || !slotProps.data.route.feature_flag.is_public_server"
|
||||
v-tooltip="slotProps.data.route.hostname"
|
||||
>
|
||||
{{
|
||||
slotProps.data.route.hostname }}
|
||||
</div>
|
||||
<div v-else v-tooltip="slotProps.data.route.hostname" class="space-x-1">
|
||||
<Tag v-if="slotProps.data.route.feature_flag.is_public_server" severity="info" value="Info">
|
||||
{{ t('status.server') }}
|
||||
</Tag>
|
||||
<Tag v-if="slotProps.data.route.no_relay_data" severity="warn" value="Warn">
|
||||
{{ t('status.relay') }}
|
||||
</Tag>
|
||||
</div>
|
||||
</template>
|
||||
</Column>
|
||||
<Column :field="routeCost" :header="t('route_cost')" />
|
||||
<Column :field="latencyMs" :header="t('latency')" />
|
||||
<Column :field="txBytes" :header="t('upload_bytes')" />
|
||||
<Column :field="rxBytes" :header="t('download_bytes')" />
|
||||
<Column :field="lossRate" :header="t('loss_rate')" />
|
||||
<Column :header="t('status.version')">
|
||||
<template #body="slotProps">
|
||||
<span>{{ version(slotProps.data) }}</span>
|
||||
</template>
|
||||
</Column>
|
||||
</DataTable>
|
||||
</template>
|
||||
</Card>
|
||||
</template>
|
||||
</div>
|
||||
</template>
|
||||
|
||||
<style lang="postcss" scoped>
|
||||
.p-timeline :deep(.p-timeline-event-opposite) {
|
||||
@apply flex-none;
|
||||
}
|
||||
</style>
|
||||
|
@@ -48,7 +48,7 @@ async function doStartVpn(ipv4Addr: string, cidr: number, routes: string[]) {

  console.log('start vpn')
  const start_ret = await start_vpn({
-   ipv4Addr: `${ipv4Addr}/${cidr}`,
+   ipv4Addr: `${ipv4Addr}`,
    routes,
    disallowedApplications: ['com.kkrainbow.easytier'],
    mtu: 1300,
easytier-gui/src/composables/utils.ts (new file): 15 lines
@@ -0,0 +1,15 @@
+import { IPv4, IPv6 } from 'ip-num/IPNumber'
+import type { Ipv4Addr, Ipv6Addr } from '~/types/network'
+
+export function num2ipv4(ip: Ipv4Addr) {
+  return IPv4.fromNumber(ip.addr)
+}
+
+export function num2ipv6(ip: Ipv6Addr) {
+  return IPv6.fromBigInt(
+    (BigInt(ip.part1) << BigInt(96))
+    + (BigInt(ip.part2) << BigInt(64))
+    + (BigInt(ip.part3) << BigInt(32))
+    + BigInt(ip.part4),
+  )
+}
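The same widening from four 32-bit words to one 128-bit IPv6 value has a natural Rust counterpart. A hedged Rust sketch of the conversion mirrored by `num2ipv6`; the `Ipv6Parts` struct and function name are illustrative stand-ins, not the actual EasyTier wire types:

```rust
use std::net::Ipv6Addr;

// Illustrative stand-in for the four-word IPv6 representation carried in peer info.
struct Ipv6Parts {
    part1: u32,
    part2: u32,
    part3: u32,
    part4: u32,
}

// Shift the words into a u128, most significant word first, exactly as the
// TypeScript helper above does with BigInt.
fn parts_to_ipv6(ip: &Ipv6Parts) -> Ipv6Addr {
    let value = ((ip.part1 as u128) << 96)
        | ((ip.part2 as u128) << 64)
        | ((ip.part3 as u128) << 32)
        | (ip.part4 as u128);
    Ipv6Addr::from(value)
}

fn main() {
    let ip = Ipv6Parts { part1: 0xfe80_0000, part2: 0, part3: 0, part4: 1 };
    assert_eq!(parts_to_ipv6(&ip).to_string(), "fe80::1");
}
```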
@@ -2,7 +2,16 @@ import { disable, enable, isEnabled } from '@tauri-apps/plugin-autostart'

export async function loadAutoLaunchStatusAsync(target_enable: boolean): Promise<boolean> {
  try {
-   target_enable ? await enable() : await disable()
+   if (target_enable) {
+     await enable()
+   }
+   else {
+     // 消除没有配置自启动时进行关闭操作报错
+     try {
+       await disable()
+     }
+     catch { }
+   }
    localStorage.setItem('auto_launch', JSON.stringify(await isEnabled()))
    return isEnabled()
  }
@@ -181,7 +181,7 @@ const setting_menu_items = ref([
  label: () => t('logging_open_dir'),
  icon: 'pi pi-folder-open',
  command: async () => {
-   console.log('open log dir', await appLogDir())
+   // console.log('open log dir', await appLogDir())
    await open(await appLogDir())
  },
})
@@ -11,6 +11,7 @@ export interface NetworkConfig {

  dhcp: boolean
  virtual_ipv4: string
+ network_length: number,
  hostname?: string
  network_name: string
  network_secret: string
@@ -42,12 +43,13 @@ export function DEFAULT_NETWORK_CONFIG(): NetworkConfig {

    dhcp: true,
    virtual_ipv4: '',
+   network_length: 24,
    network_name: 'easytier',
    network_secret: '',

    networking_method: NetworkingMethod.PublicServer,

-   public_server_url: 'tcp://easytier.public.kkrainbow.top:11010',
+   public_server_url: 'tcp://public.easytier.top:11010',
    peer_urls: [],

    proxy_cidrs: [],
@@ -137,7 +139,10 @@ export interface StunInfo {

export interface Route {
  peer_id: number
- ipv4_addr: string
+ ipv4_addr: {
+   address: Ipv4Addr
+   network_length: number
+ } | string | null
  next_hop_peer_id: number
  cost: number
  proxy_cidrs: string[]
@@ -155,6 +160,7 @@ export interface PeerInfo {

export interface PeerConnInfo {
  conn_id: string
  my_peer_id: number
+ is_client: boolean
  peer_id: number
  features: string[]
  tunnel?: TunnelInfo
@@ -180,3 +186,28 @@ export interface PeerConnStats {

  tx_packets: number
  latency_us: number
}
+
+export enum EventType {
+  TunDeviceReady = 'TunDeviceReady', // string
+  TunDeviceError = 'TunDeviceError', // string
+
+  PeerAdded = 'PeerAdded', // number
+  PeerRemoved = 'PeerRemoved', // number
+  PeerConnAdded = 'PeerConnAdded', // PeerConnInfo
+  PeerConnRemoved = 'PeerConnRemoved', // PeerConnInfo
+
+  ListenerAdded = 'ListenerAdded', // any
+  ListenerAddFailed = 'ListenerAddFailed', // any, string
+  ListenerAcceptFailed = 'ListenerAcceptFailed', // any, string
+  ConnectionAccepted = 'ConnectionAccepted', // string, string
+  ConnectionError = 'ConnectionError', // string, string, string
+
+  Connecting = 'Connecting', // any
+  ConnectError = 'ConnectError', // string, string, string
+
+  VpnPortalClientConnected = 'VpnPortalClientConnected', // string, string
+  VpnPortalClientDisconnected = 'VpnPortalClientDisconnected', // string, string, string
+
+  DhcpIpv4Changed = 'DhcpIpv4Changed', // ipv4 | null, ipv4 | null
+  DhcpIpv4Conflicted = 'DhcpIpv4Conflicted', // ipv4 | null
+}
@@ -3,7 +3,7 @@ name = "easytier"
description = "A full meshed p2p VPN, connecting all your devices in one network with one command."
homepage = "https://github.com/EasyTier/EasyTier"
repository = "https://github.com/EasyTier/EasyTier"
-version = "2.0.0"
+version = "2.0.3"
edition = "2021"
authors = ["kkrainbow"]
keywords = ["vpn", "p2p", "network", "easytier"]
@@ -1,10 +1,5 @@
#[cfg(target_os = "windows")]
-use std::{
-    env,
-    fs::File,
-    io::{copy, Cursor},
-    path::PathBuf,
-};
+use std::{env, io::Cursor, path::PathBuf};

#[cfg(target_os = "windows")]
struct WindowsBuild {}
@@ -46,8 +41,8 @@ impl WindowsBuild {

    fn download_protoc() -> PathBuf {
        println!("cargo:info=use exist protoc: {:?}", "k");
-       let out_dir = Self::get_cargo_target_dir().unwrap();
-       let fname = out_dir.join("protoc");
+       let out_dir = Self::get_cargo_target_dir().unwrap().join("protobuf");
+       let fname = out_dir.join("bin/protoc.exe");
        if fname.exists() {
            println!("cargo:info=use exist protoc: {:?}", fname);
            return fname;
@@ -65,10 +60,7 @@ impl WindowsBuild {
            .map(zip::ZipArchive::new)
            .unwrap()
            .unwrap();
-       let protoc_zipped_file = content.by_name("bin/protoc.exe").unwrap();
-       let mut content = protoc_zipped_file;
-
-       copy(&mut content, &mut File::create(&fname).unwrap()).unwrap();
+       content.extract(out_dir).unwrap();

        fname
    }
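The build-script change above switches from copying the single `bin/protoc.exe` entry out of the downloaded archive to extracting the whole archive under a `protobuf` directory. A hedged sketch of that extraction step, assuming the `zip` crate; the paths and function name are illustrative, not the build script itself:

```rust
use std::{fs::File, path::Path};

// Extract every entry of a downloaded protoc zip (including bin/protoc.exe)
// under `out_dir`, preserving the archive's directory layout. Intended to be
// called from a build script after the archive has been fetched.
fn extract_protoc(zip_path: &Path, out_dir: &Path) -> zip::result::ZipResult<()> {
    let file = File::open(zip_path)?;
    let mut archive = zip::ZipArchive::new(file)?;
    archive.extract(out_dir)
}
```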
@@ -116,4 +116,7 @@ core_clap:
    zh-CN: "转发所有对等节点的RPC数据包,即使对等节点不在转发网络白名单中。这可以帮助白名单外网络中的对等节点建立P2P连接。"
  socks5:
    en: "enable socks5 server, allow socks5 client to access virtual network. format: <port>, e.g.: 1080"
-   zh-CN: "启用 socks5 服务器,允许 socks5 客户端访问虚拟网络. 格式: <端口>,例如:1080"
+   zh-CN: "启用 socks5 服务器,允许 socks5 客户端访问虚拟网络. 格式: <端口>,例如:1080"
+ ipv6_listener:
+   en: "the url of the ipv6 listener, e.g.: tcp://[::]:11010, if not set, will listen on random udp port"
+   zh-CN: "IPv6 监听器的URL,例如:tcp://[::]:11010,如果未设置,将在随机UDP端口上监听"
@@ -23,8 +23,8 @@ pub trait ConfigLoader: Send + Sync {
    fn get_netns(&self) -> Option<String>;
    fn set_netns(&self, ns: Option<String>);

-   fn get_ipv4(&self) -> Option<std::net::Ipv4Addr>;
-   fn set_ipv4(&self, addr: Option<std::net::Ipv4Addr>);
+   fn get_ipv4(&self) -> Option<cidr::Ipv4Inet>;
+   fn set_ipv4(&self, addr: Option<cidr::Ipv4Inet>);

    fn get_dhcp(&self) -> bool;
    fn set_dhcp(&self, dhcp: bool);
@@ -180,6 +180,8 @@ pub struct Flags {
    pub relay_all_peer_rpc: bool,
    #[derivative(Default(value = "false"))]
    pub disable_udp_hole_punching: bool,
+   #[derivative(Default(value = "\"udp://[::]:0\".to_string()"))]
+   pub ipv6_listener: String,
}

#[derive(Debug, Clone, Deserialize, Serialize, PartialEq)]
@@ -260,8 +262,6 @@ impl TomlConfigLoader {
        serde_json::from_str::<serde_json::Map<String, serde_json::Value>>(&default_flags_json)
            .unwrap();

-       tracing::debug!("default_flags_hashmap: {:?}", default_flags_hashmap);

        let mut merged_hashmap = serde_json::Map::new();
        for (key, value) in default_flags_hashmap {
            if let Some(v) = flags_hashmap.remove(&key) {
@@ -271,8 +271,6 @@
            }
        }

-       tracing::debug!("merged_hashmap: {:?}", merged_hashmap);

        serde_json::from_value(serde_json::Value::Object(merged_hashmap)).unwrap()
    }
}
@@ -326,16 +324,23 @@ impl ConfigLoader for TomlConfigLoader {
        self.config.lock().unwrap().netns = ns;
    }

-   fn get_ipv4(&self) -> Option<std::net::Ipv4Addr> {
+   fn get_ipv4(&self) -> Option<cidr::Ipv4Inet> {
        let locked_config = self.config.lock().unwrap();
        locked_config
            .ipv4
            .as_ref()
            .map(|s| s.parse().ok())
            .flatten()
+           .map(|c: cidr::Ipv4Inet| {
+               if c.network_length() == 32 {
+                   cidr::Ipv4Inet::new(c.address(), 24).unwrap()
+               } else {
+                   c
+               }
+           })
    }

-   fn set_ipv4(&self, addr: Option<std::net::Ipv4Addr>) {
+   fn set_ipv4(&self, addr: Option<cidr::Ipv4Inet>) {
        self.config.lock().unwrap().ipv4 = if let Some(addr) = addr {
            Some(addr.to_string())
        } else {
@@ -592,7 +597,7 @@ level = "warn"
        assert!(ret.is_ok());

        let ret = ret.unwrap();
-       assert_eq!("10.144.144.10", ret.get_ipv4().unwrap().to_string());
+       assert_eq!("10.144.144.10/24", ret.get_ipv4().unwrap().to_string());

        assert_eq!(
            vec!["tcp://0.0.0.0:11010", "udp://0.0.0.0:11010"],
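The reworked `get_ipv4` widens a bare host address (parsed as a `/32`) to a `/24` network, which is what the updated test asserts: a config value of `10.144.144.10` now reads back as `10.144.144.10/24`. A standalone sketch of that normalization, assuming the `cidr` crate; the helper name is illustrative and not the real `TomlConfigLoader` method:

```rust
// Hedged sketch: parse a stored address string and widen a /32 to /24,
// mirroring the behaviour of the new get_ipv4 shown above.
fn normalize_ipv4(stored: &str) -> Option<cidr::Ipv4Inet> {
    stored.parse().ok().map(|c: cidr::Ipv4Inet| {
        if c.network_length() == 32 {
            cidr::Ipv4Inet::new(c.address(), 24).unwrap()
        } else {
            c
        }
    })
}

fn main() {
    // A bare address defaults to /32 when parsed, then gets widened to /24.
    assert_eq!(
        normalize_ipv4("10.144.144.10").unwrap().to_string(),
        "10.144.144.10/24"
    );
    // An explicit prefix other than /32 is kept as-is.
    assert_eq!(
        normalize_ipv4("10.144.144.10/16").unwrap().to_string(),
        "10.144.144.10/16"
    );
}
```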
@@ -40,8 +40,8 @@ pub enum GlobalCtxEvent {
    VpnPortalClientConnected(String, String), // (portal, client ip)
    VpnPortalClientDisconnected(String, String), // (portal, client ip)

-   DhcpIpv4Changed(Option<std::net::Ipv4Addr>, Option<std::net::Ipv4Addr>), // (old, new)
-   DhcpIpv4Conflicted(Option<std::net::Ipv4Addr>),
+   DhcpIpv4Changed(Option<cidr::Ipv4Inet>, Option<cidr::Ipv4Inet>), // (old, new)
+   DhcpIpv4Conflicted(Option<cidr::Ipv4Inet>),
}

type EventBus = tokio::sync::broadcast::Sender<GlobalCtxEvent>;
@@ -56,7 +56,7 @@ pub struct GlobalCtx {

    event_bus: EventBus,

-   cached_ipv4: AtomicCell<Option<std::net::Ipv4Addr>>,
+   cached_ipv4: AtomicCell<Option<cidr::Ipv4Inet>>,
    cached_proxy_cidrs: AtomicCell<Option<Vec<cidr::IpCidr>>>,

    ip_collector: Arc<IPCollector>,
@@ -139,7 +139,7 @@ impl GlobalCtx {
        }
    }

-   pub fn get_ipv4(&self) -> Option<std::net::Ipv4Addr> {
+   pub fn get_ipv4(&self) -> Option<cidr::Ipv4Inet> {
        if let Some(ret) = self.cached_ipv4.load() {
            return Some(ret);
        }
@@ -148,7 +148,7 @@ impl GlobalCtx {
        return addr;
    }

-   pub fn set_ipv4(&self, addr: Option<std::net::Ipv4Addr>) {
+   pub fn set_ipv4(&self, addr: Option<cidr::Ipv4Inet>) {
        self.config.set_ipv4(addr);
        self.cached_ipv4.store(None);
    }
@@ -56,6 +56,8 @@ impl HostResolverIter {
                self.ips = ips
                    .filter(|x| x.is_ipv4())
                    .choose_multiple(&mut rand::thread_rng(), self.max_ip_per_domain as usize);
+
+               if self.ips.is_empty() {return self.next().await;}
            }
            Err(e) => {
                tracing::warn!(?host, ?e, "lookup host for stun failed");
@@ -343,6 +345,8 @@ impl StunClientBuilder {
pub struct UdpNatTypeDetectResult {
    source_addr: SocketAddr,
    stun_resps: Vec<BindRequestResponse>,
+   // if we are easy symmetric nat, we need to test with another port to check inc or dec
+   extra_bind_test: Option<BindRequestResponse>,
}

impl UdpNatTypeDetectResult {
@@ -350,6 +354,7 @@ impl UdpNatTypeDetectResult {
        Self {
            source_addr,
            stun_resps,
+           extra_bind_test: None,
        }
    }

@@ -406,7 +411,7 @@
            .filter_map(|x| x.mapped_socket_addr)
            .collect::<BTreeSet<_>>()
            .len();
-       mapped_addr_count < self.stun_server_count()
+       mapped_addr_count == 1
    }

    pub fn nat_type(&self) -> NatType {
@@ -429,7 +434,32 @@
                return NatType::PortRestricted;
            }
        } else if !self.stun_resps.is_empty() {
-           return NatType::Symmetric;
+           if self.public_ips().len() != 1
+               || self.usable_stun_resp_count() <= 1
+               || self.max_port() - self.min_port() > 15
+               || self.extra_bind_test.is_none()
+               || self
+                   .extra_bind_test
+                   .as_ref()
+                   .unwrap()
+                   .mapped_socket_addr
+                   .is_none()
+           {
+               return NatType::Symmetric;
+           } else {
+               let extra_bind_test = self.extra_bind_test.as_ref().unwrap();
+               let extra_port = extra_bind_test.mapped_socket_addr.unwrap().port();
+
+               let max_port_diff = extra_port.saturating_sub(self.max_port());
+               let min_port_diff = self.min_port().saturating_sub(extra_port);
+               if max_port_diff != 0 && max_port_diff < 100 {
+                   return NatType::SymmetricEasyInc;
+               } else if min_port_diff != 0 && min_port_diff < 100 {
+                   return NatType::SymmetricEasyDec;
+               } else {
+                   return NatType::Symmetric;
+               }
+           }
        } else {
            return NatType::Unknown;
        }
@@ -477,6 +507,13 @@
            .max()
            .unwrap_or(u16::MAX)
    }
+
+   pub fn usable_stun_resp_count(&self) -> usize {
+       self.stun_resps
+           .iter()
+           .filter(|x| x.mapped_socket_addr.is_some())
+           .count()
+   }
}

pub struct UdpNatTypeDetector {
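The classification added above tells the "easy" symmetric NAT variants apart by running one extra bind test from another source port and comparing its mapped port against the range seen in the main STUN round: a small positive delta means the NAT allocates ports in increasing order, a small negative delta in decreasing order, and anything else stays plain symmetric. A self-contained sketch of that rule; the enum and function here are illustrative stand-ins for the crate's real types:

```rust
#[derive(Debug, PartialEq)]
enum NatType {
    Symmetric,
    SymmetricEasyInc,
    SymmetricEasyDec,
}

// Compare the extra bind test's mapped port against the min/max mapped ports
// observed earlier; a non-zero delta below 100 in either direction is treated
// as a predictable ("easy") symmetric NAT.
fn classify_symmetric(min_port: u16, max_port: u16, extra_port: u16) -> NatType {
    let max_port_diff = extra_port.saturating_sub(max_port);
    let min_port_diff = min_port.saturating_sub(extra_port);
    if max_port_diff != 0 && max_port_diff < 100 {
        NatType::SymmetricEasyInc
    } else if min_port_diff != 0 && min_port_diff < 100 {
        NatType::SymmetricEasyDec
    } else {
        NatType::Symmetric
    }
}

fn main() {
    // Mapped ports 40000..=40004 were observed; the extra test maps to 40007,
    // so the NAT is classified as incrementing.
    assert_eq!(classify_symmetric(40000, 40004, 40007), NatType::SymmetricEasyInc);
}
```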
@@ -492,6 +529,19 @@ impl UdpNatTypeDetector {
|
||||
}
|
||||
}
|
||||
|
||||
async fn get_extra_bind_result(
|
||||
&self,
|
||||
source_port: u16,
|
||||
stun_server: SocketAddr,
|
||||
) -> Result<BindRequestResponse, Error> {
|
||||
let udp = Arc::new(UdpSocket::bind(format!("0.0.0.0:{}", source_port)).await?);
|
||||
let client_builder = StunClientBuilder::new(udp.clone());
|
||||
client_builder
|
||||
.new_stun_client(stun_server)
|
||||
.bind_request(false, false)
|
||||
.await
|
||||
}
|
||||
|
||||
pub async fn detect_nat_type(&self, source_port: u16) -> Result<UdpNatTypeDetectResult, Error> {
|
||||
let udp = Arc::new(UdpSocket::bind(format!("0.0.0.0:{}", source_port)).await?);
|
||||
self.detect_nat_type_with_socket(udp).await
|
||||
@@ -578,13 +628,28 @@ impl StunInfoCollectorTrait for StunInfoCollector {
|
||||
async fn get_udp_port_mapping(&self, local_port: u16) -> Result<SocketAddr, Error> {
|
||||
self.start_stun_routine();
|
||||
|
||||
let stun_servers = self
|
||||
let mut stun_servers = self
|
||||
.udp_nat_test_result
|
||||
.read()
|
||||
.unwrap()
|
||||
.clone()
|
||||
.map(|x| x.collect_available_stun_server())
|
||||
.ok_or(Error::NotFound)?;
|
||||
.unwrap_or(vec![]);
|
||||
|
||||
if stun_servers.is_empty() {
|
||||
let mut host_resolver =
|
||||
HostResolverIter::new(self.stun_servers.read().unwrap().clone(), 2);
|
||||
while let Some(addr) = host_resolver.next().await {
|
||||
stun_servers.push(addr);
|
||||
if stun_servers.len() >= 2 {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if stun_servers.is_empty() {
|
||||
return Err(Error::NotFound);
|
||||
}
|
||||
|
||||
let udp = Arc::new(UdpSocket::bind(format!("0.0.0.0:{}", local_port)).await?);
|
||||
let mut client_builder = StunClientBuilder::new(udp.clone());
|
||||
@@ -630,9 +695,9 @@ impl StunInfoCollector {
|
||||
// stun server cross nation may return a external ip address with high latency and loss rate
|
||||
vec![
|
||||
"stun.miwifi.com",
|
||||
"stun.cdnbye.com",
|
||||
"stun.hitv.com",
|
||||
"stun.chat.bilibili.com",
|
||||
"stun.hitv.com",
|
||||
"stun.cdnbye.com",
|
||||
"stun.douyucdn.cn:18000",
|
||||
"fwa.lifesizecloud.com",
|
||||
"global.turn.twilio.com",
|
||||
@@ -673,38 +738,41 @@ impl StunInfoCollector {
|
||||
.map(|x| x.to_string())
|
||||
.collect();
|
||||
let detector = UdpNatTypeDetector::new(servers, 1);
|
||||
let ret = detector.detect_nat_type(0).await;
|
||||
let mut ret = detector.detect_nat_type(0).await;
|
||||
tracing::debug!(?ret, "finish udp nat type detect");
|
||||
|
||||
let mut nat_type = NatType::Unknown;
|
||||
let sleep_sec = match &ret {
|
||||
Ok(resp) => {
|
||||
*udp_nat_test_result.write().unwrap() = Some(resp.clone());
|
||||
udp_test_time.store(Local::now());
|
||||
nat_type = resp.nat_type();
|
||||
if nat_type == NatType::Unknown {
|
||||
15
|
||||
} else {
|
||||
600
|
||||
}
|
||||
}
|
||||
_ => 15,
|
||||
};
|
||||
if let Ok(resp) = &ret {
|
||||
tracing::debug!(?resp, "got udp nat type detect result");
|
||||
nat_type = resp.nat_type();
|
||||
}
|
||||
|
||||
// if nat type is symmtric, detect with another port to gather more info
|
||||
if nat_type == NatType::Symmetric {
|
||||
let old_resp = ret.unwrap();
|
||||
let old_local_port = old_resp.local_addr().port();
|
||||
let new_port = if old_local_port >= 65535 {
|
||||
old_local_port - 1
|
||||
} else {
|
||||
old_local_port + 1
|
||||
};
|
||||
let ret = detector.detect_nat_type(new_port).await;
|
||||
tracing::debug!(?ret, "finish udp nat type detect with another port");
|
||||
if let Ok(resp) = ret {
|
||||
udp_nat_test_result.write().unwrap().as_mut().map(|x| {
|
||||
x.extend_result(resp);
|
||||
});
|
||||
let old_resp = ret.as_mut().unwrap();
|
||||
tracing::debug!(?old_resp, "start get extra bind result");
|
||||
let available_stun_servers = old_resp.collect_available_stun_server();
|
||||
for server in available_stun_servers.iter() {
|
||||
let ret = detector
|
||||
.get_extra_bind_result(0, *server)
|
||||
.await
|
||||
.with_context(|| "get extra bind result failed");
|
||||
tracing::debug!(?ret, "finish udp nat type detect with another port");
|
||||
if let Ok(resp) = ret {
|
||||
old_resp.extra_bind_test = Some(resp);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let mut sleep_sec = 10;
|
||||
if let Ok(resp) = &ret {
|
||||
udp_test_time.store(Local::now());
|
||||
*udp_nat_test_result.write().unwrap() = Some(resp.clone());
|
||||
if nat_type != NatType::Unknown
|
||||
&& (nat_type != NatType::Symmetric || resp.extra_bind_test.is_some())
|
||||
{
|
||||
sleep_sec = 600
|
||||
}
|
||||
}
|
||||
|
||||
@@ -734,7 +802,7 @@ impl StunInfoCollectorTrait for MockStunInfoCollector {
|
||||
last_update_time: std::time::Instant::now().elapsed().as_secs() as i64,
|
||||
min_port: 100,
|
||||
max_port: 200,
|
||||
..Default::default()
|
||||
public_ip: vec!["127.0.0.1".to_string()],
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -1,6 +1,6 @@
|
||||
// try connect peers directly, with either its public ip or lan ip
|
||||
|
||||
use std::{net::SocketAddr, sync::Arc};
|
||||
use std::{net::SocketAddr, sync::Arc, time::Duration};
|
||||
|
||||
use crate::{
|
||||
common::{error::Error, global_ctx::ArcGlobalCtx, PeerId},
|
||||
@@ -19,6 +19,7 @@ use crate::{
|
||||
|
||||
use crate::proto::cli::PeerConnInfo;
|
||||
use anyhow::Context;
|
||||
use rand::Rng;
|
||||
use tokio::{task::JoinSet, time::timeout};
|
||||
use tracing::Instrument;
|
||||
use url::Host;
|
||||
@@ -64,13 +65,13 @@ impl PeerManagerForDirectConnector for PeerManager {
|
||||
struct DstBlackListItem(PeerId, String);
|
||||
|
||||
#[derive(Hash, Eq, PartialEq, Clone)]
|
||||
struct DstSchemeBlackListItem(PeerId, String);
|
||||
struct DstListenerUrlBlackListItem(PeerId, url::Url);
|
||||
|
||||
struct DirectConnectorManagerData {
|
||||
global_ctx: ArcGlobalCtx,
|
||||
peer_manager: Arc<PeerManager>,
|
||||
dst_blacklist: timedmap::TimedMap<DstBlackListItem, ()>,
|
||||
dst_sceme_blacklist: timedmap::TimedMap<DstSchemeBlackListItem, ()>,
|
||||
dst_listener_blacklist: timedmap::TimedMap<DstListenerUrlBlackListItem, ()>,
|
||||
}
|
||||
|
||||
impl DirectConnectorManagerData {
|
||||
@@ -79,7 +80,7 @@ impl DirectConnectorManagerData {
|
||||
global_ctx,
|
||||
peer_manager,
|
||||
dst_blacklist: timedmap::TimedMap::new(),
|
||||
dst_sceme_blacklist: timedmap::TimedMap::new(),
|
||||
dst_listener_blacklist: timedmap::TimedMap::new(),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -147,7 +148,7 @@ impl DirectConnectorManager {
|
||||
}
|
||||
|
||||
while let Some(task_ret) = tasks.join_next().await {
|
||||
tracing::trace!(?task_ret, "direct connect task ret");
|
||||
tracing::debug!(?task_ret, ?my_peer_id, "direct connect task ret");
|
||||
}
|
||||
tokio::time::sleep(std::time::Duration::from_secs(5)).await;
|
||||
}
|
||||
@@ -168,7 +169,7 @@ impl DirectConnectorManager {
|
||||
.dst_blacklist
|
||||
.contains(&DstBlackListItem(dst_peer_id.clone(), addr.clone()))
|
||||
{
|
||||
tracing::trace!("try_connect_to_ip failed, addr in blacklist: {}", addr);
|
||||
tracing::debug!("try_connect_to_ip failed, addr in blacklist: {}", addr);
|
||||
return Err(Error::UrlInBlacklist);
|
||||
}
|
||||
|
||||
@@ -203,24 +204,38 @@ impl DirectConnectorManager {
|
||||
dst_peer_id: PeerId,
|
||||
addr: String,
|
||||
) -> Result<(), Error> {
|
||||
let ret = Self::do_try_connect_to_ip(data.clone(), dst_peer_id, addr.clone()).await;
|
||||
if let Err(e) = ret {
|
||||
if !matches!(e, Error::UrlInBlacklist) {
|
||||
tracing::info!(
|
||||
"try_connect_to_ip failed: {:?}, peer_id: {}",
|
||||
e,
|
||||
dst_peer_id
|
||||
);
|
||||
let mut rand_gen = rand::rngs::OsRng::default();
|
||||
let backoff_ms = vec![1000, 2000, 4000];
|
||||
let mut backoff_idx = 0;
|
||||
|
||||
loop {
|
||||
let ret = Self::do_try_connect_to_ip(data.clone(), dst_peer_id, addr.clone()).await;
|
||||
tracing::debug!(?ret, ?dst_peer_id, ?addr, "try_connect_to_ip return");
|
||||
if matches!(ret, Err(Error::UrlInBlacklist) | Ok(_)) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
if backoff_idx < backoff_ms.len() {
|
||||
let delta = backoff_ms[backoff_idx] >> 1;
|
||||
assert!(delta > 0);
|
||||
assert!(delta < backoff_ms[backoff_idx]);
|
||||
|
||||
tokio::time::sleep(Duration::from_millis(
|
||||
(backoff_ms[backoff_idx] + rand_gen.gen_range(-delta..delta)) as u64,
|
||||
))
|
||||
.await;
|
||||
|
||||
backoff_idx += 1;
|
||||
continue;
|
||||
} else {
|
||||
data.dst_blacklist.insert(
|
||||
DstBlackListItem(dst_peer_id.clone(), addr.clone()),
|
||||
(),
|
||||
std::time::Duration::from_secs(DIRECT_CONNECTOR_BLACKLIST_TIMEOUT_SEC),
|
||||
);
|
||||
|
||||
return ret;
|
||||
}
|
||||
return Err(e);
|
||||
} else {
|
||||
tracing::info!("try_connect_to_ip success, peer_id: {}", dst_peer_id);
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
|
||||
@@ -230,6 +245,8 @@ impl DirectConnectorManager {
|
||||
dst_peer_id: PeerId,
|
||||
ip_list: GetIpListResponse,
|
||||
) -> Result<(), Error> {
|
||||
data.dst_listener_blacklist.cleanup();
|
||||
|
||||
let enable_ipv6 = data.global_ctx.get_flags().enable_ipv6;
|
||||
let available_listeners = ip_list
|
||||
.listeners
|
||||
@@ -238,14 +255,15 @@ impl DirectConnectorManager {
|
||||
.filter_map(|l| if l.scheme() != "ring" { Some(l) } else { None })
|
||||
.filter(|l| l.port().is_some() && l.host().is_some())
|
||||
.filter(|l| {
|
||||
!data.dst_sceme_blacklist.contains(&DstSchemeBlackListItem(
|
||||
dst_peer_id.clone(),
|
||||
l.scheme().to_string(),
|
||||
))
|
||||
!data
|
||||
.dst_listener_blacklist
|
||||
.contains(&DstListenerUrlBlackListItem(dst_peer_id.clone(), l.clone()))
|
||||
})
|
||||
.filter(|l| enable_ipv6 || !matches!(l.host().unwrap().to_owned(), Host::Ipv6(_)))
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
tracing::debug!(?available_listeners, "got available listeners");
|
||||
|
||||
let mut listener = available_listeners.get(0).ok_or(anyhow::anyhow!(
|
||||
"peer {} have no valid listener",
|
||||
dst_peer_id
|
||||
@@ -270,6 +288,13 @@ impl DirectConnectorManager {
|
||||
dst_peer_id.clone(),
|
||||
addr.to_string(),
|
||||
));
|
||||
} else {
|
||||
tracing::error!(
|
||||
?ip,
|
||||
?listener,
|
||||
?dst_peer_id,
|
||||
"failed to set host for interface ipv4"
|
||||
);
|
||||
}
|
||||
});
|
||||
|
||||
@@ -284,6 +309,13 @@ impl DirectConnectorManager {
|
||||
dst_peer_id.clone(),
|
||||
addr.to_string(),
|
||||
));
|
||||
} else {
|
||||
tracing::error!(
|
||||
?public_ipv4,
|
||||
?listener,
|
||||
?dst_peer_id,
|
||||
"failed to set host for public ipv4"
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -299,6 +331,13 @@ impl DirectConnectorManager {
|
||||
dst_peer_id.clone(),
|
||||
addr.to_string(),
|
||||
));
|
||||
} else {
|
||||
tracing::error!(
|
||||
?ip,
|
||||
?listener,
|
||||
?dst_peer_id,
|
||||
"failed to set host for interface ipv6"
|
||||
);
|
||||
}
|
||||
});
|
||||
|
||||
@@ -313,6 +352,13 @@ impl DirectConnectorManager {
|
||||
dst_peer_id.clone(),
|
||||
addr.to_string(),
|
||||
));
|
||||
} else {
|
||||
tracing::error!(
|
||||
?public_ipv6,
|
||||
?listener,
|
||||
?dst_peer_id,
|
||||
"failed to set host for public ipv6"
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -323,16 +369,28 @@ impl DirectConnectorManager {
|
||||
|
||||
let mut has_succ = false;
|
||||
while let Some(ret) = tasks.join_next().await {
|
||||
if let Err(e) = ret {
|
||||
tracing::error!("join direct connect task failed: {:?}", e);
|
||||
} else if let Ok(Ok(_)) = ret {
|
||||
has_succ = true;
|
||||
match ret {
|
||||
Ok(Ok(_)) => {
|
||||
has_succ = true;
|
||||
tracing::info!(
|
||||
?dst_peer_id,
|
||||
?listener,
|
||||
"try direct connect to peer success"
|
||||
);
|
||||
break;
|
||||
}
|
||||
Ok(Err(e)) => {
|
||||
tracing::info!(?e, "try direct connect to peer failed");
|
||||
}
|
||||
Err(e) => {
|
||||
tracing::error!(?e, "try direct connect to peer task join failed");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if !has_succ {
|
||||
data.dst_sceme_blacklist.insert(
|
||||
DstSchemeBlackListItem(dst_peer_id.clone(), listener.scheme().to_string()),
|
||||
data.dst_listener_blacklist.insert(
|
||||
DstListenerUrlBlackListItem(dst_peer_id.clone(), listener.clone()),
|
||||
(),
|
||||
std::time::Duration::from_secs(DIRECT_CONNECTOR_BLACKLIST_TIMEOUT_SEC),
|
||||
);
|
||||
@@ -355,7 +413,7 @@ impl DirectConnectorManager {
|
||||
}
|
||||
}
|
||||
|
||||
tracing::trace!("try direct connect to peer: {}", dst_peer_id);
|
||||
tracing::debug!("try direct connect to peer: {}", dst_peer_id);
|
||||
|
||||
let rpc_stub = peer_manager
|
||||
.get_peer_rpc_mgr()
|
||||
@@ -367,7 +425,7 @@ impl DirectConnectorManager {
|
||||
);
|
||||
|
||||
let ip_list = rpc_stub
|
||||
- .get_ip_list(BaseController {}, GetIpListRequest {})
+ .get_ip_list(BaseController::default(), GetIpListRequest {})
|
||||
.await
|
||||
.with_context(|| format!("get ip list from peer {}", dst_peer_id))?;
|
||||
|
||||
@@ -384,7 +442,7 @@ mod tests {
|
||||
use crate::{
|
||||
connector::direct::{
|
||||
DirectConnectorManager, DirectConnectorManagerData, DstBlackListItem,
|
||||
- DstSchemeBlackListItem,
+ DstListenerUrlBlackListItem,
|
||||
},
|
||||
instance::listeners::ListenerManager,
|
||||
peers::tests::{
|
||||
@@ -461,8 +519,11 @@ mod tests {
|
||||
.unwrap();
|
||||
|
||||
assert!(data
|
||||
.dst_sceme_blacklist
|
||||
.contains(&DstSchemeBlackListItem(1, "tcp".into())));
|
||||
.dst_listener_blacklist
|
||||
.contains(&DstListenerUrlBlackListItem(
|
||||
1,
|
||||
"tcp://127.0.0.1:10222".parse().unwrap()
|
||||
)));
|
||||
|
||||
assert!(data
|
||||
.dst_blacklist
|
||||
|
File diff suppressed because it is too large

easytier/src/connector/udp_hole_punch/both_easy_sym.rs (new file, 399 lines)
@@ -0,0 +1,399 @@
|
||||
use std::{
|
||||
net::{IpAddr, SocketAddr, SocketAddrV4},
|
||||
sync::Arc,
|
||||
time::{Duration, Instant},
|
||||
};
|
||||
|
||||
use anyhow::Context;
|
||||
use tokio::sync::Mutex;
|
||||
|
||||
use crate::{
|
||||
common::{scoped_task::ScopedTask, stun::StunInfoCollectorTrait, PeerId},
|
||||
connector::udp_hole_punch::common::{
|
||||
try_connect_with_socket, UdpHolePunchListener, HOLE_PUNCH_PACKET_BODY_LEN,
|
||||
},
|
||||
peers::peer_manager::PeerManager,
|
||||
proto::{
|
||||
peer_rpc::{
|
||||
SendPunchPacketBothEasySymRequest, SendPunchPacketBothEasySymResponse,
|
||||
UdpHolePunchRpcClientFactory,
|
||||
},
|
||||
rpc_types::{self, controller::BaseController},
|
||||
},
|
||||
tunnel::{udp::new_hole_punch_packet, Tunnel},
|
||||
};
|
||||
|
||||
use super::common::{PunchHoleServerCommon, UdpNatType, UdpSocketArray};
|
||||
|
||||
const UDP_ARRAY_SIZE_FOR_BOTH_EASY_SYM: usize = 25;
|
||||
const DST_PORT_OFFSET: u16 = 20;
|
||||
const REMOTE_WAIT_TIME_MS: u64 = 5000;
|
||||
|
||||
pub(crate) struct PunchBothEasySymHoleServer {
|
||||
common: Arc<PunchHoleServerCommon>,
|
||||
task: Mutex<Option<ScopedTask<()>>>,
|
||||
}
|
||||
|
||||
impl PunchBothEasySymHoleServer {
|
||||
pub(crate) fn new(common: Arc<PunchHoleServerCommon>) -> Self {
|
||||
Self {
|
||||
common,
|
||||
task: Mutex::new(None),
|
||||
}
|
||||
}
|
||||
|
||||
// hard sym means public port is random and cannot be predicted
|
||||
#[tracing::instrument(skip(self), ret, err)]
|
||||
pub(crate) async fn send_punch_packet_both_easy_sym(
|
||||
&self,
|
||||
request: SendPunchPacketBothEasySymRequest,
|
||||
) -> Result<SendPunchPacketBothEasySymResponse, rpc_types::error::Error> {
|
||||
tracing::info!("send_punch_packet_both_easy_sym start");
|
||||
let busy_resp = Ok(SendPunchPacketBothEasySymResponse {
|
||||
is_busy: true,
|
||||
..Default::default()
|
||||
});
|
||||
let Ok(mut locked_task) = self.task.try_lock() else {
|
||||
return busy_resp;
|
||||
};
|
||||
if locked_task.is_some() && !locked_task.as_ref().unwrap().is_finished() {
|
||||
return busy_resp;
|
||||
}
|
||||
|
||||
let global_ctx = self.common.get_global_ctx();
|
||||
let cur_mapped_addr = global_ctx
|
||||
.get_stun_info_collector()
|
||||
.get_udp_port_mapping(0)
|
||||
.await
|
||||
.with_context(|| "failed to get udp port mapping")?;
|
||||
|
||||
tracing::info!("send_punch_packet_hard_sym start");
|
||||
let socket_count = request.udp_socket_count as usize;
|
||||
let public_ips = request
|
||||
.public_ip
|
||||
.ok_or(anyhow::anyhow!("public_ip is required"))?;
|
||||
let transaction_id = request.transaction_id;
|
||||
|
||||
let udp_array =
|
||||
UdpSocketArray::new(socket_count, self.common.get_global_ctx().net_ns.clone());
|
||||
udp_array.start().await?;
|
||||
udp_array.add_intreast_tid(transaction_id);
|
||||
let peer_mgr = self.common.get_peer_mgr();
|
||||
|
||||
let punch_packet =
|
||||
new_hole_punch_packet(transaction_id, HOLE_PUNCH_PACKET_BODY_LEN).into_bytes();
|
||||
let mut punched = vec![];
|
||||
let common = self.common.clone();
|
||||
|
||||
let task = tokio::spawn(async move {
|
||||
let mut listeners = Vec::new();
|
||||
let start_time = Instant::now();
|
||||
let wait_time_ms = request.wait_time_ms.min(8000);
|
||||
while start_time.elapsed() < Duration::from_millis(wait_time_ms as u64) {
|
||||
if let Err(e) = udp_array
|
||||
.send_with_all(
|
||||
&punch_packet,
|
||||
SocketAddr::V4(SocketAddrV4::new(
|
||||
public_ips.into(),
|
||||
request.dst_port_num as u16,
|
||||
)),
|
||||
)
|
||||
.await
|
||||
{
|
||||
tracing::error!(?e, "failed to send hole punch packet");
|
||||
break;
|
||||
}
|
||||
|
||||
tokio::time::sleep(Duration::from_millis(100)).await;
|
||||
|
||||
if let Some(s) = udp_array.try_fetch_punched_socket(transaction_id) {
|
||||
tracing::info!(?s, ?transaction_id, "got punched socket in both easy sym");
|
||||
assert!(Arc::strong_count(&s.socket) == 1);
|
||||
let Some(port) = s.socket.local_addr().ok().map(|addr| addr.port()) else {
|
||||
tracing::warn!("failed to get local addr from punched socket");
|
||||
continue;
|
||||
};
|
||||
let remote_addr = s.remote_addr;
|
||||
drop(s);
|
||||
|
||||
let listener =
|
||||
match UdpHolePunchListener::new_ext(peer_mgr.clone(), false, Some(port))
|
||||
.await
|
||||
{
|
||||
Ok(l) => l,
|
||||
Err(e) => {
|
||||
tracing::warn!(?e, "failed to create listener");
|
||||
continue;
|
||||
}
|
||||
};
|
||||
punched.push((listener.get_socket().await, remote_addr));
|
||||
listeners.push(listener);
|
||||
}
|
||||
|
||||
// if any listener is punched, we can break the loop
|
||||
for l in &listeners {
|
||||
if l.get_conn_count().await > 0 {
|
||||
tracing::info!(?l, "got punched listener");
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if !punched.is_empty() {
|
||||
tracing::debug!(?punched, "got punched socket and keep sending punch packet");
|
||||
}
|
||||
|
||||
for p in &punched {
|
||||
let (socket, remote_addr) = p;
|
||||
let send_remote_ret = socket.send_to(&punch_packet, remote_addr).await;
|
||||
tracing::debug!(
|
||||
?send_remote_ret,
|
||||
?socket,
|
||||
"send hole punch packet to punched remote"
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
for l in listeners {
|
||||
if l.get_conn_count().await > 0 {
|
||||
common.add_listener(l).await;
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
*locked_task = Some(task.into());
|
||||
return Ok(SendPunchPacketBothEasySymResponse {
|
||||
is_busy: false,
|
||||
base_mapped_addr: Some(cur_mapped_addr.into()),
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub(crate) struct PunchBothEasySymHoleClient {
|
||||
peer_mgr: Arc<PeerManager>,
|
||||
}
|
||||
|
||||
impl PunchBothEasySymHoleClient {
|
||||
pub(crate) fn new(peer_mgr: Arc<PeerManager>) -> Self {
|
||||
Self { peer_mgr }
|
||||
}
|
||||
|
||||
#[tracing::instrument(ret)]
|
||||
pub(crate) async fn do_hole_punching(
|
||||
&self,
|
||||
dst_peer_id: PeerId,
|
||||
my_nat_info: UdpNatType,
|
||||
peer_nat_info: UdpNatType,
|
||||
is_busy: &mut bool,
|
||||
) -> Result<Option<Box<dyn Tunnel>>, anyhow::Error> {
|
||||
*is_busy = false;
|
||||
|
||||
let udp_array = UdpSocketArray::new(
|
||||
UDP_ARRAY_SIZE_FOR_BOTH_EASY_SYM,
|
||||
self.peer_mgr.get_global_ctx().net_ns.clone(),
|
||||
);
|
||||
udp_array.start().await?;
|
||||
|
||||
let global_ctx = self.peer_mgr.get_global_ctx();
|
||||
let cur_mapped_addr = global_ctx
|
||||
.get_stun_info_collector()
|
||||
.get_udp_port_mapping(0)
|
||||
.await
|
||||
.with_context(|| "failed to get udp port mapping")?;
|
||||
let my_public_ip = match cur_mapped_addr.ip() {
|
||||
IpAddr::V4(v4) => v4,
|
||||
_ => {
|
||||
anyhow::bail!("ipv6 is not supported");
|
||||
}
|
||||
};
|
||||
let me_is_incremental = my_nat_info
|
||||
.get_inc_of_easy_sym()
|
||||
.ok_or(anyhow::anyhow!("me_is_incremental is required"))?;
|
||||
let peer_is_incremental = peer_nat_info
|
||||
.get_inc_of_easy_sym()
|
||||
.ok_or(anyhow::anyhow!("peer_is_incremental is required"))?;
|
||||
|
||||
let rpc_stub = self
|
||||
.peer_mgr
|
||||
.get_peer_rpc_mgr()
|
||||
.rpc_client()
|
||||
.scoped_client::<UdpHolePunchRpcClientFactory<BaseController>>(
|
||||
self.peer_mgr.my_peer_id(),
|
||||
dst_peer_id,
|
||||
global_ctx.get_network_name(),
|
||||
);
|
||||
|
||||
let tid = rand::random();
|
||||
udp_array.add_intreast_tid(tid);
|
||||
|
||||
let remote_ret = rpc_stub
|
||||
.send_punch_packet_both_easy_sym(
|
||||
BaseController {
|
||||
timeout_ms: 2000,
|
||||
..Default::default()
|
||||
},
|
||||
SendPunchPacketBothEasySymRequest {
|
||||
transaction_id: tid,
|
||||
public_ip: Some(my_public_ip.into()),
|
||||
dst_port_num: if me_is_incremental {
|
||||
cur_mapped_addr.port().saturating_add(DST_PORT_OFFSET)
|
||||
} else {
|
||||
cur_mapped_addr.port().saturating_sub(DST_PORT_OFFSET)
|
||||
} as u32,
|
||||
udp_socket_count: UDP_ARRAY_SIZE_FOR_BOTH_EASY_SYM as u32,
|
||||
wait_time_ms: REMOTE_WAIT_TIME_MS as u32,
|
||||
},
|
||||
)
|
||||
.await?;
|
||||
if remote_ret.is_busy {
|
||||
*is_busy = true;
|
||||
anyhow::bail!("remote is busy");
|
||||
}
|
||||
|
||||
let mut remote_mapped_addr = remote_ret
|
||||
.base_mapped_addr
|
||||
.ok_or(anyhow::anyhow!("remote_mapped_addr is required"))?;
|
||||
|
||||
let now = Instant::now();
|
||||
remote_mapped_addr.port = if peer_is_incremental {
|
||||
remote_mapped_addr
|
||||
.port
|
||||
.saturating_add(DST_PORT_OFFSET as u32)
|
||||
} else {
|
||||
remote_mapped_addr
|
||||
.port
|
||||
.saturating_sub(DST_PORT_OFFSET as u32)
|
||||
};
|
||||
tracing::debug!(
|
||||
?remote_mapped_addr,
|
||||
?remote_ret,
|
||||
"start send hole punch packet for both easy sym"
|
||||
);
|
||||
|
||||
while now.elapsed().as_millis() < (REMOTE_WAIT_TIME_MS + 1000).into() {
|
||||
udp_array
|
||||
.send_with_all(
|
||||
&new_hole_punch_packet(tid, HOLE_PUNCH_PACKET_BODY_LEN).into_bytes(),
|
||||
remote_mapped_addr.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
tokio::time::sleep(Duration::from_millis(100)).await;
|
||||
|
||||
let Some(socket) = udp_array.try_fetch_punched_socket(tid) else {
|
||||
tracing::trace!(
|
||||
?remote_mapped_addr,
|
||||
?tid,
|
||||
"no punched socket found, send some more hole punch packets"
|
||||
);
|
||||
continue;
|
||||
};
|
||||
|
||||
tracing::info!(
|
||||
?socket,
|
||||
?remote_mapped_addr,
|
||||
?tid,
|
||||
"got punched socket in both easy sym"
|
||||
);
|
||||
|
||||
for _ in 0..2 {
|
||||
match try_connect_with_socket(socket.socket.clone(), remote_mapped_addr.into())
|
||||
.await
|
||||
{
|
||||
Ok(tunnel) => {
|
||||
return Ok(Some(tunnel));
|
||||
}
|
||||
Err(e) => {
|
||||
tracing::error!(?e, "failed to connect with socket");
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
udp_array.add_new_socket(socket.socket).await?;
|
||||
}
|
||||
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub mod tests {
|
||||
use std::{
|
||||
sync::{atomic::AtomicU32, Arc},
|
||||
time::Duration,
|
||||
};
|
||||
|
||||
use tokio::net::UdpSocket;
|
||||
|
||||
use crate::connector::udp_hole_punch::RUN_TESTING;
|
||||
use crate::{
|
||||
connector::udp_hole_punch::{
|
||||
tests::create_mock_peer_manager_with_mock_stun, UdpHolePunchConnector,
|
||||
},
|
||||
peers::tests::{connect_peer_manager, wait_route_appear},
|
||||
proto::common::NatType,
|
||||
tunnel::common::tests::wait_for_condition,
|
||||
};
|
||||
|
||||
#[rstest::rstest]
|
||||
#[tokio::test]
|
||||
#[serial_test::serial(hole_punch)]
|
||||
async fn hole_punching_easy_sym(#[values("true", "false")] is_inc: bool) {
|
||||
RUN_TESTING.store(true, std::sync::atomic::Ordering::Relaxed);
|
||||
|
||||
let p_a = create_mock_peer_manager_with_mock_stun(if is_inc {
|
||||
NatType::SymmetricEasyInc
|
||||
} else {
|
||||
NatType::SymmetricEasyDec
|
||||
})
|
||||
.await;
|
||||
let p_b = create_mock_peer_manager_with_mock_stun(NatType::PortRestricted).await;
|
||||
let p_c = create_mock_peer_manager_with_mock_stun(if !is_inc {
|
||||
NatType::SymmetricEasyInc
|
||||
} else {
|
||||
NatType::SymmetricEasyDec
|
||||
})
|
||||
.await;
|
||||
connect_peer_manager(p_a.clone(), p_b.clone()).await;
|
||||
connect_peer_manager(p_b.clone(), p_c.clone()).await;
|
||||
wait_route_appear(p_a.clone(), p_c.clone()).await.unwrap();
|
||||
|
||||
let mut hole_punching_a = UdpHolePunchConnector::new(p_a.clone());
|
||||
let mut hole_punching_c = UdpHolePunchConnector::new(p_c.clone());
|
||||
|
||||
hole_punching_a.run().await.unwrap();
|
||||
hole_punching_c.run().await.unwrap();
|
||||
|
||||
// 144 + DST_PORT_OFFSET = 164
|
||||
let udp1 = Arc::new(UdpSocket::bind("0.0.0.0:40164").await.unwrap());
|
||||
// 144 - DST_PORT_OFFSET = 124
|
||||
let udp2 = Arc::new(UdpSocket::bind("0.0.0.0:40124").await.unwrap());
|
||||
let udps = vec![udp1, udp2];
|
||||
|
||||
let counter = Arc::new(AtomicU32::new(0));
|
||||
|
||||
// all these sockets should receive hole punching packet
|
||||
for udp in udps.iter().map(Arc::clone) {
|
||||
let counter = counter.clone();
|
||||
tokio::spawn(async move {
|
||||
let mut buf = [0u8; 1024];
|
||||
let (len, addr) = udp.recv_from(&mut buf).await.unwrap();
|
||||
println!(
|
||||
"got predictable punch packet, {:?} {:?} {:?}",
|
||||
len,
|
||||
addr,
|
||||
udp.local_addr()
|
||||
);
|
||||
counter.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
|
||||
});
|
||||
}
|
||||
|
||||
hole_punching_a.client.run_immediately().await;
|
||||
let udp_len = udps.len();
|
||||
wait_for_condition(
|
||||
|| async { counter.load(std::sync::atomic::Ordering::Relaxed) == udp_len as u32 },
|
||||
Duration::from_secs(30),
|
||||
)
|
||||
.await;
|
||||
}
|
||||
}
|
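The port prediction used by both_easy_sym.rs above comes down to shifting the peer's STUN-mapped port by DST_PORT_OFFSET in the direction its NAT allocates new ports. A hedged one-function sketch of that rule (illustration only, not part of the commit):

fn predicted_punch_port(base_mapped_port: u16, nat_is_incremental: bool, offset: u16) -> u16 {
    // an "easy symmetric" NAT hands out ports monotonically, so the next mapping is
    // expected slightly above (incremental) or below (decremental) the probed one
    if nat_is_incremental {
        base_mapped_port.saturating_add(offset)
    } else {
        base_mapped_port.saturating_sub(offset)
    }
}

With offset = 20 (DST_PORT_OFFSET), a peer whose probed mapping is, say, port 40144 is punched at 40164 or 40124, which is why the test above binds its expectation sockets on exactly those two ports.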
easytier/src/connector/udp_hole_punch/common.rs (new file, 589 lines)
@@ -0,0 +1,589 @@
|
||||
use std::{
|
||||
net::{Ipv4Addr, SocketAddr, SocketAddrV4},
|
||||
sync::Arc,
|
||||
time::Duration,
|
||||
};
|
||||
|
||||
use crossbeam::atomic::AtomicCell;
|
||||
use dashmap::{DashMap, DashSet};
|
||||
use rand::seq::SliceRandom as _;
|
||||
use tokio::{net::UdpSocket, sync::Mutex, task::JoinSet};
|
||||
use tracing::{instrument, Instrument, Level};
|
||||
use zerocopy::FromBytes as _;
|
||||
|
||||
use crate::{
|
||||
common::{
|
||||
error::Error, global_ctx::ArcGlobalCtx, join_joinset_background, netns::NetNS,
|
||||
stun::StunInfoCollectorTrait as _, PeerId,
|
||||
},
|
||||
defer,
|
||||
peers::peer_manager::PeerManager,
|
||||
proto::common::NatType,
|
||||
tunnel::{
|
||||
packet_def::{UDPTunnelHeader, UdpPacketType, UDP_TUNNEL_HEADER_SIZE},
|
||||
udp::{new_hole_punch_packet, UdpTunnelConnector, UdpTunnelListener},
|
||||
Tunnel, TunnelConnCounter, TunnelListener as _,
|
||||
},
|
||||
};
|
||||
|
||||
pub(crate) const HOLE_PUNCH_PACKET_BODY_LEN: u16 = 16;
|
||||
|
||||
fn generate_shuffled_port_vec() -> Vec<u16> {
|
||||
let mut rng = rand::thread_rng();
|
||||
let mut port_vec: Vec<u16> = (1..=65535).collect();
|
||||
port_vec.shuffle(&mut rng);
|
||||
port_vec
|
||||
}
|
||||
|
||||
pub(crate) enum UdpPunchClientMethod {
|
||||
None,
|
||||
ConeToCone,
|
||||
SymToCone,
|
||||
EasySymToEasySym,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
|
||||
pub(crate) enum UdpNatType {
|
||||
Unknown,
|
||||
Open(NatType),
|
||||
Cone(NatType),
|
||||
// bool means if it is incremental
|
||||
EasySymmetric(NatType, bool),
|
||||
HardSymmetric(NatType),
|
||||
}
|
||||
|
||||
impl From<NatType> for UdpNatType {
|
||||
fn from(nat_type: NatType) -> Self {
|
||||
match nat_type {
|
||||
NatType::Unknown => UdpNatType::Unknown,
|
||||
NatType::NoPat | NatType::OpenInternet => UdpNatType::Open(nat_type),
|
||||
NatType::FullCone | NatType::Restricted | NatType::PortRestricted => {
|
||||
UdpNatType::Cone(nat_type)
|
||||
}
|
||||
NatType::Symmetric | NatType::SymUdpFirewall => UdpNatType::HardSymmetric(nat_type),
|
||||
NatType::SymmetricEasyInc => UdpNatType::EasySymmetric(nat_type, true),
|
||||
NatType::SymmetricEasyDec => UdpNatType::EasySymmetric(nat_type, false),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Into<NatType> for UdpNatType {
|
||||
fn into(self) -> NatType {
|
||||
match self {
|
||||
UdpNatType::Unknown => NatType::Unknown,
|
||||
UdpNatType::Open(nat_type) => nat_type,
|
||||
UdpNatType::Cone(nat_type) => nat_type,
|
||||
UdpNatType::EasySymmetric(nat_type, _) => nat_type,
|
||||
UdpNatType::HardSymmetric(nat_type) => nat_type,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl UdpNatType {
|
||||
pub(crate) fn is_open(&self) -> bool {
|
||||
matches!(self, UdpNatType::Open(_))
|
||||
}
|
||||
|
||||
pub(crate) fn is_unknown(&self) -> bool {
|
||||
matches!(self, UdpNatType::Unknown)
|
||||
}
|
||||
|
||||
pub(crate) fn is_sym(&self) -> bool {
|
||||
self.is_hard_sym() || self.is_easy_sym()
|
||||
}
|
||||
|
||||
pub(crate) fn is_hard_sym(&self) -> bool {
|
||||
matches!(self, UdpNatType::HardSymmetric(_))
|
||||
}
|
||||
|
||||
pub(crate) fn is_easy_sym(&self) -> bool {
|
||||
matches!(self, UdpNatType::EasySymmetric(_, _))
|
||||
}
|
||||
|
||||
pub(crate) fn is_cone(&self) -> bool {
|
||||
matches!(self, UdpNatType::Cone(_))
|
||||
}
|
||||
|
||||
pub(crate) fn get_inc_of_easy_sym(&self) -> Option<bool> {
|
||||
match self {
|
||||
UdpNatType::EasySymmetric(_, inc) => Some(*inc),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn get_punch_hole_method(&self, other: Self) -> UdpPunchClientMethod {
|
||||
if other.is_unknown() {
|
||||
if self.is_sym() {
|
||||
return UdpPunchClientMethod::SymToCone;
|
||||
} else {
|
||||
return UdpPunchClientMethod::ConeToCone;
|
||||
}
|
||||
}
|
||||
|
||||
if self.is_unknown() {
|
||||
if other.is_sym() {
|
||||
return UdpPunchClientMethod::None;
|
||||
} else {
|
||||
return UdpPunchClientMethod::ConeToCone;
|
||||
}
|
||||
}
|
||||
|
||||
if self.is_open() || other.is_open() {
|
||||
// open nat does not need to punch hole
|
||||
return UdpPunchClientMethod::None;
|
||||
}
|
||||
|
||||
if self.is_cone() {
|
||||
if other.is_sym() {
|
||||
return UdpPunchClientMethod::None;
|
||||
} else {
|
||||
return UdpPunchClientMethod::ConeToCone;
|
||||
}
|
||||
} else if self.is_easy_sym() {
|
||||
if other.is_hard_sym() {
|
||||
return UdpPunchClientMethod::None;
|
||||
} else if other.is_easy_sym() {
|
||||
return UdpPunchClientMethod::EasySymToEasySym;
|
||||
} else {
|
||||
return UdpPunchClientMethod::SymToCone;
|
||||
}
|
||||
} else if self.is_hard_sym() {
|
||||
if other.is_sym() {
|
||||
return UdpPunchClientMethod::None;
|
||||
} else {
|
||||
return UdpPunchClientMethod::SymToCone;
|
||||
}
|
||||
}
|
||||
|
||||
unreachable!("invalid nat type");
|
||||
}
|
||||
|
||||
pub(crate) fn can_punch_hole_as_client(
|
||||
&self,
|
||||
other: Self,
|
||||
my_peer_id: PeerId,
|
||||
dst_peer_id: PeerId,
|
||||
) -> bool {
|
||||
match self.get_punch_hole_method(other) {
|
||||
UdpPunchClientMethod::None => false,
|
||||
UdpPunchClientMethod::ConeToCone | UdpPunchClientMethod::SymToCone => true,
|
||||
UdpPunchClientMethod::EasySymToEasySym => my_peer_id < dst_peer_id,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub(crate) struct PunchedUdpSocket {
|
||||
pub(crate) socket: Arc<UdpSocket>,
|
||||
pub(crate) tid: u32,
|
||||
pub(crate) remote_addr: SocketAddr,
|
||||
}
|
||||
|
||||
// used for symmetric hole punching, binding to multiple ports to increase the chance of success
|
||||
pub(crate) struct UdpSocketArray {
|
||||
sockets: Arc<DashMap<SocketAddr, Arc<UdpSocket>>>,
|
||||
max_socket_count: usize,
|
||||
net_ns: NetNS,
|
||||
tasks: Arc<std::sync::Mutex<JoinSet<()>>>,
|
||||
|
||||
intreast_tids: Arc<DashSet<u32>>,
|
||||
tid_to_socket: Arc<DashMap<u32, Vec<PunchedUdpSocket>>>,
|
||||
}
|
||||
|
||||
impl UdpSocketArray {
|
||||
pub fn new(max_socket_count: usize, net_ns: NetNS) -> Self {
|
||||
let tasks = Arc::new(std::sync::Mutex::new(JoinSet::new()));
|
||||
join_joinset_background(tasks.clone(), "UdpSocketArray".to_owned());
|
||||
|
||||
Self {
|
||||
sockets: Arc::new(DashMap::new()),
|
||||
max_socket_count,
|
||||
net_ns,
|
||||
tasks,
|
||||
|
||||
intreast_tids: Arc::new(DashSet::new()),
|
||||
tid_to_socket: Arc::new(DashMap::new()),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn started(&self) -> bool {
|
||||
!self.sockets.is_empty()
|
||||
}
|
||||
|
||||
pub async fn add_new_socket(&self, socket: Arc<UdpSocket>) -> Result<(), anyhow::Error> {
|
||||
let socket_map = self.sockets.clone();
|
||||
let local_addr = socket.local_addr()?;
|
||||
let intreast_tids = self.intreast_tids.clone();
|
||||
let tid_to_socket = self.tid_to_socket.clone();
|
||||
socket_map.insert(local_addr, socket.clone());
|
||||
self.tasks.lock().unwrap().spawn(
|
||||
async move {
|
||||
defer!(socket_map.remove(&local_addr););
|
||||
let mut buf = [0u8; UDP_TUNNEL_HEADER_SIZE + HOLE_PUNCH_PACKET_BODY_LEN as usize];
|
||||
tracing::trace!(?local_addr, "udp socket added");
|
||||
loop {
|
||||
let Ok((len, addr)) = socket.recv_from(&mut buf).await else {
|
||||
break;
|
||||
};
|
||||
|
||||
tracing::debug!(?len, ?addr, "got raw packet");
|
||||
|
||||
if len != UDP_TUNNEL_HEADER_SIZE + HOLE_PUNCH_PACKET_BODY_LEN as usize {
|
||||
continue;
|
||||
}
|
||||
|
||||
let Some(p) = UDPTunnelHeader::ref_from_prefix(&buf) else {
|
||||
continue;
|
||||
};
|
||||
|
||||
let tid = p.conn_id.get();
|
||||
let valid = p.msg_type == UdpPacketType::HolePunch as u8
|
||||
&& p.len.get() == HOLE_PUNCH_PACKET_BODY_LEN;
|
||||
tracing::debug!(?p, ?addr, ?tid, ?valid, ?p, "got udp hole punch packet");
|
||||
|
||||
if !valid {
|
||||
continue;
|
||||
}
|
||||
|
||||
if intreast_tids.contains(&tid) {
|
||||
tracing::info!(?addr, ?tid, "got hole punching packet with intreast tid");
|
||||
tid_to_socket
|
||||
.entry(tid)
|
||||
.or_insert_with(Vec::new)
|
||||
.push(PunchedUdpSocket {
|
||||
socket: socket.clone(),
|
||||
tid,
|
||||
remote_addr: addr,
|
||||
});
|
||||
break;
|
||||
}
|
||||
}
|
||||
tracing::debug!(?local_addr, "udp socket recv loop end");
|
||||
}
|
||||
.instrument(tracing::info_span!("udp array socket recv loop")),
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[instrument(err)]
|
||||
pub async fn start(&self) -> Result<(), anyhow::Error> {
|
||||
tracing::info!("starting udp socket array");
|
||||
|
||||
while self.sockets.len() < self.max_socket_count {
|
||||
let socket = {
|
||||
let _g = self.net_ns.guard();
|
||||
Arc::new(UdpSocket::bind("0.0.0.0:0").await?)
|
||||
};
|
||||
|
||||
self.add_new_socket(socket).await?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[instrument(err)]
|
||||
pub async fn send_with_all(&self, data: &[u8], addr: SocketAddr) -> Result<(), anyhow::Error> {
|
||||
tracing::info!(?addr, "sending hole punching packet");
|
||||
|
||||
for socket in self.sockets.iter() {
|
||||
let socket = socket.value();
|
||||
socket.send_to(data, addr).await?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[instrument(ret(level = Level::DEBUG))]
|
||||
pub fn try_fetch_punched_socket(&self, tid: u32) -> Option<PunchedUdpSocket> {
|
||||
tracing::debug!(?tid, "try fetch punched socket");
|
||||
self.tid_to_socket.get_mut(&tid)?.value_mut().pop()
|
||||
}
|
||||
|
||||
pub fn add_intreast_tid(&self, tid: u32) {
|
||||
self.intreast_tids.insert(tid);
|
||||
}
|
||||
|
||||
pub fn remove_intreast_tid(&self, tid: u32) {
|
||||
self.intreast_tids.remove(&tid);
|
||||
self.tid_to_socket.remove(&tid);
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Debug for UdpSocketArray {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
f.debug_struct("UdpSocketArray")
|
||||
.field("sockets", &self.sockets.len())
|
||||
.field("max_socket_count", &self.max_socket_count)
|
||||
.field("started", &self.started())
|
||||
.field("intreast_tids", &self.intreast_tids.len())
|
||||
.field("tid_to_socket", &self.tid_to_socket.len())
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
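// Hedged usage sketch, not part of the commit: the flow the symmetric punch clients
// follow when they drive UdpSocketArray. The destination address is hypothetical.
#[allow(dead_code)]
async fn example_punch_with_array(net_ns: NetNS) -> Result<(), anyhow::Error> {
    let array = UdpSocketArray::new(25, net_ns);
    array.start().await?; // bind the sockets and spawn their recv loops
    let tid: u32 = rand::random();
    array.add_intreast_tid(tid); // remember which transaction id we expect back

    let dst: SocketAddr = "203.0.113.7:40164".parse().unwrap();
    let packet = new_hole_punch_packet(tid, HOLE_PUNCH_PACKET_BODY_LEN).into_bytes();
    array.send_with_all(&packet, dst).await?; // spray the packet from every bound socket

    // any socket that later receives a reply carrying `tid` is handed back here
    tokio::time::sleep(Duration::from_millis(200)).await;
    if let Some(punched) = array.try_fetch_punched_socket(tid) {
        tracing::info!(remote_addr = ?punched.remote_addr, "hole punched");
    }
    Ok(())
}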
#[derive(Debug)]
|
||||
pub(crate) struct UdpHolePunchListener {
|
||||
socket: Arc<UdpSocket>,
|
||||
tasks: JoinSet<()>,
|
||||
running: Arc<AtomicCell<bool>>,
|
||||
mapped_addr: SocketAddr,
|
||||
conn_counter: Arc<Box<dyn TunnelConnCounter>>,
|
||||
|
||||
listen_time: std::time::Instant,
|
||||
last_select_time: AtomicCell<std::time::Instant>,
|
||||
last_active_time: Arc<AtomicCell<std::time::Instant>>,
|
||||
}
|
||||
|
||||
impl UdpHolePunchListener {
|
||||
async fn get_avail_port() -> Result<u16, Error> {
|
||||
let socket = UdpSocket::bind("0.0.0.0:0").await?;
|
||||
Ok(socket.local_addr()?.port())
|
||||
}
|
||||
|
||||
#[instrument(err)]
|
||||
pub async fn new(peer_mgr: Arc<PeerManager>) -> Result<Self, Error> {
|
||||
Self::new_ext(peer_mgr, true, None).await
|
||||
}
|
||||
|
||||
#[instrument(err)]
|
||||
pub async fn new_ext(
|
||||
peer_mgr: Arc<PeerManager>,
|
||||
with_mapped_addr: bool,
|
||||
port: Option<u16>,
|
||||
) -> Result<Self, Error> {
|
||||
let port = port.unwrap_or(Self::get_avail_port().await?);
|
||||
let listen_url = format!("udp://0.0.0.0:{}", port);
|
||||
|
||||
let mapped_addr = if with_mapped_addr {
|
||||
let gctx = peer_mgr.get_global_ctx();
|
||||
let stun_info_collect = gctx.get_stun_info_collector();
|
||||
stun_info_collect.get_udp_port_mapping(port).await?
|
||||
} else {
|
||||
SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(0, 0, 0, 0), port))
|
||||
};
|
||||
|
||||
let mut listener = UdpTunnelListener::new(listen_url.parse().unwrap());
|
||||
|
||||
{
|
||||
let _g = peer_mgr.get_global_ctx().net_ns.guard();
|
||||
listener.listen().await?;
|
||||
}
|
||||
let socket = listener.get_socket().unwrap();
|
||||
|
||||
let running = Arc::new(AtomicCell::new(true));
|
||||
let running_clone = running.clone();
|
||||
|
||||
let conn_counter = listener.get_conn_counter();
|
||||
let mut tasks = JoinSet::new();
|
||||
|
||||
tasks.spawn(async move {
|
||||
while let Ok(conn) = listener.accept().await {
|
||||
tracing::warn!(?conn, "udp hole punching listener got peer connection");
|
||||
let peer_mgr = peer_mgr.clone();
|
||||
tokio::spawn(async move {
|
||||
if let Err(e) = peer_mgr.add_tunnel_as_server(conn).await {
|
||||
tracing::error!(
|
||||
?e,
|
||||
"failed to add tunnel as server in hole punch listener"
|
||||
);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
running_clone.store(false);
|
||||
});
|
||||
|
||||
let last_active_time = Arc::new(AtomicCell::new(std::time::Instant::now()));
|
||||
let conn_counter_clone = conn_counter.clone();
|
||||
let last_active_time_clone = last_active_time.clone();
|
||||
tasks.spawn(async move {
|
||||
loop {
|
||||
tokio::time::sleep(std::time::Duration::from_secs(5)).await;
|
||||
if conn_counter_clone.get().unwrap_or(0) != 0 {
|
||||
last_active_time_clone.store(std::time::Instant::now());
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
tracing::warn!(?mapped_addr, ?socket, "udp hole punching listener started");
|
||||
|
||||
Ok(Self {
|
||||
tasks,
|
||||
socket,
|
||||
running,
|
||||
mapped_addr,
|
||||
conn_counter,
|
||||
|
||||
listen_time: std::time::Instant::now(),
|
||||
last_select_time: AtomicCell::new(std::time::Instant::now()),
|
||||
last_active_time,
|
||||
})
|
||||
}
|
||||
|
||||
pub async fn get_socket(&self) -> Arc<UdpSocket> {
|
||||
self.last_select_time.store(std::time::Instant::now());
|
||||
self.socket.clone()
|
||||
}
|
||||
|
||||
pub async fn get_conn_count(&self) -> usize {
|
||||
self.conn_counter.get().unwrap_or(0) as usize
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) struct PunchHoleServerCommon {
|
||||
peer_mgr: Arc<PeerManager>,
|
||||
|
||||
listeners: Arc<Mutex<Vec<UdpHolePunchListener>>>,
|
||||
tasks: Arc<std::sync::Mutex<JoinSet<()>>>,
|
||||
}
|
||||
|
||||
impl PunchHoleServerCommon {
|
||||
pub(crate) fn new(peer_mgr: Arc<PeerManager>) -> Self {
|
||||
let tasks = Arc::new(std::sync::Mutex::new(JoinSet::new()));
|
||||
join_joinset_background(tasks.clone(), "PunchHoleServerCommon".to_owned());
|
||||
|
||||
let listeners = Arc::new(Mutex::new(Vec::<UdpHolePunchListener>::new()));
|
||||
|
||||
let l = listeners.clone();
|
||||
tasks.lock().unwrap().spawn(async move {
|
||||
loop {
|
||||
tokio::time::sleep(Duration::from_secs(5)).await;
|
||||
{
|
||||
// remove listener that is not active for 40 seconds but keep listeners that are selected less than 30 seconds
|
||||
l.lock().await.retain(|listener| {
|
||||
listener.last_active_time.load().elapsed().as_secs() < 40
|
||||
|| listener.last_select_time.load().elapsed().as_secs() < 30
|
||||
});
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
Self {
|
||||
peer_mgr,
|
||||
|
||||
listeners,
|
||||
tasks,
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) async fn add_listener(&self, listener: UdpHolePunchListener) {
|
||||
self.listeners.lock().await.push(listener);
|
||||
}
|
||||
|
||||
pub(crate) async fn find_listener(&self, addr: &SocketAddr) -> Option<Arc<UdpSocket>> {
|
||||
let all_listener_sockets = self.listeners.lock().await;
|
||||
|
||||
let listener = all_listener_sockets
|
||||
.iter()
|
||||
.find(|listener| listener.mapped_addr == *addr && listener.running.load())?;
|
||||
|
||||
Some(listener.get_socket().await)
|
||||
}
|
||||
|
||||
pub(crate) async fn my_udp_nat_type(&self) -> i32 {
|
||||
self.peer_mgr
|
||||
.get_global_ctx()
|
||||
.get_stun_info_collector()
|
||||
.get_stun_info()
|
||||
.udp_nat_type
|
||||
}
|
||||
|
||||
pub(crate) async fn select_listener(
|
||||
&self,
|
||||
use_new_listener: bool,
|
||||
) -> Option<(Arc<UdpSocket>, SocketAddr)> {
|
||||
let all_listener_sockets = &self.listeners;
|
||||
|
||||
let mut use_last = false;
|
||||
if all_listener_sockets.lock().await.len() < 16 || use_new_listener {
|
||||
tracing::warn!("creating new udp hole punching listener");
|
||||
all_listener_sockets.lock().await.push(
|
||||
UdpHolePunchListener::new(self.peer_mgr.clone())
|
||||
.await
|
||||
.ok()?,
|
||||
);
|
||||
use_last = true;
|
||||
}
|
||||
|
||||
let mut locked = all_listener_sockets.lock().await;
|
||||
|
||||
let listener = if use_last {
|
||||
locked.last_mut()?
|
||||
} else {
|
||||
// use the listener that is active most recently
|
||||
locked
|
||||
.iter_mut()
|
||||
.max_by_key(|listener| listener.last_active_time.load())?
|
||||
};
|
||||
|
||||
if listener.mapped_addr.ip().is_unspecified() {
|
||||
tracing::info!("listener mapped addr is unspecified, trying to get mapped addr");
|
||||
listener.mapped_addr = self
|
||||
.get_global_ctx()
|
||||
.get_stun_info_collector()
|
||||
.get_udp_port_mapping(listener.mapped_addr.port())
|
||||
.await
|
||||
.ok()?;
|
||||
}
|
||||
|
||||
Some((listener.get_socket().await, listener.mapped_addr))
|
||||
}
|
||||
|
||||
pub(crate) fn get_joinset(&self) -> Arc<std::sync::Mutex<JoinSet<()>>> {
|
||||
self.tasks.clone()
|
||||
}
|
||||
|
||||
pub(crate) fn get_global_ctx(&self) -> ArcGlobalCtx {
|
||||
self.peer_mgr.get_global_ctx()
|
||||
}
|
||||
|
||||
pub(crate) fn get_peer_mgr(&self) -> Arc<PeerManager> {
|
||||
self.peer_mgr.clone()
|
||||
}
|
||||
}
|
||||
|
||||
#[tracing::instrument(err, ret(level=Level::DEBUG), skip(ports))]
|
||||
pub(crate) async fn send_symmetric_hole_punch_packet(
|
||||
ports: &Vec<u16>,
|
||||
udp: Arc<UdpSocket>,
|
||||
transaction_id: u32,
|
||||
public_ips: &Vec<Ipv4Addr>,
|
||||
port_start_idx: usize,
|
||||
max_packets: usize,
|
||||
) -> Result<usize, Error> {
|
||||
tracing::debug!("sending hard symmetric hole punching packet");
|
||||
let mut sent_packets = 0;
|
||||
let mut cur_port_idx = port_start_idx;
|
||||
while sent_packets < max_packets {
|
||||
let port = ports[cur_port_idx % ports.len()];
|
||||
for pub_ip in public_ips {
|
||||
let addr = SocketAddr::V4(SocketAddrV4::new(*pub_ip, port));
|
||||
let packet = new_hole_punch_packet(transaction_id, HOLE_PUNCH_PACKET_BODY_LEN);
|
||||
udp.send_to(&packet.into_bytes(), addr).await?;
|
||||
sent_packets += 1;
|
||||
}
|
||||
cur_port_idx = cur_port_idx.wrapping_add(1);
|
||||
tokio::time::sleep(Duration::from_millis(3)).await;
|
||||
}
|
||||
Ok(cur_port_idx % ports.len())
|
||||
}
|
||||
|
||||
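// Hedged usage sketch, not part of the commit: the hard-symmetric client drives the
// sender above in rounds, resuming from the returned index so each round covers fresh
// ports from the shuffled list. The per-round packet budget here is illustrative.
#[allow(dead_code)]
async fn example_spray_in_rounds(
    udp: Arc<UdpSocket>,
    public_ips: Vec<Ipv4Addr>,
    transaction_id: u32,
) -> Result<(), Error> {
    let ports = generate_shuffled_port_vec();
    let mut idx = 0;
    for _round in 0..3 {
        idx = send_symmetric_hole_punch_packet(&ports, udp.clone(), transaction_id, &public_ips, idx, 600)
            .await?;
        // give the peer a moment to answer before spraying the next batch
        tokio::time::sleep(Duration::from_millis(500)).await;
    }
    Ok(())
}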
pub(crate) async fn try_connect_with_socket(
|
||||
socket: Arc<UdpSocket>,
|
||||
remote_mapped_addr: SocketAddr,
|
||||
) -> Result<Box<dyn Tunnel>, Error> {
|
||||
let connector = UdpTunnelConnector::new(
|
||||
format!(
|
||||
"udp://{}:{}",
|
||||
remote_mapped_addr.ip(),
|
||||
remote_mapped_addr.port()
|
||||
)
|
||||
.to_string()
|
||||
.parse()
|
||||
.unwrap(),
|
||||
);
|
||||
connector
|
||||
.try_connect_with_socket(socket, remote_mapped_addr)
|
||||
.await
|
||||
.map_err(|e| Error::from(e))
|
||||
}
|
easytier/src/connector/udp_hole_punch/cone.rs (new file, 264 lines)
@@ -0,0 +1,264 @@
|
||||
use std::{
|
||||
sync::Arc,
|
||||
time::{Duration, Instant},
|
||||
};
|
||||
|
||||
use anyhow::Context;
|
||||
use tokio::net::UdpSocket;
|
||||
|
||||
use crate::{
|
||||
common::{scoped_task::ScopedTask, stun::StunInfoCollectorTrait, PeerId},
|
||||
connector::udp_hole_punch::common::{
|
||||
try_connect_with_socket, UdpSocketArray, HOLE_PUNCH_PACKET_BODY_LEN,
|
||||
},
|
||||
peers::peer_manager::PeerManager,
|
||||
proto::{
|
||||
common::Void,
|
||||
peer_rpc::{
|
||||
SelectPunchListenerRequest, SendPunchPacketConeRequest, UdpHolePunchRpcClientFactory,
|
||||
},
|
||||
rpc_types::{self, controller::BaseController},
|
||||
},
|
||||
tunnel::{udp::new_hole_punch_packet, Tunnel},
|
||||
};
|
||||
|
||||
use super::common::PunchHoleServerCommon;
|
||||
|
||||
pub(crate) struct PunchConeHoleServer {
|
||||
common: Arc<PunchHoleServerCommon>,
|
||||
}
|
||||
|
||||
impl PunchConeHoleServer {
|
||||
pub(crate) fn new(common: Arc<PunchHoleServerCommon>) -> Self {
|
||||
Self { common }
|
||||
}
|
||||
|
||||
#[tracing::instrument(skip(self), ret, err)]
|
||||
pub(crate) async fn send_punch_packet_cone(
|
||||
&self,
|
||||
_: BaseController,
|
||||
request: SendPunchPacketConeRequest,
|
||||
) -> Result<Void, rpc_types::error::Error> {
|
||||
let listener_addr = request.listener_mapped_addr.ok_or(anyhow::anyhow!(
|
||||
"send_punch_packet_for_cone request missing listener_mapped_addr"
|
||||
))?;
|
||||
let listener_addr = std::net::SocketAddr::from(listener_addr);
|
||||
let listener = self
|
||||
.common
|
||||
.find_listener(&listener_addr)
|
||||
.await
|
||||
.ok_or(anyhow::anyhow!(
|
||||
"send_punch_packet_for_cone failed to find listener"
|
||||
))?;
|
||||
|
||||
let dest_addr = request.dest_addr.ok_or(anyhow::anyhow!(
|
||||
"send_punch_packet_for_cone request missing dest_addr"
|
||||
))?;
|
||||
let dest_addr = std::net::SocketAddr::from(dest_addr);
|
||||
let dest_ip = dest_addr.ip();
|
||||
if dest_ip.is_unspecified() || dest_ip.is_multicast() {
|
||||
return Err(anyhow::anyhow!(
|
||||
"send_punch_packet_for_cone dest_ip is malformed, {:?}",
|
||||
request
|
||||
)
|
||||
.into());
|
||||
}
|
||||
|
||||
for _ in 0..request.packet_batch_count {
|
||||
tracing::info!(?request, "sending hole punching packet");
|
||||
|
||||
for _ in 0..request.packet_count_per_batch {
|
||||
let udp_packet =
|
||||
new_hole_punch_packet(request.transaction_id, HOLE_PUNCH_PACKET_BODY_LEN);
|
||||
if let Err(e) = listener.send_to(&udp_packet.into_bytes(), &dest_addr).await {
|
||||
tracing::error!(?e, "failed to send hole punch packet to dest addr");
|
||||
}
|
||||
}
|
||||
tokio::time::sleep(Duration::from_millis(request.packet_interval_ms as u64)).await;
|
||||
}
|
||||
|
||||
Ok(Void::default())
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) struct PunchConeHoleClient {
|
||||
peer_mgr: Arc<PeerManager>,
|
||||
}
|
||||
|
||||
impl PunchConeHoleClient {
|
||||
pub(crate) fn new(peer_mgr: Arc<PeerManager>) -> Self {
|
||||
Self { peer_mgr }
|
||||
}
|
||||
|
||||
#[tracing::instrument(skip(self))]
|
||||
pub(crate) async fn do_hole_punching(
|
||||
&self,
|
||||
dst_peer_id: PeerId,
|
||||
) -> Result<Option<Box<dyn Tunnel>>, anyhow::Error> {
|
||||
tracing::info!(?dst_peer_id, "start hole punching");
|
||||
let tid = rand::random();
|
||||
|
||||
let global_ctx = self.peer_mgr.get_global_ctx();
|
||||
let udp_array = UdpSocketArray::new(1, global_ctx.net_ns.clone());
|
||||
let local_socket = {
|
||||
let _g = self.peer_mgr.get_global_ctx().net_ns.guard();
|
||||
Arc::new(UdpSocket::bind("0.0.0.0:0").await?)
|
||||
};
|
||||
|
||||
let local_addr = local_socket
|
||||
.local_addr()
|
||||
.with_context(|| anyhow::anyhow!("failed to get local port from udp array"))?;
|
||||
let local_port = local_addr.port();
|
||||
|
||||
drop(local_socket);
|
||||
let local_mapped_addr = global_ctx
|
||||
.get_stun_info_collector()
|
||||
.get_udp_port_mapping(local_port)
|
||||
.await
|
||||
.with_context(|| "failed to get udp port mapping")?;
|
||||
|
||||
let local_socket = {
|
||||
let _g = self.peer_mgr.get_global_ctx().net_ns.guard();
|
||||
Arc::new(UdpSocket::bind(local_addr).await?)
|
||||
};
|
||||
|
||||
// client -> server: tell server the mapped port, server will return the mapped address of listening port.
|
||||
let rpc_stub = self
|
||||
.peer_mgr
|
||||
.get_peer_rpc_mgr()
|
||||
.rpc_client()
|
||||
.scoped_client::<UdpHolePunchRpcClientFactory<BaseController>>(
|
||||
self.peer_mgr.my_peer_id(),
|
||||
dst_peer_id,
|
||||
global_ctx.get_network_name(),
|
||||
);
|
||||
|
||||
let resp = rpc_stub
|
||||
.select_punch_listener(
|
||||
BaseController::default(),
|
||||
SelectPunchListenerRequest { force_new: false },
|
||||
)
|
||||
.await
|
||||
.with_context(|| "failed to select punch listener")?;
|
||||
let remote_mapped_addr = resp.listener_mapped_addr.ok_or(anyhow::anyhow!(
|
||||
"select_punch_listener response missing listener_mapped_addr"
|
||||
))?;
|
||||
|
||||
tracing::debug!(
|
||||
?local_mapped_addr,
|
||||
?remote_mapped_addr,
|
||||
"hole punch got remote listener"
|
||||
);
|
||||
|
||||
udp_array.add_new_socket(local_socket).await?;
|
||||
udp_array.add_intreast_tid(tid);
|
||||
let send_from_local = || async {
|
||||
udp_array
|
||||
.send_with_all(
|
||||
&new_hole_punch_packet(tid, HOLE_PUNCH_PACKET_BODY_LEN).into_bytes(),
|
||||
remote_mapped_addr.clone().into(),
|
||||
)
|
||||
.await
|
||||
.with_context(|| "failed to send hole punch packet from local")
|
||||
};
|
||||
|
||||
send_from_local().await?;
|
||||
|
||||
let scoped_punch_task: ScopedTask<()> = tokio::spawn(async move {
|
||||
if let Err(e) = rpc_stub
|
||||
.send_punch_packet_cone(
|
||||
BaseController {
|
||||
timeout_ms: 4000,
|
||||
..Default::default()
|
||||
},
|
||||
SendPunchPacketConeRequest {
|
||||
listener_mapped_addr: Some(remote_mapped_addr.into()),
|
||||
dest_addr: Some(local_mapped_addr.into()),
|
||||
transaction_id: tid,
|
||||
packet_count_per_batch: 2,
|
||||
packet_batch_count: 5,
|
||||
packet_interval_ms: 400,
|
||||
},
|
||||
)
|
||||
.await
|
||||
{
|
||||
tracing::error!(?e, "failed to call remote send punch packet");
|
||||
}
|
||||
})
|
||||
.into();
|
||||
|
||||
// server: will send some punching resps, total 10 packets.
|
||||
// client: use the socket to create UdpTunnel with UdpTunnelConnector
|
||||
// NOTICE: UdpTunnelConnector will ignore the punching resp packet sent by remote.
|
||||
let mut finish_time: Option<Instant> = None;
|
||||
while finish_time.is_none() || finish_time.as_ref().unwrap().elapsed().as_millis() < 1000 {
|
||||
tokio::time::sleep(Duration::from_millis(200)).await;
|
||||
|
||||
if finish_time.is_none() && (*scoped_punch_task).is_finished() {
|
||||
finish_time = Some(Instant::now());
|
||||
}
|
||||
|
||||
let Some(socket) = udp_array.try_fetch_punched_socket(tid) else {
|
||||
tracing::debug!("no punched socket found, send some more hole punch packets");
|
||||
send_from_local().await?;
|
||||
continue;
|
||||
};
|
||||
|
||||
tracing::debug!(?socket, ?tid, "punched socket found, try connect with it");
|
||||
|
||||
for _ in 0..2 {
|
||||
match try_connect_with_socket(socket.socket.clone(), remote_mapped_addr.into())
|
||||
.await
|
||||
{
|
||||
Ok(tunnel) => {
|
||||
tracing::info!(?tunnel, "hole punched");
|
||||
return Ok(Some(tunnel));
|
||||
}
|
||||
Err(e) => {
|
||||
tracing::error!(?e, "failed to connect with socket");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return Ok(None);
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub mod tests {
|
||||
|
||||
use crate::{
|
||||
connector::udp_hole_punch::{
|
||||
tests::create_mock_peer_manager_with_mock_stun, UdpHolePunchConnector,
|
||||
},
|
||||
peers::tests::{connect_peer_manager, wait_route_appear, wait_route_appear_with_cost},
|
||||
proto::common::NatType,
|
||||
};
|
||||
|
||||
#[tokio::test]
|
||||
async fn hole_punching_cone() {
|
||||
let p_a = create_mock_peer_manager_with_mock_stun(NatType::Restricted).await;
|
||||
let p_b = create_mock_peer_manager_with_mock_stun(NatType::PortRestricted).await;
|
||||
let p_c = create_mock_peer_manager_with_mock_stun(NatType::Restricted).await;
|
||||
connect_peer_manager(p_a.clone(), p_b.clone()).await;
|
||||
connect_peer_manager(p_b.clone(), p_c.clone()).await;
|
||||
|
||||
wait_route_appear(p_a.clone(), p_c.clone()).await.unwrap();
|
||||
|
||||
println!("{:?}", p_a.list_routes().await);
|
||||
|
||||
let mut hole_punching_a = UdpHolePunchConnector::new(p_a.clone());
|
||||
let mut hole_punching_c = UdpHolePunchConnector::new(p_c.clone());
|
||||
|
||||
hole_punching_a.run_as_client().await.unwrap();
|
||||
hole_punching_c.run_as_server().await.unwrap();
|
||||
|
||||
hole_punching_a.client.run_immediately().await;
|
||||
|
||||
wait_route_appear_with_cost(p_a.clone(), p_c.my_peer_id(), Some(1))
|
||||
.await
|
||||
.unwrap();
|
||||
println!("{:?}", p_a.list_routes().await);
|
||||
}
|
||||
}
|
easytier/src/connector/udp_hole_punch/mod.rs (new file, 559 lines)
@@ -0,0 +1,559 @@
|
||||
use std::sync::{atomic::AtomicBool, Arc};
|
||||
|
||||
use anyhow::{Context, Error};
|
||||
use both_easy_sym::{PunchBothEasySymHoleClient, PunchBothEasySymHoleServer};
|
||||
use common::{PunchHoleServerCommon, UdpNatType, UdpPunchClientMethod};
|
||||
use cone::{PunchConeHoleClient, PunchConeHoleServer};
|
||||
use dashmap::DashMap;
|
||||
use once_cell::sync::Lazy;
|
||||
use sym_to_cone::{PunchSymToConeHoleClient, PunchSymToConeHoleServer};
|
||||
use tokio::{sync::Mutex, task::JoinHandle};
|
||||
|
||||
use crate::{
|
||||
common::{stun::StunInfoCollectorTrait, PeerId},
|
||||
connector::direct::PeerManagerForDirectConnector,
|
||||
peers::{
|
||||
peer_manager::PeerManager,
|
||||
peer_task::{PeerTaskLauncher, PeerTaskManager},
|
||||
},
|
||||
proto::{
|
||||
common::{NatType, Void},
|
||||
peer_rpc::{
|
||||
SelectPunchListenerRequest, SelectPunchListenerResponse,
|
||||
SendPunchPacketBothEasySymRequest, SendPunchPacketBothEasySymResponse,
|
||||
SendPunchPacketConeRequest, SendPunchPacketEasySymRequest,
|
||||
SendPunchPacketHardSymRequest, SendPunchPacketHardSymResponse, UdpHolePunchRpc,
|
||||
UdpHolePunchRpcServer,
|
||||
},
|
||||
rpc_types::{self, controller::BaseController},
|
||||
},
|
||||
tunnel::Tunnel,
|
||||
};
|
||||
|
||||
pub(crate) mod both_easy_sym;
|
||||
pub(crate) mod common;
|
||||
pub(crate) mod cone;
|
||||
pub(crate) mod sym_to_cone;
|
||||
|
||||
// sym punch should be serialized
|
||||
static SYM_PUNCH_LOCK: Lazy<DashMap<PeerId, Arc<Mutex<()>>>> = Lazy::new(|| DashMap::new());
|
||||
static RUN_TESTING: Lazy<AtomicBool> = Lazy::new(|| AtomicBool::new(false));
|
||||
|
||||
fn get_sym_punch_lock(peer_id: PeerId) -> Arc<Mutex<()>> {
|
||||
SYM_PUNCH_LOCK
|
||||
.entry(peer_id)
|
||||
.or_insert_with(|| Arc::new(Mutex::new(())))
|
||||
.value()
|
||||
.clone()
|
||||
}
|
||||
|
||||
struct UdpHolePunchServer {
|
||||
common: Arc<PunchHoleServerCommon>,
|
||||
cone_server: PunchConeHoleServer,
|
||||
sym_to_cone_server: PunchSymToConeHoleServer,
|
||||
both_easy_sym_server: PunchBothEasySymHoleServer,
|
||||
}
|
||||
|
||||
impl UdpHolePunchServer {
|
||||
pub fn new(peer_mgr: Arc<PeerManager>) -> Arc<Self> {
|
||||
let common = Arc::new(PunchHoleServerCommon::new(peer_mgr.clone()));
|
||||
let cone_server = PunchConeHoleServer::new(common.clone());
|
||||
let sym_to_cone_server = PunchSymToConeHoleServer::new(common.clone());
|
||||
let both_easy_sym_server = PunchBothEasySymHoleServer::new(common.clone());
|
||||
|
||||
Arc::new(Self {
|
||||
common,
|
||||
cone_server,
|
||||
sym_to_cone_server,
|
||||
both_easy_sym_server,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl UdpHolePunchRpc for UdpHolePunchServer {
|
||||
type Controller = BaseController;
|
||||
|
||||
async fn select_punch_listener(
|
||||
&self,
|
||||
_ctrl: Self::Controller,
|
||||
input: SelectPunchListenerRequest,
|
||||
) -> rpc_types::error::Result<SelectPunchListenerResponse> {
|
||||
let (_, addr) = self
|
||||
.common
|
||||
.select_listener(input.force_new)
|
||||
.await
|
||||
.ok_or(anyhow::anyhow!("no listener available"))?;
|
||||
|
||||
Ok(SelectPunchListenerResponse {
|
||||
listener_mapped_addr: Some(addr.into()),
|
||||
})
|
||||
}
|
||||
|
||||
/// send packet to one remote_addr, used by nat1-3 to nat1-3
|
||||
async fn send_punch_packet_cone(
|
||||
&self,
|
||||
ctrl: Self::Controller,
|
||||
input: SendPunchPacketConeRequest,
|
||||
) -> rpc_types::error::Result<Void> {
|
||||
self.cone_server.send_punch_packet_cone(ctrl, input).await
|
||||
}
|
||||
|
||||
/// send packet to multiple remote_addr (birthday attack), used by nat4 to nat1-3
|
||||
async fn send_punch_packet_hard_sym(
|
||||
&self,
|
||||
_ctrl: Self::Controller,
|
||||
input: SendPunchPacketHardSymRequest,
|
||||
) -> rpc_types::error::Result<SendPunchPacketHardSymResponse> {
|
||||
let _locked = get_sym_punch_lock(self.common.get_peer_mgr().my_peer_id())
|
||||
.try_lock_owned()
|
||||
.with_context(|| "sym punch lock is busy")?;
|
||||
self.sym_to_cone_server
|
||||
.send_punch_packet_hard_sym(input)
|
||||
.await
|
||||
}
|
||||
|
||||
async fn send_punch_packet_easy_sym(
|
||||
&self,
|
||||
_ctrl: Self::Controller,
|
||||
input: SendPunchPacketEasySymRequest,
|
||||
) -> rpc_types::error::Result<Void> {
|
||||
let _locked = get_sym_punch_lock(self.common.get_peer_mgr().my_peer_id())
|
||||
.try_lock_owned()
|
||||
.with_context(|| "sym punch lock is busy")?;
|
||||
self.sym_to_cone_server
|
||||
.send_punch_packet_easy_sym(input)
|
||||
.await
|
||||
.map(|_| Void {})
|
||||
}
|
||||
|
||||
/// nat4 to nat4 (both predictably)
|
||||
async fn send_punch_packet_both_easy_sym(
|
||||
&self,
|
||||
_ctrl: Self::Controller,
|
||||
input: SendPunchPacketBothEasySymRequest,
|
||||
) -> rpc_types::error::Result<SendPunchPacketBothEasySymResponse> {
|
||||
let _locked = get_sym_punch_lock(self.common.get_peer_mgr().my_peer_id())
|
||||
.try_lock_owned()
|
||||
.with_context(|| "sym punch lock is busy")?;
|
||||
self.both_easy_sym_server
|
||||
.send_punch_packet_both_easy_sym(input)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct BackOff {
|
||||
backoffs_ms: Vec<u64>,
|
||||
current_idx: usize,
|
||||
}
|
||||
|
||||
impl BackOff {
|
||||
pub fn new(backoffs_ms: Vec<u64>) -> Self {
|
||||
Self {
|
||||
backoffs_ms,
|
||||
current_idx: 0,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn next_backoff(&mut self) -> u64 {
|
||||
let backoff = self.backoffs_ms[self.current_idx];
|
||||
self.current_idx = (self.current_idx + 1).min(self.backoffs_ms.len() - 1);
|
||||
backoff
|
||||
}
|
||||
|
||||
pub fn rollback(&mut self) {
|
||||
self.current_idx = self.current_idx.saturating_sub(1);
|
||||
}
|
||||
|
||||
pub async fn sleep_for_next_backoff(&mut self) {
|
||||
let backoff = self.next_backoff();
|
||||
if backoff > 0 {
|
||||
tokio::time::sleep(tokio::time::Duration::from_millis(backoff)).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
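// Hedged illustration, not part of the commit: how the BackOff table above advances.
// next_backoff() walks the list and saturates at the last entry; rollback() steps back
// one slot so the next attempt repeats the same delay instead of escalating it.
#[cfg(test)]
mod backoff_behavior_example {
    use super::BackOff;

    #[test]
    fn backoff_progression() {
        let mut b = BackOff::new(vec![0, 1000, 2000, 4000]);
        assert_eq!(b.next_backoff(), 0);
        assert_eq!(b.next_backoff(), 1000);
        assert_eq!(b.next_backoff(), 2000);
        b.rollback(); // e.g. the remote was busy, so do not escalate
        assert_eq!(b.next_backoff(), 2000);
        assert_eq!(b.next_backoff(), 4000);
        assert_eq!(b.next_backoff(), 4000); // capped at the last entry
    }
}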
|
||||
struct UdpHoePunchConnectorData {
|
||||
cone_client: PunchConeHoleClient,
|
||||
sym_to_cone_client: PunchSymToConeHoleClient,
|
||||
both_easy_sym_client: PunchBothEasySymHoleClient,
|
||||
peer_mgr: Arc<PeerManager>,
|
||||
}
|
||||
|
||||
impl UdpHoePunchConnectorData {
|
||||
pub fn new(peer_mgr: Arc<PeerManager>) -> Arc<Self> {
|
||||
let cone_client = PunchConeHoleClient::new(peer_mgr.clone());
|
||||
let sym_to_cone_client = PunchSymToConeHoleClient::new(peer_mgr.clone());
|
||||
let both_easy_sym_client = PunchBothEasySymHoleClient::new(peer_mgr.clone());
|
||||
|
||||
Arc::new(Self {
|
||||
cone_client,
|
||||
sym_to_cone_client,
|
||||
both_easy_sym_client,
|
||||
peer_mgr,
|
||||
})
|
||||
}
|
||||
|
||||
#[tracing::instrument(skip(self))]
|
||||
async fn handle_punch_result(
|
||||
self: &Self,
|
||||
ret: Result<Option<Box<dyn Tunnel>>, Error>,
|
||||
backoff: Option<&mut BackOff>,
|
||||
round: Option<&mut u32>,
|
||||
) -> bool {
|
||||
let op = |rollback: bool| {
|
||||
if rollback {
|
||||
if let Some(backoff) = backoff {
|
||||
backoff.rollback();
|
||||
}
|
||||
if let Some(round) = round {
|
||||
*round = round.saturating_sub(1);
|
||||
}
|
||||
} else {
|
||||
if let Some(round) = round {
|
||||
*round += 1;
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
match ret {
|
||||
Ok(Some(tunnel)) => {
|
||||
tracing::info!(?tunnel, "hole punching get tunnel success");
|
||||
|
||||
if let Err(e) = self.peer_mgr.add_client_tunnel(tunnel).await {
|
||||
tracing::warn!(?e, "add client tunnel failed");
|
||||
op(true);
|
||||
false
|
||||
} else {
|
||||
true
|
||||
}
|
||||
}
|
||||
Ok(None) => {
|
||||
tracing::info!("hole punching failed, no punch tunnel");
|
||||
op(false);
|
||||
false
|
||||
}
|
||||
Err(e) => {
|
||||
tracing::info!(?e, "hole punching failed");
|
||||
op(true);
|
||||
false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[tracing::instrument(skip(self))]
|
||||
async fn cone_to_cone(self: Arc<Self>, task_info: PunchTaskInfo) -> Result<(), Error> {
|
||||
let mut backoff = BackOff::new(vec![0, 1000, 2000, 4000, 4000, 8000, 8000, 16000]);
|
||||
|
||||
loop {
|
||||
backoff.sleep_for_next_backoff().await;
|
||||
|
||||
let ret = self
|
||||
.cone_client
|
||||
.do_hole_punching(task_info.dst_peer_id)
|
||||
.await;
|
||||
|
||||
if self
|
||||
.handle_punch_result(ret, Some(&mut backoff), None)
|
||||
.await
|
||||
{
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tracing::instrument(skip(self))]
|
||||
async fn sym_to_cone(self: Arc<Self>, task_info: PunchTaskInfo) -> Result<(), Error> {
|
||||
let mut backoff = BackOff::new(vec![0, 1000, 2000, 4000, 4000, 8000, 8000, 16000, 64000]);
|
||||
let mut round = 0;
|
||||
let mut port_idx = rand::random();
|
||||
|
||||
loop {
|
||||
backoff.sleep_for_next_backoff().await;
|
||||
|
||||
// always try cone first
|
||||
if !RUN_TESTING.load(std::sync::atomic::Ordering::Relaxed) {
|
||||
let ret = self
|
||||
.cone_client
|
||||
.do_hole_punching(task_info.dst_peer_id)
|
||||
.await;
|
||||
if self.handle_punch_result(ret, None, None).await {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
let ret = {
|
||||
let _lock = get_sym_punch_lock(self.peer_mgr.my_peer_id())
|
||||
.lock_owned()
|
||||
.await;
|
||||
self.sym_to_cone_client
|
||||
.do_hole_punching(
|
||||
task_info.dst_peer_id,
|
||||
round,
|
||||
&mut port_idx,
|
||||
task_info.my_nat_type,
|
||||
)
|
||||
.await
|
||||
};
|
||||
|
||||
if self
|
||||
.handle_punch_result(ret, Some(&mut backoff), Some(&mut round))
|
||||
.await
|
||||
{
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tracing::instrument(skip(self))]
|
||||
async fn both_easy_sym(self: Arc<Self>, task_info: PunchTaskInfo) -> Result<(), Error> {
|
||||
let mut backoff = BackOff::new(vec![0, 1000, 2000, 4000, 4000, 8000, 8000, 16000, 64000]);
|
||||
|
||||
loop {
|
||||
backoff.sleep_for_next_backoff().await;
|
||||
|
||||
// always try cone first
|
||||
if !RUN_TESTING.load(std::sync::atomic::Ordering::Relaxed) {
|
||||
let ret = self
|
||||
.cone_client
|
||||
.do_hole_punching(task_info.dst_peer_id)
|
||||
.await;
|
||||
if self.handle_punch_result(ret, None, None).await {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
let mut is_busy = false;
|
||||
|
||||
let ret = {
|
||||
let _lock = get_sym_punch_lock(self.peer_mgr.my_peer_id())
|
||||
.lock_owned()
|
||||
.await;
|
||||
self.both_easy_sym_client
|
||||
.do_hole_punching(
|
||||
task_info.dst_peer_id,
|
||||
task_info.my_nat_type,
|
||||
task_info.dst_nat_type,
|
||||
&mut is_busy,
|
||||
)
|
||||
.await
|
||||
};
|
||||
|
||||
if is_busy {
|
||||
backoff.rollback();
|
||||
} else if self
|
||||
.handle_punch_result(ret, Some(&mut backoff), None)
|
||||
.await
|
||||
{
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
struct UdpHolePunchPeerTaskLauncher {}
|
||||
|
||||
#[derive(Clone, Debug, Hash, Eq, PartialEq)]
|
||||
struct PunchTaskInfo {
|
||||
dst_peer_id: PeerId,
|
||||
dst_nat_type: UdpNatType,
|
||||
my_nat_type: UdpNatType,
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl PeerTaskLauncher for UdpHolePunchPeerTaskLauncher {
|
||||
type Data = Arc<UdpHoePunchConnectorData>;
|
||||
type CollectPeerItem = PunchTaskInfo;
|
||||
type TaskRet = ();
|
||||
|
||||
fn new_data(&self, peer_mgr: Arc<PeerManager>) -> Self::Data {
|
||||
UdpHoePunchConnectorData::new(peer_mgr)
|
||||
}
|
||||
|
||||
async fn collect_peers_need_task(&self, data: &Self::Data) -> Vec<Self::CollectPeerItem> {
|
||||
let my_nat_type = data
|
||||
.peer_mgr
|
||||
.get_global_ctx()
|
||||
.get_stun_info_collector()
|
||||
.get_stun_info()
|
||||
.udp_nat_type;
|
||||
let my_nat_type: UdpNatType = NatType::try_from(my_nat_type)
|
||||
.unwrap_or(NatType::Unknown)
|
||||
.into();
|
||||
if !my_nat_type.is_sym() {
|
||||
data.sym_to_cone_client.clear_udp_array().await;
|
||||
}
|
||||
|
||||
let mut peers_to_connect: Vec<Self::CollectPeerItem> = Vec::new();
|
||||
// do not do anything if:
|
||||
// 1. our nat type is OpenInternet or NoPat, which means we can wait other peers to connect us
|
||||
// notice that if we are unknown, we treat ourselves as cone
|
||||
if my_nat_type.is_open() {
|
||||
return peers_to_connect;
|
||||
}
|
||||
|
||||
let my_peer_id = data.peer_mgr.my_peer_id();
|
||||
|
||||
// collect peer list from peer manager and do some filter:
|
||||
// 1. peers without direct conns;
|
||||
// 2. peers that are full cone (any restricted type);
|
||||
for route in data.peer_mgr.list_routes().await.iter() {
|
||||
if route
|
||||
.feature_flag
|
||||
.map(|x| x.is_public_server)
|
||||
.unwrap_or(false)
|
||||
{
|
||||
continue;
|
||||
}
|
||||
|
||||
let peer_nat_type = route
|
||||
.stun_info
|
||||
.as_ref()
|
||||
.map(|x| x.udp_nat_type)
|
||||
.unwrap_or(0);
|
||||
let Ok(peer_nat_type) = NatType::try_from(peer_nat_type) else {
|
||||
continue;
|
||||
};
|
||||
let peer_nat_type = peer_nat_type.into();
|
||||
|
||||
let peer_id: PeerId = route.peer_id;
|
||||
let conns = data.peer_mgr.list_peer_conns(peer_id).await;
|
||||
if conns.is_some() && conns.unwrap().len() > 0 {
|
||||
continue;
|
||||
}
|
||||
|
||||
if !my_nat_type.can_punch_hole_as_client(peer_nat_type, my_peer_id, peer_id) {
|
||||
continue;
|
||||
}
|
||||
|
||||
tracing::info!(
|
||||
?peer_id,
|
||||
?peer_nat_type,
|
||||
?my_nat_type,
|
||||
"found peer to do hole punching"
|
||||
);
|
||||
|
||||
peers_to_connect.push(PunchTaskInfo {
|
||||
dst_peer_id: peer_id,
|
||||
dst_nat_type: peer_nat_type,
|
||||
my_nat_type,
|
||||
});
|
||||
}
|
||||
|
||||
peers_to_connect
|
||||
}
|
||||
|
||||
async fn launch_task(
|
||||
&self,
|
||||
data: &Self::Data,
|
||||
item: Self::CollectPeerItem,
|
||||
) -> JoinHandle<Result<Self::TaskRet, Error>> {
|
||||
let data = data.clone();
|
||||
let punch_method = item.my_nat_type.get_punch_hole_method(item.dst_nat_type);
|
||||
match punch_method {
|
||||
UdpPunchClientMethod::ConeToCone => tokio::spawn(data.cone_to_cone(item)),
|
||||
UdpPunchClientMethod::SymToCone => tokio::spawn(data.sym_to_cone(item)),
|
||||
UdpPunchClientMethod::EasySymToEasySym => tokio::spawn(data.both_easy_sym(item)),
|
||||
_ => unreachable!(),
|
||||
}
|
||||
}
|
||||
|
||||
async fn all_task_done(&self, data: &Self::Data) {
|
||||
data.sym_to_cone_client.clear_udp_array().await;
|
||||
}
|
||||
|
||||
fn loop_interval_ms(&self) -> u64 {
|
||||
5000
|
||||
}
|
||||
}
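
The `PeerTaskManager` that consumes this launcher lives in `peers::peer_task` and is not shown in this diff. The trait methods above imply a collect → launch → all-done → sleep cycle; here is a rough, self-contained sketch of such a driving loop with simplified closure-based hooks (the hook signatures and the decision to await each round are assumptions, not the real manager's behavior).

```rust
use std::time::Duration;

/// Simplified stand-in for the real PeerTaskManager loop.
async fn run_launcher_loop<C, L, D>(
    interval_ms: u64,
    mut collect: C,
    mut launch: L,
    mut all_done: D,
) where
    C: FnMut() -> Vec<u32>,                       // peer ids that still need punching
    L: FnMut(u32) -> tokio::task::JoinHandle<()>, // one spawned punch task per peer
    D: FnMut(),                                   // e.g. clear the shared UDP socket array
{
    loop {
        let handles: Vec<_> = collect().into_iter().map(&mut launch).collect();
        for h in handles {
            let _ = h.await; // ignore individual task errors in this sketch
        }
        all_done();
        tokio::time::sleep(Duration::from_millis(interval_ms)).await;
    }
}
```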
|
||||
|
||||
pub struct UdpHolePunchConnector {
|
||||
server: Arc<UdpHolePunchServer>,
|
||||
client: PeerTaskManager<UdpHolePunchPeerTaskLauncher>,
|
||||
peer_mgr: Arc<PeerManager>,
|
||||
}
|
||||
|
||||
// Currently support:
|
||||
// Symmetric -> Full Cone
|
||||
// Any Type of Full Cone -> Any Type of Full Cone
|
||||
|
||||
// if same level of full cone, node with smaller peer_id will be the initiator
|
||||
// if different level of full cone, node with more strict level will be the initiator
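
The two comments above describe the tie-breaking rule that `can_punch_hole_as_client` encapsulates in the real code. A hypothetical sketch of that rule, where `strictness` is an assumed ranking (larger means a more restrictive NAT, e.g. Symmetric > PortRestricted > FullCone):

```rust
/// Illustrative only: returns true when the local node should act as the
/// punching client against a given peer.
fn should_initiate(my_strictness: u8, peer_strictness: u8, my_peer_id: u32, peer_id: u32) -> bool {
    if my_strictness != peer_strictness {
        // the node behind the more restrictive NAT initiates
        my_strictness > peer_strictness
    } else {
        // same level: the smaller peer_id initiates, so exactly one side punches
        my_peer_id < peer_id
    }
}
```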
|
||||
|
||||
impl UdpHolePunchConnector {
|
||||
pub fn new(peer_mgr: Arc<PeerManager>) -> Self {
|
||||
Self {
|
||||
server: UdpHolePunchServer::new(peer_mgr.clone()),
|
||||
client: PeerTaskManager::new(UdpHolePunchPeerTaskLauncher {}, peer_mgr.clone()),
|
||||
peer_mgr,
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn run_as_client(&mut self) -> Result<(), Error> {
|
||||
self.client.start();
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn run_as_server(&mut self) -> Result<(), Error> {
|
||||
self.peer_mgr
|
||||
.get_peer_rpc_mgr()
|
||||
.rpc_server()
|
||||
.registry()
|
||||
.register(
|
||||
UdpHolePunchRpcServer::new(self.server.clone()),
|
||||
&self.peer_mgr.get_global_ctx().get_network_name(),
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn run(&mut self) -> Result<(), Error> {
|
||||
let global_ctx = self.peer_mgr.get_global_ctx();
|
||||
|
||||
if global_ctx.get_flags().disable_p2p {
|
||||
return Ok(());
|
||||
}
|
||||
if global_ctx.get_flags().disable_udp_hole_punching {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
self.run_as_client().await?;
|
||||
self.run_as_server().await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub mod tests {
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use crate::common::stun::MockStunInfoCollector;
|
||||
use crate::proto::common::NatType;
|
||||
|
||||
use crate::peers::{peer_manager::PeerManager, tests::create_mock_peer_manager};
|
||||
|
||||
pub fn replace_stun_info_collector(peer_mgr: Arc<PeerManager>, udp_nat_type: NatType) {
|
||||
let collector = Box::new(MockStunInfoCollector { udp_nat_type });
|
||||
peer_mgr
|
||||
.get_global_ctx()
|
||||
.replace_stun_info_collector(collector);
|
||||
}
|
||||
|
||||
pub async fn create_mock_peer_manager_with_mock_stun(
|
||||
udp_nat_type: NatType,
|
||||
) -> Arc<PeerManager> {
|
||||
let p_a = create_mock_peer_manager().await;
|
||||
replace_stun_info_collector(p_a.clone(), udp_nat_type);
|
||||
p_a
|
||||
}
|
||||
}
|
589
easytier/src/connector/udp_hole_punch/sym_to_cone.rs
Normal file
@@ -0,0 +1,589 @@
|
||||
use std::{
|
||||
net::Ipv4Addr,
|
||||
ops::{Div, Mul},
|
||||
sync::{
|
||||
atomic::{AtomicBool, Ordering},
|
||||
Arc,
|
||||
},
|
||||
time::{Duration, Instant},
|
||||
};
|
||||
|
||||
use anyhow::Context;
|
||||
use rand::{seq::SliceRandom, Rng};
|
||||
use tokio::{net::UdpSocket, sync::RwLock};
|
||||
use tracing::Level;
|
||||
|
||||
use crate::{
|
||||
common::{scoped_task::ScopedTask, stun::StunInfoCollectorTrait, PeerId},
|
||||
connector::udp_hole_punch::common::{
|
||||
send_symmetric_hole_punch_packet, try_connect_with_socket, HOLE_PUNCH_PACKET_BODY_LEN,
|
||||
},
|
||||
defer,
|
||||
peers::peer_manager::PeerManager,
|
||||
proto::{
|
||||
peer_rpc::{
|
||||
SelectPunchListenerRequest, SendPunchPacketEasySymRequest,
|
||||
SendPunchPacketHardSymRequest, SendPunchPacketHardSymResponse,
|
||||
UdpHolePunchRpcClientFactory,
|
||||
},
|
||||
rpc_types::{self, controller::BaseController},
|
||||
},
|
||||
tunnel::{udp::new_hole_punch_packet, Tunnel},
|
||||
};
|
||||
|
||||
use super::common::{PunchHoleServerCommon, UdpNatType, UdpSocketArray};
|
||||
|
||||
const UDP_ARRAY_SIZE_FOR_HARD_SYM: usize = 84;
|
||||
|
||||
pub(crate) struct PunchSymToConeHoleServer {
|
||||
common: Arc<PunchHoleServerCommon>,
|
||||
|
||||
shuffled_port_vec: Arc<Vec<u16>>,
|
||||
}
|
||||
|
||||
impl PunchSymToConeHoleServer {
|
||||
pub(crate) fn new(common: Arc<PunchHoleServerCommon>) -> Self {
|
||||
let mut shuffled_port_vec: Vec<u16> = (1..=65535).collect();
|
||||
shuffled_port_vec.shuffle(&mut rand::thread_rng());
|
||||
|
||||
Self {
|
||||
common,
|
||||
shuffled_port_vec: Arc::new(shuffled_port_vec),
|
||||
}
|
||||
}
|
||||
|
||||
// easy sym means the public port moves by a small, predictable step from a known base port
|
||||
#[tracing::instrument(skip(self), ret)]
|
||||
pub(crate) async fn send_punch_packet_easy_sym(
|
||||
&self,
|
||||
request: SendPunchPacketEasySymRequest,
|
||||
) -> Result<(), rpc_types::error::Error> {
|
||||
tracing::info!("send_punch_packet_easy_sym start");
|
||||
|
||||
let listener_addr = request.listener_mapped_addr.ok_or(anyhow::anyhow!(
|
||||
"send_punch_packet_easy_sym request missing listener_addr"
|
||||
))?;
|
||||
let listener_addr = std::net::SocketAddr::from(listener_addr);
|
||||
let listener = self
|
||||
.common
|
||||
.find_listener(&listener_addr)
|
||||
.await
|
||||
.ok_or(anyhow::anyhow!(
|
||||
"send_punch_packet_easy_sym failed to find listener"
|
||||
))?;
|
||||
|
||||
let public_ips = request
|
||||
.public_ips
|
||||
.into_iter()
|
||||
.map(|ip| std::net::Ipv4Addr::from(ip))
|
||||
.collect::<Vec<_>>();
|
||||
if public_ips.len() == 0 {
|
||||
tracing::warn!("send_punch_packet_easy_sym got zero len public ip");
|
||||
return Err(
|
||||
anyhow::anyhow!("send_punch_packet_easy_sym got zero len public ip").into(),
|
||||
);
|
||||
}
|
||||
|
||||
let transaction_id = request.transaction_id;
|
||||
let base_port_num = request.base_port_num;
|
||||
let max_port_num = request.max_port_num.max(1);
|
||||
let is_incremental = request.is_incremental;
|
||||
|
||||
let port_start = if is_incremental {
|
||||
base_port_num.saturating_add(1)
|
||||
} else {
|
||||
base_port_num.saturating_sub(max_port_num)
|
||||
};
|
||||
|
||||
let port_end = if is_incremental {
|
||||
base_port_num.saturating_add(max_port_num)
|
||||
} else {
|
||||
base_port_num.saturating_sub(1)
|
||||
};
|
||||
|
||||
if port_end <= port_start {
|
||||
return Err(anyhow::anyhow!("send_punch_packet_easy_sym invalid port range").into());
|
||||
}
|
||||
|
||||
let ports = (port_start..=port_end)
|
||||
.map(|x| x as u16)
|
||||
.collect::<Vec<_>>();
|
||||
tracing::debug!(
|
||||
?ports,
|
||||
?public_ips,
|
||||
"send_punch_packet_easy_sym send to ports"
|
||||
);
|
||||
send_symmetric_hole_punch_packet(
|
||||
&ports,
|
||||
listener,
|
||||
transaction_id,
|
||||
&public_ips,
|
||||
0,
|
||||
ports.len(),
|
||||
)
|
||||
.await
|
||||
.with_context(|| "failed to send symmetric hole punch packet")?;
|
||||
|
||||
Ok(())
|
||||
}
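
The port window computed above can be summarized in a small helper. A sketch of the same arithmetic, extracted for illustration: with `base_port_num = 40100`, `max_port_num = 50` and `is_incremental = true` the server sprays ports 40101..=40150, and 40050..=40099 in the decremental case.

```rust
/// Inclusive range of remote ports to spray for an "easy" symmetric NAT
/// whose allocations move by one from a known base port (illustrative).
fn easy_sym_port_window(base: u32, max_delta: u32, is_incremental: bool) -> Option<(u16, u16)> {
    let max_delta = max_delta.max(1);
    let (start, end) = if is_incremental {
        (base.saturating_add(1), base.saturating_add(max_delta))
    } else {
        (base.saturating_sub(max_delta), base.saturating_sub(1))
    };
    if end <= start || end > u16::MAX as u32 {
        return None; // mirrors the "invalid port range" error above
    }
    Some((start as u16, end as u16))
}

// easy_sym_port_window(40100, 50, true)  == Some((40101, 40150))
// easy_sym_port_window(40100, 50, false) == Some((40050, 40099))
```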
|
||||
|
||||
// hard sym means public port is random and cannot be predicted
|
||||
#[tracing::instrument(skip(self))]
|
||||
pub(crate) async fn send_punch_packet_hard_sym(
|
||||
&self,
|
||||
request: SendPunchPacketHardSymRequest,
|
||||
) -> Result<SendPunchPacketHardSymResponse, rpc_types::error::Error> {
|
||||
tracing::info!("try_punch_symmetric start");
|
||||
|
||||
let listener_addr = request.listener_mapped_addr.ok_or(anyhow::anyhow!(
|
||||
"try_punch_symmetric request missing listener_addr"
|
||||
))?;
|
||||
let listener_addr = std::net::SocketAddr::from(listener_addr);
|
||||
let listener = self
|
||||
.common
|
||||
.find_listener(&listener_addr)
|
||||
.await
|
||||
.ok_or(anyhow::anyhow!(
|
||||
"send_punch_packet_for_cone failed to find listener"
|
||||
))?;
|
||||
|
||||
let public_ips = request
|
||||
.public_ips
|
||||
.into_iter()
|
||||
.map(|ip| std::net::Ipv4Addr::from(ip))
|
||||
.collect::<Vec<_>>();
|
||||
if public_ips.len() == 0 {
|
||||
tracing::warn!("try_punch_symmetric got zero len public ip");
|
||||
return Err(anyhow::anyhow!("try_punch_symmetric got zero len public ip").into());
|
||||
}
|
||||
|
||||
let transaction_id = request.transaction_id;
|
||||
let last_port_index = request.port_index as usize;
|
||||
|
||||
let round = std::cmp::max(request.round, 1);
|
||||
|
||||
// send max k1 packets if we are predicting the dst port
|
||||
let max_k1: u32 = 180;
|
||||
// send max k2 packets if we are sending to random port
|
||||
let mut max_k2: u32 = rand::thread_rng().gen_range(600..800);
|
||||
if round > 2 {
|
||||
max_k2 = max_k2.mul(2).div(round).max(max_k1);
|
||||
}
|
||||
|
||||
let next_port_index = send_symmetric_hole_punch_packet(
|
||||
&self.shuffled_port_vec,
|
||||
listener.clone(),
|
||||
transaction_id,
|
||||
&public_ips,
|
||||
last_port_index,
|
||||
max_k2 as usize,
|
||||
)
|
||||
.await
|
||||
.with_context(|| "failed to send symmetric hole punch packet randomly")?;
|
||||
|
||||
return Ok(SendPunchPacketHardSymResponse {
|
||||
next_port_index: next_port_index as u32,
|
||||
});
|
||||
}
|
||||
}
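
`send_symmetric_hole_punch_packet` (defined in `common.rs`, not shown in this hunk) appears to walk the shuffled port list starting at `port_index`, send one probe per port and public IP, and return where it stopped so the next round resumes there. A hedged sketch of that contract using a plain `UdpSocket`; the function name and signature here are placeholders.

```rust
use std::net::Ipv4Addr;
use tokio::net::UdpSocket;

/// Illustrative spraying contract: send `count` probes to successive entries
/// of `ports` (wrapping around), one per public IP, and return the index the
/// next round should resume from.
async fn spray_ports(
    socket: &UdpSocket,
    ports: &[u16],
    public_ips: &[Ipv4Addr],
    start_idx: usize,
    count: usize,
    payload: &[u8],
) -> std::io::Result<usize> {
    if ports.is_empty() {
        return Ok(start_idx);
    }
    let mut idx = start_idx;
    for _ in 0..count {
        let port = ports[idx % ports.len()];
        for ip in public_ips {
            socket.send_to(payload, (*ip, port)).await?;
        }
        idx += 1;
    }
    Ok(idx % ports.len())
}
```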
|
||||
|
||||
pub(crate) struct PunchSymToConeHoleClient {
|
||||
peer_mgr: Arc<PeerManager>,
|
||||
udp_array: RwLock<Option<Arc<UdpSocketArray>>>,
|
||||
try_direct_connect: AtomicBool,
|
||||
punch_predicablely: AtomicBool,
|
||||
punch_randomly: AtomicBool,
|
||||
}
|
||||
|
||||
impl PunchSymToConeHoleClient {
|
||||
pub(crate) fn new(peer_mgr: Arc<PeerManager>) -> Self {
|
||||
Self {
|
||||
peer_mgr,
|
||||
udp_array: RwLock::new(None),
|
||||
try_direct_connect: AtomicBool::new(true),
|
||||
punch_predicablely: AtomicBool::new(true),
|
||||
punch_randomly: AtomicBool::new(true),
|
||||
}
|
||||
}
|
||||
|
||||
async fn prepare_udp_array(&self) -> Result<Arc<UdpSocketArray>, anyhow::Error> {
|
||||
let rlocked = self.udp_array.read().await;
|
||||
if let Some(udp_array) = rlocked.clone() {
|
||||
return Ok(udp_array);
|
||||
}
|
||||
|
||||
drop(rlocked);
|
||||
let mut wlocked = self.udp_array.write().await;
|
||||
if let Some(udp_array) = wlocked.clone() {
|
||||
return Ok(udp_array);
|
||||
}
|
||||
|
||||
let udp_array = Arc::new(UdpSocketArray::new(
|
||||
UDP_ARRAY_SIZE_FOR_HARD_SYM,
|
||||
self.peer_mgr.get_global_ctx().net_ns.clone(),
|
||||
));
|
||||
udp_array.start().await?;
|
||||
wlocked.replace(udp_array.clone());
|
||||
Ok(udp_array)
|
||||
}
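
`prepare_udp_array` uses the usual read-then-write double check so concurrent callers end up sharing a single socket array. The same pattern in isolation, with a generic async initializer (names and the `anyhow` error type are placeholders):

```rust
use std::sync::Arc;
use tokio::sync::RwLock;

/// Double-checked lazy init over a tokio RwLock: fast path under the read
/// lock, slow path re-checks under the write lock before constructing.
async fn get_or_init<T, F, Fut>(slot: &RwLock<Option<Arc<T>>>, init: F) -> anyhow::Result<Arc<T>>
where
    F: FnOnce() -> Fut,
    Fut: std::future::Future<Output = anyhow::Result<Arc<T>>>,
{
    if let Some(v) = slot.read().await.clone() {
        return Ok(v); // fast path, shared lock only
    }
    let mut w = slot.write().await;
    if let Some(v) = w.clone() {
        return Ok(v); // someone else initialized while we waited for the write lock
    }
    let v = init().await?;
    w.replace(v.clone());
    Ok(v)
}
```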
|
||||
|
||||
pub(crate) async fn clear_udp_array(&self) {
|
||||
let mut wlocked = self.udp_array.write().await;
|
||||
wlocked.take();
|
||||
}
|
||||
|
||||
async fn get_base_port_for_easy_sym(&self, my_nat_info: UdpNatType) -> Option<u16> {
|
||||
let global_ctx = self.peer_mgr.get_global_ctx();
|
||||
if my_nat_info.is_easy_sym() {
|
||||
match global_ctx
|
||||
.get_stun_info_collector()
|
||||
.get_udp_port_mapping(0)
|
||||
.await
|
||||
{
|
||||
Ok(addr) => Some(addr.port()),
|
||||
ret => {
|
||||
tracing::warn!(?ret, "failed to get udp port mapping for easy sym");
|
||||
None
|
||||
}
|
||||
}
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
#[tracing::instrument(err(level = Level::ERROR), skip(self))]
|
||||
pub(crate) async fn do_hole_punching(
|
||||
&self,
|
||||
dst_peer_id: PeerId,
|
||||
round: u32,
|
||||
last_port_idx: &mut usize,
|
||||
my_nat_info: UdpNatType,
|
||||
) -> Result<Option<Box<dyn Tunnel>>, anyhow::Error> {
|
||||
let udp_array = self.prepare_udp_array().await?;
|
||||
let global_ctx = self.peer_mgr.get_global_ctx();
|
||||
|
||||
let rpc_stub = self
|
||||
.peer_mgr
|
||||
.get_peer_rpc_mgr()
|
||||
.rpc_client()
|
||||
.scoped_client::<UdpHolePunchRpcClientFactory<BaseController>>(
|
||||
self.peer_mgr.my_peer_id(),
|
||||
dst_peer_id,
|
||||
global_ctx.get_network_name(),
|
||||
);
|
||||
|
||||
let resp = rpc_stub
|
||||
.select_punch_listener(
|
||||
BaseController::default(),
|
||||
SelectPunchListenerRequest { force_new: false },
|
||||
)
|
||||
.await
|
||||
.with_context(|| "failed to select punch listener")?;
|
||||
let remote_mapped_addr = resp.listener_mapped_addr.ok_or(anyhow::anyhow!(
|
||||
"select_punch_listener response missing listener_mapped_addr"
|
||||
))?;
|
||||
|
||||
// try direct connect first
|
||||
if self.try_direct_connect.load(Ordering::Relaxed) {
|
||||
if let Ok(tunnel) = try_connect_with_socket(
|
||||
Arc::new(UdpSocket::bind("0.0.0.0:0").await?),
|
||||
remote_mapped_addr.into(),
|
||||
)
|
||||
.await
|
||||
{
|
||||
return Ok(Some(tunnel));
|
||||
}
|
||||
}
|
||||
|
||||
let stun_info = global_ctx.get_stun_info_collector().get_stun_info();
|
||||
let public_ips: Vec<Ipv4Addr> = stun_info
|
||||
.public_ip
|
||||
.iter()
|
||||
.map(|x| x.parse().unwrap())
|
||||
.collect();
|
||||
if public_ips.is_empty() {
|
||||
return Err(anyhow::anyhow!("failed to get public ips"));
|
||||
}
|
||||
|
||||
let tid = rand::thread_rng().gen();
|
||||
let packet = new_hole_punch_packet(tid, HOLE_PUNCH_PACKET_BODY_LEN).into_bytes();
|
||||
udp_array.add_intreast_tid(tid);
|
||||
defer! { udp_array.remove_intreast_tid(tid);}
|
||||
udp_array
|
||||
.send_with_all(&packet, remote_mapped_addr.into())
|
||||
.await?;
|
||||
|
||||
let port_index = *last_port_idx as u32;
|
||||
let base_port_for_easy_sym = self.get_base_port_for_easy_sym(my_nat_info).await;
|
||||
let punch_random = self.punch_randomly.load(Ordering::Relaxed);
|
||||
let punch_predicable = self.punch_predicablely.load(Ordering::Relaxed);
|
||||
let scoped_punch_task: ScopedTask<Option<u32>> = tokio::spawn(async move {
|
||||
if punch_predicable {
|
||||
if let Some(inc) = my_nat_info.get_inc_of_easy_sym() {
|
||||
let req = SendPunchPacketEasySymRequest {
|
||||
listener_mapped_addr: remote_mapped_addr.clone().into(),
|
||||
public_ips: public_ips.clone().into_iter().map(|x| x.into()).collect(),
|
||||
transaction_id: tid,
|
||||
base_port_num: base_port_for_easy_sym.unwrap() as u32,
|
||||
max_port_num: 50,
|
||||
is_incremental: inc,
|
||||
};
|
||||
tracing::debug!(?req, "send punch packet for easy sym start");
|
||||
let ret = rpc_stub
|
||||
.send_punch_packet_easy_sym(
|
||||
BaseController {
|
||||
timeout_ms: 4000,
|
||||
trace_id: 0,
|
||||
},
|
||||
req,
|
||||
)
|
||||
.await;
|
||||
tracing::debug!(?ret, "send punch packet for easy sym return");
|
||||
}
|
||||
}
|
||||
|
||||
if punch_random {
|
||||
let req = SendPunchPacketHardSymRequest {
|
||||
listener_mapped_addr: remote_mapped_addr.clone().into(),
|
||||
public_ips: public_ips.clone().into_iter().map(|x| x.into()).collect(),
|
||||
transaction_id: tid,
|
||||
round,
|
||||
port_index,
|
||||
};
|
||||
tracing::debug!(?req, "send punch packet for hard sym start");
|
||||
match rpc_stub
|
||||
.send_punch_packet_hard_sym(
|
||||
BaseController {
|
||||
timeout_ms: 4000,
|
||||
trace_id: 0,
|
||||
},
|
||||
req,
|
||||
)
|
||||
.await
|
||||
{
|
||||
Err(e) => {
|
||||
tracing::error!(?e, "failed to send punch packet for hard sym");
|
||||
return None;
|
||||
}
|
||||
Ok(resp) => return Some(resp.next_port_index),
|
||||
}
|
||||
}
|
||||
|
||||
None
|
||||
})
|
||||
.into();
|
||||
|
||||
// no matter what the result is, we should check if we received any hole punching packet
|
||||
let mut ret_tunnel: Option<Box<dyn Tunnel>> = None;
|
||||
let mut finish_time: Option<Instant> = None;
|
||||
while finish_time.is_none() || finish_time.as_ref().unwrap().elapsed().as_millis() < 1000 {
|
||||
tokio::time::sleep(Duration::from_millis(200)).await;
|
||||
|
||||
if finish_time.is_none() && (*scoped_punch_task).is_finished() {
|
||||
finish_time = Some(Instant::now());
|
||||
}
|
||||
|
||||
let Some(socket) = udp_array.try_fetch_punched_socket(tid) else {
|
||||
tracing::debug!("no punched socket found, wait for more time");
|
||||
continue;
|
||||
};
|
||||
|
||||
// if hole punched but tunnel creation failed, need to retry entire process.
|
||||
match try_connect_with_socket(socket.socket.clone(), remote_mapped_addr.into()).await {
|
||||
Ok(tunnel) => {
|
||||
ret_tunnel.replace(tunnel);
|
||||
break;
|
||||
}
|
||||
Err(e) => {
|
||||
tracing::error!(?e, "failed to connect with socket");
|
||||
udp_array.add_new_socket(socket.socket).await?;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let punch_task_result = scoped_punch_task.await;
|
||||
tracing::debug!(?punch_task_result, ?ret_tunnel, "punch task got result");
|
||||
|
||||
if let Ok(Some(next_port_idx)) = punch_task_result {
|
||||
*last_port_idx = next_port_idx as usize;
|
||||
} else {
|
||||
*last_port_idx = rand::random();
|
||||
}
|
||||
|
||||
Ok(ret_tunnel)
|
||||
}
|
||||
}
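
The wait loop in `do_hole_punching` keeps polling for a punched socket while the remote spray task runs, then allows a one-second grace period after that task finishes. Stripped of the RPC and tunnel details, the timing skeleton looks roughly like this; `try_fetch` stands in for `try_fetch_punched_socket` and `task_finished` for checking the scoped punch task.

```rust
use std::time::{Duration, Instant};

/// Timing skeleton only: poll every 200 ms, and keep polling for 1 s after
/// the remote spraying task has finished.
async fn wait_for_punch<F, T>(
    mut task_finished: impl FnMut() -> bool,
    mut try_fetch: F,
) -> Option<T>
where
    F: FnMut() -> Option<T>,
{
    let mut finish_time: Option<Instant> = None;
    while finish_time.map_or(true, |t| t.elapsed() < Duration::from_secs(1)) {
        tokio::time::sleep(Duration::from_millis(200)).await;
        if finish_time.is_none() && task_finished() {
            finish_time = Some(Instant::now());
        }
        if let Some(hit) = try_fetch() {
            return Some(hit);
        }
    }
    None
}
```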
|
||||
|
||||
#[cfg(test)]
|
||||
pub mod tests {
|
||||
use std::{
|
||||
sync::{atomic::AtomicU32, Arc},
|
||||
time::Duration,
|
||||
};
|
||||
|
||||
use tokio::net::UdpSocket;
|
||||
|
||||
use crate::{
|
||||
connector::udp_hole_punch::{
|
||||
tests::create_mock_peer_manager_with_mock_stun, UdpHolePunchConnector, RUN_TESTING,
|
||||
},
|
||||
peers::tests::{connect_peer_manager, wait_route_appear, wait_route_appear_with_cost},
|
||||
proto::common::NatType,
|
||||
tunnel::common::tests::wait_for_condition,
|
||||
};
|
||||
|
||||
#[tokio::test]
|
||||
#[serial_test::serial(hole_punch)]
|
||||
async fn hole_punching_symmetric_only_random() {
|
||||
RUN_TESTING.store(true, std::sync::atomic::Ordering::Relaxed);
|
||||
|
||||
let p_a = create_mock_peer_manager_with_mock_stun(NatType::Symmetric).await;
|
||||
let p_b = create_mock_peer_manager_with_mock_stun(NatType::PortRestricted).await;
|
||||
let p_c = create_mock_peer_manager_with_mock_stun(NatType::PortRestricted).await;
|
||||
connect_peer_manager(p_a.clone(), p_b.clone()).await;
|
||||
connect_peer_manager(p_b.clone(), p_c.clone()).await;
|
||||
wait_route_appear(p_a.clone(), p_c.clone()).await.unwrap();
|
||||
|
||||
let mut hole_punching_a = UdpHolePunchConnector::new(p_a.clone());
|
||||
let mut hole_punching_c = UdpHolePunchConnector::new(p_c.clone());
|
||||
|
||||
hole_punching_a
|
||||
.client
|
||||
.data()
|
||||
.sym_to_cone_client
|
||||
.try_direct_connect
|
||||
.store(false, std::sync::atomic::Ordering::Relaxed);
|
||||
|
||||
hole_punching_a
|
||||
.client
|
||||
.data()
|
||||
.sym_to_cone_client
|
||||
.punch_predicablely
|
||||
.store(false, std::sync::atomic::Ordering::Relaxed);
|
||||
|
||||
hole_punching_a.run().await.unwrap();
|
||||
hole_punching_c.run().await.unwrap();
|
||||
|
||||
hole_punching_a.client.run_immediately().await;
|
||||
|
||||
wait_for_condition(
|
||||
|| async {
|
||||
hole_punching_a
|
||||
.client
|
||||
.data()
|
||||
.sym_to_cone_client
|
||||
.udp_array
|
||||
.read()
|
||||
.await
|
||||
.is_some()
|
||||
},
|
||||
Duration::from_secs(5),
|
||||
)
|
||||
.await;
|
||||
|
||||
wait_for_condition(
|
||||
|| async {
|
||||
wait_route_appear_with_cost(p_a.clone(), p_c.my_peer_id(), Some(1))
|
||||
.await
|
||||
.is_ok()
|
||||
},
|
||||
Duration::from_secs(5),
|
||||
)
|
||||
.await;
|
||||
println!("{:?}", p_a.list_routes().await);
|
||||
|
||||
wait_for_condition(
|
||||
|| async {
|
||||
hole_punching_a
|
||||
.client
|
||||
.data()
|
||||
.sym_to_cone_client
|
||||
.udp_array
|
||||
.read()
|
||||
.await
|
||||
.is_none()
|
||||
},
|
||||
Duration::from_secs(10),
|
||||
)
|
||||
.await;
|
||||
}
|
||||
|
||||
#[rstest::rstest]
|
||||
#[tokio::test]
|
||||
#[serial_test::serial(hole_punch)]
|
||||
async fn hole_punching_symmetric_only_predict(#[values("true", "false")] is_inc: bool) {
|
||||
RUN_TESTING.store(true, std::sync::atomic::Ordering::Relaxed);
|
||||
|
||||
let p_a = create_mock_peer_manager_with_mock_stun(if is_inc {
|
||||
NatType::SymmetricEasyInc
|
||||
} else {
|
||||
NatType::SymmetricEasyDec
|
||||
})
|
||||
.await;
|
||||
let p_b = create_mock_peer_manager_with_mock_stun(NatType::PortRestricted).await;
|
||||
let p_c = create_mock_peer_manager_with_mock_stun(NatType::PortRestricted).await;
|
||||
connect_peer_manager(p_a.clone(), p_b.clone()).await;
|
||||
connect_peer_manager(p_b.clone(), p_c.clone()).await;
|
||||
wait_route_appear(p_a.clone(), p_c.clone()).await.unwrap();
|
||||
|
||||
let mut hole_punching_a = UdpHolePunchConnector::new(p_a.clone());
|
||||
let mut hole_punching_c = UdpHolePunchConnector::new(p_c.clone());
|
||||
|
||||
hole_punching_a
|
||||
.client
|
||||
.data()
|
||||
.sym_to_cone_client
|
||||
.try_direct_connect
|
||||
.store(false, std::sync::atomic::Ordering::Relaxed);
|
||||
|
||||
hole_punching_a
|
||||
.client
|
||||
.data()
|
||||
.sym_to_cone_client
|
||||
.punch_randomly
|
||||
.store(false, std::sync::atomic::Ordering::Relaxed);
|
||||
|
||||
hole_punching_a.run().await.unwrap();
|
||||
hole_punching_c.run().await.unwrap();
|
||||
|
||||
let udps = if is_inc {
|
||||
let udp1 = Arc::new(UdpSocket::bind("0.0.0.0:40147").await.unwrap());
|
||||
let udp2 = Arc::new(UdpSocket::bind("0.0.0.0:40194").await.unwrap());
|
||||
vec![udp1, udp2]
|
||||
} else {
|
||||
let udp1 = Arc::new(UdpSocket::bind("0.0.0.0:40141").await.unwrap());
|
||||
let udp2 = Arc::new(UdpSocket::bind("0.0.0.0:40100").await.unwrap());
|
||||
vec![udp1, udp2]
|
||||
};
|
||||
// let udp_dec = Arc::new(UdpSocket::bind("0.0.0.0:40140").await.unwrap());
|
||||
// let udp_dec2 = Arc::new(UdpSocket::bind("0.0.0.0:40050").await.unwrap());
|
||||
|
||||
let counter = Arc::new(AtomicU32::new(0));
|
||||
|
||||
// all these sockets should receive hole punching packet
|
||||
for udp in udps.iter().map(Arc::clone) {
|
||||
let counter = counter.clone();
|
||||
tokio::spawn(async move {
|
||||
let mut buf = [0u8; 1024];
|
||||
let (len, addr) = udp.recv_from(&mut buf).await.unwrap();
|
||||
println!(
|
||||
"got predictable punch packet, {:?} {:?} {:?}",
|
||||
len,
|
||||
addr,
|
||||
udp.local_addr()
|
||||
);
|
||||
counter.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
|
||||
});
|
||||
}
|
||||
|
||||
hole_punching_a.client.run_immediately().await;
|
||||
|
||||
let udp_len = udps.len();
|
||||
wait_for_condition(
|
||||
|| async { counter.load(std::sync::atomic::Ordering::Relaxed) == udp_len as u32 },
|
||||
Duration::from_secs(30),
|
||||
)
|
||||
.await;
|
||||
}
|
||||
}
|
@@ -4,7 +4,7 @@ use std::{net::SocketAddr, sync::Mutex, time::Duration, vec};
|
||||
|
||||
use anyhow::{Context, Ok};
|
||||
use clap::{command, Args, Parser, Subcommand};
|
||||
use common::stun::StunInfoCollectorTrait;
|
||||
use common::{constants::EASYTIER_VERSION, stun::StunInfoCollectorTrait};
|
||||
use proto::{
|
||||
common::NatType,
|
||||
peer_rpc::{GetGlobalPeerMapRequest, PeerCenterRpc, PeerCenterRpcClientFactory},
|
||||
@@ -30,7 +30,7 @@ use humansize::format_size;
|
||||
use tabled::settings::Style;
|
||||
|
||||
#[derive(Parser, Debug)]
|
||||
#[command(name = "easytier-cli", author, version, about, long_about = None)]
|
||||
#[command(name = "easytier-cli", author, version = EASYTIER_VERSION, about, long_about = None)]
|
||||
struct Cli {
|
||||
/// the instance name
|
||||
#[arg(short = 'p', long, default_value = "127.0.0.1:15888")]
|
||||
@@ -179,14 +179,16 @@ impl CommandHandler {
|
||||
async fn list_peers(&self) -> Result<ListPeerResponse, Error> {
|
||||
let client = self.get_peer_manager_client().await?;
|
||||
let request = ListPeerRequest::default();
|
||||
let response = client.list_peer(BaseController {}, request).await?;
|
||||
let response = client.list_peer(BaseController::default(), request).await?;
|
||||
Ok(response)
|
||||
}
|
||||
|
||||
async fn list_routes(&self) -> Result<ListRouteResponse, Error> {
|
||||
let client = self.get_peer_manager_client().await?;
|
||||
let request = ListRouteRequest::default();
|
||||
let response = client.list_route(BaseController {}, request).await?;
|
||||
let response = client
|
||||
.list_route(BaseController::default(), request)
|
||||
.await?;
|
||||
Ok(response)
|
||||
}
|
||||
|
||||
@@ -225,7 +227,12 @@ impl CommandHandler {
|
||||
impl From<PeerRoutePair> for PeerTableItem {
|
||||
fn from(p: PeerRoutePair) -> Self {
|
||||
PeerTableItem {
|
||||
ipv4: p.route.ipv4_addr.clone(),
|
||||
ipv4: p
|
||||
.route
|
||||
.ipv4_addr
|
||||
.clone()
|
||||
.map(|ip| ip.to_string())
|
||||
.unwrap_or_default(),
|
||||
hostname: p.route.hostname.clone(),
|
||||
cost: cost_to_str(p.route.cost),
|
||||
lat_ms: float_to_str(p.get_latency_ms().unwrap_or(0.0), 3),
|
||||
@@ -275,7 +282,7 @@ impl CommandHandler {
|
||||
|
||||
let client = self.get_peer_manager_client().await?;
|
||||
let node_info = client
|
||||
.show_node_info(BaseController {}, ShowNodeInfoRequest::default())
|
||||
.show_node_info(BaseController::default(), ShowNodeInfoRequest::default())
|
||||
.await?
|
||||
.node_info
|
||||
.ok_or(anyhow::anyhow!("node info not found"))?;
|
||||
@@ -296,7 +303,9 @@ impl CommandHandler {
|
||||
async fn handle_route_dump(&self) -> Result<(), Error> {
|
||||
let client = self.get_peer_manager_client().await?;
|
||||
let request = DumpRouteRequest::default();
|
||||
let response = client.dump_route(BaseController {}, request).await?;
|
||||
let response = client
|
||||
.dump_route(BaseController::default(), request)
|
||||
.await?;
|
||||
println!("response: {}", response.result);
|
||||
Ok(())
|
||||
}
|
||||
@@ -305,7 +314,7 @@ impl CommandHandler {
|
||||
let client = self.get_peer_manager_client().await?;
|
||||
let request = ListForeignNetworkRequest::default();
|
||||
let response = client
|
||||
.list_foreign_network(BaseController {}, request)
|
||||
.list_foreign_network(BaseController::default(), request)
|
||||
.await?;
|
||||
let network_map = response;
|
||||
if self.verbose {
|
||||
@@ -347,7 +356,7 @@ impl CommandHandler {
|
||||
let client = self.get_peer_manager_client().await?;
|
||||
let request = ListGlobalForeignNetworkRequest::default();
|
||||
let response = client
|
||||
.list_global_foreign_network(BaseController {}, request)
|
||||
.list_global_foreign_network(BaseController::default(), request)
|
||||
.await?;
|
||||
if self.verbose {
|
||||
println!("{:#?}", response);
|
||||
@@ -383,7 +392,7 @@ impl CommandHandler {
|
||||
let mut items: Vec<RouteTableItem> = vec![];
|
||||
let client = self.get_peer_manager_client().await?;
|
||||
let node_info = client
|
||||
.show_node_info(BaseController {}, ShowNodeInfoRequest::default())
|
||||
.show_node_info(BaseController::default(), ShowNodeInfoRequest::default())
|
||||
.await?
|
||||
.node_info
|
||||
.ok_or(anyhow::anyhow!("node info not found"))?;
|
||||
@@ -409,7 +418,12 @@ impl CommandHandler {
|
||||
|
||||
if p.route.cost == 1 {
|
||||
items.push(RouteTableItem {
|
||||
ipv4: p.route.ipv4_addr.clone(),
|
||||
ipv4: p
|
||||
.route
|
||||
.ipv4_addr
|
||||
.clone()
|
||||
.map(|ip| ip.to_string())
|
||||
.unwrap_or_default(),
|
||||
hostname: p.route.hostname.clone(),
|
||||
proxy_cidrs: p.route.proxy_cidrs.clone().join(",").to_string(),
|
||||
next_hop_ipv4: "DIRECT".to_string(),
|
||||
@@ -424,10 +438,20 @@ impl CommandHandler {
|
||||
});
|
||||
} else {
|
||||
items.push(RouteTableItem {
|
||||
ipv4: p.route.ipv4_addr.clone(),
|
||||
ipv4: p
|
||||
.route
|
||||
.ipv4_addr
|
||||
.clone()
|
||||
.map(|ip| ip.to_string())
|
||||
.unwrap_or_default(),
|
||||
hostname: p.route.hostname.clone(),
|
||||
proxy_cidrs: p.route.proxy_cidrs.clone().join(",").to_string(),
|
||||
next_hop_ipv4: next_hop_pair.route.ipv4_addr.clone(),
|
||||
next_hop_ipv4: next_hop_pair
|
||||
.route
|
||||
.ipv4_addr
|
||||
.clone()
|
||||
.map(|ip| ip.to_string())
|
||||
.unwrap_or_default(),
|
||||
next_hop_hostname: next_hop_pair.route.hostname.clone(),
|
||||
next_hop_lat: next_hop_pair.get_latency_ms().unwrap_or(0.0),
|
||||
cost: p.route.cost,
|
||||
@@ -451,7 +475,9 @@ impl CommandHandler {
|
||||
async fn handle_connector_list(&self) -> Result<(), Error> {
|
||||
let client = self.get_connector_manager_client().await?;
|
||||
let request = ListConnectorRequest::default();
|
||||
let response = client.list_connector(BaseController {}, request).await?;
|
||||
let response = client
|
||||
.list_connector(BaseController::default(), request)
|
||||
.await?;
|
||||
println!("response: {:#?}", response);
|
||||
Ok(())
|
||||
}
|
||||
@@ -515,7 +541,7 @@ async fn main() -> Result<(), Error> {
|
||||
Some(RouteSubCommand::Dump) => handler.handle_route_dump().await?,
|
||||
},
|
||||
SubCommand::Stun => {
|
||||
timeout(Duration::from_secs(5), async move {
|
||||
timeout(Duration::from_secs(25), async move {
|
||||
let collector = StunInfoCollector::new_with_default_servers();
|
||||
loop {
|
||||
let ret = collector.get_stun_info();
|
||||
@@ -532,7 +558,10 @@ async fn main() -> Result<(), Error> {
|
||||
SubCommand::PeerCenter => {
|
||||
let peer_center_client = handler.get_peer_center_client().await?;
|
||||
let resp = peer_center_client
|
||||
.get_global_peer_map(BaseController {}, GetGlobalPeerMapRequest::default())
|
||||
.get_global_peer_map(
|
||||
BaseController::default(),
|
||||
GetGlobalPeerMapRequest::default(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
#[derive(tabled::Tabled)]
|
||||
@@ -565,7 +594,10 @@ async fn main() -> Result<(), Error> {
|
||||
SubCommand::VpnPortal => {
|
||||
let vpn_portal_client = handler.get_vpn_portal_client().await?;
|
||||
let resp = vpn_portal_client
|
||||
.get_vpn_portal_info(BaseController {}, GetVpnPortalInfoRequest::default())
|
||||
.get_vpn_portal_info(
|
||||
BaseController::default(),
|
||||
GetVpnPortalInfoRequest::default(),
|
||||
)
|
||||
.await?
|
||||
.vpn_portal_info
|
||||
.unwrap_or_default();
|
||||
@@ -583,7 +615,7 @@ async fn main() -> Result<(), Error> {
|
||||
SubCommand::Node(sub_cmd) => {
|
||||
let client = handler.get_peer_manager_client().await?;
|
||||
let node_info = client
|
||||
.show_node_info(BaseController {}, ShowNodeInfoRequest::default())
|
||||
.show_node_info(BaseController::default(), ShowNodeInfoRequest::default())
|
||||
.await?
|
||||
.node_info
|
||||
.ok_or(anyhow::anyhow!("node info not found"))?;
|
||||
|
@@ -26,8 +26,9 @@ mod tunnel;
|
||||
mod utils;
|
||||
mod vpn_portal;
|
||||
|
||||
use common::config::{
|
||||
ConsoleLoggerConfig, FileLoggerConfig, NetworkIdentity, PeerConfig, VpnPortalConfig,
|
||||
use common::{
|
||||
config::{ConsoleLoggerConfig, FileLoggerConfig, NetworkIdentity, PeerConfig, VpnPortalConfig},
|
||||
constants::EASYTIER_VERSION,
|
||||
};
|
||||
use instance::instance::Instance;
|
||||
use tokio::net::TcpSocket;
|
||||
@@ -49,7 +50,7 @@ use mimalloc_rust::*;
|
||||
static GLOBAL_MIMALLOC: GlobalMiMalloc = GlobalMiMalloc;
|
||||
|
||||
#[derive(Parser, Debug)]
|
||||
#[command(name = "easytier-core", author, version, about, long_about = None)]
|
||||
#[command(name = "easytier-core", author, version = EASYTIER_VERSION , about, long_about = None)]
|
||||
struct Cli {
|
||||
#[arg(
|
||||
short,
|
||||
@@ -286,6 +287,12 @@ struct Cli {
|
||||
help = t!("core_clap.socks5").to_string()
|
||||
)]
|
||||
socks5: Option<u16>,
|
||||
|
||||
#[arg(
|
||||
long,
|
||||
help = t!("core_clap.ipv6_listener").to_string()
|
||||
)]
|
||||
ipv6_listener: Option<String>,
|
||||
}
|
||||
|
||||
rust_i18n::i18n!("locales", fallback = "en");
|
||||
@@ -512,6 +519,12 @@ impl From<Cli> for TomlConfigLoader {
|
||||
}
|
||||
f.disable_p2p = cli.disable_p2p;
|
||||
f.relay_all_peer_rpc = cli.relay_all_peer_rpc;
|
||||
if let Some(ipv6_listener) = cli.ipv6_listener {
|
||||
f.ipv6_listener = ipv6_listener
|
||||
.parse()
|
||||
.with_context(|| format!("failed to parse ipv6 listener: {}", ipv6_listener))
|
||||
.unwrap();
|
||||
}
|
||||
cfg.set_flags(f);
|
||||
|
||||
cfg.set_exit_nodes(cli.exit_nodes.clone());
|
||||
|
@@ -358,7 +358,12 @@ impl IcmpProxy {
|
||||
if !self.cidr_set.contains_v4(ipv4.get_destination())
|
||||
&& !is_exit_node
|
||||
&& !(self.global_ctx.no_tun()
|
||||
&& Some(ipv4.get_destination()) == self.global_ctx.get_ipv4())
|
||||
&& Some(ipv4.get_destination())
|
||||
== self
|
||||
.global_ctx
|
||||
.get_ipv4()
|
||||
.as_ref()
|
||||
.map(cidr::Ipv4Inet::address))
|
||||
{
|
||||
return None;
|
||||
}
|
||||
@@ -382,7 +387,14 @@ impl IcmpProxy {
|
||||
return None;
|
||||
}
|
||||
|
||||
if self.global_ctx.no_tun() && Some(ipv4.get_destination()) == self.global_ctx.get_ipv4() {
|
||||
if self.global_ctx.no_tun()
|
||||
&& Some(ipv4.get_destination())
|
||||
== self
|
||||
.global_ctx
|
||||
.get_ipv4()
|
||||
.as_ref()
|
||||
.map(cidr::Ipv4Inet::address)
|
||||
{
|
||||
self.send_icmp_reply_to_peer(
|
||||
&ipv4.get_destination(),
|
||||
&ipv4.get_source(),
|
||||
|
@@ -111,7 +111,7 @@ struct Socks5Entry {
|
||||
type Socks5EntrySet = Arc<DashSet<Socks5Entry>>;
|
||||
|
||||
struct Socks5ServerNet {
|
||||
ipv4_addr: Ipv4Addr,
|
||||
ipv4_addr: cidr::Ipv4Inet,
|
||||
auth: Option<SimpleUserPassword>,
|
||||
|
||||
smoltcp_net: Arc<Net>,
|
||||
@@ -122,7 +122,7 @@ struct Socks5ServerNet {
|
||||
|
||||
impl Socks5ServerNet {
|
||||
pub fn new(
|
||||
ipv4_addr: Ipv4Addr,
|
||||
ipv4_addr: cidr::Ipv4Inet,
|
||||
auth: Option<SimpleUserPassword>,
|
||||
peer_manager: Arc<PeerManager>,
|
||||
packet_recv: Arc<Mutex<mpsc::Receiver<ZCPacket>>>,
|
||||
@@ -173,8 +173,10 @@ impl Socks5ServerNet {
|
||||
dev,
|
||||
NetConfig::new(
|
||||
interface_config,
|
||||
format!("{}/24", ipv4_addr).parse().unwrap(),
|
||||
vec![format!("{}", ipv4_addr).parse().unwrap()],
|
||||
format!("{}/{}", ipv4_addr.address(), ipv4_addr.network_length())
|
||||
.parse()
|
||||
.unwrap(),
|
||||
vec![format!("{}", ipv4_addr.address()).parse().unwrap()],
|
||||
),
|
||||
);
|
||||
|
||||
|
@@ -1,3 +1,4 @@
|
||||
use cidr::Ipv4Inet;
|
||||
use core::panic;
|
||||
use crossbeam::atomic::AtomicCell;
|
||||
use dashmap::DashMap;
|
||||
@@ -526,7 +527,8 @@ impl TcpProxy {
|
||||
tracing::warn!("set_nodelay failed, ignore it: {:?}", e);
|
||||
}
|
||||
|
||||
let nat_dst = if Some(nat_entry.dst.ip()) == global_ctx.get_ipv4().map(|ip| IpAddr::V4(ip))
|
||||
let nat_dst = if Some(nat_entry.dst.ip())
|
||||
== global_ctx.get_ipv4().map(|ip| IpAddr::V4(ip.address()))
|
||||
{
|
||||
format!("127.0.0.1:{}", nat_entry.dst.port())
|
||||
.parse()
|
||||
@@ -591,7 +593,10 @@ impl TcpProxy {
|
||||
{
|
||||
Some(Ipv4Addr::new(192, 88, 99, 254))
|
||||
} else {
|
||||
self.global_ctx.get_ipv4()
|
||||
self.global_ctx
|
||||
.get_ipv4()
|
||||
.as_ref()
|
||||
.map(cidr::Ipv4Inet::address)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -621,7 +626,8 @@ impl TcpProxy {
|
||||
if !self.cidr_set.contains_v4(ipv4.get_destination())
|
||||
&& !is_exit_node
|
||||
&& !(self.global_ctx.no_tun()
|
||||
&& Some(ipv4.get_destination()) == self.global_ctx.get_ipv4())
|
||||
&& Some(ipv4.get_destination())
|
||||
== self.global_ctx.get_ipv4().as_ref().map(Ipv4Inet::address))
|
||||
{
|
||||
return None;
|
||||
}
|
||||
|
@@ -4,6 +4,7 @@ use std::{
|
||||
time::Duration,
|
||||
};
|
||||
|
||||
use cidr::Ipv4Inet;
|
||||
use crossbeam::atomic::AtomicCell;
|
||||
use dashmap::DashMap;
|
||||
use pnet::packet::{
|
||||
@@ -182,7 +183,7 @@ impl UdpNatEntry {
|
||||
&mut buf,
|
||||
&src_v4,
|
||||
len,
|
||||
1256,
|
||||
1200,
|
||||
ip_id,
|
||||
)
|
||||
.await
|
||||
@@ -245,7 +246,8 @@ impl UdpProxy {
|
||||
if !self.cidr_set.contains_v4(ipv4.get_destination())
|
||||
&& !is_exit_node
|
||||
&& !(self.global_ctx.no_tun()
|
||||
&& Some(ipv4.get_destination()) == self.global_ctx.get_ipv4())
|
||||
&& Some(ipv4.get_destination())
|
||||
== self.global_ctx.get_ipv4().as_ref().map(Ipv4Inet::address))
|
||||
{
|
||||
return None;
|
||||
}
|
||||
@@ -296,14 +298,16 @@ impl UdpProxy {
|
||||
.replace(tokio::spawn(UdpNatEntry::forward_task(
|
||||
nat_entry.clone(),
|
||||
self.sender.clone(),
|
||||
self.global_ctx.get_ipv4()?,
|
||||
self.global_ctx.get_ipv4().map(|x| x.address())?,
|
||||
)));
|
||||
}
|
||||
|
||||
nat_entry.mark_active();
|
||||
|
||||
// TODO: should it be async.
|
||||
let dst_socket = if Some(ipv4.get_destination()) == self.global_ctx.get_ipv4() {
|
||||
let dst_socket = if Some(ipv4.get_destination())
|
||||
== self.global_ctx.get_ipv4().as_ref().map(Ipv4Inet::address)
|
||||
{
|
||||
format!("127.0.0.1:{}", udp_packet.get_destination())
|
||||
.parse()
|
||||
.unwrap()
|
||||
@@ -350,7 +354,7 @@ impl UdpProxy {
|
||||
peer_manager: Arc<PeerManager>,
|
||||
) -> Result<Arc<Self>, Error> {
|
||||
let cidr_set = CidrSet::new(global_ctx.clone());
|
||||
let (sender, receiver) = channel(64);
|
||||
let (sender, receiver) = channel(1024);
|
||||
let ret = Self {
|
||||
global_ctx,
|
||||
peer_manager,
|
||||
|
@@ -161,7 +161,7 @@ impl Instance {
|
||||
DirectConnectorManager::new(global_ctx.clone(), peer_manager.clone());
|
||||
direct_conn_manager.run();
|
||||
|
||||
let udp_hole_puncher = UdpHolePunchConnector::new(global_ctx.clone(), peer_manager.clone());
|
||||
let udp_hole_puncher = UdpHolePunchConnector::new(peer_manager.clone());
|
||||
|
||||
let peer_center = Arc::new(PeerCenterInstance::new(peer_manager.clone()));
|
||||
|
||||
@@ -270,19 +270,11 @@ impl Instance {
|
||||
|
||||
let mut used_ipv4 = HashSet::new();
|
||||
for route in routes {
|
||||
if route.ipv4_addr.is_empty() {
|
||||
continue;
|
||||
}
|
||||
|
||||
let Ok(peer_ipv4_addr) = route.ipv4_addr.parse::<Ipv4Addr>() else {
|
||||
let Some(peer_ipv4_addr) = route.ipv4_addr else {
|
||||
continue;
|
||||
};
|
||||
|
||||
let Ok(peer_ipv4_addr) = Ipv4Inet::new(peer_ipv4_addr, 24) else {
|
||||
continue;
|
||||
};
|
||||
|
||||
used_ipv4.insert(peer_ipv4_addr);
|
||||
used_ipv4.insert(peer_ipv4_addr.into());
|
||||
}
|
||||
|
||||
let dhcp_inet = used_ipv4.iter().next().unwrap_or(&default_ipv4_addr);
|
||||
@@ -304,7 +296,7 @@ impl Instance {
|
||||
continue;
|
||||
}
|
||||
|
||||
let last_ip = current_dhcp_ip.as_ref().map(Ipv4Inet::address);
|
||||
let last_ip = current_dhcp_ip.clone();
|
||||
tracing::debug!(
|
||||
?current_dhcp_ip,
|
||||
?candidate_ipv4_addr,
|
||||
@@ -316,11 +308,9 @@ impl Instance {
|
||||
if let Some(ip) = candidate_ipv4_addr {
|
||||
if global_ctx_c.no_tun() {
|
||||
current_dhcp_ip = Some(ip);
|
||||
global_ctx_c.set_ipv4(Some(ip.address()));
|
||||
global_ctx_c.issue_event(GlobalCtxEvent::DhcpIpv4Changed(
|
||||
last_ip,
|
||||
Some(ip.address()),
|
||||
));
|
||||
global_ctx_c.set_ipv4(Some(ip));
|
||||
global_ctx_c
|
||||
.issue_event(GlobalCtxEvent::DhcpIpv4Changed(last_ip, Some(ip)));
|
||||
continue;
|
||||
}
|
||||
|
||||
@@ -331,7 +321,7 @@ impl Instance {
|
||||
&peer_manager_c,
|
||||
_peer_packet_receiver.clone(),
|
||||
);
|
||||
if let Err(e) = new_nic_ctx.run(ip.address()).await {
|
||||
if let Err(e) = new_nic_ctx.run(ip).await {
|
||||
tracing::error!(
|
||||
?current_dhcp_ip,
|
||||
?candidate_ipv4_addr,
|
||||
@@ -345,9 +335,8 @@ impl Instance {
|
||||
}
|
||||
|
||||
current_dhcp_ip = Some(ip);
|
||||
global_ctx_c.set_ipv4(Some(ip.address()));
|
||||
global_ctx_c
|
||||
.issue_event(GlobalCtxEvent::DhcpIpv4Changed(last_ip, Some(ip.address())));
|
||||
global_ctx_c.set_ipv4(Some(ip));
|
||||
global_ctx_c.issue_event(GlobalCtxEvent::DhcpIpv4Changed(last_ip, Some(ip)));
|
||||
} else {
|
||||
current_dhcp_ip = None;
|
||||
global_ctx_c.set_ipv4(None);
|
||||
|
@@ -111,9 +111,10 @@ impl<H: TunnelHandlerForListener + Send + Sync + 'static + Debug> ListenerManage
|
||||
}
|
||||
|
||||
if self.global_ctx.config.get_flags().enable_ipv6 {
|
||||
let ipv6_listener = self.global_ctx.config.get_flags().ipv6_listener.clone();
|
||||
let _ = self
|
||||
.add_listener(
|
||||
UdpTunnelListener::new("udp://[::]:0".parse().unwrap()),
|
||||
UdpTunnelListener::new(ipv6_listener.parse().unwrap()),
|
||||
false,
|
||||
)
|
||||
.await?;
|
||||
|
@@ -504,8 +504,7 @@ pub fn reg_change_catrgory_in_profile(dev_name: &str) -> io::Result<()> {
|
||||
let subkey = profiles_key.open_subkey_with_flags(&subkey_name, KEY_ALL_ACCESS)?;
|
||||
match subkey.get_value::<String, _>("ProfileName") {
|
||||
Ok(profile_name) => {
|
||||
if !dev_name.is_empty() && dev_name == profile_name
|
||||
{
|
||||
if !dev_name.is_empty() && dev_name == profile_name {
|
||||
match subkey.set_value("Category", &1u32) {
|
||||
Ok(_) => tracing::trace!("Successfully set Category in registry"),
|
||||
Err(e) => tracing::error!("Failed to set Category in registry: {}", e),
|
||||
@@ -548,14 +547,16 @@ impl NicCtx {
|
||||
}
|
||||
}
|
||||
|
||||
async fn assign_ipv4_to_tun_device(&self, ipv4_addr: Ipv4Addr) -> Result<(), Error> {
|
||||
async fn assign_ipv4_to_tun_device(&self, ipv4_addr: cidr::Ipv4Inet) -> Result<(), Error> {
|
||||
let nic = self.nic.lock().await;
|
||||
nic.link_up().await?;
|
||||
nic.remove_ip(None).await?;
|
||||
nic.add_ip(ipv4_addr, 24).await?;
|
||||
nic.add_ip(ipv4_addr.address(), ipv4_addr.network_length() as i32)
|
||||
.await?;
|
||||
#[cfg(any(target_os = "macos", target_os = "freebsd"))]
|
||||
{
|
||||
nic.add_route(ipv4_addr, 24).await?;
|
||||
nic.add_route(ipv4_addr.first_address(), ipv4_addr.network_length())
|
||||
.await?;
|
||||
}
|
||||
Ok(())
|
||||
}
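
Several of the hunks above replace a bare `Ipv4Addr` with `cidr::Ipv4Inet` so the prefix length travels together with the address. A small demo of the `cidr` crate calls these diffs rely on (`address`, `network_length`, `first_address`, `last_address`); the concrete addresses are made up for illustration.

```rust
use cidr::Ipv4Inet;
use std::net::Ipv4Addr;

fn main() {
    // An interface address plus prefix length, as now stored by get_ipv4().
    let inet = Ipv4Inet::new(Ipv4Addr::new(10, 126, 126, 3), 24).unwrap();

    assert_eq!(inet.address(), Ipv4Addr::new(10, 126, 126, 3)); // host address
    assert_eq!(inet.network_length(), 24);                      // prefix length
    assert_eq!(inet.first_address(), Ipv4Addr::new(10, 126, 126, 0));
    assert_eq!(inet.last_address(), Ipv4Addr::new(10, 126, 126, 255));

    // This is how the peer-manager hunk detects a subnet broadcast destination.
    let dst = Ipv4Addr::new(10, 126, 126, 255);
    assert!(dst == inet.last_address());
}
```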
|
||||
@@ -710,18 +711,17 @@ impl NicCtx {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn run(&mut self, ipv4_addr: Ipv4Addr) -> Result<(), Error> {
|
||||
pub async fn run(&mut self, ipv4_addr: cidr::Ipv4Inet) -> Result<(), Error> {
|
||||
let tunnel = {
|
||||
let mut nic = self.nic.lock().await;
|
||||
match nic.create_dev().await {
|
||||
Ok(ret) => {
|
||||
|
||||
#[cfg(target_os = "windows")]
|
||||
#[cfg(target_os = "windows")]
|
||||
{
|
||||
let dev_name = self.global_ctx.get_flags().dev_name;
|
||||
let _ = reg_change_catrgory_in_profile(&dev_name);
|
||||
}
|
||||
|
||||
|
||||
self.global_ctx
|
||||
.issue_event(GlobalCtxEvent::TunDeviceReady(nic.ifname().to_string()));
|
||||
ret
|
||||
|
@@ -230,7 +230,7 @@ impl PeerCenterInstance {
|
||||
|
||||
let ret = client
|
||||
.get_global_peer_map(
|
||||
BaseController {},
|
||||
BaseController::default(),
|
||||
GetGlobalPeerMapRequest {
|
||||
digest: ctx.job_ctx.global_peer_map_digest.load(),
|
||||
},
|
||||
@@ -307,7 +307,7 @@ impl PeerCenterInstance {
|
||||
|
||||
let ret = client
|
||||
.report_peers(
|
||||
BaseController {},
|
||||
BaseController::default(),
|
||||
ReportPeersRequest {
|
||||
my_peer_id: my_node_id,
|
||||
peer_infos: Some(peers),
|
||||
|
@@ -15,6 +15,8 @@ pub mod foreign_network_manager;
|
||||
|
||||
pub mod encrypt;
|
||||
|
||||
pub mod peer_task;
|
||||
|
||||
#[cfg(test)]
|
||||
pub mod tests;
|
||||
|
||||
|
@@ -93,7 +93,7 @@ impl PeerConn {
|
||||
let peer_conn_tunnel_filter = StatsRecorderTunnelFilter::new();
|
||||
let throughput = peer_conn_tunnel_filter.filter_output();
|
||||
let peer_conn_tunnel = TunnelWithFilter::new(tunnel, peer_conn_tunnel_filter);
|
||||
let mut mpsc_tunnel = MpscTunnel::new(peer_conn_tunnel);
|
||||
let mut mpsc_tunnel = MpscTunnel::new(peer_conn_tunnel, Some(Duration::from_secs(7)));
|
||||
|
||||
let (recv, sink) = (mpsc_tunnel.get_stream(), mpsc_tunnel.get_sink());
|
||||
|
||||
@@ -224,7 +224,12 @@ impl PeerConn {
|
||||
self.info = Some(rsp);
|
||||
self.is_client = Some(false);
|
||||
self.send_handshake().await?;
|
||||
Ok(())
|
||||
|
||||
if self.get_peer_id() == self.my_peer_id {
|
||||
Err(Error::WaitRespError("peer id conflict".to_owned()))
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[tracing::instrument]
|
||||
@@ -235,7 +240,12 @@ impl PeerConn {
|
||||
tracing::info!("handshake response: {:?}", rsp);
|
||||
self.info = Some(rsp);
|
||||
self.is_client = Some(true);
|
||||
Ok(())
|
||||
|
||||
if self.get_peer_id() == self.my_peer_id {
|
||||
Err(Error::WaitRespError("peer id conflict".to_owned()))
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
pub fn handshake_done(&self) -> bool {
|
||||
@@ -282,9 +292,7 @@ impl PeerConn {
|
||||
tracing::error!(?e, "peer conn send ctrl resp error");
|
||||
}
|
||||
} else {
|
||||
if zc_packet.is_lossy() {
|
||||
let _ = sender.try_send(zc_packet);
|
||||
} else if sender.send(zc_packet).await.is_err() {
|
||||
if sender.send(zc_packet).await.is_err() {
|
||||
break;
|
||||
}
|
||||
}
|
||||
@@ -398,6 +406,24 @@ mod tests {
|
||||
use crate::tunnel::filter::PacketRecorderTunnelFilter;
|
||||
use crate::tunnel::ring::create_ring_tunnel_pair;
|
||||
|
||||
#[tokio::test]
|
||||
async fn peer_conn_handshake_same_id() {
|
||||
let (c, s) = create_ring_tunnel_pair();
|
||||
let c_peer_id = new_peer_id();
|
||||
let s_peer_id = c_peer_id;
|
||||
|
||||
let mut c_peer = PeerConn::new(c_peer_id, get_mock_global_ctx(), Box::new(c));
|
||||
let mut s_peer = PeerConn::new(s_peer_id, get_mock_global_ctx(), Box::new(s));
|
||||
|
||||
let (c_ret, s_ret) = tokio::join!(
|
||||
c_peer.do_handshake_as_client(),
|
||||
s_peer.do_handshake_as_server()
|
||||
);
|
||||
|
||||
assert!(c_ret.is_err());
|
||||
assert!(s_ret.is_err());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn peer_conn_handshake() {
|
||||
let (c, s) = create_ring_tunnel_pair();
|
||||
|
@@ -294,6 +294,8 @@ impl PeerConnPinger {
|
||||
let need_close = if last_rx_packets != current_rx_packets {
|
||||
// if we receive some packet from peers, we should relax the condition
|
||||
counter > 50 && loss_rate_1 > 0.5
|
||||
|
||||
// TODO: wait more time to see if the loss rate is still high after no rx
|
||||
} else {
|
||||
true
|
||||
};
|
||||
|
@@ -185,7 +185,7 @@ impl PeerManager {
|
||||
) -> Self {
|
||||
let my_peer_id = rand::random();
|
||||
|
||||
let (packet_send, packet_recv) = mpsc::channel(100);
|
||||
let (packet_send, packet_recv) = mpsc::channel(128);
|
||||
let peers = Arc::new(PeerMap::new(
|
||||
packet_send.clone(),
|
||||
global_ctx.clone(),
|
||||
@@ -718,8 +718,16 @@ impl PeerManager {
|
||||
|
||||
let mut is_exit_node = false;
|
||||
let mut dst_peers = vec![];
|
||||
// NOTE: currently we only support ipv4 and cidr is 24
|
||||
if ipv4_addr.is_broadcast() || ipv4_addr.is_multicast() || ipv4_addr.octets()[3] == 255 {
|
||||
let network_length = self
|
||||
.global_ctx
|
||||
.get_ipv4()
|
||||
.map(|x| x.network_length())
|
||||
.unwrap_or(24);
|
||||
let ipv4_inet = cidr::Ipv4Inet::new(ipv4_addr, network_length).unwrap();
|
||||
if ipv4_addr.is_broadcast()
|
||||
|| ipv4_addr.is_multicast()
|
||||
|| ipv4_addr == ipv4_inet.last_address()
|
||||
{
|
||||
dst_peers.extend(
|
||||
self.peers
|
||||
.list_routes()
|
||||
@@ -1058,7 +1066,7 @@ mod tests {
|
||||
|
||||
let ret = stub
|
||||
.say_hello(
|
||||
RpcController {},
|
||||
RpcController::default(),
|
||||
SayHelloRequest {
|
||||
name: "abc".to_string(),
|
||||
},
|
||||
|
@@ -6,7 +6,7 @@ use std::{
|
||||
atomic::{AtomicBool, AtomicU32, Ordering},
|
||||
Arc, Weak,
|
||||
},
|
||||
time::{Duration, SystemTime},
|
||||
time::{Duration, Instant, SystemTime},
|
||||
};
|
||||
|
||||
use crossbeam::atomic::AtomicCell;
|
||||
@@ -30,7 +30,7 @@ use crate::{
|
||||
},
|
||||
peers::route_trait::{Route, RouteInterfaceBox},
|
||||
proto::{
|
||||
common::{NatType, StunInfo},
|
||||
common::{Ipv4Inet, NatType, StunInfo},
|
||||
peer_rpc::{
|
||||
route_foreign_network_infos, ForeignNetworkRouteInfoEntry, ForeignNetworkRouteInfoKey,
|
||||
OspfRouteRpc, OspfRouteRpcClientFactory, OspfRouteRpcServer, PeerIdVersion,
|
||||
@@ -117,15 +117,22 @@ impl RoutePeerInfo {
|
||||
version: 0,
|
||||
easytier_version: EASYTIER_VERSION.to_string(),
|
||||
feature_flag: None,
|
||||
peer_route_id: 0,
|
||||
network_length: 24,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn update_self(&self, my_peer_id: PeerId, global_ctx: &ArcGlobalCtx) -> Self {
|
||||
pub fn update_self(
|
||||
&self,
|
||||
my_peer_id: PeerId,
|
||||
peer_route_id: u64,
|
||||
global_ctx: &ArcGlobalCtx,
|
||||
) -> Self {
|
||||
let mut new = Self {
|
||||
peer_id: my_peer_id,
|
||||
inst_id: Some(global_ctx.get_id().into()),
|
||||
cost: 0,
|
||||
ipv4_addr: global_ctx.get_ipv4().map(|x| x.into()),
|
||||
ipv4_addr: global_ctx.get_ipv4().map(|x| x.address().into()),
|
||||
proxy_cidrs: global_ctx
|
||||
.get_proxy_cidrs()
|
||||
.iter()
|
||||
@@ -143,6 +150,11 @@ impl RoutePeerInfo {
|
||||
|
||||
easytier_version: EASYTIER_VERSION.to_string(),
|
||||
feature_flag: Some(global_ctx.get_feature_flags()),
|
||||
peer_route_id,
|
||||
network_length: global_ctx
|
||||
.get_ipv4()
|
||||
.map(|x| x.network_length() as u32)
|
||||
.unwrap_or(24),
|
||||
};
|
||||
|
||||
let need_update_periodically = if let Ok(Ok(d)) =
|
||||
@@ -164,12 +176,21 @@ impl RoutePeerInfo {
|
||||
|
||||
impl Into<crate::proto::cli::Route> for RoutePeerInfo {
|
||||
fn into(self) -> crate::proto::cli::Route {
|
||||
let network_length = if self.network_length == 0 {
|
||||
24
|
||||
} else {
|
||||
self.network_length
|
||||
};
|
||||
|
||||
crate::proto::cli::Route {
|
||||
peer_id: self.peer_id,
|
||||
ipv4_addr: if let Some(ipv4_addr) = self.ipv4_addr {
|
||||
ipv4_addr.to_string()
|
||||
Some(Ipv4Inet {
|
||||
address: Some(ipv4_addr.into()),
|
||||
network_length,
|
||||
})
|
||||
} else {
|
||||
"".to_string()
|
||||
None
|
||||
},
|
||||
next_hop_peer_id: 0,
|
||||
cost: self.cost as i32,
|
||||
@@ -296,38 +317,57 @@ impl SyncedRouteInfo {
|
||||
fn check_duplicate_peer_id(
|
||||
&self,
|
||||
my_peer_id: PeerId,
|
||||
my_peer_route_id: u64,
|
||||
dst_peer_id: PeerId,
|
||||
route_infos: &Vec<RoutePeerInfo>,
|
||||
dst_peer_route_id: Option<u64>,
|
||||
info: &RoutePeerInfo,
|
||||
) -> Result<(), Error> {
|
||||
// 1. check if we are duplicated.
|
||||
for info in route_infos.iter() {
|
||||
if info.peer_id == my_peer_id {
|
||||
if info.version > self.get_peer_info_version_with_default(info.peer_id) {
|
||||
// if dst peer send to us with higher version info of my peer, our peer id is duplicated
|
||||
// TODO: handle this better. restart peer manager?
|
||||
panic!("my peer id is duplicated");
|
||||
// return Err(Error::DuplicatePeerId);
|
||||
}
|
||||
if info.peer_id == my_peer_id {
|
||||
if info.peer_route_id != my_peer_route_id
|
||||
&& info.version > self.get_peer_info_version_with_default(info.peer_id)
|
||||
{
|
||||
// if dst peer send to us with higher version info of my peer, our peer id is duplicated
|
||||
// TODO: handle this better. restart peer manager?
|
||||
panic!("my peer id is duplicated");
|
||||
// return Err(Error::DuplicatePeerId);
|
||||
}
|
||||
} else if info.peer_id == dst_peer_id {
|
||||
let Some(dst_peer_route_id) = dst_peer_route_id else {
|
||||
return Ok(());
|
||||
};
|
||||
|
||||
if info.peer_id == dst_peer_id {
|
||||
if info.version < self.get_peer_info_version_with_default(info.peer_id) {
|
||||
// if dst peer send to us with lower version info of dst peer, dst peer id is duplicated
|
||||
return Err(Error::DuplicatePeerId);
|
||||
}
|
||||
if dst_peer_route_id != info.peer_route_id
|
||||
&& info.version < self.get_peer_info_version_with_default(info.peer_id)
|
||||
{
|
||||
// if dst peer send to us with lower version info of dst peer, dst peer id is duplicated
|
||||
return Err(Error::DuplicatePeerId);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
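
The new `peer_route_id` is a random per-process value, so stale or looped-back information about the node itself (same peer_id, same route id) is no longer mistaken for a genuine peer-id collision. A condensed sketch of the two rules above, with hypothetical argument names; `*_local_version` stands for the locally stored version for that peer id.

```rust
/// Condensed form of check_duplicate_peer_id (illustrative only).
fn check_collision(
    my_peer_id: u32, my_route_id: u64, my_local_version: u32,
    dst_peer_id: u32, dst_route_id: Option<u64>, dst_local_version: u32,
    info_peer_id: u32, info_route_id: u64, info_version: u32,
) -> Result<(), &'static str> {
    if info_peer_id == my_peer_id {
        // someone reports *newer* info about "us" under a different route id
        if info_route_id != my_route_id && info_version > my_local_version {
            return Err("my peer id is duplicated");
        }
    } else if info_peer_id == dst_peer_id {
        // the peer we sync with reports *older* info about itself under a different route id
        if let Some(dst_route_id) = dst_route_id {
            if info_route_id != dst_route_id && info_version < dst_local_version {
                return Err("dst peer id is duplicated");
            }
        }
    }
    Ok(())
}
```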
|
||||
|
||||
fn update_peer_infos(
|
||||
&self,
|
||||
my_peer_id: PeerId,
|
||||
my_peer_route_id: u64,
|
||||
dst_peer_id: PeerId,
|
||||
peer_infos: &Vec<RoutePeerInfo>,
|
||||
) -> Result<(), Error> {
|
||||
self.check_duplicate_peer_id(my_peer_id, dst_peer_id, peer_infos)?;
|
||||
for mut route_info in peer_infos.iter().map(Clone::clone) {
self.check_duplicate_peer_id(
my_peer_id,
my_peer_route_id,
dst_peer_id,
if route_info.peer_id == dst_peer_id {
self.peer_infos.get(&dst_peer_id).map(|x| x.peer_route_id)
} else {
None
},
&route_info,
)?;

// time between peers may not be synchronized, so update last_update to local now.
// note only last_update with larger version will be updated to local saved peer info.
route_info.last_update = Some(SystemTime::now().into());

@@ -391,12 +431,17 @@ impl SyncedRouteInfo {
}
}

fn update_my_peer_info(&self, my_peer_id: PeerId, global_ctx: &ArcGlobalCtx) -> bool {
fn update_my_peer_info(
&self,
my_peer_id: PeerId,
my_peer_route_id: u64,
global_ctx: &ArcGlobalCtx,
) -> bool {
let mut old = self
.peer_infos
.entry(my_peer_id)
.or_insert(RoutePeerInfo::new());
let new = old.update_self(my_peer_id, &global_ctx);
let new = old.update_self(my_peer_id, my_peer_route_id, &global_ctx);
let new_version = new.version;
let old_version = old.version;
*old = new;

@@ -525,7 +570,7 @@ impl RouteTable {
fn get_nat_type(&self, peer_id: PeerId) -> Option<NatType> {
self.peer_infos
.get(&peer_id)
.map(|x| NatType::try_from(x.udp_stun_info as i32).unwrap())
.map(|x| NatType::try_from(x.udp_stun_info as i32).unwrap_or_default())
}

fn build_peer_graph_from_synced_info<T: RouteCostCalculatorInterface>(

@@ -885,6 +930,7 @@ impl Drop for SyncRouteSession {
struct PeerRouteServiceImpl {
my_peer_id: PeerId,
my_peer_route_id: u64,
global_ctx: ArcGlobalCtx,
sessions: DashMap<PeerId, Arc<SyncRouteSession>>,

@@ -904,6 +950,7 @@ impl Debug for PeerRouteServiceImpl {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("PeerRouteServiceImpl")
.field("my_peer_id", &self.my_peer_id)
.field("my_peer_route_id", &self.my_peer_route_id)
.field("network", &self.global_ctx.get_network_identity())
.field("sessions", &self.sessions)
.field("route_table", &self.route_table)

@@ -922,6 +969,7 @@ impl PeerRouteServiceImpl {
fn new(my_peer_id: PeerId, global_ctx: ArcGlobalCtx) -> Self {
PeerRouteServiceImpl {
my_peer_id,
my_peer_route_id: rand::random(),
global_ctx,
sessions: DashMap::new(),

@@ -977,10 +1025,11 @@ impl PeerRouteServiceImpl {
}

fn update_my_peer_info(&self) -> bool {
if self
.synced_route_info
.update_my_peer_info(self.my_peer_id, &self.global_ctx)
{
if self.synced_route_info.update_my_peer_info(
self.my_peer_id,
self.my_peer_route_id,
&self.global_ctx,
) {
self.update_route_table_and_cached_local_conn_bitmap();
return true;
}

@@ -1272,6 +1321,7 @@ impl PeerRouteServiceImpl {
&self,
dst_peer_id: PeerId,
peer_rpc: Arc<PeerRpcManager>,
sync_as_initiator: bool,
) -> bool {
let Some(session) = self.get_session(dst_peer_id) else {
// if session not exist, exit the sync loop.

@@ -1288,6 +1338,7 @@ impl PeerRouteServiceImpl {
&& conn_bitmap.is_none()
&& foreign_network.is_none()
&& !session.need_sync_initiator_info.load(Ordering::Relaxed)
&& !(sync_as_initiator && session.we_are_initiator.load(Ordering::Relaxed))
{
return true;
}

@@ -1304,7 +1355,7 @@ impl PeerRouteServiceImpl {
self.global_ctx.get_network_name(),
);

let mut ctrl = BaseController {};
let mut ctrl = BaseController::default();
ctrl.set_timeout_ms(3000);
let ret = rpc_stub
.sync_route_info(

@@ -1336,7 +1387,9 @@ impl PeerRouteServiceImpl {
if resp.error.is_some() {
let err = resp.error.unwrap();
if err == Error::DuplicatePeerId as i32 {
panic!("duplicate peer id");
if !self.global_ctx.get_feature_flags().is_public_server {
panic!("duplicate peer id");
}
} else {
tracing::error!(?ret, ?my_peer_id, ?dst_peer_id, "sync_route_info failed");
session

@@ -1444,6 +1497,7 @@ impl RouteSessionManager {
dst_peer_id: PeerId,
mut sync_now: tokio::sync::broadcast::Receiver<()>,
) {
let mut last_sync = Instant::now();
loop {
let mut first_time = true;

@@ -1461,8 +1515,16 @@ impl RouteSessionManager {
service_impl.update_my_infos().await;
}

// if we are initiator, we should ensure the dst has the session.
let sync_as_initiator = if last_sync.elapsed().as_secs() > 10 {
last_sync = Instant::now();
true
} else {
false
};

if service_impl
.sync_route_with_peer(dst_peer_id, peer_rpc.clone())
.sync_route_with_peer(dst_peer_id, peer_rpc.clone(), sync_as_initiator)
.await
{
break;

@@ -1677,6 +1739,7 @@ impl RouteSessionManager {
if let Some(peer_infos) = &peer_infos {
service_impl.synced_route_info.update_peer_infos(
my_peer_id,
service_impl.my_peer_route_id,
from_peer_id,
peer_infos,
)?;
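The hunks above thread a random `my_peer_route_id` through route sync so that two nodes configured with the same peer id can be told apart, and only non-public-server nodes panic when a duplicate is detected. A minimal, self-contained sketch of that comparison (the helper below is hypothetical, not EasyTier's actual `check_duplicate_peer_id`; it only assumes the `rand` crate):

```rust
// Hypothetical helper illustrating the duplicate-peer-id idea: every node
// draws a random route id at startup, so the same peer id reported with a
// different route id must belong to another machine.
fn is_duplicate_peer(
    my_peer_id: u32,
    my_route_id: u64,
    remote_peer_id: u32,
    remote_route_id: u64,
) -> bool {
    my_peer_id == remote_peer_id && my_route_id != remote_route_id
}

fn main() {
    let my_route_id: u64 = rand::random(); // assumes the `rand` crate
    // different peer id: never a conflict
    assert!(!is_duplicate_peer(1, my_route_id, 2, 42));
    // same peer id, different route id: another node is claiming our identity
    assert!(is_duplicate_peer(1, my_route_id, 1, my_route_id.wrapping_add(1)));
}
```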
@@ -224,7 +224,10 @@ pub mod tests {
let msg = random_string(8192);
let ret = stub
.say_hello(RpcController {}, SayHelloRequest { name: msg.clone() })
.say_hello(
RpcController::default(),
SayHelloRequest { name: msg.clone() },
)
.await
.unwrap();

@@ -233,7 +236,10 @@ pub mod tests {
let msg = random_string(10);
let ret = stub
.say_hello(RpcController {}, SayHelloRequest { name: msg.clone() })
.say_hello(
RpcController::default(),
SayHelloRequest { name: msg.clone() },
)
.await
.unwrap();

@@ -281,7 +287,10 @@ pub mod tests {
);

let ret = stub
.say_hello(RpcController {}, SayHelloRequest { name: msg.clone() })
.say_hello(
RpcController::default(),
SayHelloRequest { name: msg.clone() },
)
.await
.unwrap();
assert_eq!(ret.greeting, format!("Hello {}!", msg));

@@ -289,14 +298,20 @@ pub mod tests {
// call again
let msg = random_string(16 * 1024);
let ret = stub
.say_hello(RpcController {}, SayHelloRequest { name: msg.clone() })
.say_hello(
RpcController::default(),
SayHelloRequest { name: msg.clone() },
)
.await
.unwrap();
assert_eq!(ret.greeting, format!("Hello {}!", msg));

let msg = random_string(16 * 1024);
let ret = stub
.say_hello(RpcController {}, SayHelloRequest { name: msg.clone() })
.say_hello(
RpcController::default(),
SayHelloRequest { name: msg.clone() },
)
.await
.unwrap();
assert_eq!(ret.greeting, format!("Hello {}!", msg));

@@ -340,13 +355,19 @@ pub mod tests {
let msg = random_string(16 * 1024);
let ret = stub1
.say_hello(RpcController {}, SayHelloRequest { name: msg.clone() })
.say_hello(
RpcController::default(),
SayHelloRequest { name: msg.clone() },
)
.await
.unwrap();
assert_eq!(ret.greeting, format!("Hello {}!", msg));

let ret = stub2
.say_hello(RpcController {}, SayHelloRequest { name: msg.clone() })
.say_hello(
RpcController::default(),
SayHelloRequest { name: msg.clone() },
)
.await;
assert!(ret.is_err() && ret.unwrap_err().to_string().contains("Timeout"));
}
138 easytier/src/peers/peer_task.rs Normal file
@@ -0,0 +1,138 @@
use std::result::Result;
use std::sync::{Arc, Mutex};

use async_trait::async_trait;
use dashmap::DashMap;
use tokio::select;
use tokio::sync::Notify;
use tokio::task::JoinHandle;

use crate::common::scoped_task::ScopedTask;
use anyhow::Error;

use super::peer_manager::PeerManager;

#[async_trait]
pub trait PeerTaskLauncher: Send + Sync + Clone + 'static {
type Data;
type CollectPeerItem;
type TaskRet;

fn new_data(&self, peer_mgr: Arc<PeerManager>) -> Self::Data;
async fn collect_peers_need_task(&self, data: &Self::Data) -> Vec<Self::CollectPeerItem>;
async fn launch_task(
&self,
data: &Self::Data,
item: Self::CollectPeerItem,
) -> JoinHandle<Result<Self::TaskRet, Error>>;

async fn all_task_done(&self, _data: &Self::Data) {}

fn loop_interval_ms(&self) -> u64 {
5000
}
}

pub struct PeerTaskManager<Launcher: PeerTaskLauncher> {
launcher: Launcher,
peer_mgr: Arc<PeerManager>,
main_loop_task: Mutex<Option<ScopedTask<()>>>,
run_signal: Arc<Notify>,
data: Launcher::Data,
}

impl<D, C, T, L> PeerTaskManager<L>
where
D: Send + Sync + Clone + 'static,
C: std::fmt::Debug + Send + Sync + Clone + core::hash::Hash + Eq + 'static,
T: Send + 'static,
L: PeerTaskLauncher<Data = D, CollectPeerItem = C, TaskRet = T> + 'static,
{
pub fn new(launcher: L, peer_mgr: Arc<PeerManager>) -> Self {
let data = launcher.new_data(peer_mgr.clone());
Self {
launcher,
peer_mgr,
main_loop_task: Mutex::new(None),
run_signal: Arc::new(Notify::new()),
data,
}
}

pub fn start(&self) {
let task = tokio::spawn(Self::main_loop(
self.launcher.clone(),
self.data.clone(),
self.run_signal.clone(),
))
.into();
self.main_loop_task.lock().unwrap().replace(task);
}

async fn main_loop(launcher: L, data: D, signal: Arc<Notify>) {
let peer_task_map = Arc::new(DashMap::<C, ScopedTask<Result<T, Error>>>::new());

loop {
let peers_to_connect = launcher.collect_peers_need_task(&data).await;

// remove task not in peers_to_connect
let mut to_remove = vec![];
for item in peer_task_map.iter() {
if !peers_to_connect.contains(item.key()) || item.value().is_finished() {
to_remove.push(item.key().clone());
}
}

tracing::debug!(
?peers_to_connect,
?to_remove,
"got peers to connect and remove"
);

for key in to_remove {
if let Some((_, task)) = peer_task_map.remove(&key) {
task.abort();
match task.await {
Ok(Ok(_)) => {}
Ok(Err(task_ret)) => {
tracing::error!(?task_ret, "hole punching task failed");
}
Err(e) => {
tracing::error!(?e, "hole punching task aborted");
}
}
}
}

if !peers_to_connect.is_empty() {
for item in peers_to_connect {
if peer_task_map.contains_key(&item) {
continue;
}

tracing::debug!(?item, "launch hole punching task");
peer_task_map
.insert(item.clone(), launcher.launch_task(&data, item).await.into());
}
} else if peer_task_map.is_empty() {
tracing::debug!("all task done");
launcher.all_task_done(&data).await;
}

select! {
_ = tokio::time::sleep(std::time::Duration::from_millis(
launcher.loop_interval_ms(),
)) => {},
_ = signal.notified() => {}
}
}
}

pub async fn run_immediately(&self) {
self.run_signal.notify_one();
}

pub fn data(&self) -> D {
self.data.clone()
}
}
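The new `peer_task.rs` centralizes a generic collect/launch/cleanup loop behind the `PeerTaskLauncher` trait. Below is a deliberately simplified, self-contained sketch of that loop's behaviour using plain tokio tasks and a `HashMap`; it mirrors the idea only and does not use EasyTier's `PeerManager`, `ScopedTask`, or `DashMap`:

```rust
// Simplified stand-in for the PeerTaskManager main loop: periodically collect
// items that need a task, spawn one task per item, and drop tasks whose item
// disappeared or which already finished.
use std::collections::HashMap;
use std::time::Duration;

use tokio::task::JoinHandle;

async fn collect_items(round: u64) -> Vec<u32> {
    // pretend peers 1..=3 need a task on even rounds only
    if round % 2 == 0 { vec![1, 2, 3] } else { vec![1] }
}

#[tokio::main]
async fn main() {
    let mut tasks: HashMap<u32, JoinHandle<()>> = HashMap::new();
    for round in 0..4u64 {
        let wanted = collect_items(round).await;
        // remove tasks that are no longer wanted or already finished
        tasks.retain(|id, handle| {
            let keep = wanted.contains(id) && !handle.is_finished();
            if !keep {
                handle.abort();
            }
            keep
        });
        // launch tasks for items that do not have one yet
        for id in wanted {
            tasks.entry(id).or_insert_with(|| {
                tokio::spawn(async move {
                    println!("working on peer {id}");
                    tokio::time::sleep(Duration::from_millis(50)).await;
                })
            });
        }
        tokio::time::sleep(Duration::from_millis(100)).await;
    }
}
```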
@@ -45,7 +45,7 @@ message ListPeerResponse {
message Route {
uint32 peer_id = 1;
string ipv4_addr = 2;
common.Ipv4Inet ipv4_addr = 2;
uint32 next_hop_peer_id = 3;
int32 cost = 4;
repeated string proxy_cidrs = 5;
@@ -42,6 +42,8 @@ message RpcPacket {
int32 trace_id = 9;
}

message Void {}

message UUID {
uint64 high = 1;
uint64 low = 2;

@@ -57,6 +59,8 @@ enum NatType {
PortRestricted = 5;
Symmetric = 6;
SymUdpFirewall = 7;
SymmetricEasyInc = 8;
SymmetricEasyDec = 9;
}

message Ipv4Addr { uint32 addr = 1; }

@@ -68,6 +72,11 @@ message Ipv6Addr {
uint32 part4 = 4;
}

message Ipv4Inet {
Ipv4Addr address = 1;
uint32 network_length = 2;
}

message Url { string url = 1; }

message SocketAddr {
@@ -1,5 +1,7 @@
use std::{fmt::Display, str::FromStr};

use anyhow::Context;

include!(concat!(env!("OUT_DIR"), "/common.rs"));

impl From<uuid::Uuid> for Uuid {

@@ -60,10 +62,8 @@ impl From<Ipv6Addr> for std::net::Ipv6Addr {
let part3 = value.part3.to_be_bytes();
let part4 = value.part4.to_be_bytes();
std::net::Ipv6Addr::from([
part1[0], part1[1], part1[2], part1[3],
part2[0], part2[1], part2[2], part2[3],
part3[0], part3[1], part3[2], part3[3],
part4[0], part4[1], part4[2], part4[3]
part1[0], part1[1], part1[2], part1[3], part2[0], part2[1], part2[2], part2[3],
part3[0], part3[1], part3[2], part3[3], part4[0], part4[1], part4[2], part4[3],
])
}
}

@@ -74,6 +74,37 @@ impl ToString for Ipv6Addr {
}
}

impl From<cidr::Ipv4Inet> for Ipv4Inet {
fn from(value: cidr::Ipv4Inet) -> Self {
Ipv4Inet {
address: Some(value.address().into()),
network_length: value.network_length() as u32,
}
}
}

impl From<Ipv4Inet> for cidr::Ipv4Inet {
fn from(value: Ipv4Inet) -> Self {
cidr::Ipv4Inet::new(value.address.unwrap().into(), value.network_length as u8).unwrap()
}
}

impl std::fmt::Display for Ipv4Inet {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}", cidr::Ipv4Inet::from(self.clone()))
}
}

impl FromStr for Ipv4Inet {
type Err = anyhow::Error;

fn from_str(s: &str) -> Result<Self, Self::Err> {
Ok(Ipv4Inet::from(
cidr::Ipv4Inet::from_str(s).with_context(|| "Failed to parse Ipv4Inet")?,
))
}
}

impl From<url::Url> for Url {
fn from(value: url::Url) -> Self {
Url {
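The conversions above wrap the `cidr` crate's `Ipv4Inet`, which is what gives the generated proto type its `address`/`network_length` pair and its `addr/len` text form. A small sketch of the underlying `cidr` behaviour these impls rely on:

```rust
// Round-trip behaviour of cidr::Ipv4Inet that the new From/Display/FromStr
// impls delegate to: an address plus a prefix length, printable as "a.b.c.d/len".
use std::str::FromStr;

use cidr::Ipv4Inet;

fn main() -> anyhow::Result<()> {
    let inet = Ipv4Inet::from_str("10.144.144.2/24")?;
    assert_eq!(inet.network_length(), 24);
    assert_eq!(inet.address().to_string(), "10.144.144.2");
    // Display yields the same "addr/len" text the proto type now prints.
    assert_eq!(inet.to_string(), "10.144.144.2/24");
    Ok(())
}
```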
@@ -19,6 +19,9 @@ message RoutePeerInfo {
string easytier_version = 10;
common.PeerFeatureFlag feature_flag = 11;
uint64 peer_route_id = 12;

uint32 network_length = 13;
}

message PeerIdVersion {

@@ -92,27 +95,78 @@ service DirectConnectorRpc {
rpc GetIpList(GetIpListRequest) returns (GetIpListResponse);
}

message TryPunchHoleRequest { common.SocketAddr local_mapped_addr = 1; }

message TryPunchHoleResponse { common.SocketAddr remote_mapped_addr = 1; }

message TryPunchSymmetricRequest {
common.SocketAddr listener_addr = 1;
uint32 port = 2;
repeated common.Ipv4Addr public_ips = 3;
uint32 min_port = 4;
uint32 max_port = 5;
uint32 transaction_id = 6;
uint32 round = 7;
uint32 last_port_index = 8;
message SelectPunchListenerRequest {
bool force_new = 1;
}

message TryPunchSymmetricResponse { uint32 last_port_index = 1; }
message SelectPunchListenerResponse {
common.SocketAddr listener_mapped_addr = 1;
}

message SendPunchPacketConeRequest {
common.SocketAddr listener_mapped_addr = 1;
common.SocketAddr dest_addr = 2;
uint32 transaction_id = 3;
// send this many packets in a batch
uint32 packet_count_per_batch = 4;
// send total this batch count, total packet count = packet_batch_size * packet_batch_count
uint32 packet_batch_count = 5;
// interval between each batch
uint32 packet_interval_ms = 6;
}

message SendPunchPacketHardSymRequest {
common.SocketAddr listener_mapped_addr = 1;

repeated common.Ipv4Addr public_ips = 2;
uint32 transaction_id = 3;
uint32 port_index = 4;
uint32 round = 5;
}

message SendPunchPacketHardSymResponse { uint32 next_port_index = 1; }

message SendPunchPacketEasySymRequest {
common.SocketAddr listener_mapped_addr = 1;
repeated common.Ipv4Addr public_ips = 2;
uint32 transaction_id = 3;

uint32 base_port_num = 4;
uint32 max_port_num = 5;
bool is_incremental = 6;
}

message SendPunchPacketBothEasySymRequest {
uint32 udp_socket_count = 1;
common.Ipv4Addr public_ip = 2;
uint32 transaction_id = 3;

uint32 dst_port_num = 4;
uint32 wait_time_ms = 5;
}

message SendPunchPacketBothEasySymResponse {
// is doing punch with other peer
bool is_busy = 1;
common.SocketAddr base_mapped_addr = 2;
}

service UdpHolePunchRpc {
rpc TryPunchHole(TryPunchHoleRequest) returns (TryPunchHoleResponse);
rpc TryPunchSymmetric(TryPunchSymmetricRequest)
returns (TryPunchSymmetricResponse);
rpc SelectPunchListener(SelectPunchListenerRequest)
returns (SelectPunchListenerResponse);

// send packet to one remote_addr, used by nat1-3 to nat1-3
rpc SendPunchPacketCone(SendPunchPacketConeRequest) returns (common.Void);

// send packet to multiple remote_addr (birthday attack), used by nat4 to nat1-3
rpc SendPunchPacketHardSym(SendPunchPacketHardSymRequest)
returns (SendPunchPacketHardSymResponse);
rpc SendPunchPacketEasySym(SendPunchPacketEasySymRequest)
returns (common.Void);

// nat4 to nat4 (both predictably)
rpc SendPunchPacketBothEasySym(SendPunchPacketBothEasySymRequest)
returns (SendPunchPacketBothEasySymResponse);
}

message DirectConnectedPeerInfo { int32 latency_ms = 1; }
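The `SendPunchPacketEasySym` message targets the "easy" symmetric NAT case, where the externally mapped port drifts roughly monotonically, so the puncher probes a window of ports around a base port in the direction given by `is_incremental` (compare `SymmetricEasyInc`/`SymmetricEasyDec` in the NAT type enum above). A hedged sketch of that port-prediction arithmetic; the field names are taken from the proto, but the function itself is hypothetical:

```rust
// Hypothetical port-prediction helper for an "easy" symmetric NAT: guess the
// next `window` ports after (or before) an observed base mapping, clamped to
// the allowed maximum.
fn predict_ports(base_port_num: u16, max_port_num: u16, is_incremental: bool, window: u16) -> Vec<u16> {
    (1..=window)
        .filter_map(|step| {
            if is_incremental {
                let p = base_port_num.checked_add(step)?;
                (p <= max_port_num).then_some(p)
            } else {
                base_port_num.checked_sub(step)
            }
        })
        .collect()
}

fn main() {
    // guess the next 5 ports after an observed mapping on port 40000
    assert_eq!(
        predict_ports(40000, 65535, true, 5),
        vec![40001, 40002, 40003, 40004, 40005]
    );
}
```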
@@ -61,8 +61,8 @@ impl Client {
pub fn new() -> Self {
let (ring_a, ring_b) = create_ring_tunnel_pair();
Self {
mpsc: Mutex::new(MpscTunnel::new(ring_a)),
transport: Mutex::new(MpscTunnel::new(ring_b)),
mpsc: Mutex::new(MpscTunnel::new(ring_a, None)),
transport: Mutex::new(MpscTunnel::new(ring_b, None)),
inflight_requests: Arc::new(DashMap::new()),
tasks: Arc::new(Mutex::new(JoinSet::new())),
}
@@ -56,8 +56,8 @@ impl Server {
Self {
registry,
mpsc: Mutex::new(Some(MpscTunnel::new(ring_a))),
transport: Mutex::new(MpscTunnel::new(ring_b)),
mpsc: Mutex::new(Some(MpscTunnel::new(ring_a, None))),
transport: Mutex::new(MpscTunnel::new(ring_b, None)),
tasks: Arc::new(Mutex::new(JoinSet::new())),
packet_mergers: Arc::new(DashMap::new()),
}

@@ -146,7 +146,7 @@ impl Server {
async fn handle_rpc_request(packet: RpcPacket, reg: Arc<ServiceRegistry>) -> Result<Bytes> {
let rpc_request = RpcRequest::decode(Bytes::from(packet.body))?;
let timeout_duration = std::time::Duration::from_millis(rpc_request.timeout_ms as u64);
let ctrl = RpcController {};
let ctrl = RpcController::default();
Ok(timeout(
timeout_duration,
reg.call_method(
@@ -13,6 +13,34 @@ pub trait Controller: Send + Sync + 'static {
}

#[derive(Debug)]
pub struct BaseController {}
pub struct BaseController {
pub timeout_ms: i32,
pub trace_id: i32,
}

impl Controller for BaseController {}
impl Controller for BaseController {
fn timeout_ms(&self) -> i32 {
self.timeout_ms
}

fn set_timeout_ms(&mut self, timeout_ms: i32) {
self.timeout_ms = timeout_ms;
}

fn set_trace_id(&mut self, trace_id: i32) {
self.trace_id = trace_id;
}

fn trace_id(&self) -> i32 {
self.trace_id
}
}

impl Default for BaseController {
fn default() -> Self {
Self {
timeout_ms: 5000,
trace_id: 0,
}
}
}
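With the `Default` impl above, every call site switches from `BaseController {}` to `BaseController::default()` and can still override the 5-second timeout per call. A standalone mirror of that pattern with simplified types (not EasyTier's full `Controller` trait):

```rust
// Simplified stand-ins for the controller change: a default 5s timeout plus a
// per-call override, matching the `ctrl.set_timeout_ms(3000)` call sites.
trait Controller {
    fn timeout_ms(&self) -> i32;
    fn set_timeout_ms(&mut self, timeout_ms: i32);
}

#[derive(Debug)]
struct BaseController {
    timeout_ms: i32,
    trace_id: i32,
}

impl Default for BaseController {
    fn default() -> Self {
        Self { timeout_ms: 5000, trace_id: 0 }
    }
}

impl Controller for BaseController {
    fn timeout_ms(&self) -> i32 { self.timeout_ms }
    fn set_timeout_ms(&mut self, timeout_ms: i32) { self.timeout_ms = timeout_ms; }
}

fn main() {
    let mut ctrl = BaseController::default();
    ctrl.set_timeout_ms(3000); // same pattern as the sync_route_info call site
    assert_eq!(ctrl.timeout_ms(), 3000);
    println!("{ctrl:?}");
}
```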
@@ -121,14 +121,14 @@ async fn rpc_basic_test() {
// small size req and resp

let ctrl = RpcController {};
let ctrl = RpcController::default();
let input = SayHelloRequest {
name: "world".to_string(),
};
let ret = out.say_hello(ctrl, input).await;
assert_eq!(ret.unwrap().greeting, "Hello world!");

let ctrl = RpcController {};
let ctrl = RpcController::default();
let input = SayGoodbyeRequest {
name: "world".to_string(),
};

@@ -136,7 +136,7 @@ async fn rpc_basic_test() {
assert_eq!(ret.unwrap().greeting, "Goodbye, world!");

// large size req and resp
let ctrl = RpcController {};
let ctrl = RpcController::default();
let name = random_string(20 * 1024 * 1024);
let input = SayGoodbyeRequest { name: name.clone() };
let ret = out.say_goodbye(ctrl, input).await;

@@ -160,7 +160,7 @@ async fn rpc_timeout_test() {
.client
.scoped_client::<GreetingClientFactory<RpcController>>(1, 1, "test".to_string());

let ctrl = RpcController {};
let ctrl = RpcController::default();
let input = SayHelloRequest {
name: "world".to_string(),
};

@@ -175,6 +175,83 @@ async fn rpc_timeout_test() {
assert_eq!(0, ctx.server.inflight_count());
}

#[tokio::test]
async fn rpc_tunnel_stuck_test() {
use crate::proto::rpc_types;
use crate::tunnel::ring::RING_TUNNEL_CAP;

let rpc_server = Server::new();
rpc_server.run();
let server = GreetingServer::new(GreetingService {
delay_ms: 0,
prefix: "Hello".to_string(),
});
rpc_server.registry().register(server, "test");

let client = Client::new();
client.run();

let rpc_tasks = Arc::new(Mutex::new(JoinSet::new()));
let (mut rx, tx) = (
rpc_server.get_transport_stream(),
client.get_transport_sink(),
);

rpc_tasks.lock().unwrap().spawn(async move {
while let Some(Ok(packet)) = rx.next().await {
if let Err(err) = tx.send(packet).await {
println!("{:?}", err);
break;
}
}
});

// mock server is stuck (no task to do forwards)

let mut tasks = JoinSet::new();
for _ in 0..RING_TUNNEL_CAP + 15 {
let out =
client.scoped_client::<GreetingClientFactory<RpcController>>(1, 1, "test".to_string());
tasks.spawn(async move {
let mut ctrl = RpcController::default();
ctrl.timeout_ms = 1000;

let input = SayHelloRequest {
name: "world".to_string(),
};

out.say_hello(ctrl, input).await
});
}
while let Some(ret) = tasks.join_next().await {
assert!(matches!(ret, Ok(Err(rpc_types::error::Error::Timeout(_)))));
}

// start server consumer, new requests should be processed
let (mut rx, tx) = (
client.get_transport_stream(),
rpc_server.get_transport_sink(),
);
rpc_tasks.lock().unwrap().spawn(async move {
while let Some(Ok(packet)) = rx.next().await {
if let Err(err) = tx.send(packet).await {
println!("{:?}", err);
break;
}
}
});

let out =
client.scoped_client::<GreetingClientFactory<RpcController>>(1, 1, "test".to_string());
let mut ctrl = RpcController::default();
ctrl.timeout_ms = 1000;
let input = SayHelloRequest {
name: "fuck world".to_string(),
};
let ret = out.say_hello(ctrl, input).await.unwrap();
assert_eq!(ret.greeting, "Hello fuck world!");
}

#[tokio::test]
async fn standalone_rpc_test() {
use crate::proto::rpc_impl::standalone::{StandAloneClient, StandAloneServer};

@@ -199,7 +276,7 @@ async fn standalone_rpc_test() {
.await
.unwrap();

let ctrl = RpcController {};
let ctrl = RpcController::default();
let input = SayHelloRequest {
name: "world".to_string(),
};

@@ -211,7 +288,7 @@ async fn standalone_rpc_test() {
.await
.unwrap();

let ctrl = RpcController {};
let ctrl = RpcController::default();
let input = SayGoodbyeRequest {
name: "world".to_string(),
};
@@ -130,7 +130,7 @@ pub fn enable_log() {
fn check_route(ipv4: &str, dst_peer_id: PeerId, routes: Vec<crate::proto::cli::Route>) {
let mut found = false;
for r in routes.iter() {
if r.ipv4_addr == ipv4.to_string() {
if r.ipv4_addr == Some(ipv4.parse().unwrap()) {
found = true;
assert_eq!(r.peer_id, dst_peer_id, "{:?}", routes);
}

@@ -154,7 +154,7 @@ async fn wait_proxy_route_appear(
let r = r;
if r.proxy_cidrs.contains(&proxy_cidr.to_owned()) {
assert_eq!(r.peer_id, dst_peer_id);
assert_eq!(r.ipv4_addr, ipv4);
assert_eq!(r.ipv4_addr, Some(ipv4.parse().unwrap()));
return;
}
}
@@ -184,13 +184,13 @@ pub async fn basic_three_node_test(#[values("tcp", "udp", "wg", "ws", "wss")] pr
let insts = init_three_node(proto).await;

check_route(
"10.144.144.2",
"10.144.144.2/24",
insts[1].peer_id(),
insts[0].get_peer_manager().list_routes().await,
);

check_route(
"10.144.144.3",
"10.144.144.3/24",
insts[2].peer_id(),
insts[0].get_peer_manager().list_routes().await,
);

@@ -357,7 +357,7 @@ pub async fn subnet_proxy_three_node_test(
wait_proxy_route_appear(
&insts[0].get_peer_manager(),
"10.144.144.3",
"10.144.144.3/24",
insts[2].peer_id(),
"10.1.2.0/24",
)
@@ -94,7 +94,7 @@ pub trait Tunnel: Send {
#[auto_impl::auto_impl(Arc)]
pub trait TunnelConnCounter: 'static + Send + Sync + Debug {
fn get(&self) -> u32;
fn get(&self) -> Option<u32>;
}

#[derive(Debug, Clone, Copy, PartialEq)]

@@ -114,8 +114,8 @@ pub trait TunnelListener: Send {
#[derive(Debug)]
struct FakeTunnelConnCounter {}
impl TunnelConnCounter for FakeTunnelConnCounter {
fn get(&self) -> u32 {
0
fn get(&self) -> Option<u32> {
None
}
}
Arc::new(Box::new(FakeTunnelConnCounter {}))
@@ -41,13 +41,13 @@ pub struct MpscTunnel<T> {
}

impl<T: Tunnel> MpscTunnel<T> {
pub fn new(tunnel: T) -> Self {
pub fn new(tunnel: T, send_timeout: Option<Duration>) -> Self {
let (tx, mut rx) = channel(32);
let (stream, mut sink) = tunnel.split();

let task = tokio::spawn(async move {
loop {
if let Err(e) = Self::forward_one_round(&mut rx, &mut sink).await {
if let Err(e) = Self::forward_one_round(&mut rx, &mut sink, send_timeout).await {
tracing::error!(?e, "forward error");
break;
}

@@ -68,21 +68,59 @@ impl<T: Tunnel> MpscTunnel<T> {
async fn forward_one_round(
rx: &mut Receiver<ZCPacket>,
sink: &mut Pin<Box<dyn ZCPacketSink>>,
send_timeout_ms: Option<Duration>,
) -> Result<(), TunnelError> {
let item = rx.recv().await.with_context(|| "recv error")?;
sink.feed(item).await?;
if let Some(timeout_ms) = send_timeout_ms {
Self::forward_one_round_with_timeout(rx, sink, item, timeout_ms).await
} else {
Self::forward_one_round_no_timeout(rx, sink, item).await
}
}

async fn forward_one_round_no_timeout(
rx: &mut Receiver<ZCPacket>,
sink: &mut Pin<Box<dyn ZCPacketSink>>,
initial_item: ZCPacket,
) -> Result<(), TunnelError> {
sink.feed(initial_item).await?;

while let Ok(item) = rx.try_recv() {
if let Err(e) = timeout(Duration::from_secs(5), sink.feed(item))
.await
.unwrap()
{
tracing::error!(?e, "feed error");
break;
match sink.feed(item).await {
Err(e) => {
tracing::error!(?e, "feed error");
return Err(e);
}
Ok(_) => {}
}
}

sink.flush().await
}

async fn forward_one_round_with_timeout(
rx: &mut Receiver<ZCPacket>,
sink: &mut Pin<Box<dyn ZCPacketSink>>,
initial_item: ZCPacket,
timeout_ms: Duration,
) -> Result<(), TunnelError> {
match timeout(timeout_ms, async move {
Self::forward_one_round_no_timeout(rx, sink, initial_item).await
})
.await
{
Ok(Ok(_)) => Ok(()),
Ok(Err(e)) => {
tracing::error!(?e, "forward error");
Err(e)
}
Err(e) => {
tracing::error!(?e, "forward timeout");
Err(e.into())
}
}
}

pub fn get_stream(&mut self) -> Pin<Box<dyn ZCPacketStream>> {
self.stream.take().unwrap()
}

@@ -97,17 +135,12 @@ impl<T: Tunnel> MpscTunnel<T> {
}
}

impl<T: Tunnel> From<T> for MpscTunnel<T> {
fn from(tunnel: T) -> Self {
Self::new(tunnel)
}
}

#[cfg(test)]
mod tests {
use futures::StreamExt;

use crate::tunnel::{
ring::{create_ring_tunnel_pair, RING_TUNNEL_CAP},
tcp::{TcpTunnelConnector, TcpTunnelListener},
TunnelConnector, TunnelListener,
};

@@ -147,7 +180,7 @@ mod tests {
});

let tunnel = connector.connect().await.unwrap();
let mpsc_tunnel = MpscTunnel::from(tunnel);
let mpsc_tunnel = MpscTunnel::new(tunnel, None);

let sink1 = mpsc_tunnel.get_sink();
let t2 = tokio::spawn(async move {

@@ -198,4 +231,24 @@ mod tests {
let _ = tokio::join!(t1, t2, t3, t4);
}

#[tokio::test]
async fn mpsc_slow_receiver_with_send_timeout() {
let (a, _b) = create_ring_tunnel_pair();
let mpsc_tunnel = MpscTunnel::new(a, Some(Duration::from_secs(1)));
let s = mpsc_tunnel.get_sink();
for _ in 0..RING_TUNNEL_CAP {
s.send(ZCPacket::new_with_payload(&[0; 1024]))
.await
.unwrap();
}
tokio::time::sleep(Duration::from_millis(1500)).await;
let e = s.send(ZCPacket::new_with_payload(&[0; 1024])).await;
assert!(e.is_ok());

tokio::time::sleep(Duration::from_millis(1500)).await;

let e = s.send(ZCPacket::new_with_payload(&[0; 1024])).await;
assert!(e.is_err());
}
}
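`MpscTunnel::new` now takes an optional send timeout, so a peer that stops draining the tunnel fails the forward loop instead of wedging it, which is what `mpsc_slow_receiver_with_send_timeout` exercises. A standalone sketch of the same wrap-the-round-in-`tokio::time::timeout` pattern, with plain mpsc channels standing in for EasyTier's `ZCPacket` sink types:

```rust
// Simplified forward loop: drain the inbound channel into a (possibly stuck)
// sink, optionally bounding the whole round with a timeout so a slow consumer
// turns into an error instead of blocking forever.
use std::time::Duration;

use tokio::sync::mpsc;
use tokio::time::timeout;

async fn forward_one_round(
    rx: &mut mpsc::Receiver<Vec<u8>>,
    sink: &mpsc::Sender<Vec<u8>>, // a stuck consumer makes sends block here
    send_timeout: Option<Duration>,
) -> Result<(), String> {
    let item = rx.recv().await.ok_or("recv error")?;
    let fut = async {
        sink.send(item).await.map_err(|e| e.to_string())?;
        while let Ok(more) = rx.try_recv() {
            sink.send(more).await.map_err(|e| e.to_string())?;
        }
        Ok(())
    };
    match send_timeout {
        Some(d) => timeout(d, fut).await.map_err(|_| "forward timeout".to_string())?,
        None => fut.await,
    }
}

#[tokio::main]
async fn main() {
    let (tx, mut rx) = mpsc::channel::<Vec<u8>>(8);
    let (sink_tx, _sink_rx) = mpsc::channel::<Vec<u8>>(1); // capacity 1, never drained
    tx.send(vec![0; 16]).await.unwrap();
    tx.send(vec![1; 16]).await.unwrap();
    let ret = forward_one_round(&mut rx, &sink_tx, Some(Duration::from_millis(200))).await;
    assert!(ret.is_err()); // the second send blocks until the timeout fires
    println!("forward result: {ret:?}");
}
```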
@@ -26,7 +26,7 @@ use super::{
StreamItem, Tunnel, TunnelConnector, TunnelError, TunnelInfo, TunnelListener,
};

static RING_TUNNEL_CAP: usize = 64;
pub static RING_TUNNEL_CAP: usize = 128;
static RING_TUNNEL_RESERVERD_CAP: usize = 4;

type RingLock = parking_lot::Mutex<()>;
@@ -43,6 +43,10 @@ impl TunnelListener for TcpTunnelListener {
setup_sokcet2(&socket2_socket, &addr)?;
let socket = TcpSocket::from_std_stream(socket2_socket.into());

if let Err(e) = socket.set_nodelay(true) {
tracing::warn!(?e, "set_nodelay fail in listen");
}

self.addr
.set_port(Some(socket.local_addr()?.port()))
.unwrap();

@@ -54,7 +58,11 @@ impl TunnelListener for TcpTunnelListener {
async fn accept(&mut self) -> Result<Box<dyn Tunnel>, super::TunnelError> {
let listener = self.listener.as_ref().unwrap();
let (stream, _) = listener.accept().await?;
stream.set_nodelay(true).unwrap();

if let Err(e) = stream.set_nodelay(true) {
tracing::warn!(?e, "set_nodelay fail in accept");
}

let info = TunnelInfo {
tunnel_type: "tcp".to_owned(),
local_addr: Some(self.local_url().into()),

@@ -80,7 +88,9 @@ fn get_tunnel_with_tcp_stream(
stream: TcpStream,
remote_url: url::Url,
) -> Result<Box<dyn Tunnel>, super::TunnelError> {
stream.set_nodelay(true).unwrap();
if let Err(e) = stream.set_nodelay(true) {
tracing::warn!(?e, "set_nodelay fail in get_tunnel_with_tcp_stream");
}

let info = TunnelInfo {
tunnel_type: "tcp".to_owned(),
@@ -1,4 +1,7 @@
use std::{fmt::Debug, sync::Arc};
use std::{
fmt::Debug,
sync::{Arc, Weak},
};

use async_trait::async_trait;
use bytes::BytesMut;

@@ -293,8 +296,8 @@ impl UdpTunnelListenerData {
return;
}

let ring_for_send_udp = Arc::new(RingTunnel::new(32));
let ring_for_recv_udp = Arc::new(RingTunnel::new(32));
let ring_for_send_udp = Arc::new(RingTunnel::new(128));
let ring_for_recv_udp = Arc::new(RingTunnel::new(128));
tracing::debug!(
?ring_for_send_udp,
?ring_for_recv_udp,

@@ -445,25 +448,25 @@ impl TunnelListener for UdpTunnelListener {
fn get_conn_counter(&self) -> Arc<Box<dyn TunnelConnCounter>> {
struct UdpTunnelConnCounter {
sock_map: Arc<DashMap<SocketAddr, UdpConnection>>,
sock_map: Weak<DashMap<SocketAddr, UdpConnection>>,
}

impl TunnelConnCounter for UdpTunnelConnCounter {
fn get(&self) -> Option<u32> {
self.sock_map.upgrade().map(|x| x.len() as u32)
}
}

impl Debug for UdpTunnelConnCounter {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("UdpTunnelConnCounter")
.field("sock_map_len", &self.sock_map.len())
.field("sock_map_len", &self.get())
.finish()
}
}

impl TunnelConnCounter for UdpTunnelConnCounter {
fn get(&self) -> u32 {
self.sock_map.len() as u32
}
}

Arc::new(Box::new(UdpTunnelConnCounter {
sock_map: self.data.sock_map.clone(),
sock_map: Arc::downgrade(&self.data.sock_map.clone()),
}))
}
}

@@ -556,8 +559,8 @@ impl UdpTunnelConnector {
dst_addr: SocketAddr,
conn_id: u32,
) -> Result<Box<dyn super::Tunnel>, super::TunnelError> {
let ring_for_send_udp = Arc::new(RingTunnel::new(32));
let ring_for_recv_udp = Arc::new(RingTunnel::new(32));
let ring_for_send_udp = Arc::new(RingTunnel::new(128));
let ring_for_recv_udp = Arc::new(RingTunnel::new(128));
tracing::debug!(
?ring_for_send_udp,
?ring_for_recv_udp,

@@ -942,14 +945,22 @@ mod tests {
listener.listen().await.unwrap();
let c1 = listener.accept().await.unwrap();
assert_eq!(conn_counter.get(), 1);
assert_eq!(conn_counter.get(), Some(1));
let c2 = listener.accept().await.unwrap();
assert_eq!(conn_counter.get(), 2);
assert_eq!(conn_counter.get(), Some(2));

drop(c2);
wait_for_condition(|| async { conn_counter.get() == 1 }, Duration::from_secs(1)).await;
wait_for_condition(
|| async { conn_counter.get() == Some(1) },
Duration::from_secs(1),
)
.await;

drop(c1);
wait_for_condition(|| async { conn_counter.get() == 0 }, Duration::from_secs(1)).await;
wait_for_condition(
|| async { conn_counter.get().unwrap_or(0) == 0 },
Duration::from_secs(1),
)
.await;
}
}
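Switching `UdpTunnelConnCounter` to a `Weak` reference lets the counter observe the listener's socket map without extending its lifetime, which is why `TunnelConnCounter::get` now returns `Option<u32>`. A standalone sketch of that pattern (a `Mutex<HashMap>` stands in for the `DashMap` of UDP connections):

```rust
// Weak-reference counter: reports the live connection count while the owner
// exists and None once the owner (and its map) has been dropped.
use std::collections::HashMap;
use std::sync::{Arc, Mutex, Weak};

struct ConnCounter {
    sock_map: Weak<Mutex<HashMap<u32, ()>>>,
}

impl ConnCounter {
    fn get(&self) -> Option<u32> {
        self.sock_map
            .upgrade()
            .map(|m| m.lock().unwrap().len() as u32)
    }
}

fn main() {
    let sock_map = Arc::new(Mutex::new(HashMap::new()));
    let counter = ConnCounter {
        sock_map: Arc::downgrade(&sock_map),
    };

    sock_map.lock().unwrap().insert(1, ());
    assert_eq!(counter.get(), Some(1));

    drop(sock_map); // the listener goes away; the counter does not keep it alive
    assert_eq!(counter.get(), None);
}
```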
@@ -81,7 +81,7 @@ impl WireGuardImpl {
wg_peer_ip_table: WgPeerIpTable,
) {
let info = t.info().unwrap_or_default();
let mut mpsc_tunnel = MpscTunnel::new(t);
let mut mpsc_tunnel = MpscTunnel::new(t, None);
let mut stream = mpsc_tunnel.get_stream();
let mut ip_registered = false;

@@ -284,13 +284,11 @@ impl VpnPortal for WireGuard {
.collect::<Vec<_>>();
for ipv4 in routes
.iter()
.map(|x| x.ipv4_addr.clone())
.chain(global_ctx.get_ipv4().iter().map(|x| x.to_string()))
.filter(|x| x.ipv4_addr.is_some())
.map(|x| x.ipv4_addr.unwrap())
.chain(global_ctx.get_ipv4().into_iter().map(Into::into))
{
let Ok(ipv4) = ipv4.parse() else {
continue;
};
let inet = Ipv4Inet::new(ipv4, 24).unwrap();
let inet = Ipv4Inet::from(ipv4);
allow_ips.push(inet.network().to_string());
break;
}
@@ -55,6 +55,12 @@ if ! command -v unzip >/dev/null 2>&1; then
exit 1
fi

# check if curl is installed
if ! command -v curl >/dev/null 2>&1; then
echo -e "\r\n${RED_COLOR}Error: curl is not installed${RES}\r\n"
exit 1
fi

echo -e "\r\n${RED_COLOR}----------------------NOTICE----------------------${RES}\r\n"
echo " This is a temporary script to install EasyTier "
echo " EasyTier requires a dedicated empty folder to install"