refactor: forward only one port

This commit is contained in:
fengcaiwen
2025-04-29 21:42:58 +08:00
parent fe08448249
commit 18ef72fc20
6 changed files with 33 additions and 48 deletions

View File

@@ -45,9 +45,9 @@ spec:
ip6tables -P FORWARD ACCEPT
iptables -t nat -A POSTROUTING -s ${CIDR4} -o eth0 -j MASQUERADE
ip6tables -t nat -A POSTROUTING -s ${CIDR6} -o eth0 -j MASQUERADE
kubevpn server -l "tcp://:10800" -l "tun://:8422?net=${TunIPv4}&net6=${TunIPv6}" -l "gtcp://:10801" -l "gudp://:10802" --debug=true
kubevpn server -l "tcp://:10800" -l "tun://:8422?net=${TunIPv4}&net6=${TunIPv6}" -l "gtcp://:10801" --debug=true
{{- else }}
- kubevpn server -l "tcp://:10800" -l "gtcp://:10801" -l "gudp://:10802" --debug=true
- kubevpn server -l "tcp://:10800" -l "gtcp://:10801" --debug=true
{{- end }}
command:
- /bin/sh

View File

@@ -45,24 +45,24 @@ const (
VolumeSyncthing = "syncthing"
// innerIPv4Pool is used as tun ip
// 198.19.0.0/16 network is part of the 198.18.0.0/15 (reserved for benchmarking).
// 198.19.0.0/16 network is part of the 198.18.0.0/15 (reserved for benchmarking).
// https://www.iana.org/assignments/iana-ipv4-special-registry/iana-ipv4-special-registry.xhtml
// so we split it into 2 parts: 198.18.0.0/15 --> [198.18.0.0/16, 198.19.0.0/16]
innerIPv4Pool = "198.19.0.100/16"
// 原因在docker环境中设置docker的 gateway 和 subnet不能 inner 的冲突,也不能和 docker的 172.17 冲突
// 不然的话,请求会不通的
// 解决的问题:在 k8s 中的 名叫 kubernetes 的 service ip 为
// ➜ ~ kubectl get service kubernetes
//NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
//kubernetes ClusterIP 172.17.0.1 <none> 443/TCP 190d
//
// ➜ ~ docker network inspect bridge | jq '.[0].IPAM.Config'
//[
// {
// "Subnet": "172.17.0.0/16",
// "Gateway": "172.17.0.1"
// }
//]
// 如果不创建 network那么是无法请求到 这个 kubernetes 的 service 的
/*
reason: docker uses the 172.17.0.0/16 network by default, which conflicts with the k8s service `kubernetes`
➜ ~ kubectl get service kubernetes
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 172.17.0.1 <none> 443/TCP 190d
➜ ~ docker network inspect bridge | jq '.[0].IPAM.Config'
[
{
"Subnet": "172.17.0.0/16",
"Gateway": "172.17.0.1"
}
]
*/
dockerInnerIPv4Pool = "198.18.0.100/16"
// 2001:2::/64 network is part of the 2001:2::/48 (reserved for benchmarking)
@@ -99,9 +99,6 @@ const (
// hosts entry key word
HostsKeyWord = "# Add by KubeVPN"
GHCR_IMAGE_REGISTRY = "ghcr.io"
DOCKER_IMAGE_REGISTRY = "docker.io"
)
var (

View File

@@ -38,9 +38,6 @@ type ClientDevice struct {
tunInbound chan *Packet
tunOutbound chan *Packet
errChan chan error
remote *net.UDPAddr
forward *Forwarder
}
func (d *ClientDevice) handlePacket(ctx context.Context, forward *Forwarder) {

View File

@@ -237,24 +237,15 @@ func (c *ConnectOptions) DoConnect(ctx context.Context, isLite bool, stopChan <-
plog.G(ctx).Errorf("Add extra node IP failed: %v", err)
return
}
var rawTCPForwardPort, gvisorTCPForwardPort, gvisorUDPForwardPort int
rawTCPForwardPort, err = util.GetAvailableTCPPortOrDie()
if err != nil {
return err
}
gvisorTCPForwardPort, err = util.GetAvailableTCPPortOrDie()
if err != nil {
return err
}
gvisorUDPForwardPort, err = util.GetAvailableTCPPortOrDie()
var tcpForwardPort int
tcpForwardPort, err = util.GetAvailableTCPPortOrDie()
if err != nil {
return err
}
plog.G(ctx).Info("Forwarding port...")
portPair := []string{
fmt.Sprintf("%d:10800", rawTCPForwardPort),
fmt.Sprintf("%d:10801", gvisorTCPForwardPort),
fmt.Sprintf("%d:10802", gvisorUDPForwardPort),
portPair := []string{fmt.Sprintf("%d:10800", tcpForwardPort)}
if c.Engine == config.EngineGvisor {
portPair = []string{fmt.Sprintf("%d:10801", tcpForwardPort)}
}
if err = c.portForward(c.ctx, portPair); err != nil {
return
@@ -262,10 +253,7 @@ func (c *ConnectOptions) DoConnect(ctx context.Context, isLite bool, stopChan <-
if util.IsWindows() {
driver.InstallWireGuardTunDriver()
}
forward := fmt.Sprintf("tcp://127.0.0.1:%d", rawTCPForwardPort)
if c.Engine == config.EngineGvisor {
forward = fmt.Sprintf("tcp://127.0.0.1:%d", gvisorTCPForwardPort)
}
forward := fmt.Sprintf("tcp://127.0.0.1:%d", tcpForwardPort)
if err = c.startLocalTunServer(c.ctx, forward, isLite); err != nil {
plog.G(ctx).Errorf("Start local tun service failed: %v", err)
return
@@ -319,7 +307,7 @@ func (c *ConnectOptions) portForward(ctx context.Context, portPair []string) err
podName := pod.GetName()
// try to detect pod is delete event, if pod is deleted, needs to redo port-forward
go util.CheckPodStatus(childCtx, cancelFunc, podName, c.clientset.CoreV1().Pods(c.Namespace))
go util.CheckPortStatus(childCtx, cancelFunc, readyChan, strings.Split(portPair[1], ":")[0])
go util.CheckPortStatus(childCtx, cancelFunc, readyChan, strings.Split(portPair[0], ":")[0])
go c.heartbeats(childCtx, util.GetPodIP(pod)...)
if *first {
go func() {
@@ -1230,6 +1218,9 @@ func (c *ConnectOptions) heartbeats(ctx context.Context, ips ...string) {
var dstIPv4, dstIPv6 net.IP
for _, podIP := range ips {
ip := net.ParseIP(podIP)
if ip == nil {
continue
}
if ip.To4() != nil {
dstIPv4 = ip
} else {

View File

@@ -363,7 +363,7 @@ func genDeploySpec(namespace string, udp8422 string, tcp10800 string, tcp9002 st
Args: []string{util.If(
gvisor,
`
kubevpn server -l "tcp://:10800" -l "gtcp://:10801" -l "gudp://:10802" --debug=true`,
kubevpn server -l "tcp://:10800" -l "gtcp://:10801" --debug=true`,
`
echo 1 > /proc/sys/net/ipv4/ip_forward
echo 0 > /proc/sys/net/ipv6/conf/all/disable_ipv6
@@ -375,7 +375,7 @@ iptables -P FORWARD ACCEPT
ip6tables -P FORWARD ACCEPT
iptables -t nat -A POSTROUTING -s ${CIDR4} -o eth0 -j MASQUERADE
ip6tables -t nat -A POSTROUTING -s ${CIDR6} -o eth0 -j MASQUERADE
kubevpn server -l "tcp://:10800" -l "tun://:8422?net=${TunIPv4}&net6=${TunIPv6}" -l "gtcp://:10801" -l "gudp://:10802" --debug=true`,
kubevpn server -l "tcp://:10800" -l "tun://:8422?net=${TunIPv4}&net6=${TunIPv6}" -l "gtcp://:10801" --debug=true`,
)},
EnvFrom: []v1.EnvFromSource{{
SecretRef: &v1.SecretEnvSource{

View File

@@ -382,7 +382,7 @@ func CheckPodStatus(ctx context.Context, cancelFunc context.CancelFunc, podName
}
}
func CheckPortStatus(ctx context.Context, cancelFunc context.CancelFunc, readyChan chan struct{}, localGvisorTCPPort string) {
func CheckPortStatus(ctx context.Context, cancelFunc context.CancelFunc, readyChan chan struct{}, localRandomTCPPort string) {
defer cancelFunc()
ticker := time.NewTicker(time.Second * 60)
defer ticker.Stop()
@@ -398,10 +398,10 @@ func CheckPortStatus(ctx context.Context, cancelFunc context.CancelFunc, readyCh
for ctx.Err() == nil {
var lc net.ListenConfig
conn, err := lc.Listen(ctx, "tcp", net.JoinHostPort("127.0.0.1", localGvisorTCPPort))
conn, err := lc.Listen(ctx, "tcp", net.JoinHostPort("127.0.0.1", localRandomTCPPort))
if err == nil {
_ = conn.Close()
plog.G(ctx).Debugf("Local port: %s is free", localGvisorTCPPort)
plog.G(ctx).Debugf("Local port: %s is free", localRandomTCPPort)
return
}
time.Sleep(time.Second * 1)