mirror of
https://github.com/kubenetworks/kubevpn.git
synced 2025-09-26 19:31:17 +08:00
refactor: remove options netstack (#673)
* refactor: remove options netstack * refactor: remove options netstack * refactor: forward chain use gvisor tcp * refactor: docs * refactor: remove forwarder options * refactor: optimize code * refactor: remove node type "tcp://" * hotfix: packet read from tun needs to handle by gvisor * hotfix: fix charts * refactor: remove parameter engine
This commit is contained in:
@@ -188,7 +188,7 @@ NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S)
|
||||
authors ClusterIP 172.21.5.160 <none> 9080/TCP 114d app=authors
|
||||
details ClusterIP 172.21.6.183 <none> 9080/TCP 114d app=details
|
||||
kubernetes ClusterIP 172.21.0.1 <none> 443/TCP 319d <none>
|
||||
kubevpn-traffic-manager ClusterIP 172.21.2.86 <none> 8422/UDP,10800/TCP,9002/TCP,80/TCP 2m28s app=kubevpn-traffic-manager
|
||||
kubevpn-traffic-manager ClusterIP 172.21.2.86 <none> 10801/TCP,9002/TCP,80/TCP 2m28s app=kubevpn-traffic-manager
|
||||
productpage ClusterIP 172.21.10.49 <none> 9080/TCP 114d app=productpage
|
||||
ratings ClusterIP 172.21.3.247 <none> 9080/TCP 114d app=ratings
|
||||
reviews ClusterIP 172.21.8.24 <none> 9080/TCP 114d app=reviews
|
||||
|
@@ -167,7 +167,7 @@ NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S)
|
||||
authors ClusterIP 172.21.5.160 <none> 9080/TCP 114d app=authors
|
||||
details ClusterIP 172.21.6.183 <none> 9080/TCP 114d app=details
|
||||
kubernetes ClusterIP 172.21.0.1 <none> 443/TCP 319d <none>
|
||||
kubevpn-traffic-manager ClusterIP 172.21.2.86 <none> 8422/UDP,10800/TCP,9002/TCP,80/TCP 2m28s app=kubevpn-traffic-manager
|
||||
kubevpn-traffic-manager ClusterIP 172.21.2.86 <none> 10800/TCP,9002/TCP,80/TCP 2m28s app=kubevpn-traffic-manager
|
||||
productpage ClusterIP 172.21.10.49 <none> 9080/TCP 114d app=productpage
|
||||
ratings ClusterIP 172.21.3.247 <none> 9080/TCP 114d app=ratings
|
||||
reviews ClusterIP 172.21.8.24 <none> 9080/TCP 114d app=reviews
|
||||
|
@@ -26,5 +26,11 @@ helm install kubevpn kubevpn/kubevpn --set image.repository=ccr.ccs.tencentyun.c
|
||||
## AWS Fargate cluster
|
||||
|
||||
```shell
|
||||
helm install kubevpn kubevpn/kubevpn --set netstack=gvisor -n kubevpn --create-namespace
|
||||
helm install kubevpn kubevpn/kubevpn -n kubevpn --create-namespace
|
||||
```
|
||||
|
||||
*Proxy/ServiceMesh mode only support k8s service*
|
||||
|
||||
```shell
|
||||
kubevpn proxy service/authors
|
||||
```
|
@@ -32,35 +32,12 @@ spec:
|
||||
securityContext:
|
||||
{{- toYaml .Values.podSecurityContext | nindent 8 }}
|
||||
containers:
|
||||
- args:
|
||||
{{- if eq .Values.netstack "system" }}
|
||||
- |
|
||||
echo 1 > /proc/sys/net/ipv4/ip_forward
|
||||
echo 0 > /proc/sys/net/ipv6/conf/all/disable_ipv6
|
||||
echo 1 > /proc/sys/net/ipv6/conf/all/forwarding
|
||||
update-alternatives --set iptables /usr/sbin/iptables-legacy
|
||||
iptables -P INPUT ACCEPT
|
||||
ip6tables -P INPUT ACCEPT
|
||||
iptables -P FORWARD ACCEPT
|
||||
ip6tables -P FORWARD ACCEPT
|
||||
iptables -t nat -A POSTROUTING -s ${CIDR4} -o eth0 -j MASQUERADE
|
||||
ip6tables -t nat -A POSTROUTING -s ${CIDR6} -o eth0 -j MASQUERADE
|
||||
kubevpn server -l "tcp://:10800" -l "tun://:8422?net=${TunIPv4}&net6=${TunIPv6}" -l "gtcp://:10801" -l "gudp://:10802"
|
||||
{{- else }}
|
||||
- kubevpn server -l "tcp://:10800" -l "gtcp://:10801" -l "gudp://:10802"
|
||||
{{- end }}
|
||||
command:
|
||||
- /bin/sh
|
||||
- -c
|
||||
env:
|
||||
- name: CIDR4
|
||||
value: 198.19.0.0/16
|
||||
- name: CIDR6
|
||||
value: 2001:2::/64
|
||||
- name: TunIPv4
|
||||
value: 198.19.0.100/16
|
||||
- name: TunIPv6
|
||||
value: 2001:2::9999/64
|
||||
- command:
|
||||
- kubevpn
|
||||
args:
|
||||
- server
|
||||
- -l gtcp://:10801
|
||||
- -l gudp://:10802
|
||||
envFrom:
|
||||
- secretRef:
|
||||
name: {{ include "kubevpn.fullname" . }}
|
||||
@@ -68,18 +45,11 @@ spec:
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
name: vpn
|
||||
ports:
|
||||
- containerPort: {{ .Values.service.port8422 }}
|
||||
name: 8422-for-udp
|
||||
protocol: UDP
|
||||
- containerPort: {{ .Values.service.port10800 }}
|
||||
name: 10800-for-tcp
|
||||
- containerPort: {{ .Values.service.port10801 }}
|
||||
name: 10801-for-tcp
|
||||
protocol: TCP
|
||||
resources:
|
||||
{{- toYaml .Values.resources | nindent 12 }}
|
||||
{{- if eq .Values.netstack "system" }}
|
||||
securityContext:
|
||||
{{- toYaml .Values.securityContext | nindent 12 }}
|
||||
{{- end }}
|
||||
- args:
|
||||
- control-plane
|
||||
command:
|
||||
|
@@ -8,14 +8,10 @@ metadata:
|
||||
spec:
|
||||
type: {{ .Values.service.type }}
|
||||
ports:
|
||||
- name: 8422-for-udp
|
||||
port: {{ .Values.service.port8422 }}
|
||||
protocol: UDP
|
||||
targetPort: 8422
|
||||
- name: 10800-for-tcp
|
||||
port: {{ .Values.service.port10800 }}
|
||||
- name: 10801-for-tcp
|
||||
port: {{ .Values.service.port10801 }}
|
||||
protocol: TCP
|
||||
targetPort: 10800
|
||||
targetPort: 10801
|
||||
- name: 9002-for-envoy
|
||||
port: {{ .Values.service.port9002 }}
|
||||
protocol: TCP
|
||||
|
@@ -4,10 +4,6 @@
|
||||
|
||||
# default namespace
|
||||
namespace: kubevpn
|
||||
# default is system mode, available ["system", "gvisor"]
|
||||
# system: needs privilege permission and cap NET_ADMIN (Best experience)
|
||||
# gvisor: no needs any additional permission (Best compatibility)
|
||||
netstack: system
|
||||
|
||||
replicaCount: 1
|
||||
|
||||
@@ -58,9 +54,8 @@ securityContext:
|
||||
|
||||
service:
|
||||
type: ClusterIP
|
||||
port8422: 8422
|
||||
port9002: 9002
|
||||
port10800: 10800
|
||||
port10801: 10801
|
||||
port80: 80
|
||||
port53: 53
|
||||
|
||||
|
@@ -105,7 +105,6 @@ func CmdClone(f cmdutil.Factory) *cobra.Command {
|
||||
Workloads: args,
|
||||
ExtraRoute: extraRoute.ToRPC(),
|
||||
OriginKubeconfigPath: util.GetKubeConfigPath(f),
|
||||
Engine: string(options.Engine),
|
||||
SshJump: sshConf.ToRPC(),
|
||||
TargetContainer: options.TargetContainer,
|
||||
TargetImage: options.TargetImage,
|
||||
@@ -140,7 +139,7 @@ func CmdClone(f cmdutil.Factory) *cobra.Command {
|
||||
},
|
||||
}
|
||||
cmd.Flags().StringToStringVarP(&options.Headers, "headers", "H", map[string]string{}, "Traffic with special headers (use `and` to match all headers) with reverse it to target cluster cloned workloads, If not special, redirect all traffic to target cluster cloned workloads. eg: --headers foo=bar --headers env=dev")
|
||||
handler.AddCommonFlags(cmd.Flags(), &transferImage, &imagePullSecretName, &options.Engine)
|
||||
handler.AddCommonFlags(cmd.Flags(), &transferImage, &imagePullSecretName)
|
||||
|
||||
cmdutil.AddContainerVarFlags(cmd, &options.TargetContainer, options.TargetContainer)
|
||||
cmd.Flags().StringVar(&options.TargetImage, "target-image", "", "Clone container use this image to startup container, if not special, use origin image")
|
||||
|
@@ -26,7 +26,6 @@ import (
|
||||
)
|
||||
|
||||
func CmdConnect(f cmdutil.Factory) *cobra.Command {
|
||||
var connect = &handler.ConnectOptions{}
|
||||
var extraRoute = &handler.ExtraRouteInfo{}
|
||||
var sshConf = &pkgssh.SshConfig{}
|
||||
var transferImage, foreground, lite bool
|
||||
@@ -91,7 +90,6 @@ func CmdConnect(f cmdutil.Factory) *cobra.Command {
|
||||
KubeconfigBytes: string(bytes),
|
||||
Namespace: ns,
|
||||
ExtraRoute: extraRoute.ToRPC(),
|
||||
Engine: string(connect.Engine),
|
||||
OriginKubeconfigPath: util.GetKubeConfigPath(f),
|
||||
|
||||
SshJump: sshConf.ToRPC(),
|
||||
@@ -142,7 +140,7 @@ func CmdConnect(f cmdutil.Factory) *cobra.Command {
|
||||
return nil
|
||||
},
|
||||
}
|
||||
handler.AddCommonFlags(cmd.Flags(), &transferImage, &imagePullSecretName, &connect.Engine)
|
||||
handler.AddCommonFlags(cmd.Flags(), &transferImage, &imagePullSecretName)
|
||||
cmd.Flags().BoolVar(&foreground, "foreground", false, "Hang up")
|
||||
cmd.Flags().BoolVar(&lite, "lite", false, "connect to multiple cluster in lite mode. mode \"lite\": design for only connecting to multiple cluster network. mode \"full\": not only connect to cluster network, it also supports proxy workloads inbound traffic to local PC.")
|
||||
cmd.Flags().StringVar(&managerNamespace, "manager-namespace", "", "The namespace where the traffic manager is to be found. Only works in cluster mode (install kubevpn server by helm)")
|
||||
|
@@ -138,7 +138,7 @@ func CmdDev(f cmdutil.Factory) *cobra.Command {
|
||||
cmdutil.AddContainerVarFlags(cmd, &options.ContainerName, options.ContainerName)
|
||||
cmdutil.CheckErr(cmd.RegisterFlagCompletionFunc("container", completion.ContainerCompletionFunc(f)))
|
||||
cmd.Flags().StringVar((*string)(&options.ConnectMode), "connect-mode", string(dev.ConnectModeHost), "Connect to kubernetes network in container or in host, eg: ["+string(dev.ConnectModeContainer)+"|"+string(dev.ConnectModeHost)+"]")
|
||||
handler.AddCommonFlags(cmd.Flags(), &transferImage, &imagePullSecretName, &options.Engine)
|
||||
handler.AddCommonFlags(cmd.Flags(), &transferImage, &imagePullSecretName)
|
||||
cmd.Flags().StringVar(&managerNamespace, "manager-namespace", "", "The namespace where the traffic manager is to be found. Only works in cluster mode (install kubevpn server by helm)")
|
||||
|
||||
// diy docker options
|
||||
|
@@ -26,7 +26,6 @@ import (
|
||||
func CmdProxy(f cmdutil.Factory) *cobra.Command {
|
||||
var headers = make(map[string]string)
|
||||
var portmap []string
|
||||
var connect = handler.ConnectOptions{}
|
||||
var extraRoute = &handler.ExtraRouteInfo{}
|
||||
var sshConf = &pkgssh.SshConfig{}
|
||||
var transferImage, foreground bool
|
||||
@@ -123,7 +122,6 @@ func CmdProxy(f cmdutil.Factory) *cobra.Command {
|
||||
PortMap: portmap,
|
||||
Workloads: args,
|
||||
ExtraRoute: extraRoute.ToRPC(),
|
||||
Engine: string(connect.Engine),
|
||||
SshJump: sshConf.ToRPC(),
|
||||
TransferImage: transferImage,
|
||||
Image: config.Image,
|
||||
@@ -163,7 +161,7 @@ func CmdProxy(f cmdutil.Factory) *cobra.Command {
|
||||
}
|
||||
cmd.Flags().StringToStringVarP(&headers, "headers", "H", map[string]string{}, "Traffic with special headers (use `and` to match all headers) with reverse it to local PC, If not special, redirect all traffic to local PC. format: <KEY>=<VALUE> eg: --headers foo=bar --headers env=dev")
|
||||
cmd.Flags().StringArrayVar(&portmap, "portmap", []string{}, "Port map, map container port to local port, format: [tcp/udp]/containerPort:localPort, If not special, localPort will use containerPort. eg: tcp/80:8080 or udp/5000:5001 or 80 or 80:8080")
|
||||
handler.AddCommonFlags(cmd.Flags(), &transferImage, &imagePullSecretName, &connect.Engine)
|
||||
handler.AddCommonFlags(cmd.Flags(), &transferImage, &imagePullSecretName)
|
||||
cmd.Flags().BoolVar(&foreground, "foreground", false, "foreground hang up")
|
||||
cmd.Flags().StringVar(&managerNamespace, "manager-namespace", "", "The namespace where the traffic manager is to be found. Only works in cluster mode (install kubevpn server by helm)")
|
||||
|
||||
|
@@ -32,7 +32,7 @@ func CmdServer(cmdutil.Factory) *cobra.Command {
|
||||
`)),
|
||||
Example: templates.Examples(i18n.T(`
|
||||
# server listener
|
||||
kubevpn server -l "tcp://:10800" -l "tun://127.0.0.1:8422?net=198.19.0.123/32"
|
||||
kubevpn server -l "gtcp://:10801" -l "tun://?net=198.19.0.123/32"
|
||||
`)),
|
||||
PreRun: func(*cobra.Command, []string) {
|
||||
runtime.GOMAXPROCS(0)
|
||||
@@ -54,7 +54,6 @@ func CmdServer(cmdutil.Factory) *cobra.Command {
|
||||
},
|
||||
}
|
||||
cmd.Flags().StringArrayVarP(&route.Listeners, "listener", "l", []string{}, "Startup listener server. eg: tcp://localhost:1080")
|
||||
cmd.Flags().StringVarP(&route.Forwarder, "forwarder", "f", "", "Special forwarder. eg: tcp://192.168.1.100:2345")
|
||||
cmd.Flags().BoolVar(&config.Debug, "debug", false, "Enable debug log or not")
|
||||
return cmd
|
||||
}
|
||||
|
File diff suppressed because one or more lines are too long
Before Width: | Height: | Size: 372 KiB After Width: | Height: | Size: 372 KiB |
@@ -41,11 +41,14 @@ const (
|
||||
|
||||
VolumeSyncthing = "syncthing"
|
||||
|
||||
// innerIPv4Pool is used as tun ip
|
||||
// IPv4Pool is used as tun ip
|
||||
// 198.19.0.0/16 network is part of the 198.18.0.0/15 (reserved for benchmarking).
|
||||
// https://www.iana.org/assignments/iana-ipv4-special-registry/iana-ipv4-special-registry.xhtml
|
||||
// so we split it into 2 parts: 198.18.0.0/15 --> [198.19.0.0/16, 198.19.0.0/16]
|
||||
innerIPv4Pool = "198.19.0.100/16"
|
||||
IPv4Pool = "198.19.0.0/16"
|
||||
// 2001:2::/64 network is part of the 2001:2::/48 (reserved for benchmarking)
|
||||
// https://www.iana.org/assignments/iana-ipv6-special-registry/iana-ipv6-special-registry.xhtml
|
||||
IPv6Pool = "2001:2::/64"
|
||||
/*
|
||||
reason:docker use 172.17.0.0/16 network conflict with k8s service kubernetes
|
||||
➜ ~ kubectl get service kubernetes
|
||||
@@ -60,11 +63,7 @@ const (
|
||||
}
|
||||
]
|
||||
*/
|
||||
dockerInnerIPv4Pool = "198.18.0.100/16"
|
||||
|
||||
// 2001:2::/64 network is part of the 2001:2::/48 (reserved for benchmarking)
|
||||
// https://www.iana.org/assignments/iana-ipv6-special-registry/iana-ipv6-special-registry.xhtml
|
||||
innerIPv6Pool = "2001:2::9999/64"
|
||||
DockerIPv4Pool = "198.18.0.1/16"
|
||||
|
||||
DefaultNetDir = "/etc/cni/net.d"
|
||||
|
||||
@@ -123,15 +122,15 @@ var (
|
||||
|
||||
func init() {
|
||||
var err error
|
||||
RouterIP, CIDR, err = net.ParseCIDR(innerIPv4Pool)
|
||||
RouterIP, CIDR, err = net.ParseCIDR(IPv4Pool)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
RouterIP6, CIDR6, err = net.ParseCIDR(innerIPv6Pool)
|
||||
RouterIP6, CIDR6, err = net.ParseCIDR(IPv6Pool)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
DockerRouterIP, DockerCIDR, err = net.ParseCIDR(dockerInnerIPv4Pool)
|
||||
DockerRouterIP, DockerCIDR, err = net.ParseCIDR(DockerIPv4Pool)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
@@ -216,11 +215,4 @@ var (
|
||||
}
|
||||
)
|
||||
|
||||
type Engine string
|
||||
|
||||
const (
|
||||
EngineGvisor Engine = "gvisor"
|
||||
EngineSystem Engine = "system"
|
||||
)
|
||||
|
||||
const Slogan = "Now you can access resources in the kubernetes cluster !"
|
||||
|
@@ -103,7 +103,7 @@ func (h *gvisorTCPHandler) readFromTCPConnWriteToEndpoint(ctx context.Context, c
|
||||
if err != nil {
|
||||
plog.G(ctx).Errorf("[TCP-GVISOR] Failed to write to %s <- %s : %s", c.(net.Conn).RemoteAddr(), c.(net.Conn).LocalAddr(), err)
|
||||
}
|
||||
} else {
|
||||
} else if buf[0] == 1 {
|
||||
pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{
|
||||
ReserveHeaderBytes: 0,
|
||||
Payload: buffer.MakeWithData(buf[1:read]),
|
||||
@@ -113,6 +113,16 @@ func (h *gvisorTCPHandler) readFromTCPConnWriteToEndpoint(ctx context.Context, c
|
||||
endpoint.InjectInbound(protocol, pkt)
|
||||
pkt.DecRef()
|
||||
plog.G(ctx).Debugf("[TCP-GVISOR] Write to Gvisor. SRC: %s, DST: %s, Protocol: %s, Length: %d", src, dst, layers.IPProtocol(ipProtocol).String(), read)
|
||||
} else {
|
||||
util.SafeWrite(TCPPacketChan, &Packet{
|
||||
data: buf[:],
|
||||
length: read,
|
||||
src: src,
|
||||
dst: dst,
|
||||
}, func(v *Packet) {
|
||||
config.LPool.Put(buf[:])
|
||||
plog.G(ctx).Debugf("[TCP-TUN] Drop packet. SRC: %s, DST: %s, Protocol: %s, Length: %d", src, dst, layers.IPProtocol(ipProtocol).String(), read)
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@@ -8,7 +8,6 @@ import (
|
||||
"sync"
|
||||
|
||||
"github.com/containernetworking/cni/pkg/types"
|
||||
"github.com/pkg/errors"
|
||||
|
||||
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/tun"
|
||||
@@ -22,17 +21,16 @@ var (
|
||||
)
|
||||
|
||||
// Route example:
|
||||
// -l "tcp://:10800" -l "tun://:8422?net=198.19.0.100/16"
|
||||
// -l "tun:/10.233.24.133:8422?net=198.19.0.102/16&route=198.19.0.0/16"
|
||||
// -l "tun:/127.0.0.1:8422?net=198.19.0.102/16&route=198.19.0.0/16,10.233.0.0/16" -f "tcp://127.0.0.1:10800"
|
||||
// -l "gtcp://:10801" -l "tun://?net=198.19.0.100/16"
|
||||
// -l "tun:/tcp://10.233.24.133:8422?net=198.19.0.102/16&route=198.19.0.0/16"
|
||||
// -l "tun:/tcp://127.0.0.1:10800?net=198.19.0.102/16&route=198.19.0.0/16,10.233.0.0/16"
|
||||
type Route struct {
|
||||
Listeners []string // -l tun
|
||||
Forwarder string // -f tcp
|
||||
Retries int
|
||||
}
|
||||
|
||||
func (r *Route) ParseForwarder() (*Forwarder, error) {
|
||||
forwarder, err := ParseNode(r.Forwarder)
|
||||
func ParseForwarder(remote string) (*Forwarder, error) {
|
||||
forwarder, err := ParseNode(remote)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -40,20 +38,13 @@ func (r *Route) ParseForwarder() (*Forwarder, error) {
|
||||
Connector: NewUDPOverTCPConnector(),
|
||||
Transporter: TCPTransporter(nil),
|
||||
}
|
||||
return NewForwarder(r.Retries, forwarder), nil
|
||||
return NewForwarder(5, forwarder), nil
|
||||
}
|
||||
|
||||
func (r *Route) GenerateServers() ([]Server, error) {
|
||||
forwarder, err := r.ParseForwarder()
|
||||
if err != nil && !errors.Is(err, ErrorInvalidNode) {
|
||||
plog.G(context.Background()).Errorf("Failed to parse forwarder: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
servers := make([]Server, 0, len(r.Listeners))
|
||||
for _, l := range r.Listeners {
|
||||
var node *Node
|
||||
node, err = ParseNode(l)
|
||||
node, err := ParseNode(l)
|
||||
if err != nil {
|
||||
plog.G(context.Background()).Errorf("Failed to parse node %s: %v", l, err)
|
||||
return nil, err
|
||||
@@ -64,7 +55,14 @@ func (r *Route) GenerateServers() ([]Server, error) {
|
||||
|
||||
switch node.Protocol {
|
||||
case "tun":
|
||||
handler = TunHandler(forwarder, node)
|
||||
var forwarder *Forwarder
|
||||
if node.Remote != "" {
|
||||
forwarder, err = ParseForwarder(node.Remote)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
handler = TunHandler(node, forwarder)
|
||||
listener, err = tun.Listener(tun.Config{
|
||||
Name: node.Get("name"),
|
||||
Addr: node.Get("net"),
|
||||
@@ -77,13 +75,6 @@ func (r *Route) GenerateServers() ([]Server, error) {
|
||||
plog.G(context.Background()).Errorf("Failed to create tun listener: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
case "tcp":
|
||||
handler = TCPHandler()
|
||||
listener, err = TCPListener(node.Addr)
|
||||
if err != nil {
|
||||
plog.G(context.Background()).Errorf("Failed to create tcp listener: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
case "gtcp":
|
||||
handler = GvisorTCPHandler()
|
||||
listener, err = GvisorTCPListener(node.Addr)
|
||||
|
@@ -42,27 +42,6 @@ func (tr *tcpTransporter) Dial(ctx context.Context, addr string) (net.Conn, erro
|
||||
return tls.Client(conn, tr.tlsConfig), nil
|
||||
}
|
||||
|
||||
func TCPListener(addr string) (net.Listener, error) {
|
||||
laddr, err := net.ResolveTCPAddr("tcp", addr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
listener, err := net.ListenTCP("tcp", laddr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
serverConfig, err := util.GetTlsServerConfig(nil)
|
||||
if err != nil {
|
||||
if errors.Is(err, util.ErrNoTLSConfig) {
|
||||
plog.G(context.Background()).Warn("tls config not found in config, use raw tcp mode")
|
||||
return &tcpKeepAliveListener{TCPListener: listener}, nil
|
||||
}
|
||||
plog.G(context.Background()).Errorf("failed to get tls server config: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
return tls.NewListener(&tcpKeepAliveListener{TCPListener: listener}, serverConfig), nil
|
||||
}
|
||||
|
||||
type tcpKeepAliveListener struct {
|
||||
*net.TCPListener
|
||||
}
|
||||
|
@@ -3,13 +3,8 @@ package core
|
||||
import (
|
||||
"context"
|
||||
"net"
|
||||
"sync"
|
||||
|
||||
"github.com/google/gopacket/layers"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
|
||||
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
|
||||
)
|
||||
|
||||
type UDPOverTCPConnector struct {
|
||||
@@ -38,89 +33,3 @@ func (c *UDPOverTCPConnector) ConnectContext(ctx context.Context, conn net.Conn)
|
||||
}
|
||||
return NewUDPConnOverTCP(ctx, conn)
|
||||
}
|
||||
|
||||
type UDPOverTCPHandler struct {
|
||||
// map[srcIP]net.Conn
|
||||
routeMapTCP *sync.Map
|
||||
packetChan chan *Packet
|
||||
}
|
||||
|
||||
func TCPHandler() Handler {
|
||||
return &UDPOverTCPHandler{
|
||||
routeMapTCP: RouteMapTCP,
|
||||
packetChan: TCPPacketChan,
|
||||
}
|
||||
}
|
||||
|
||||
func (h *UDPOverTCPHandler) Handle(ctx context.Context, tcpConn net.Conn) {
|
||||
tcpConn = NewBufferedTCP(tcpConn)
|
||||
defer tcpConn.Close()
|
||||
plog.G(ctx).Infof("[TCP] Handle connection %s -> %s", tcpConn.RemoteAddr(), tcpConn.LocalAddr())
|
||||
|
||||
defer h.removeFromRouteMapTCP(ctx, tcpConn)
|
||||
|
||||
for ctx.Err() == nil {
|
||||
buf := config.LPool.Get().([]byte)[:]
|
||||
datagram, err := readDatagramPacket(tcpConn, buf)
|
||||
if err != nil {
|
||||
plog.G(ctx).Errorf("[TCP] Failed to read from %s -> %s: %v", tcpConn.RemoteAddr(), tcpConn.LocalAddr(), err)
|
||||
config.LPool.Put(buf[:])
|
||||
return
|
||||
}
|
||||
|
||||
err = h.handlePacket(ctx, tcpConn, datagram)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (h *UDPOverTCPHandler) handlePacket(ctx context.Context, tcpConn net.Conn, datagram *DatagramPacket) error {
|
||||
src, dst, protocol, err := util.ParseIP(datagram.Data[1:datagram.DataLength])
|
||||
if err != nil {
|
||||
plog.G(ctx).Errorf("[TCP] Unknown packet")
|
||||
config.LPool.Put(datagram.Data[:])
|
||||
return err
|
||||
}
|
||||
|
||||
h.addToRouteMapTCP(ctx, src, tcpConn)
|
||||
|
||||
if conn, ok := h.routeMapTCP.Load(dst.String()); ok {
|
||||
plog.G(ctx).Debugf("[TCP] Find TCP route SRC: %s to DST: %s -> %s", src, dst, conn.(net.Conn).RemoteAddr())
|
||||
err = datagram.Write(conn.(net.Conn))
|
||||
config.LPool.Put(datagram.Data[:])
|
||||
if err != nil {
|
||||
plog.G(ctx).Errorf("[TCP] Failed to write to %s <- %s : %s", conn.(net.Conn).RemoteAddr(), conn.(net.Conn).LocalAddr(), err)
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
plog.G(ctx).Debugf("[TCP] Forward to TUN device, SRC: %s, DST: %s, Protocol: %s, Length: %d", src, dst, layers.IPProtocol(protocol).String(), datagram.DataLength)
|
||||
util.SafeWrite(h.packetChan, NewPacket(datagram.Data, int(datagram.DataLength), src, dst), func(v *Packet) {
|
||||
plog.G(context.Background()).Errorf("Stuck packet, SRC: %s, DST: %s, Protocol: %s, Length: %d", src, dst, layers.IPProtocol(protocol).String(), v.length)
|
||||
h.packetChan <- v
|
||||
})
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *UDPOverTCPHandler) addToRouteMapTCP(ctx context.Context, src net.IP, tcpConn net.Conn) {
|
||||
value, loaded := h.routeMapTCP.LoadOrStore(src.String(), tcpConn)
|
||||
if loaded {
|
||||
if value.(net.Conn) != tcpConn {
|
||||
h.routeMapTCP.Store(src.String(), tcpConn)
|
||||
plog.G(ctx).Infof("[TCP] Replace route map TCP to DST %s by connation %s -> %s", src, tcpConn.RemoteAddr(), tcpConn.LocalAddr())
|
||||
}
|
||||
} else {
|
||||
plog.G(ctx).Infof("[TCP] Add new route map TCP to DST %s by connation %s -> %s", src, tcpConn.RemoteAddr(), tcpConn.LocalAddr())
|
||||
}
|
||||
}
|
||||
|
||||
func (h *UDPOverTCPHandler) removeFromRouteMapTCP(ctx context.Context, tcpConn net.Conn) {
|
||||
h.routeMapTCP.Range(func(key, value any) bool {
|
||||
if value.(net.Conn) == tcpConn {
|
||||
h.routeMapTCP.Delete(key)
|
||||
plog.G(ctx).Infof("[TCP] Delete to DST: %s by conn %s -> %s from globle route map TCP", key, tcpConn.RemoteAddr(), tcpConn.LocalAddr())
|
||||
}
|
||||
return true
|
||||
})
|
||||
}
|
||||
|
@@ -24,18 +24,18 @@ type tunHandler struct {
|
||||
}
|
||||
|
||||
// TunHandler creates a handler for tun tunnel.
|
||||
func TunHandler(forward *Forwarder, node *Node) Handler {
|
||||
func TunHandler(node *Node, forward *Forwarder) Handler {
|
||||
return &tunHandler{
|
||||
forward: forward,
|
||||
node: node,
|
||||
forward: forward,
|
||||
routeMapTCP: RouteMapTCP,
|
||||
errChan: make(chan error, 1),
|
||||
}
|
||||
}
|
||||
|
||||
func (h *tunHandler) Handle(ctx context.Context, tun net.Conn) {
|
||||
if remote := h.node.Remote; remote != "" {
|
||||
h.HandleClient(ctx, tun)
|
||||
if !h.forward.IsEmpty() {
|
||||
h.HandleClient(ctx, tun, h.forward)
|
||||
} else {
|
||||
h.HandleServer(ctx, tun)
|
||||
}
|
||||
@@ -194,7 +194,7 @@ func (p *Peer) routeTun(ctx context.Context) {
|
||||
if conn, ok := p.routeMapTCP.Load(packet.dst.String()); ok {
|
||||
plog.G(ctx).Debugf("[TUN] Find TCP route to dst: %s -> %s", packet.dst.String(), conn.(net.Conn).RemoteAddr())
|
||||
copy(packet.data[1:packet.length+1], packet.data[:packet.length])
|
||||
packet.data[0] = 0
|
||||
packet.data[0] = 1
|
||||
dgram := newDatagramPacket(packet.data, packet.length+1)
|
||||
err := dgram.Write(conn.(net.Conn))
|
||||
config.LPool.Put(packet.data[:])
|
||||
|
@@ -14,7 +14,7 @@ import (
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
|
||||
)
|
||||
|
||||
func (h *tunHandler) HandleClient(ctx context.Context, tun net.Conn) {
|
||||
func (h *tunHandler) HandleClient(ctx context.Context, tun net.Conn, forwarder *Forwarder) {
|
||||
device := &ClientDevice{
|
||||
tun: tun,
|
||||
tunInbound: make(chan *Packet, MaxSize),
|
||||
@@ -23,7 +23,7 @@ func (h *tunHandler) HandleClient(ctx context.Context, tun net.Conn) {
|
||||
}
|
||||
|
||||
defer device.Close()
|
||||
go device.handlePacket(ctx, h.forward)
|
||||
go device.handlePacket(ctx, forwarder)
|
||||
go device.readFromTun(ctx)
|
||||
go device.writeToTun(ctx)
|
||||
go device.heartbeats(ctx)
|
||||
|
@@ -7,7 +7,6 @@ import (
|
||||
|
||||
"google.golang.org/grpc"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/handler"
|
||||
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
|
||||
@@ -27,7 +26,6 @@ func (svr *Server) Clone(resp rpc.Daemon_CloneServer) (err error) {
|
||||
KubeconfigBytes: req.KubeconfigBytes,
|
||||
Namespace: req.Namespace,
|
||||
ExtraRoute: req.ExtraRoute,
|
||||
Engine: req.Engine,
|
||||
SshJump: req.SshJump,
|
||||
TransferImage: req.TransferImage,
|
||||
Image: req.Image,
|
||||
@@ -82,7 +80,6 @@ func (svr *Server) Clone(resp rpc.Daemon_CloneServer) (err error) {
|
||||
Headers: req.Headers,
|
||||
Workloads: req.Workloads,
|
||||
ExtraRouteInfo: *handler.ParseExtraRouteFromRPC(req.ExtraRoute),
|
||||
Engine: config.Engine(req.Engine),
|
||||
OriginKubeconfigPath: req.OriginKubeconfigPath,
|
||||
|
||||
TargetContainer: req.TargetContainer,
|
||||
|
@@ -10,7 +10,6 @@ import (
|
||||
log "github.com/sirupsen/logrus"
|
||||
"google.golang.org/grpc"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/handler"
|
||||
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
|
||||
@@ -41,7 +40,6 @@ func (svr *Server) ConnectFork(resp rpc.Daemon_ConnectForkServer) (err error) {
|
||||
connect := &handler.ConnectOptions{
|
||||
Namespace: req.ManagerNamespace,
|
||||
ExtraRouteInfo: *handler.ParseExtraRouteFromRPC(req.ExtraRoute),
|
||||
Engine: config.Engine(req.Engine),
|
||||
OriginKubeconfigPath: req.OriginKubeconfigPath,
|
||||
OriginNamespace: req.Namespace,
|
||||
Lock: &svr.Lock,
|
||||
@@ -108,7 +106,6 @@ func (svr *Server) redirectConnectForkToSudoDaemon(req *rpc.ConnectRequest, resp
|
||||
Namespace: req.Namespace,
|
||||
OriginNamespace: req.Namespace,
|
||||
ExtraRouteInfo: *handler.ParseExtraRouteFromRPC(req.ExtraRoute),
|
||||
Engine: config.Engine(req.Engine),
|
||||
OriginKubeconfigPath: req.OriginKubeconfigPath,
|
||||
Request: proto.Clone(req).(*rpc.ConnectRequest),
|
||||
}
|
||||
|
@@ -12,7 +12,6 @@ import (
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/handler"
|
||||
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
|
||||
@@ -55,7 +54,6 @@ func (svr *Server) Connect(resp rpc.Daemon_ConnectServer) (err error) {
|
||||
svr.connect = &handler.ConnectOptions{
|
||||
Namespace: req.ManagerNamespace,
|
||||
ExtraRouteInfo: *handler.ParseExtraRouteFromRPC(req.ExtraRoute),
|
||||
Engine: config.Engine(req.Engine),
|
||||
OriginKubeconfigPath: req.OriginKubeconfigPath,
|
||||
OriginNamespace: req.Namespace,
|
||||
Lock: &svr.Lock,
|
||||
@@ -118,7 +116,6 @@ func (svr *Server) redirectToSudoDaemon(req *rpc.ConnectRequest, resp rpc.Daemon
|
||||
Namespace: req.Namespace,
|
||||
OriginNamespace: req.Namespace,
|
||||
ExtraRouteInfo: *handler.ParseExtraRouteFromRPC(req.ExtraRoute),
|
||||
Engine: config.Engine(req.Engine),
|
||||
OriginKubeconfigPath: req.OriginKubeconfigPath,
|
||||
Request: proto.Clone(req).(*rpc.ConnectRequest),
|
||||
}
|
||||
|
@@ -11,10 +11,8 @@ import (
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/cli-runtime/pkg/resource"
|
||||
"k8s.io/utils/ptr"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/handler"
|
||||
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
|
||||
@@ -54,7 +52,6 @@ func (svr *Server) Proxy(resp rpc.Daemon_ProxyServer) (err error) {
|
||||
}
|
||||
connect := &handler.ConnectOptions{
|
||||
ExtraRouteInfo: *handler.ParseExtraRouteFromRPC(req.ExtraRoute),
|
||||
Engine: config.Engine(req.Engine),
|
||||
OriginKubeconfigPath: req.OriginKubeconfigPath,
|
||||
Image: req.Image,
|
||||
ImagePullSecretName: req.ImagePullSecretName,
|
||||
@@ -65,19 +62,10 @@ func (svr *Server) Proxy(resp rpc.Daemon_ProxyServer) (err error) {
|
||||
return err
|
||||
}
|
||||
var workloads []string
|
||||
var objectList []*resource.Info
|
||||
workloads, objectList, err = util.NormalizedResource(connect.GetFactory(), req.Namespace, req.Workloads)
|
||||
workloads, _, err = util.NormalizedResource(connect.GetFactory(), req.Namespace, req.Workloads)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// netstack gvisor only support k8s service
|
||||
if config.Engine(req.Engine) == config.EngineGvisor {
|
||||
for _, info := range objectList {
|
||||
if !util.IsK8sService(info) {
|
||||
return errors.Errorf("netstack gvisor mode only support k8s services, but got %s", info.Mapping.Resource.Resource)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if err != nil && svr.connect != nil {
|
||||
@@ -194,7 +182,6 @@ func convert(req *rpc.ProxyRequest) *rpc.ConnectRequest {
|
||||
return &rpc.ConnectRequest{
|
||||
KubeconfigBytes: req.KubeconfigBytes,
|
||||
Namespace: req.Namespace,
|
||||
Engine: req.Engine,
|
||||
ExtraRoute: req.ExtraRoute,
|
||||
SshJump: req.SshJump,
|
||||
TransferImage: req.TransferImage,
|
||||
|
@@ -46,8 +46,8 @@ func (svr *Server) SshStart(ctx context.Context, req *rpc.SshStartRequest) (resp
|
||||
|
||||
r := core.Route{
|
||||
Listeners: []string{
|
||||
"tun://127.0.0.1:8422?net=" + DefaultServerIP,
|
||||
"tcp://:10800",
|
||||
"tun://?net=" + DefaultServerIP,
|
||||
"gtcp://:10801",
|
||||
},
|
||||
Retries: 5,
|
||||
}
|
||||
|
@@ -93,7 +93,7 @@ func (w *wsHandler) createTwoWayTUNTunnel(ctx context.Context, cli *ssh.Client)
|
||||
return err
|
||||
}
|
||||
|
||||
remotePort := 10800
|
||||
remotePort := 10801
|
||||
var localPort int
|
||||
localPort, err = util.GetAvailableTCPPortOrDie()
|
||||
if err != nil {
|
||||
@@ -131,10 +131,9 @@ func (w *wsHandler) createTwoWayTUNTunnel(ctx context.Context, cli *ssh.Client)
|
||||
w.cidr = append(w.cidr, string(serverIP))
|
||||
r := core.Route{
|
||||
Listeners: []string{
|
||||
fmt.Sprintf("tun:/127.0.0.1:8422?net=%s&route=%s", clientIP, strings.Join(w.cidr, ",")),
|
||||
fmt.Sprintf("tun:/%s?net=%s&route=%s", fmt.Sprintf("tcp://127.0.0.1:%d", localPort), clientIP, strings.Join(w.cidr, ",")),
|
||||
},
|
||||
Forwarder: fmt.Sprintf("tcp://127.0.0.1:%d", localPort),
|
||||
Retries: 5,
|
||||
Retries: 5,
|
||||
}
|
||||
servers, err := handler.Parse(r)
|
||||
if err != nil {
|
||||
|
@@ -25,7 +25,6 @@ type ConnectRequest struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
KubeconfigBytes string `protobuf:"bytes,1,opt,name=KubeconfigBytes,proto3" json:"KubeconfigBytes,omitempty"`
|
||||
Namespace string `protobuf:"bytes,2,opt,name=Namespace,proto3" json:"Namespace,omitempty"`
|
||||
Engine string `protobuf:"bytes,3,opt,name=Engine,proto3" json:"Engine,omitempty"`
|
||||
// extra route table info
|
||||
ExtraRoute *ExtraRoute `protobuf:"bytes,4,opt,name=ExtraRoute,proto3" json:"ExtraRoute,omitempty"`
|
||||
// ssh jump
|
||||
@@ -98,13 +97,6 @@ func (x *ConnectRequest) GetNamespace() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *ConnectRequest) GetEngine() string {
|
||||
if x != nil {
|
||||
return x.Engine
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *ConnectRequest) GetExtraRoute() *ExtraRoute {
|
||||
if x != nil {
|
||||
return x.ExtraRoute
|
||||
@@ -365,7 +357,6 @@ type ProxyRequest struct {
|
||||
Headers map[string]string `protobuf:"bytes,3,rep,name=Headers,proto3" json:"Headers,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
|
||||
PortMap []string `protobuf:"bytes,4,rep,name=PortMap,proto3" json:"PortMap,omitempty"`
|
||||
Workloads []string `protobuf:"bytes,5,rep,name=Workloads,proto3" json:"Workloads,omitempty"`
|
||||
Engine string `protobuf:"bytes,7,opt,name=Engine,proto3" json:"Engine,omitempty"`
|
||||
// extra route table info
|
||||
ExtraRoute *ExtraRoute `protobuf:"bytes,8,opt,name=ExtraRoute,proto3" json:"ExtraRoute,omitempty"`
|
||||
// ssh jump
|
||||
@@ -455,13 +446,6 @@ func (x *ProxyRequest) GetWorkloads() []string {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *ProxyRequest) GetEngine() string {
|
||||
if x != nil {
|
||||
return x.Engine
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *ProxyRequest) GetExtraRoute() *ExtraRoute {
|
||||
if x != nil {
|
||||
return x.ExtraRoute
|
||||
@@ -671,7 +655,6 @@ type CloneRequest struct {
|
||||
Namespace string `protobuf:"bytes,2,opt,name=Namespace,proto3" json:"Namespace,omitempty"`
|
||||
Headers map[string]string `protobuf:"bytes,3,rep,name=Headers,proto3" json:"Headers,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
|
||||
Workloads []string `protobuf:"bytes,4,rep,name=Workloads,proto3" json:"Workloads,omitempty"`
|
||||
Engine string `protobuf:"bytes,6,opt,name=Engine,proto3" json:"Engine,omitempty"`
|
||||
// extra route table info
|
||||
ExtraRoute *ExtraRoute `protobuf:"bytes,7,opt,name=ExtraRoute,proto3" json:"ExtraRoute,omitempty"`
|
||||
// ssh jump
|
||||
@@ -750,13 +733,6 @@ func (x *CloneRequest) GetWorkloads() []string {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *CloneRequest) GetEngine() string {
|
||||
if x != nil {
|
||||
return x.Engine
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *CloneRequest) GetExtraRoute() *ExtraRoute {
|
||||
if x != nil {
|
||||
return x.ExtraRoute
|
||||
@@ -2909,11 +2885,10 @@ var File_daemon_proto protoreflect.FileDescriptor
|
||||
|
||||
const file_daemon_proto_rawDesc = "" +
|
||||
"\n" +
|
||||
"\fdaemon.proto\x12\x03rpc\"\xf5\x03\n" +
|
||||
"\fdaemon.proto\x12\x03rpc\"\xdd\x03\n" +
|
||||
"\x0eConnectRequest\x12(\n" +
|
||||
"\x0fKubeconfigBytes\x18\x01 \x01(\tR\x0fKubeconfigBytes\x12\x1c\n" +
|
||||
"\tNamespace\x18\x02 \x01(\tR\tNamespace\x12\x16\n" +
|
||||
"\x06Engine\x18\x03 \x01(\tR\x06Engine\x12/\n" +
|
||||
"\tNamespace\x18\x02 \x01(\tR\tNamespace\x12/\n" +
|
||||
"\n" +
|
||||
"ExtraRoute\x18\x04 \x01(\v2\x0f.rpc.ExtraRouteR\n" +
|
||||
"ExtraRoute\x12&\n" +
|
||||
@@ -2947,14 +2922,13 @@ const file_daemon_proto_rawDesc = "" +
|
||||
"\n" +
|
||||
"_Namespace\".\n" +
|
||||
"\x12DisconnectResponse\x12\x18\n" +
|
||||
"\aMessage\x18\x01 \x01(\tR\aMessage\"\xf9\x04\n" +
|
||||
"\aMessage\x18\x01 \x01(\tR\aMessage\"\xe1\x04\n" +
|
||||
"\fProxyRequest\x12(\n" +
|
||||
"\x0fKubeconfigBytes\x18\x01 \x01(\tR\x0fKubeconfigBytes\x12\x1c\n" +
|
||||
"\tNamespace\x18\x02 \x01(\tR\tNamespace\x128\n" +
|
||||
"\aHeaders\x18\x03 \x03(\v2\x1e.rpc.ProxyRequest.HeadersEntryR\aHeaders\x12\x18\n" +
|
||||
"\aPortMap\x18\x04 \x03(\tR\aPortMap\x12\x1c\n" +
|
||||
"\tWorkloads\x18\x05 \x03(\tR\tWorkloads\x12\x16\n" +
|
||||
"\x06Engine\x18\a \x01(\tR\x06Engine\x12/\n" +
|
||||
"\tWorkloads\x18\x05 \x03(\tR\tWorkloads\x12/\n" +
|
||||
"\n" +
|
||||
"ExtraRoute\x18\b \x01(\v2\x0f.rpc.ExtraRouteR\n" +
|
||||
"ExtraRoute\x12&\n" +
|
||||
@@ -2978,13 +2952,12 @@ const file_daemon_proto_rawDesc = "" +
|
||||
"\tNamespace\x18\x01 \x01(\tR\tNamespace\x12\x1c\n" +
|
||||
"\tWorkloads\x18\x02 \x03(\tR\tWorkloads\")\n" +
|
||||
"\rLeaveResponse\x12\x18\n" +
|
||||
"\aMessage\x18\x01 \x01(\tR\aMessage\"\x99\x05\n" +
|
||||
"\aMessage\x18\x01 \x01(\tR\aMessage\"\x81\x05\n" +
|
||||
"\fCloneRequest\x12(\n" +
|
||||
"\x0fKubeconfigBytes\x18\x01 \x01(\tR\x0fKubeconfigBytes\x12\x1c\n" +
|
||||
"\tNamespace\x18\x02 \x01(\tR\tNamespace\x128\n" +
|
||||
"\aHeaders\x18\x03 \x03(\v2\x1e.rpc.CloneRequest.HeadersEntryR\aHeaders\x12\x1c\n" +
|
||||
"\tWorkloads\x18\x04 \x03(\tR\tWorkloads\x12\x16\n" +
|
||||
"\x06Engine\x18\x06 \x01(\tR\x06Engine\x12/\n" +
|
||||
"\tWorkloads\x18\x04 \x03(\tR\tWorkloads\x12/\n" +
|
||||
"\n" +
|
||||
"ExtraRoute\x18\a \x01(\v2\x0f.rpc.ExtraRouteR\n" +
|
||||
"ExtraRoute\x12&\n" +
|
||||
|
@@ -34,7 +34,6 @@ service Daemon {
|
||||
message ConnectRequest {
|
||||
string KubeconfigBytes = 1;
|
||||
string Namespace = 2;
|
||||
string Engine = 3;
|
||||
|
||||
// extra route table info
|
||||
ExtraRoute ExtraRoute = 4;
|
||||
@@ -97,7 +96,6 @@ message ProxyRequest {
|
||||
map<string, string> Headers = 3;
|
||||
repeated string PortMap = 4;
|
||||
repeated string Workloads = 5;
|
||||
string Engine = 7;
|
||||
|
||||
// extra route table info
|
||||
ExtraRoute ExtraRoute = 8;
|
||||
@@ -145,7 +143,6 @@ message CloneRequest {
|
||||
string Namespace = 2;
|
||||
map<string, string> Headers = 3;
|
||||
repeated string Workloads = 4;
|
||||
string Engine = 6;
|
||||
|
||||
// extra route table info
|
||||
ExtraRoute ExtraRoute = 7;
|
||||
|
@@ -45,7 +45,6 @@ type Options struct {
|
||||
NoProxy bool
|
||||
ExtraRouteInfo handler.ExtraRouteInfo
|
||||
ConnectMode ConnectMode
|
||||
Engine config.Engine
|
||||
|
||||
// docker options
|
||||
DevImage string
|
||||
@@ -112,7 +111,6 @@ func (option *Options) Connect(ctx context.Context, sshConfig *pkgssh.SshConfig,
|
||||
Headers: option.Headers,
|
||||
Workloads: util.If(option.NoProxy, nil, []string{option.Workload}),
|
||||
ExtraRoute: option.ExtraRouteInfo.ToRPC(),
|
||||
Engine: string(option.Engine),
|
||||
OriginKubeconfigPath: util.GetKubeConfigPath(option.factory),
|
||||
Image: config.Image,
|
||||
ImagePullSecretName: imagePullSecretName,
|
||||
@@ -260,7 +258,6 @@ func (option *Options) CreateConnectContainer(ctx context.Context, portBindings
|
||||
"-n", option.Namespace,
|
||||
"--kubeconfig", "/root/.kube/config",
|
||||
"--image", config.Image,
|
||||
"--netstack", string(option.Engine),
|
||||
}
|
||||
} else {
|
||||
entrypoint = []string{
|
||||
@@ -271,7 +268,6 @@ func (option *Options) CreateConnectContainer(ctx context.Context, portBindings
|
||||
"-n", option.Namespace,
|
||||
"--kubeconfig", "/root/.kube/config",
|
||||
"--image", config.Image,
|
||||
"--netstack", string(option.Engine),
|
||||
"--manager-namespace", managerNamespace,
|
||||
}
|
||||
for k, v := range option.Headers {
|
||||
|
@@ -32,8 +32,8 @@ func NewDHCPManager(clientset *kubernetes.Clientset, namespace string) *Manager
|
||||
return &Manager{
|
||||
clientset: clientset,
|
||||
namespace: namespace,
|
||||
cidr: &net.IPNet{IP: config.RouterIP, Mask: config.CIDR.Mask},
|
||||
cidr6: &net.IPNet{IP: config.RouterIP6, Mask: config.CIDR6.Mask},
|
||||
cidr: config.CIDR,
|
||||
cidr6: config.CIDR6,
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -43,7 +43,6 @@ type CloneOptions struct {
|
||||
Headers map[string]string
|
||||
Workloads []string
|
||||
ExtraRouteInfo ExtraRouteInfo
|
||||
Engine config.Engine
|
||||
|
||||
TargetContainer string
|
||||
TargetImage string
|
||||
@@ -231,7 +230,7 @@ func (d *CloneOptions) DoClone(ctx context.Context, kubeconfigJsonBytes []byte,
|
||||
Value: "1",
|
||||
},
|
||||
}...)*/
|
||||
container := genVPNContainer(workload, d.Engine, d.Namespace, image, args)
|
||||
container := genVPNContainer(workload, d.Namespace, image, args)
|
||||
containerSync := genSyncthingContainer(d.RemoteDir, syncDataDirName, image)
|
||||
spec.Spec.Containers = append(containers, *container, *containerSync)
|
||||
//set spec
|
||||
@@ -315,7 +314,7 @@ func genSyncthingContainer(remoteDir string, syncDataDirName string, image strin
|
||||
return containerSync
|
||||
}
|
||||
|
||||
func genVPNContainer(workload string, engine config.Engine, namespace string, image string, args []string) *v1.Container {
|
||||
func genVPNContainer(workload string, namespace string, image string, args []string) *v1.Container {
|
||||
container := &v1.Container{
|
||||
Name: config.ContainerSidecarVPN,
|
||||
Image: image,
|
||||
@@ -327,7 +326,6 @@ func genVPNContainer(workload string, engine config.Engine, namespace string, im
|
||||
"--kubeconfig", "/tmp/.kube/" + config.KUBECONFIG,
|
||||
"--namespace", namespace,
|
||||
"--image", image,
|
||||
"--netstack", string(engine),
|
||||
"--foreground",
|
||||
}, args...),
|
||||
Env: []v1.EnvVar{},
|
||||
|
@@ -23,7 +23,6 @@ import (
|
||||
admissionv1 "k8s.io/api/admissionregistration/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
apinetworkingv1 "k8s.io/api/networking/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
@@ -48,7 +47,6 @@ import (
|
||||
"k8s.io/kubectl/pkg/scheme"
|
||||
"k8s.io/kubectl/pkg/util/podutils"
|
||||
"k8s.io/utils/pointer"
|
||||
"k8s.io/utils/ptr"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/core"
|
||||
@@ -65,7 +63,6 @@ import (
|
||||
type ConnectOptions struct {
|
||||
Namespace string
|
||||
ExtraRouteInfo ExtraRouteInfo
|
||||
Engine config.Engine
|
||||
Foreground bool
|
||||
OriginKubeconfigPath string
|
||||
OriginNamespace string
|
||||
@@ -195,7 +192,7 @@ func (c *ConnectOptions) CreateRemoteInboundPod(ctx context.Context, namespace s
|
||||
// todo consider to use ephemeral container
|
||||
// https://kubernetes.io/docs/concepts/workloads/pods/ephemeral-containers/
|
||||
// means mesh mode
|
||||
if c.Engine == config.EngineGvisor {
|
||||
if util.IsK8sService(object) {
|
||||
err = inject.InjectEnvoyAndSSH(ctx, nodeID, c.factory, c.Namespace, object, controller, headers, portMap, image)
|
||||
} else if len(headers) != 0 || len(portMap) != 0 {
|
||||
err = inject.InjectServiceMesh(ctx, nodeID, c.factory, c.Namespace, controller, configInfo, headers, portMap, tlsSecret, image)
|
||||
@@ -211,7 +208,7 @@ func (c *ConnectOptions) CreateRemoteInboundPod(ctx context.Context, namespace s
|
||||
portMap: portMap,
|
||||
workload: workload,
|
||||
namespace: namespace,
|
||||
portMapper: util.If(c.Engine == config.EngineGvisor, NewMapper(c.clientset, namespace, labels.SelectorFromSet(templateSpec.Labels).String(), headers, workload), nil),
|
||||
portMapper: util.If(util.IsK8sService(object), NewMapper(c.clientset, namespace, labels.SelectorFromSet(templateSpec.Labels).String(), headers, workload), nil),
|
||||
})
|
||||
}
|
||||
return
|
||||
@@ -229,7 +226,7 @@ func (c *ConnectOptions) DoConnect(ctx context.Context, isLite bool) (err error)
|
||||
plog.G(ctx).Errorf("Failed to get network CIDR: %v", err)
|
||||
return
|
||||
}
|
||||
if err = createOutboundPod(c.ctx, c.clientset, c.Namespace, c.Engine == config.EngineGvisor, c.Image, c.ImagePullSecretName); err != nil {
|
||||
if err = createOutboundPod(c.ctx, c.clientset, c.Namespace, c.Image, c.ImagePullSecretName); err != nil {
|
||||
return
|
||||
}
|
||||
if err = c.upgradeDeploy(c.ctx); err != nil {
|
||||
@@ -242,11 +239,7 @@ func (c *ConnectOptions) DoConnect(ctx context.Context, isLite bool) (err error)
|
||||
plog.G(ctx).Errorf("Add extra node IP failed: %v", err)
|
||||
return
|
||||
}
|
||||
var rawTCPForwardPort, gvisorTCPForwardPort, gvisorUDPForwardPort int
|
||||
rawTCPForwardPort, err = util.GetAvailableTCPPortOrDie()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var gvisorTCPForwardPort, gvisorUDPForwardPort int
|
||||
gvisorTCPForwardPort, err = util.GetAvailableTCPPortOrDie()
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -257,25 +250,17 @@ func (c *ConnectOptions) DoConnect(ctx context.Context, isLite bool) (err error)
|
||||
}
|
||||
plog.G(ctx).Info("Forwarding port...")
|
||||
portPair := []string{
|
||||
fmt.Sprintf("%d:10800", rawTCPForwardPort),
|
||||
fmt.Sprintf("%d:10801", gvisorTCPForwardPort),
|
||||
fmt.Sprintf("%d:10802", gvisorUDPForwardPort),
|
||||
}
|
||||
if c.Engine == config.EngineGvisor {
|
||||
portPair = []string{
|
||||
fmt.Sprintf("%d:10801", gvisorTCPForwardPort),
|
||||
fmt.Sprintf("%d:10802", gvisorUDPForwardPort),
|
||||
}
|
||||
}
|
||||
if err = c.portForward(c.ctx, portPair); err != nil {
|
||||
return
|
||||
}
|
||||
if util.IsWindows() {
|
||||
driver.InstallWireGuardTunDriver()
|
||||
}
|
||||
forward := fmt.Sprintf("tcp://127.0.0.1:%d", rawTCPForwardPort)
|
||||
if c.Engine == config.EngineGvisor {
|
||||
forward = fmt.Sprintf("tcp://127.0.0.1:%d", gvisorTCPForwardPort)
|
||||
}
|
||||
forward := fmt.Sprintf("tcp://127.0.0.1:%d", gvisorTCPForwardPort)
|
||||
|
||||
if err = c.startLocalTunServer(c.ctx, forward, isLite); err != nil {
|
||||
plog.G(ctx).Errorf("Start local tun service failed: %v", err)
|
||||
return
|
||||
@@ -427,13 +412,12 @@ func (c *ConnectOptions) startLocalTunServer(ctx context.Context, forwardAddress
|
||||
tunConfig.Addr6 = (&net.IPNet{IP: c.LocalTunIPv6.IP, Mask: net.CIDRMask(128, 128)}).String()
|
||||
}
|
||||
|
||||
localNode := fmt.Sprintf("tun:/127.0.0.1:8422")
|
||||
localNode := "tun://"
|
||||
node, err := core.ParseNode(localNode)
|
||||
if err != nil {
|
||||
plog.G(ctx).Errorf("Failed to parse local node %s: %v", localNode, err)
|
||||
return err
|
||||
}
|
||||
|
||||
forward, err := core.ParseNode(forwardAddress)
|
||||
if err != nil {
|
||||
plog.G(ctx).Errorf("Failed to parse forward node %s: %v", forwardAddress, err)
|
||||
@@ -445,7 +429,7 @@ func (c *ConnectOptions) startLocalTunServer(ctx context.Context, forwardAddress
|
||||
}
|
||||
forwarder := core.NewForwarder(5, forward)
|
||||
|
||||
handler := core.TunHandler(forwarder, node)
|
||||
handler := core.TunHandler(node, forwarder)
|
||||
listener, err := tun.Listener(tunConfig)
|
||||
if err != nil {
|
||||
plog.G(ctx).Errorf("Failed to create tun listener: %v", err)
|
||||
@@ -638,7 +622,7 @@ func (c *ConnectOptions) addRoute(ipStrList ...string) error {
|
||||
}
|
||||
|
||||
func (c *ConnectOptions) setupDNS(ctx context.Context, svcInformer cache.SharedIndexInformer) error {
|
||||
const portTCP = 10800
|
||||
const portTCP = 10801
|
||||
podList, err := c.GetRunningPodList(ctx)
|
||||
if err != nil {
|
||||
plog.G(ctx).Errorf("Get running pod list failed, err: %v", err)
|
||||
@@ -1020,22 +1004,14 @@ func (c *ConnectOptions) upgradeDeploy(ctx context.Context) error {
|
||||
|
||||
// 2) update deploy
|
||||
plog.G(ctx).Infof("Set image %s --> %s...", serverImg, clientImg)
|
||||
err = upgradeDeploySpec(ctx, c.factory, c.Namespace, deploy.Name, c.Engine == config.EngineGvisor, clientImg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// because use webhook(kubevpn-traffic-manager container webhook) to assign ip,
|
||||
// if create new pod use old webhook, ip will still change to old CIDR.
|
||||
// so after patched, check again if env is newer or not,
|
||||
// if env is still old, needs to re-patch using new webhook
|
||||
err = restartDeploy(ctx, c.factory, c.clientset, c.Namespace, deploy.Name)
|
||||
err = upgradeDeploySpec(ctx, c.factory, c.Namespace, deploy.Name, clientImg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func upgradeDeploySpec(ctx context.Context, f cmdutil.Factory, ns, name string, gvisor bool, image string) error {
|
||||
func upgradeDeploySpec(ctx context.Context, f cmdutil.Factory, ns, name, image string) error {
|
||||
r := f.NewBuilder().
|
||||
WithScheme(scheme.Scheme, scheme.Scheme.PrioritizedVersionsAllGroups()...).
|
||||
NamespaceParam(ns).DefaultNamespace().
|
||||
@@ -1069,8 +1045,7 @@ func upgradeDeploySpec(ctx context.Context, f cmdutil.Factory, ns, name string,
|
||||
}
|
||||
patches := set.CalculatePatches(infos, scheme.DefaultJSONEncoder(), func(obj pkgruntime.Object) ([]byte, error) {
|
||||
_, err = polymorphichelpers.UpdatePodSpecForObjectFn(obj, func(spec *v1.PodSpec) error {
|
||||
udp8422 := "8422-for-udp"
|
||||
tcp10800 := "10800-for-tcp"
|
||||
tcp10801 := "10801-for-tcp"
|
||||
tcp9002 := "9002-for-envoy"
|
||||
tcp80 := "80-for-webhook"
|
||||
udp53 := "53-for-dns"
|
||||
@@ -1081,7 +1056,7 @@ func upgradeDeploySpec(ctx context.Context, f cmdutil.Factory, ns, name string,
|
||||
break
|
||||
}
|
||||
}
|
||||
deploySpec := genDeploySpec(ns, udp8422, tcp10800, tcp9002, udp53, tcp80, gvisor, image, imagePullSecret)
|
||||
deploySpec := genDeploySpec(ns, tcp10801, tcp9002, udp53, tcp80, image, imagePullSecret)
|
||||
*spec = deploySpec.Spec.Template.Spec
|
||||
return nil
|
||||
})
|
||||
@@ -1155,68 +1130,6 @@ func upgradeSecretSpec(ctx context.Context, f cmdutil.Factory, ns string) error
|
||||
return nil
|
||||
}
|
||||
|
||||
func restartDeploy(ctx context.Context, f cmdutil.Factory, clientset *kubernetes.Clientset, ns, name string) error {
|
||||
label := fields.OneTermEqualSelector("app", config.ConfigMapPodTrafficManager).String()
|
||||
list, err := util.GetRunningPodList(ctx, clientset, ns, label)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pod := list[0]
|
||||
container, _ := util.FindContainerByName(&pod, config.ContainerSidecarVPN)
|
||||
if container == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
envs := map[string]string{
|
||||
"CIDR4": config.CIDR.String(),
|
||||
"CIDR6": config.CIDR6.String(),
|
||||
config.EnvInboundPodTunIPv4: (&net.IPNet{IP: config.RouterIP, Mask: config.CIDR.Mask}).String(),
|
||||
config.EnvInboundPodTunIPv6: (&net.IPNet{IP: config.RouterIP6, Mask: config.CIDR6.Mask}).String(),
|
||||
}
|
||||
|
||||
var mismatch bool
|
||||
for _, existing := range container.Env {
|
||||
if envs[existing.Name] != existing.Value {
|
||||
mismatch = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !mismatch {
|
||||
return nil
|
||||
}
|
||||
err = deletePodImmediately(ctx, clientset, ns, label)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = util.RolloutStatus(ctx, f, ns, fmt.Sprintf("%s/%s", "deployments", name), time.Minute*60)
|
||||
return err
|
||||
}
|
||||
|
||||
// delete old pod immediately
|
||||
func deletePodImmediately(ctx context.Context, clientset *kubernetes.Clientset, ns string, label string) error {
|
||||
result, err := clientset.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{
|
||||
LabelSelector: label,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// delete old pod then delete new pod
|
||||
sort.SliceStable(result.Items, func(i, j int) bool {
|
||||
return result.Items[i].DeletionTimestamp != nil
|
||||
})
|
||||
for _, item := range result.Items {
|
||||
options := metav1.DeleteOptions{GracePeriodSeconds: ptr.To[int64](0)}
|
||||
err = clientset.CoreV1().Pods(ns).Delete(ctx, item.Name, options)
|
||||
if apierrors.IsNotFound(err) {
|
||||
err = nil
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *ConnectOptions) GetTunDeviceName() (string, error) {
|
||||
var ips []net.IP
|
||||
if c.LocalTunIPv4 != nil {
|
||||
|
@@ -2,6 +2,7 @@ package handler
|
||||
|
||||
import (
|
||||
"net"
|
||||
"net/url"
|
||||
"reflect"
|
||||
"sort"
|
||||
"testing"
|
||||
@@ -11,6 +12,8 @@ import (
|
||||
"github.com/libp2p/go-netroute"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/core"
|
||||
)
|
||||
|
||||
func TestRoute(t *testing.T) {
|
||||
@@ -70,3 +73,67 @@ func TestSort(t *testing.T) {
|
||||
t.Fatal()
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseNode(t *testing.T) {
|
||||
type args struct {
|
||||
s string
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want *core.Node
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "",
|
||||
args: args{s: "tun:/tcp://remote-addr:9080?net=10.10.10.10/24&gw=10.10.10.1&mtu=1500&route=10.10.10.10/24&route=10.10.10.11/24"},
|
||||
want: &core.Node{
|
||||
Addr: "",
|
||||
Protocol: "tun",
|
||||
Remote: "tcp://remote-addr:9080",
|
||||
Values: url.Values{
|
||||
"net": []string{"10.10.10.10/24"},
|
||||
"gw": []string{"10.10.10.1"},
|
||||
"mtu": []string{"1500"},
|
||||
"route": []string{
|
||||
"10.10.10.10/24",
|
||||
"10.10.10.11/24",
|
||||
},
|
||||
},
|
||||
Client: nil,
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "",
|
||||
args: args{s: "tun:/tcp://remote-addr:9080?net=10.10.10.10/24&gw=10.10.10.1&mtu=1500&route=10.10.10.10/24,10.10.10.11/24"},
|
||||
want: &core.Node{
|
||||
Addr: "",
|
||||
Protocol: "tun",
|
||||
Remote: "tcp://remote-addr:9080",
|
||||
Values: url.Values{
|
||||
"net": []string{"10.10.10.10/24"},
|
||||
"gw": []string{"10.10.10.1"},
|
||||
"mtu": []string{"1500"},
|
||||
"route": []string{
|
||||
"10.10.10.10/24,10.10.10.11/24",
|
||||
},
|
||||
},
|
||||
Client: nil,
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got, err := core.ParseNode(tt.args.s)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("ParseNode() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if !reflect.DeepEqual(*got, *tt.want) {
|
||||
t.Errorf("ParseNode() got = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
@@ -1,16 +1,14 @@
|
||||
package handler
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/spf13/pflag"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
|
||||
)
|
||||
|
||||
func AddCommonFlags(cmd *pflag.FlagSet, transferImage *bool, imagePullSecretName *string, engine *config.Engine) {
|
||||
func AddCommonFlags(cmd *pflag.FlagSet, transferImage *bool, imagePullSecretName *string) {
|
||||
cmd.BoolVar(&config.Debug, "debug", false, "enable debug mode or not, true or false")
|
||||
cmd.StringVar(&config.Image, "image", config.Image, "use this image to startup container")
|
||||
cmd.StringVar(imagePullSecretName, "image-pull-secret-name", *imagePullSecretName, "secret name to pull image if registry is private")
|
||||
cmd.BoolVar(transferImage, "transfer-image", false, "transfer image to remote registry, it will transfer image "+config.OriginImage+" to flags `--image` special image, default: "+config.Image)
|
||||
cmd.StringVar((*string)(engine), "netstack", string(config.EngineSystem), fmt.Sprintf(`network stack ("%s"|"%s") %s: use gvisor (good compatibility), %s: use raw mode (best performance, relays on iptables SNAT)`, config.EngineGvisor, config.EngineSystem, config.EngineGvisor, config.EngineSystem))
|
||||
}
|
||||
|
@@ -103,7 +103,7 @@ func TestFunctions(t *testing.T) {
|
||||
|
||||
// 8) install centrally in ns test -- proxy mode with service mesh and gvisor
|
||||
t.Run("kubevpnQuit", kubevpnQuit)
|
||||
t.Run("kubevpnProxyWithServiceMeshAndGvisorModePortMap", kubevpnProxyWithServiceMeshAndGvisorModePortMap)
|
||||
t.Run("kubevpnProxyWithServiceMeshAndK8sServicePortMap", kubevpnProxyWithServiceMeshAndK8sServicePortMap)
|
||||
t.Run("checkServiceShouldNotInNsDefault", checkServiceShouldNotInNsDefault)
|
||||
t.Run("commonTest", commonTest)
|
||||
t.Run("serviceMeshReviewsServiceIPPortMap", serviceMeshReviewsServiceIPPortMap)
|
||||
@@ -472,7 +472,7 @@ func kubevpnProxyWithServiceMesh(t *testing.T) {
}

func kubevpnProxyWithServiceMeshAndGvisorMode(t *testing.T) {
    cmd := exec.Command("kubevpn", "proxy", "svc/reviews", "--headers", "env=test", "--netstack", "gvisor", "--debug")
    cmd := exec.Command("kubevpn", "proxy", "svc/reviews", "--headers", "env=test", "--debug")
    cmd.Stdout = os.Stdout
    cmd.Stderr = os.Stderr
    err := cmd.Run()
@@ -481,8 +481,8 @@ func kubevpnProxyWithServiceMeshAndGvisorMode(t *testing.T) {
    }
}

func kubevpnProxyWithServiceMeshAndGvisorModePortMap(t *testing.T) {
    cmd := exec.Command("kubevpn", "proxy", "svc/reviews", "--headers", "env=test", "--netstack", "gvisor", "--debug", "--portmap", "9080:8080")
func kubevpnProxyWithServiceMeshAndK8sServicePortMap(t *testing.T) {
    cmd := exec.Command("kubevpn", "proxy", "svc/reviews", "--headers", "env=test", "--debug", "--portmap", "9080:8080")
    cmd.Stdout = os.Stdout
    cmd.Stderr = os.Stderr
    err := cmd.Run()
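Both renamed tests pass `--portmap 9080:8080`, which maps the container's port 9080 onto local port 8080 while the proxy runs. A sketch of the kind of assertion that would follow; the endpoint path and the wrapping test function are illustrative, not from this commit:

```go
package main

import (
	"net/http"
	"testing"
)

// checkPortMap is a hypothetical follow-up assertion: with
// "--portmap 9080:8080" in effect, a request to local port 8080
// should reach the reviews container's port 9080.
func checkPortMap(t *testing.T) {
	resp, err := http.Get("http://127.0.0.1:8080/health")
	if err != nil {
		t.Fatal(err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		t.Fatalf("unexpected status code: %d", resp.StatusCode)
	}
}
```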
@@ -5,7 +5,6 @@ import (
    "context"
    "errors"
    "fmt"
    "net"
    "time"

    admissionv1 "k8s.io/api/admissionregistration/v1"
@@ -28,7 +27,7 @@ import (
    "github.com/wencaiwulue/kubevpn/v2/pkg/util"
)

func createOutboundPod(ctx context.Context, clientset *kubernetes.Clientset, namespace string, gvisor bool, image, imagePullSecretName string) (err error) {
func createOutboundPod(ctx context.Context, clientset *kubernetes.Clientset, namespace, image, imagePullSecretName string) (err error) {
    var exists bool
    exists, err = util.DetectPodExists(ctx, clientset, namespace)
    if err != nil {
@@ -102,12 +101,11 @@ func createOutboundPod(ctx context.Context, clientset *kubernetes.Clientset, nam

    // 5) create service
    plog.G(ctx).Infof("Creating Service %s", config.ConfigMapPodTrafficManager)
    udp8422 := "8422-for-udp"
    tcp10800 := "10800-for-tcp"
    tcp10801 := "10801-for-tcp"
    tcp9002 := "9002-for-envoy"
    tcp80 := "80-for-webhook"
    udp53 := "53-for-dns"
    svcSpec := genService(namespace, udp8422, tcp10800, tcp9002, tcp80, udp53)
    svcSpec := genService(namespace, tcp10801, tcp9002, tcp80, udp53)
    _, err = clientset.CoreV1().Services(namespace).Create(ctx, svcSpec, metav1.CreateOptions{})
    if err != nil {
        plog.G(ctx).Errorf("Creating Service error: %s", err.Error())
@@ -138,7 +136,7 @@ func createOutboundPod(ctx context.Context, clientset *kubernetes.Clientset, nam

    // 7) create deployment
    plog.G(ctx).Infof("Creating Deployment %s", config.ConfigMapPodTrafficManager)
    deploy := genDeploySpec(namespace, udp8422, tcp10800, tcp9002, udp53, tcp80, gvisor, image, imagePullSecretName)
    deploy := genDeploySpec(namespace, tcp10801, tcp9002, udp53, tcp80, image, imagePullSecretName)
    deploy, err = clientset.AppsV1().Deployments(namespace).Create(ctx, deploy, metav1.CreateOptions{})
    if err != nil {
        plog.G(ctx).Errorf("Failed to create deployment for %s: %v", config.ConfigMapPodTrafficManager, err)
@@ -241,7 +239,7 @@ func genMutatingWebhookConfiguration(namespace string, crt []byte) *admissionv1.
    }
}

func genService(namespace string, udp8422 string, tcp10800 string, tcp9002 string, tcp80 string, udp53 string) *v1.Service {
func genService(namespace string, tcp10801 string, tcp9002 string, tcp80 string, udp53 string) *v1.Service {
    return &v1.Service{
        ObjectMeta: metav1.ObjectMeta{
            Name: config.ConfigMapPodTrafficManager,
@@ -249,15 +247,10 @@ func genService(namespace string, udp8422 string, tcp10800 string, tcp9002 strin
        },
        Spec: v1.ServiceSpec{
            Ports: []v1.ServicePort{{
                Name:       udp8422,
                Protocol:   v1.ProtocolUDP,
                Port:       8422,
                TargetPort: intstr.FromInt32(8422),
            }, {
                Name:       tcp10800,
                Name:       tcp10801,
                Protocol:   v1.ProtocolTCP,
                Port:       10800,
                TargetPort: intstr.FromInt32(10800),
                Port:       10801,
                TargetPort: intstr.FromInt32(10801),
            }, {
                Name:     tcp9002,
                Protocol: v1.ProtocolTCP,
@@ -296,7 +289,7 @@ func genSecret(namespace string, crt []byte, key []byte, host []byte) *v1.Secret
    return secret
}

func genDeploySpec(namespace string, udp8422 string, tcp10800 string, tcp9002 string, udp53 string, tcp80 string, gvisor bool, image, imagePullSecretName string) *appsv1.Deployment {
func genDeploySpec(namespace, tcp10801, tcp9002, udp53, tcp80, image, imagePullSecretName string) *appsv1.Deployment {
    var resourcesSmall = v1.ResourceRequirements{
        Requests: map[v1.ResourceName]resource.Quantity{
            v1.ResourceCPU: resource.MustParse("100m"),
@@ -318,9 +311,6 @@ func genDeploySpec(namespace string, udp8422 string, tcp10800 string, tcp9002 st
        },
    }

    innerIpv4CIDR := net.IPNet{IP: config.RouterIP, Mask: config.CIDR.Mask}
    innerIpv6CIDR := net.IPNet{IP: config.RouterIP6, Mask: config.CIDR6.Mask}

    deploy := &appsv1.Deployment{
        ObjectMeta: metav1.ObjectMeta{
            Name: config.ConfigMapPodTrafficManager,
@@ -345,24 +335,12 @@ func genDeploySpec(namespace string, udp8422 string, tcp10800 string, tcp9002 st
                {
                    Name:    config.ContainerSidecarVPN,
                    Image:   image,
                    Command: []string{"/bin/sh", "-c"},
                    Args: []string{util.If(
                        gvisor,
                        `
kubevpn server -l "tcp://:10800" -l "gtcp://:10801" -l "gudp://:10802"`,
                        `
echo 1 > /proc/sys/net/ipv4/ip_forward
echo 0 > /proc/sys/net/ipv6/conf/all/disable_ipv6
echo 1 > /proc/sys/net/ipv6/conf/all/forwarding
update-alternatives --set iptables /usr/sbin/iptables-legacy
iptables -P INPUT ACCEPT
ip6tables -P INPUT ACCEPT
iptables -P FORWARD ACCEPT
ip6tables -P FORWARD ACCEPT
iptables -t nat -A POSTROUTING -s ${CIDR4} -o eth0 -j MASQUERADE
ip6tables -t nat -A POSTROUTING -s ${CIDR6} -o eth0 -j MASQUERADE
kubevpn server -l "tcp://:10800" -l "tun://:8422?net=${TunIPv4}&net6=${TunIPv6}" -l "gtcp://:10801" -l "gudp://:10802"`,
                    )},
                    Command: []string{"kubevpn"},
                    Args: []string{
                        "server",
                        "-l gtcp://:10801",
                        "-l gudp://:10802",
                    },
                    EnvFrom: []v1.EnvFromSource{{
                        SecretRef: &v1.SecretEnvSource{
                            LocalObjectReference: v1.LocalObjectReference{
@@ -370,48 +348,19 @@ kubevpn server -l "tcp://:10800" -l "tun://:8422?net=${TunIPv4}&net6=${TunIPv6}"
                        },
                    },
                    }},
                    Env: []v1.EnvVar{
                        {
                            Name:  "CIDR4",
                            Value: config.CIDR.String(),
                        },
                        {
                            Name:  "CIDR6",
                            Value: config.CIDR6.String(),
                        },
                        {
                            Name:  config.EnvInboundPodTunIPv4,
                            Value: innerIpv4CIDR.String(),
                        },
                        {
                            Name:  config.EnvInboundPodTunIPv6,
                            Value: innerIpv6CIDR.String(),
                        },
                    },
                    Env: []v1.EnvVar{},
                    Ports: []v1.ContainerPort{{
                        Name:          udp8422,
                        ContainerPort: 8422,
                        Protocol:      v1.ProtocolUDP,
                    }, {
                        Name:          tcp10800,
                        ContainerPort: 10800,
                        Name:          tcp10801,
                        ContainerPort: 10801,
                        Protocol:      v1.ProtocolTCP,
                    }},
                    Resources:       resourcesLarge,
                    ImagePullPolicy: v1.PullIfNotPresent,
                    SecurityContext: &v1.SecurityContext{
                        Capabilities: &v1.Capabilities{
                            Add: util.If(gvisor,
                                []v1.Capability{},
                                []v1.Capability{
                                    "NET_ADMIN",
                                    //"SYS_MODULE",
                                },
                            ),
                            Add: []v1.Capability{},
                        },
                        RunAsUser:  pointer.Int64(0),
                        RunAsGroup: pointer.Int64(0),
                        Privileged: util.If(gvisor, pointer.Bool(false), pointer.Bool(true)),
                        Privileged: pointer.Bool(false),
                    },
                },
                {
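Taken together, the hunks above shrink the traffic-manager sidecar to an unprivileged gvisor-only server: no shell wrapper, no iptables setup, no NET_ADMIN capability. A condensed sketch assembled from the new lines; the container name and port name are placeholders for the values resolved from `config`, not a verbatim excerpt:

```go
package main

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/utils/pointer"
)

// trafficManagerContainer condenses the new sidecar definition; "vpn"
// stands in for config.ContainerSidecarVPN.
func trafficManagerContainer(image string) v1.Container {
	return v1.Container{
		Name:    "vpn",
		Image:   image,
		Command: []string{"kubevpn"},
		Args:    []string{"server", "-l gtcp://:10801", "-l gudp://:10802"},
		Ports: []v1.ContainerPort{{
			Name:          "10801-for-tcp",
			ContainerPort: 10801,
			Protocol:      v1.ProtocolTCP,
		}},
		SecurityContext: &v1.SecurityContext{
			Capabilities: &v1.Capabilities{Add: []v1.Capability{}}, // no NET_ADMIN
			RunAsUser:    pointer.Int64(0),
			RunAsGroup:   pointer.Int64(0),
			Privileged:   pointer.Bool(false), // userspace stack needs no privileges
		},
	}
}
```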
@@ -59,7 +59,7 @@ iptables -t nat -A PREROUTING ! -p icmp ! -s 127.0.0.1 ! -d ${CIDR4} -j DNAT --t
ip6tables -t nat -A PREROUTING ! -p icmp ! -s 0:0:0:0:0:0:0:1 ! -d ${CIDR6} -j DNAT --to :15006
iptables -t nat -A POSTROUTING ! -p icmp ! -s 127.0.0.1 ! -d ${CIDR4} -j MASQUERADE
ip6tables -t nat -A POSTROUTING ! -p icmp ! -s 0:0:0:0:0:0:0:1 ! -d ${CIDR6} -j MASQUERADE
kubevpn server -l "tun:/localhost:8422?net=${TunIPv4}&net6=${TunIPv6}&route=${CIDR4}" -f "tcp://${TrafficManagerService}:10800"`,
kubevpn server -l "tun:/tcp://${TrafficManagerService}:10801?net=${TunIPv4}&net6=${TunIPv6}&route=${CIDR4}"`,
        },
        Env: []v1.EnvVar{
            {
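This hunk is the "forward chain use gvisor tcp" change in one line: the old pair of a tun listener on 8422 plus an explicit `-f tcp://…:10800` forward collapses into a single chained node whose remote is the gvisor TCP listener on 10801. A sketch of how that node string is assembled, with illustrative stand-ins for the pod's environment variables:

```go
package main

import (
	"fmt"
	"os/exec"
)

func main() {
	// Stand-ins for TrafficManagerService, TunIPv4, TunIPv6, and CIDR4.
	svc := "kubevpn-traffic-manager"
	tunIPv4, tunIPv6, cidr4 := "198.19.0.102/16", "2001:2::2/64", "198.19.0.0/16"

	// One chained node replaces the old -l tun://... plus -f tcp://...:10800 pair.
	node := fmt.Sprintf("tun:/tcp://%s:10801?net=%s&net6=%s&route=%s",
		svc, tunIPv4, tunIPv6, cidr4)
	cmd := exec.Command("kubevpn", "server", "-l", node)
	fmt.Println(cmd.String())
}
```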
@@ -178,9 +178,10 @@ func AddEnvoyContainer(spec *v1.PodTemplateSpec, ns, nodeID string, ipv6 bool, m
    spec.Spec.Containers = append(spec.Spec.Containers, v1.Container{
        Name:    config.ContainerSidecarVPN,
        Image:   image,
        Command: []string{"/bin/sh", "-c"},
        Args: []string{`
kubevpn server -l "ssh://:2222"`,
        Command: []string{"kubevpn"},
        Args: []string{
            "server",
            "-l ssh://:2222",
        },
        Resources: v1.ResourceRequirements{
            Requests: map[v1.ResourceName]resource.Quantity{
@@ -193,10 +194,7 @@ kubevpn server -l "ssh://:2222"`,
            },
        },
        ImagePullPolicy: v1.PullIfNotPresent,
        SecurityContext: &v1.SecurityContext{
            RunAsUser:  pointer.Int64(0),
            RunAsGroup: pointer.Int64(0),
        },
        SecurityContext: &v1.SecurityContext{},
    })
    spec.Spec.Containers = append(spec.Spec.Containers, v1.Container{
        Name: config.ContainerSidecarEnvoyProxy,
@@ -104,7 +104,7 @@ iptables -t nat -A PREROUTING ! -p icmp -j DNAT --to ${LocalTunIPv4}
ip6tables -t nat -A PREROUTING ! -p icmp -j DNAT --to ${LocalTunIPv6}
iptables -t nat -A POSTROUTING ! -p icmp -j MASQUERADE
ip6tables -t nat -A POSTROUTING ! -p icmp -j MASQUERADE
kubevpn server -l "tun:/127.0.0.1:8422?net=${TunIPv4}&net6=${TunIPv6}&route=${CIDR4}" -f "tcp://${TrafficManagerService}:10800"`,
kubevpn server -l "tun:/tcp://${TrafficManagerService}:10801?net=${TunIPv4}&net6=${TunIPv6}&route=${CIDR4}"`,
            },
            SecurityContext: &corev1.SecurityContext{
                Capabilities: &corev1.Capabilities{
@@ -93,9 +93,6 @@ func InjectEnvoyAndSSH(ctx context.Context, nodeID string, f cmdutil.Factory, ma
        return err
    }

    if !util.IsK8sService(current) {
        return nil
    }
    // 2) modify service containerPort to envoy listener port
    err = ModifyServiceTargetPort(ctx, clientset, object.Namespace, current.Name, containerPort2EnvoyListenerPort)
    if err != nil {
@@ -11,7 +11,6 @@ import (
    corev1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/meta"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/kubectl/pkg/cmd/util/podcmd"
    "k8s.io/utils/ptr"

    "github.com/wencaiwulue/kubevpn/v2/pkg/config"
@@ -70,15 +69,10 @@ func (h *admissionReviewHandler) handleCreate(ar v1.AdmissionReview) *v1.Admissi
    if container == nil {
        return &v1.AdmissionResponse{UID: ar.Request.UID, Allowed: true}
    }
    value, ok := util.FindContainerEnv(container, config.EnvInboundPodTunIPv4)
    _, ok := util.FindContainerEnv(container, config.EnvInboundPodTunIPv4)
    if !ok {
        return &v1.AdmissionResponse{UID: ar.Request.UID, Allowed: true}
    }
    // if create pod kubevpn-traffic-manager, just ignore it
    // because 198.19.0.100 is reserved
    if x, _, _ := net.ParseCIDR(value); config.RouterIP.Equal(x) {
        return &v1.AdmissionResponse{UID: ar.Request.UID, Allowed: true}
    }

    // 2) release old ip
    h.Lock()
@@ -142,11 +136,7 @@ func (h *admissionReviewHandler) handleCreate(ar v1.AdmissionReview) *v1.Admissi
        plog.G(context.Background()).Errorf("Failed to marshal json patch %v, err: %v", patch, err)
        return toV1AdmissionResponse(err)
    }
    var shouldPatchPod = func(pod *corev1.Pod) bool {
        namedContainer, _ := podcmd.FindContainerByName(pod, config.ContainerSidecarVPN)
        return namedContainer != nil
    }
    return applyPodPatch(ar, shouldPatchPod, string(marshal))
    return applyPodPatch(ar, string(marshal))
}

// handle delete pod event
@@ -164,15 +154,10 @@ func (h *admissionReviewHandler) handleDelete(ar v1.AdmissionReview) *v1.Admissi
    if container == nil {
        return &v1.AdmissionResponse{Allowed: true}
    }
    value, ok := util.FindContainerEnv(container, config.EnvInboundPodTunIPv4)
    _, ok := util.FindContainerEnv(container, config.EnvInboundPodTunIPv4)
    if !ok {
        return &v1.AdmissionResponse{Allowed: true}
    }
    // if delete pod kubevpn-traffic-manager, just ignore it
    // because 198.19.0.100 is reserved
    if x, _, _ := net.ParseCIDR(value); config.RouterIP.Equal(x) {
        return &v1.AdmissionResponse{Allowed: true}
    }

    // 2) release ip
    var ipv4, ipv6 net.IP
@@ -201,7 +186,7 @@ func (h *admissionReviewHandler) handleDelete(ar v1.AdmissionReview) *v1.Admissi
    return &v1.AdmissionResponse{Allowed: true}
}

func applyPodPatch(ar v1.AdmissionReview, shouldPatchPod func(*corev1.Pod) bool, patch string) *v1.AdmissionResponse {
func applyPodPatch(ar v1.AdmissionReview, patch string) *v1.AdmissionResponse {
    plog.G(context.Background()).Infof("Apply pod patch: %s", patch)
    podResource := metav1.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"}
    if ar.Request.Resource != podResource {
@@ -217,10 +202,10 @@ func applyPodPatch(ar v1.AdmissionReview, shouldPatchPod func(*corev1.Pod) bool,
        plog.G(context.Background()).Errorf("Failed to decode request into pod, err: %v, req: %s", err, string(raw))
        return toV1AdmissionResponse(err)
    }
    reviewResponse := v1.AdmissionResponse{Allowed: true}
    if shouldPatchPod(&pod) {
        reviewResponse.Patch = []byte(patch)
        reviewResponse.PatchType = ptr.To(v1.PatchTypeJSONPatch)
    reviewResponse := v1.AdmissionResponse{
        Allowed:   true,
        Patch:     []byte(patch),
        PatchType: ptr.To(v1.PatchTypeJSONPatch),
    }
    return &reviewResponse
}