mirror of https://github.com/kubenetworks/kubevpn.git
feat: proxy mode use traffic-manager pod image (#635)
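The commit threads an explicit image parameter from the daemon handlers through CreateRemoteInboundPod, the inject.* sidecar functions, and the container generators, instead of reading the client-side default config.Image at each site. In proxy mode the image is taken from the traffic-manager pod already running in the cluster, so injected sidecars match the server's version even when the local client was built against a different default image. A minimal sketch of that idea; the helper name and label selector are illustrative assumptions, not part of this commit (the commit itself reuses GetRunningPodList rather than a raw List call):

    package main

    import (
        "context"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
    )

    // trafficManagerImage returns the image of a running traffic-manager pod,
    // falling back to the client default when none is found.
    func trafficManagerImage(ctx context.Context, clientset *kubernetes.Clientset, ns, fallback string) string {
        podList, err := clientset.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{
            LabelSelector: "app=kubevpn-traffic-manager", // assumed label, for illustration only
        })
        if err != nil || len(podList.Items) == 0 {
            return fallback
        }
        // Mirror the commit: first pod, first container.
        return podList.Items[0].Spec.Containers[0].Image
    }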

@@ -1,11 +1,16 @@
 package cmds
 
 import (
+	"fmt"
+	"os"
+
 	"github.com/spf13/cobra"
 	cmdutil "k8s.io/kubectl/pkg/cmd/util"
 	"k8s.io/kubectl/pkg/util/i18n"
 	"k8s.io/kubectl/pkg/util/templates"
 
+	"github.com/wencaiwulue/kubevpn/v2/pkg/config"
+	"github.com/wencaiwulue/kubevpn/v2/pkg/dhcp"
 	"github.com/wencaiwulue/kubevpn/v2/pkg/util"
 	"github.com/wencaiwulue/kubevpn/v2/pkg/webhook"
 )
@@ -25,7 +30,16 @@ func CmdWebhook(f cmdutil.Factory) *cobra.Command {
 			go util.StartupPProfForServer(0)
 		},
 		RunE: func(cmd *cobra.Command, args []string) error {
-			return webhook.Main(f)
+			ns := os.Getenv(config.EnvPodNamespace)
+			if ns == "" {
+				return fmt.Errorf("failed to get pod namespace")
+			}
+			clientset, err := f.KubernetesClientSet()
+			if err != nil {
+				return err
+			}
+			manager := dhcp.NewDHCPManager(clientset.CoreV1().ConfigMaps(ns), ns)
+			return webhook.Main(manager, clientset)
 		},
 	}
 	return cmd
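The new RunE body requires config.EnvPodNamespace to be set inside the webhook container. That is conventionally wired with the Kubernetes Downward API; a sketch of the env entry, assuming the constant resolves to "POD_NAMESPACE" (its actual value lives in pkg/config and is not shown in this diff):

    import v1 "k8s.io/api/core/v1"

    // Downward API: expose the pod's own namespace as an env var.
    var envPodNamespace = v1.EnvVar{
        Name: "POD_NAMESPACE", // assumption: must match config.EnvPodNamespace
        ValueFrom: &v1.EnvVarSource{
            FieldRef: &v1.ObjectFieldSelector{FieldPath: "metadata.namespace"},
        },
    }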

@@ -3,7 +3,6 @@ package controlplane
 import (
 	"context"
 	"encoding/json"
-	"fmt"
 	"math"
 	"math/rand"
 	"os"
@@ -49,7 +48,7 @@ func (p *Processor) newVersion() string {
 func (p *Processor) ProcessFile(file NotifyMessage) error {
 	configList, err := ParseYaml(file.FilePath)
 	if err != nil {
-		p.logger.Errorf("error parsing yaml file: %v", err)
+		p.logger.Errorf("failed to parse config file: %v", err)
 		return err
 	}
 	enableIPv6, _ := util.DetectSupportIPv6()
@@ -57,14 +56,21 @@ func (p *Processor) ProcessFile(file NotifyMessage) error {
 		if len(config.Uid) == 0 {
 			continue
 		}
+
+		var marshal []byte
+		marshal, err = json.Marshal(config)
+		if err != nil {
+			p.logger.Errorf("failed to marshal config: %v", err)
+			return err
+		}
 		uid := util.GenEnvoyUID(config.Namespace, config.Uid)
 		lastConfig, ok := p.expireCache.Get(uid)
 		if ok && reflect.DeepEqual(lastConfig.(*Virtual), config) {
-			marshal, _ := json.Marshal(config)
-			p.logger.Infof("config are same, not needs to update, config: %s", string(marshal))
+			p.logger.Infof("not needs to update, config: %s", string(marshal))
 			continue
 		}
 
-		p.logger.Infof("update config, version %d, config %v", p.version, config)
+		p.logger.Infof("update config, version: %d, config: %s", p.version, marshal)
+
 		listeners, clusters, routes, endpoints := config.To(enableIPv6, p.logger)
 		resources := map[resource.Type][]types.Resource{
@@ -75,20 +81,20 @@ func (p *Processor) ProcessFile(file NotifyMessage) error {
 		resource.RuntimeType: {}, // runtimes
 		resource.SecretType:  {}, // secrets
 	}
 
 	var snapshot *cache.Snapshot
 	snapshot, err = cache.NewSnapshot(p.newVersion(), resources)
 
 	if err != nil {
-		p.logger.Errorf("snapshot inconsistency: %v, err: %v", snapshot, err)
+		p.logger.Errorf("failed to snapshot inconsistency: %v", err)
 		return err
 	}
 
 	if err = snapshot.Consistent(); err != nil {
-		p.logger.Errorf("snapshot inconsistency: %v, err: %v", snapshot, err)
+		p.logger.Errorf("failed to snapshot inconsistency: %v", err)
 		return err
 	}
 	p.logger.Infof("will serve snapshot %+v, nodeID: %s", snapshot, uid)
-	if err = p.cache.SetSnapshot(context.Background(), uid, snapshot); err != nil {
+	err = p.cache.SetSnapshot(context.Background(), uid, snapshot)
+	if err != nil {
 		p.logger.Errorf("snapshot error %q for %v", err, snapshot)
 		return err
 	}
@@ -103,7 +109,7 @@ func ParseYaml(file string) ([]*Virtual, error) {
 
 	yamlFile, err := os.ReadFile(file)
 	if err != nil {
-		return nil, fmt.Errorf("Error reading YAML file: %s\n", err)
+		return nil, err
 	}
 
 	err = yaml.Unmarshal(yamlFile, &virtualList)

@@ -4,6 +4,7 @@ import (
 	"context"
 	"fmt"
 	"net"
+	"time"
 
 	clusterservice "github.com/envoyproxy/go-control-plane/envoy/service/cluster/v3"
 	discoverygrpc "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3"
@@ -14,6 +15,7 @@ import (
 	secretservice "github.com/envoyproxy/go-control-plane/envoy/service/secret/v3"
 	serverv3 "github.com/envoyproxy/go-control-plane/pkg/server/v3"
 	"google.golang.org/grpc"
+	"google.golang.org/grpc/keepalive"
 
 	plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
 )
@@ -23,8 +25,17 @@ const (
 )
 
 func RunServer(ctx context.Context, server serverv3.Server, port uint) error {
-	grpcServer := grpc.NewServer(grpc.MaxConcurrentStreams(grpcMaxConcurrentStreams))
-
+	grpcOpts := []grpc.ServerOption{
+		grpc.MaxConcurrentStreams(grpcMaxConcurrentStreams),
+		grpc.KeepaliveParams(keepalive.ServerParameters{
+			Time:    15 * time.Second,
+			Timeout: 5 * time.Second,
+		}),
+		grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
+			MinTime:             15 * time.Second,
+			PermitWithoutStream: true,
+		})}
+	grpcServer := grpc.NewServer(grpcOpts...)
 	var lc net.ListenConfig
 	listener, err := lc.Listen(ctx, "tcp", fmt.Sprintf(":%d", port))
 	if err != nil {
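With this enforcement policy, clients that ping more often than MinTime (15s) are told to back off (GOAWAY with ENHANCE_YOUR_CALM), so client-side keepalive should be no more aggressive than the server allows. A sketch of a compatible dial, with a placeholder target and insecure credentials for brevity:

    package main

    import (
        "time"

        "google.golang.org/grpc"
        "google.golang.org/grpc/credentials/insecure"
        "google.golang.org/grpc/keepalive"
    )

    func dial() (*grpc.ClientConn, error) {
        return grpc.Dial("localhost:9002", // placeholder xDS endpoint
            grpc.WithTransportCredentials(insecure.NewCredentials()),
            grpc.WithKeepaliveParams(keepalive.ClientParameters{
                Time:                20 * time.Second, // >= server MinTime (15s)
                Timeout:             5 * time.Second,
                PermitWithoutStream: true, // the server permits pings without active streams
            }),
        )
    }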

@@ -118,10 +118,9 @@ func (svr *Server) Clone(resp rpc.Daemon_CloneServer) (err error) {
 		plog.G(context.Background()).Errorf("Failed to init client: %v", err)
 		return err
 	}
-	config.Image = req.Image
 	logger.Infof("Clone workloads...")
 	options.SetContext(sshCtx)
-	err = options.DoClone(plog.WithLogger(sshCtx, logger), []byte(req.KubeconfigBytes))
+	err = options.DoClone(plog.WithLogger(sshCtx, logger), []byte(req.KubeconfigBytes), req.Image)
 	if err != nil {
 		plog.G(context.Background()).Errorf("Clone workloads failed: %v", err)
 		return err

@@ -36,6 +36,7 @@ func (svr *Server) ConnectFork(resp rpc.Daemon_ConnectForkServer) (err error) {
 		OriginKubeconfigPath: req.OriginKubeconfigPath,
 		OriginNamespace:      req.Namespace,
 		Lock:                 &svr.Lock,
+		Image:                req.Image,
 		ImagePullSecretName:  req.ImagePullSecretName,
 	}
 	file, err := util.ConvertToTempKubeconfigFile([]byte(req.KubeconfigBytes))
@@ -67,7 +68,6 @@ func (svr *Server) ConnectFork(resp rpc.Daemon_ConnectForkServer) (err error) {
 		return err
 	}
 
-	config.Image = req.Image
 	err = connect.DoConnect(sshCtx, true)
 	if err != nil {
 		logger.Errorf("Failed to connect...")

@@ -53,6 +53,7 @@ func (svr *Server) Connect(resp rpc.Daemon_ConnectServer) (err error) {
 		OriginKubeconfigPath: req.OriginKubeconfigPath,
 		OriginNamespace:      req.Namespace,
 		Lock:                 &svr.Lock,
+		Image:                req.Image,
 		ImagePullSecretName:  req.ImagePullSecretName,
 	}
 	var file string
@@ -86,7 +87,6 @@ func (svr *Server) Connect(resp rpc.Daemon_ConnectServer) (err error) {
 		return err
 	}
 
-	config.Image = req.Image
 	err = svr.connect.DoConnect(sshCtx, false)
 	if err != nil {
 		logger.Errorf("Failed to connect...")

@@ -10,6 +10,7 @@ import (
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
+	v1 "k8s.io/api/core/v1"
 	"k8s.io/cli-runtime/pkg/resource"
 	"k8s.io/utils/ptr"
 
@@ -35,7 +36,6 @@ func (svr *Server) Proxy(resp rpc.Daemon_ProxyServer) (e error) {
 	}
 
 	logger := plog.GetLoggerForClient(int32(log.InfoLevel), io.MultiWriter(newProxyWarp(resp), svr.LogFile))
-	config.Image = req.Image
 	ctx := plog.WithLogger(resp.Context(), logger)
 	var sshConf = ssh.ParseSshFromRPC(req.SshJump)
 
@@ -54,6 +54,7 @@ func (svr *Server) Proxy(resp rpc.Daemon_ProxyServer) (e error) {
 		ExtraRouteInfo:       *handler.ParseExtraRouteFromRPC(req.ExtraRoute),
 		Engine:               config.Engine(req.Engine),
 		OriginKubeconfigPath: req.OriginKubeconfigPath,
+		Image:                req.Image,
 		ImagePullSecretName:  req.ImagePullSecretName,
 	}
 	err = connect.InitClient(util.InitFactoryByPath(file, req.Namespace))
@@ -156,7 +157,13 @@ func (svr *Server) Proxy(resp rpc.Daemon_ProxyServer) (e error) {
 		}
 	}
 
-	err = svr.connect.CreateRemoteInboundPod(plog.WithLogger(cancel, logger), req.Namespace, workloads, req.Headers, req.PortMap)
+	var podList []v1.Pod
+	podList, err = svr.connect.GetRunningPodList(cancel)
+	if err != nil {
+		return err
+	}
+	image := podList[0].Spec.Containers[0].Image
+	err = svr.connect.CreateRemoteInboundPod(plog.WithLogger(cancel, logger), req.Namespace, workloads, req.Headers, req.PortMap, image)
 	if err != nil {
 		plog.G(ctx).Errorf("Failed to inject inbound sidecar: %v", err)
 		return err
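The new proxy path indexes podList[0].Spec.Containers[0] directly after GetRunningPodList; if that helper could ever return an empty, error-free list, the index would panic. A defensive variant, purely illustrative (the helper is hypothetical, and the guard may already be enforced inside GetRunningPodList):

    import (
        "errors"

        v1 "k8s.io/api/core/v1"
    )

    // firstContainerImage guards the direct indexing the commit performs.
    func firstContainerImage(pods []v1.Pod) (string, error) {
        if len(pods) == 0 || len(pods[0].Spec.Containers) == 0 {
            return "", errors.New("no running traffic-manager pod/container found")
        }
        return pods[0].Spec.Containers[0].Image, nil
    }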

@@ -90,7 +90,7 @@ func (d *CloneOptions) SetContext(ctx context.Context) {
 * 3) create serviceAccount as needed
 * 4) modify podTempSpec inject kubevpn container
 */
-func (d *CloneOptions) DoClone(ctx context.Context, kubeconfigJsonBytes []byte) error {
+func (d *CloneOptions) DoClone(ctx context.Context, kubeconfigJsonBytes []byte, image string) error {
 	var args []string
 	if len(d.Headers) != 0 {
 		args = append(args, "--headers", labels.Set(d.Headers).String())
@@ -229,8 +229,8 @@ func (d *CloneOptions) DoClone(ctx context.Context, kubeconfigJsonBytes []byte)
 			Value: "1",
 		},
 	}...)*/
-	container := genVPNContainer(workload, d.Engine, d.Namespace, args)
-	containerSync := genSyncthingContainer(d.RemoteDir, syncDataDirName)
+	container := genVPNContainer(workload, d.Engine, d.Namespace, image, args)
+	containerSync := genSyncthingContainer(d.RemoteDir, syncDataDirName, image)
 	spec.Spec.Containers = append(containers, *container, *containerSync)
 	//set spec
 	marshal, err := json.Marshal(spec)
@@ -275,10 +275,10 @@ func (d *CloneOptions) DoClone(ctx context.Context, kubeconfigJsonBytes []byte)
 	return nil
 }
 
-func genSyncthingContainer(remoteDir string, syncDataDirName string) *v1.Container {
+func genSyncthingContainer(remoteDir string, syncDataDirName string, image string) *v1.Container {
 	containerSync := &v1.Container{
 		Name:  config.ContainerSidecarSyncthing,
-		Image: config.Image,
+		Image: image,
 		// https://stackoverflow.com/questions/32918849/what-process-signal-does-pod-receive-when-executing-kubectl-rolling-update
 		Command: []string{
 			"kubevpn",
@@ -317,10 +317,10 @@ func genSyncthingContainer(remoteDir string, syncDataDirName string) *v1.Contain
 	return containerSync
 }
 
-func genVPNContainer(workload string, engine config.Engine, namespace string, args []string) *v1.Container {
+func genVPNContainer(workload string, engine config.Engine, namespace string, image string, args []string) *v1.Container {
 	container := &v1.Container{
 		Name:  config.ContainerSidecarVPN,
-		Image: config.Image,
+		Image: image,
 		// https://stackoverflow.com/questions/32918849/what-process-signal-does-pod-receive-when-executing-kubectl-rolling-update
 		Command: append([]string{
 			"kubevpn",
@@ -328,7 +328,7 @@ func genVPNContainer(workload string, engine config.Engine, namespace string, ar
 			workload,
 			"--kubeconfig", "/tmp/.kube/" + config.KUBECONFIG,
 			"--namespace", namespace,
-			"--image", config.Image,
+			"--image", image,
 			"--netstack", string(engine),
 			"--foreground",
 		}, args...),

@@ -69,6 +69,7 @@ type ConnectOptions struct {
 	OriginKubeconfigPath string
 	OriginNamespace      string
 	Lock                 *sync.Mutex
+	Image                string
 	ImagePullSecretName  string
 
 	ctx context.Context
@@ -152,7 +153,7 @@ func (c *ConnectOptions) GetIPFromContext(ctx context.Context, logger *log.Logge
 	return nil
 }
 
-func (c *ConnectOptions) CreateRemoteInboundPod(ctx context.Context, namespace string, workloads []string, headers map[string]string, portMap []string) (err error) {
+func (c *ConnectOptions) CreateRemoteInboundPod(ctx context.Context, namespace string, workloads []string, headers map[string]string, portMap []string, image string) (err error) {
 	if c.localTunIPv4 == nil || c.localTunIPv6 == nil {
 		return fmt.Errorf("local tun IP is invalid")
 	}
@@ -186,11 +187,11 @@ func (c *ConnectOptions) CreateRemoteInboundPod(ctx context.Context, namespace s
 		// https://kubernetes.io/docs/concepts/workloads/pods/ephemeral-containers/
 		// means mesh mode
 		if c.Engine == config.EngineGvisor {
-			err = inject.InjectEnvoySidecar(ctx, nodeID, c.factory, c.clientset, c.Namespace, object, controller, headers, portMap, tlsSecret)
+			err = inject.InjectEnvoySidecar(ctx, nodeID, c.factory, c.Namespace, object, controller, headers, portMap, image)
 		} else if len(headers) != 0 || len(portMap) != 0 {
-			err = inject.InjectVPNAndEnvoySidecar(ctx, nodeID, c.factory, c.clientset.CoreV1().ConfigMaps(c.Namespace), c.Namespace, controller, configInfo, headers, portMap, tlsSecret)
+			err = inject.InjectVPNAndEnvoySidecar(ctx, nodeID, c.factory, c.Namespace, controller, configInfo, headers, portMap, tlsSecret, image)
 		} else {
-			err = inject.InjectVPNSidecar(ctx, nodeID, c.factory, c.Namespace, controller, configInfo, tlsSecret)
+			err = inject.InjectVPNSidecar(ctx, nodeID, c.factory, c.Namespace, controller, configInfo, tlsSecret, image)
 		}
 		if err != nil {
 			plog.G(ctx).Errorf("Injecting inbound sidecar for %s in namespace %s failed: %s", workload, namespace, err.Error())
@@ -220,7 +221,7 @@ func (c *ConnectOptions) DoConnect(ctx context.Context, isLite bool) (err error)
 		plog.G(ctx).Errorf("Failed to get network CIDR: %v", err)
 		return
 	}
-	if err = createOutboundPod(c.ctx, c.clientset, c.Namespace, c.Engine == config.EngineGvisor, c.ImagePullSecretName); err != nil {
+	if err = createOutboundPod(c.ctx, c.clientset, c.Namespace, c.Engine == config.EngineGvisor, c.Image, c.ImagePullSecretName); err != nil {
 		return
 	}
 	if err = c.upgradeDeploy(c.ctx); err != nil {
@@ -838,7 +839,7 @@ func (c *ConnectOptions) getCIDR(ctx context.Context, m *dhcp.Manager) error {
 	}
 
 	// (2) get CIDR from cni
-	cidrs := util.GetCIDR(ctx, c.clientset, c.config, c.Namespace)
+	cidrs := util.GetCIDR(ctx, c.clientset, c.config, c.Namespace, c.Image)
 	c.cidrs = util.RemoveCIDRsContainingIPs(util.RemoveLargerOverlappingCIDRs(cidrs), c.apiServerIPs)
 	s := sets.New[string]()
 	for _, cidr := range c.cidrs {
@@ -983,15 +984,15 @@ func (c *ConnectOptions) upgradeDeploy(ctx context.Context) error {
 		return fmt.Errorf("can not found any container in deploy %s", deploy.Name)
 	}
 	// check running pod, sometime deployment is rolling back, so need to check running pod
-	list, err := c.GetRunningPodList(ctx)
+	podList, err := c.GetRunningPodList(ctx)
 	if err != nil {
 		return err
 	}
 
 	clientVer := config.Version
-	clientImg := config.Image
+	clientImg := c.Image
 	serverImg := deploy.Spec.Template.Spec.Containers[0].Image
-	runningPodImg := list[0].Spec.Containers[0].Image
+	runningPodImg := podList[0].Spec.Containers[0].Image
 
 	isNeedUpgrade, err := util.IsNewer(clientVer, clientImg, serverImg)
 	isPodNeedUpgrade, err1 := util.IsNewer(clientVer, clientImg, runningPodImg)
@@ -1013,7 +1014,7 @@ func (c *ConnectOptions) upgradeDeploy(ctx context.Context) error {
 
 	// 2) update deploy
 	plog.G(ctx).Infof("Set image %s --> %s...", serverImg, clientImg)
-	err = upgradeDeploySpec(ctx, c.factory, c.Namespace, deploy.Name, c.Engine == config.EngineGvisor)
+	err = upgradeDeploySpec(ctx, c.factory, c.Namespace, deploy.Name, c.Engine == config.EngineGvisor, clientImg)
 	if err != nil {
 		return err
 	}
@@ -1028,7 +1029,7 @@ func (c *ConnectOptions) upgradeDeploy(ctx context.Context) error {
 	return nil
 }
 
-func upgradeDeploySpec(ctx context.Context, f cmdutil.Factory, ns, name string, gvisor bool) error {
+func upgradeDeploySpec(ctx context.Context, f cmdutil.Factory, ns, name string, gvisor bool, image string) error {
 	r := f.NewBuilder().
 		WithScheme(scheme.Scheme, scheme.Scheme.PrioritizedVersionsAllGroups()...).
 		NamespaceParam(ns).DefaultNamespace().
@@ -1074,7 +1075,7 @@ func upgradeDeploySpec(ctx context.Context, f cmdutil.Factory, ns, name string,
 			break
 		}
 	}
-	deploySpec := genDeploySpec(ns, udp8422, tcp10800, tcp9002, udp53, tcp80, gvisor, imagePullSecret)
+	deploySpec := genDeploySpec(ns, udp8422, tcp10800, tcp9002, udp53, tcp80, gvisor, image, imagePullSecret)
 	*spec = deploySpec.Spec.Template.Spec
 	return nil
 })

@@ -174,14 +174,15 @@ func healthChecker(t *testing.T, endpoint string, header map[string]string, keyw
 		req.Header.Add(k, v)
 	}
 
+	client := &http.Client{Timeout: time.Second * 1}
 	err = retry.OnError(
 		wait.Backoff{Duration: time.Second, Factor: 1, Jitter: 0, Steps: 120},
 		func(err error) bool { return err != nil },
 		func() error {
 			var resp *http.Response
-			resp, err = (&http.Client{Timeout: time.Second * 5}).Do(req)
+			resp, err = client.Do(req)
 			if err != nil {
-				t.Logf("failed to do health check endpoint: %s: %v", endpoint, err)
+				t.Logf("%s failed to do health check endpoint: %s: %v", time.Now().Format(time.DateTime), endpoint, err)
 				return err
 			}
 			if resp.StatusCode != 200 {
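The test change hoists a single http.Client with a 1-second timeout out of the retry closure and reuses it across attempts, so each probe fails fast while retry.OnError drives up to 120 one-second steps (about two minutes overall). A standalone sketch of the same client-go retry helper against a placeholder endpoint:

    package main

    import (
        "fmt"
        "net/http"
        "time"

        "k8s.io/apimachinery/pkg/util/wait"
        "k8s.io/client-go/util/retry"
    )

    func main() {
        client := &http.Client{Timeout: time.Second}
        err := retry.OnError(
            wait.Backoff{Duration: time.Second, Factor: 1, Steps: 120}, // one probe per second
            func(err error) bool { return err != nil }, // retry on any error
            func() error {
                resp, err := client.Get("http://localhost:8080/health") // placeholder endpoint
                if err != nil {
                    return err
                }
                defer resp.Body.Close()
                if resp.StatusCode != http.StatusOK {
                    return fmt.Errorf("unexpected status: %d", resp.StatusCode)
                }
                return nil
            },
        )
        if err != nil {
            fmt.Println("health check never succeeded:", err)
        }
    }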

@@ -28,7 +28,7 @@ import (
 	"github.com/wencaiwulue/kubevpn/v2/pkg/util"
 )
 
-func createOutboundPod(ctx context.Context, clientset *kubernetes.Clientset, namespace string, gvisor bool, imagePullSecretName string) (err error) {
+func createOutboundPod(ctx context.Context, clientset *kubernetes.Clientset, namespace string, gvisor bool, image, imagePullSecretName string) (err error) {
 	var exists bool
 	exists, err = util.DetectPodExists(ctx, clientset, namespace)
 	if err != nil {
@@ -138,7 +138,7 @@ func createOutboundPod(ctx context.Context, clientset *kubernetes.Clientset, nam
 
 	// 7) create deployment
 	plog.G(ctx).Infof("Creating Deployment %s", config.ConfigMapPodTrafficManager)
-	deploy := genDeploySpec(namespace, udp8422, tcp10800, tcp9002, udp53, tcp80, gvisor, imagePullSecretName)
+	deploy := genDeploySpec(namespace, udp8422, tcp10800, tcp9002, udp53, tcp80, gvisor, image, imagePullSecretName)
 	deploy, err = clientset.AppsV1().Deployments(namespace).Create(ctx, deploy, metav1.CreateOptions{})
 	if err != nil {
 		plog.G(ctx).Errorf("Failed to create deployment for %s: %v", config.ConfigMapPodTrafficManager, err)
@@ -296,7 +296,7 @@ func genSecret(namespace string, crt []byte, key []byte, host []byte) *v1.Secret
 	return secret
 }
 
-func genDeploySpec(namespace string, udp8422 string, tcp10800 string, tcp9002 string, udp53 string, tcp80 string, gvisor bool, imagePullSecretName string) *appsv1.Deployment {
+func genDeploySpec(namespace string, udp8422 string, tcp10800 string, tcp9002 string, udp53 string, tcp80 string, gvisor bool, image, imagePullSecretName string) *appsv1.Deployment {
 	var resourcesSmall = v1.ResourceRequirements{
 		Requests: map[v1.ResourceName]resource.Quantity{
 			v1.ResourceCPU: resource.MustParse("100m"),
@@ -360,7 +360,7 @@ func genDeploySpec(namespace string, udp8422 string, tcp10800 string, tcp9002 st
 			Containers: []v1.Container{
 				{
 					Name:  config.ContainerSidecarVPN,
-					Image: config.Image,
+					Image: image,
 					Command: []string{"/bin/sh", "-c"},
 					Args: []string{util.If(
 						gvisor,
@@ -432,7 +432,7 @@ kubevpn server -l "tcp://:10800" -l "tun://:8422?net=${TunIPv4}&net6=${TunIPv6}"
 				},
 				{
 					Name:  config.ContainerSidecarControlPlane,
-					Image: config.Image,
+					Image: image,
 					Command: []string{"kubevpn"},
 					Args:    []string{"control-plane", "--watchDirectoryFilename", "/etc/envoy/envoy-config.yaml"},
 					Ports: []v1.ContainerPort{
@@ -459,7 +459,7 @@ kubevpn server -l "tcp://:10800" -l "tun://:8422?net=${TunIPv4}&net6=${TunIPv6}"
 				},
 				{
 					Name:  config.ContainerSidecarWebhook,
-					Image: config.Image,
+					Image: image,
 					Command: []string{"kubevpn"},
 					Args:    []string{"webhook"},
 					Ports: []v1.ContainerPort{{

@@ -38,13 +38,13 @@ func RemoveContainers(spec *v1.PodTemplateSpec) {
 }
 
 // AddMeshContainer todo envoy support ipv6
-func AddMeshContainer(spec *v1.PodTemplateSpec, ns, nodeID string, c util.PodRouteConfig, ipv6 bool, connectNamespace string, secret *v1.Secret) {
+func AddMeshContainer(spec *v1.PodTemplateSpec, ns, nodeID string, ipv6 bool, connectNamespace string, secret *v1.Secret, image string) {
 	// remove envoy proxy containers if already exist
 	RemoveContainers(spec)
 
 	spec.Spec.Containers = append(spec.Spec.Containers, v1.Container{
 		Name:  config.ContainerSidecarVPN,
-		Image: config.Image,
+		Image: image,
 		Command: []string{"/bin/sh", "-c"},
 		Args: []string{`
 echo 1 > /proc/sys/net/ipv4/ip_forward
@@ -136,7 +136,7 @@ kubevpn server -l "tun:/localhost:8422?net=${TunIPv4}&net6=${TunIPv6}&route=${CI
 	})
 	spec.Spec.Containers = append(spec.Spec.Containers, v1.Container{
 		Name:  config.ContainerSidecarEnvoyProxy,
-		Image: config.Image,
+		Image: image,
 		Command: []string{
 			"envoy",
 			"-l",
@@ -171,13 +171,13 @@ kubevpn server -l "tun:/localhost:8422?net=${TunIPv4}&net6=${TunIPv6}&route=${CI
 	})
 }
 
-func AddEnvoyContainer(spec *v1.PodTemplateSpec, ns, nodeID string, ipv6 bool, connectNamespace string, secret *v1.Secret) {
+func AddEnvoyContainer(spec *v1.PodTemplateSpec, ns, nodeID string, ipv6 bool, connectNamespace string, image string) {
 	// remove envoy proxy containers if already exist
 	RemoveContainers(spec)
 
 	spec.Spec.Containers = append(spec.Spec.Containers, v1.Container{
 		Name:  config.ContainerSidecarVPN,
-		Image: config.Image,
+		Image: image,
 		Command: []string{"/bin/sh", "-c"},
 		Args: []string{`
 kubevpn server -l "ssh://:2222"`,
@@ -200,7 +200,7 @@ kubevpn server -l "ssh://:2222"`,
 	})
 	spec.Spec.Containers = append(spec.Spec.Containers, v1.Container{
 		Name:  config.ContainerSidecarEnvoyProxy,
-		Image: config.Image,
+		Image: image,
 		Command: []string{
 			"envoy",
 			"-l",

@@ -20,12 +20,12 @@ func RemoveContainer(spec *corev1.PodSpec) {
 	}
 }
 
-func AddContainer(spec *corev1.PodSpec, c util.PodRouteConfig, connectNamespace string, secret *corev1.Secret) {
+func AddContainer(spec *corev1.PodSpec, c util.PodRouteConfig, connectNamespace string, secret *corev1.Secret, image string) {
 	// remove vpn container if already exist
 	RemoveContainer(spec)
 	spec.Containers = append(spec.Containers, corev1.Container{
 		Name:  config.ContainerSidecarVPN,
-		Image: config.Image,
+		Image: image,
 		Env: []corev1.EnvVar{
 			{
 				Name: "LocalTunIPv4",

@@ -27,7 +27,12 @@ import (
 
 // InjectEnvoySidecar patch a sidecar, using iptables to do port-forward let this pod decide should go to 233.254.254.100 or request to 127.0.0.1
 // https://istio.io/latest/docs/ops/deployment/requirements/#ports-used-by-istio
-func InjectEnvoySidecar(ctx context.Context, nodeID string, f cmdutil.Factory, clientset *kubernetes.Clientset, connectNamespace string, current, object *runtimeresource.Info, headers map[string]string, portMap []string, secret *v1.Secret) (err error) {
+func InjectEnvoySidecar(ctx context.Context, nodeID string, f cmdutil.Factory, connectNamespace string, current, object *runtimeresource.Info, headers map[string]string, portMap []string, image string) (err error) {
+	var clientset *kubernetes.Clientset
+	clientset, err = f.KubernetesClientSet()
+	if err != nil {
+		return err
+	}
 	u := object.Object.(*unstructured.Unstructured)
 	var templateSpec *v1.PodTemplateSpec
 	var path []string
@@ -63,7 +68,7 @@ func InjectEnvoySidecar(ctx context.Context, nodeID string, f cmdutil.Factory, c
 
 	enableIPv6, _ := util.DetectPodSupportIPv6(ctx, f, connectNamespace)
 	// (1) add mesh container
-	AddEnvoyContainer(templateSpec, object.Namespace, nodeID, enableIPv6, connectNamespace, secret)
+	AddEnvoyContainer(templateSpec, object.Namespace, nodeID, enableIPv6, connectNamespace, image)
 	helper := pkgresource.NewHelper(object.Client, object.Mapping)
 	ps := []P{
 		{

@@ -18,6 +18,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/sets"
 	pkgresource "k8s.io/cli-runtime/pkg/resource"
 	runtimeresource "k8s.io/cli-runtime/pkg/resource"
+	"k8s.io/client-go/kubernetes"
 	v12 "k8s.io/client-go/kubernetes/typed/core/v1"
 	cmdutil "k8s.io/kubectl/pkg/cmd/util"
 	"sigs.k8s.io/yaml"
@@ -31,7 +32,13 @@ import (
 // https://istio.io/latest/docs/ops/deployment/requirements/#ports-used-by-istio
 
 // InjectVPNAndEnvoySidecar patch a sidecar, using iptables to do port-forward let this pod decide should go to 233.254.254.100 or request to 127.0.0.1
-func InjectVPNAndEnvoySidecar(ctx context.Context, nodeID string, f cmdutil.Factory, mapInterface v12.ConfigMapInterface, connectNamespace string, object *runtimeresource.Info, c util.PodRouteConfig, headers map[string]string, portMaps []string, secret *v1.Secret) (err error) {
+func InjectVPNAndEnvoySidecar(ctx context.Context, nodeID string, f cmdutil.Factory, connectNamespace string, object *runtimeresource.Info, c util.PodRouteConfig, headers map[string]string, portMaps []string, secret *v1.Secret, image string) (err error) {
+	var clientset *kubernetes.Clientset
+	clientset, err = f.KubernetesClientSet()
+	if err != nil {
+		return err
+	}
+
 	u := object.Object.(*unstructured.Unstructured)
 	var templateSpec *v1.PodTemplateSpec
 	var path []string
@@ -70,7 +77,7 @@ func InjectVPNAndEnvoySidecar(ctx context.Context, nodeID string, f cmdutil.Fact
 		}
 	}
 
-	err = addEnvoyConfig(mapInterface, object.Namespace, nodeID, c, headers, ports, portmap)
+	err = addEnvoyConfig(clientset.CoreV1().ConfigMaps(connectNamespace), object.Namespace, nodeID, c, headers, ports, portmap)
 	if err != nil {
 		plog.G(ctx).Errorf("Failed to add envoy config: %v", err)
 		return err
@@ -88,7 +95,7 @@ func InjectVPNAndEnvoySidecar(ctx context.Context, nodeID string, f cmdutil.Fact
 
 	enableIPv6, _ := util.DetectPodSupportIPv6(ctx, f, connectNamespace)
 	// (1) add mesh container
-	AddMeshContainer(templateSpec, object.Namespace, nodeID, c, enableIPv6, connectNamespace, secret)
+	AddMeshContainer(templateSpec, object.Namespace, nodeID, enableIPv6, connectNamespace, secret, image)
 	helper := pkgresource.NewHelper(object.Client, object.Mapping)
 	ps := []P{
 		{
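Both inject functions now derive the typed clientset (and, where needed, the ConfigMap interface) from the cmdutil.Factory instead of accepting them as parameters, shrinking the signatures to factory + namespace + image. The pattern in isolation, as a sketch (the helper name is illustrative):

    import (
        v12 "k8s.io/client-go/kubernetes/typed/core/v1"
        cmdutil "k8s.io/kubectl/pkg/cmd/util"
    )

    // configMapsFor resolves a ConfigMap interface from a kubectl factory,
    // as InjectVPNAndEnvoySidecar now does internally via KubernetesClientSet.
    func configMapsFor(f cmdutil.Factory, namespace string) (v12.ConfigMapInterface, error) {
        clientset, err := f.KubernetesClientSet()
        if err != nil {
            return nil, err
        }
        return clientset.CoreV1().ConfigMaps(namespace), nil
    }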

@@ -24,7 +24,7 @@ import (
 	util2 "github.com/wencaiwulue/kubevpn/v2/pkg/util"
 )
 
-func InjectVPNSidecar(ctx context.Context, nodeID string, f util.Factory, connectNamespace string, object *resource.Info, c util2.PodRouteConfig, secret *v1.Secret) error {
+func InjectVPNSidecar(ctx context.Context, nodeID string, f util.Factory, connectNamespace string, object *resource.Info, c util2.PodRouteConfig, secret *v1.Secret, image string) error {
 	u := object.Object.(*unstructured.Unstructured)
 
 	podTempSpec, path, err := util2.GetPodTemplateSpecPath(u)
@@ -50,7 +50,7 @@ func InjectVPNSidecar(ctx context.Context, nodeID string, f util.Factory, connec
 		return err
 	}
 
-	AddContainer(&podTempSpec.Spec, c, connectNamespace, secret)
+	AddContainer(&podTempSpec.Spec, c, connectNamespace, secret, image)
 
 	workload := fmt.Sprintf("%s/%s", object.Mapping.Resource.Resource, object.Name)
 	helper := resource.NewHelper(object.Client, object.Mapping)

@@ -33,7 +33,7 @@ import (
 // 2) grep cmdline
 // 3) create svc + cat *.conflist
 // 4) create svc + get pod ip with svc mask
-func GetCIDR(ctx context.Context, clientset *kubernetes.Clientset, restconfig *rest.Config, namespace string) []*net.IPNet {
+func GetCIDR(ctx context.Context, clientset *kubernetes.Clientset, restconfig *rest.Config, namespace string, image string) []*net.IPNet {
 	defer func() {
 		_ = clientset.CoreV1().Pods(namespace).Delete(context.Background(), config.CniNetName, v1.DeleteOptions{GracePeriodSeconds: pointer.Int64(0)})
 	}()
@@ -47,7 +47,7 @@ func GetCIDR(ctx context.Context, clientset *kubernetes.Clientset, restconfig *r
 	}
 
 	plog.G(ctx).Infoln("Getting network CIDR from CNI...")
-	cni, err := GetCIDRFromCNI(ctx, clientset, restconfig, namespace)
+	cni, err := GetCIDRFromCNI(ctx, clientset, restconfig, namespace, image)
 	if err == nil {
 		plog.G(ctx).Debugf("Getting network CIDR from CNI successfully")
 		result = append(result, cni...)
@@ -126,8 +126,8 @@ func GetCIDRByDumpClusterInfo(ctx context.Context, clientset *kubernetes.Clients
 }
 
 // GetCIDRFromCNI kube-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/etc/kubernetes/controller-manager.conf --authorization-kubeconfig=/etc/kubernetes/controller-manager.conf --bind-address=0.0.0.0 --client-ca-file=/etc/kubernetes/ssl/ca.crt --cluster-cidr=10.233.64.0/18 --cluster-name=cluster.local --cluster-signing-cert-file=/etc/kubernetes/ssl/ca.crt --cluster-signing-key-file=/etc/kubernetes/ssl/ca.key --configure-cloud-routes=false --controllers=*,bootstrapsigner,tokencleaner --kubeconfig=/etc/kubernetes/controller-manager.conf --leader-elect=true --leader-elect-lease-duration=15s --leader-elect-renew-deadline=10s --node-cidr-mask-size=24 --node-monitor-grace-period=40s --node-monitor-period=5s --port=0 --profiling=False --requestheader-client-ca-file=/etc/kubernetes/ssl/front-proxy-ca.crt --root-ca-file=/etc/kubernetes/ssl/ca.crt --service-account-private-key-file=/etc/kubernetes/ssl/sa.key --service-cluster-ip-range=10.233.0.0/18 --terminated-pod-gc-threshold=12500 --use-service-account-credentials=true
-func GetCIDRFromCNI(ctx context.Context, clientset *kubernetes.Clientset, restconfig *rest.Config, namespace string) ([]*net.IPNet, error) {
-	pod, err := CreateCIDRPod(ctx, clientset, namespace)
+func GetCIDRFromCNI(ctx context.Context, clientset *kubernetes.Clientset, restconfig *rest.Config, namespace string, image string) ([]*net.IPNet, error) {
+	pod, err := CreateCIDRPod(ctx, clientset, namespace, image)
 	if err != nil {
 		return nil, err
 	}
@@ -232,7 +232,7 @@ func GetPodCIDRFromCNI(ctx context.Context, clientset *kubernetes.Clientset, res
 	return cidrList, nil
 }
 
-func CreateCIDRPod(ctx context.Context, clientset *kubernetes.Clientset, namespace string) (*v13.Pod, error) {
+func CreateCIDRPod(ctx context.Context, clientset *kubernetes.Clientset, namespace string, image string) (*v13.Pod, error) {
 	var procName = "proc-dir-kubevpn"
 	pod := &v13.Pod{
 		ObjectMeta: v1.ObjectMeta{
@@ -263,7 +263,7 @@ func CreateCIDRPod(ctx context.Context, clientset *kubernetes.Clientset, namespa
 		Containers: []v13.Container{
 			{
 				Name:  config.CniNetName,
-				Image: config.Image,
+				Image: image,
 				Command: []string{"tail", "-f", "/dev/null"},
 				Resources: v13.ResourceRequirements{
 					Requests: map[v13.ResourceName]resource.Quantity{

@@ -9,6 +9,7 @@ import (
 	"testing"
 	"time"
 
+	"github.com/wencaiwulue/kubevpn/v2/pkg/config"
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/util/intstr"
 	"k8s.io/apimachinery/pkg/util/wait"
@@ -73,7 +74,7 @@ func TestByCreateSvc(t *testing.T) {
 
 func TestElegant(t *testing.T) {
 	before()
-	elegant := GetCIDR(context.Background(), clientset, restconfig, namespace)
+	elegant := GetCIDR(context.Background(), clientset, restconfig, namespace, config.Image)
 	for _, ipNet := range elegant {
 		t.Log(ipNet.String())
 	}

@@ -5,7 +5,6 @@ import (
 	"crypto/tls"
 	"fmt"
 	"net/http"
-	"os"
 
 	"golang.org/x/net/http2"
 	"golang.org/x/net/http2/h2c"
@@ -14,9 +13,8 @@ import (
 	"google.golang.org/grpc/health"
 	"google.golang.org/grpc/health/grpc_health_v1"
 	"google.golang.org/grpc/reflection"
-	"k8s.io/kubectl/pkg/cmd/util"
+	"k8s.io/client-go/kubernetes"
 
 	"github.com/wencaiwulue/kubevpn/v2/pkg/config"
 	"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
 	"github.com/wencaiwulue/kubevpn/v2/pkg/dhcp"
 	"github.com/wencaiwulue/kubevpn/v2/pkg/dhcp/rpc"
@@ -24,14 +22,8 @@ import (
 	putil "github.com/wencaiwulue/kubevpn/v2/pkg/util"
 )
 
-func Main(f util.Factory) error {
-	clientset, err := f.KubernetesClientSet()
-	if err != nil {
-		return err
-	}
-
-	ns := os.Getenv(config.EnvPodNamespace)
-	h := &admissionReviewHandler{f: f, clientset: clientset, ns: ns}
+func Main(manager *dhcp.Manager, clientset *kubernetes.Clientset) error {
+	h := &admissionReviewHandler{dhcp: manager}
 	http.HandleFunc("/pods", func(w http.ResponseWriter, r *http.Request) {
 		serve(w, r, newDelegateToV1AdmitHandler(h.admitPods))
 	})

@@ -12,19 +12,16 @@ import (
 	"k8s.io/api/admission/v1beta1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/client-go/kubernetes"
-	cmdutil "k8s.io/kubectl/pkg/cmd/util"
 	"k8s.io/utils/ptr"
 
+	"github.com/wencaiwulue/kubevpn/v2/pkg/dhcp"
 	plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
 )
 
 // admissionReviewHandler is a handler to handle business logic, holding an util.Factory
 type admissionReviewHandler struct {
 	sync.Mutex
-	f         cmdutil.Factory
-	ns        string
-	clientset *kubernetes.Clientset
+	dhcp *dhcp.Manager
 }
 
 // admitv1beta1Func handles a v1beta1 admission

@@ -16,7 +16,6 @@ import (
 	"k8s.io/utils/ptr"
 
 	"github.com/wencaiwulue/kubevpn/v2/pkg/config"
-	"github.com/wencaiwulue/kubevpn/v2/pkg/dhcp"
 	plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
 	"github.com/wencaiwulue/kubevpn/v2/pkg/util"
 )
@@ -85,8 +84,6 @@ func (h *admissionReviewHandler) handleCreate(ar v1.AdmissionReview) *v1.Admissi
 	// 2) release old ip
 	h.Lock()
 	defer h.Unlock()
-	mapInterface := h.clientset.CoreV1().ConfigMaps(util.If(h.ns != "", h.ns, ar.Request.Namespace))
-	manager := dhcp.NewDHCPManager(mapInterface, util.If(h.ns != "", h.ns, ar.Request.Namespace))
 	var ips []net.IP
 	for k := 0; k < len(container.Env); k++ {
 		envVar := container.Env[k]
@@ -96,11 +93,11 @@ func (h *admissionReviewHandler) handleCreate(ar v1.AdmissionReview) *v1.Admissi
 			}
 		}
 	}
-	_ = manager.ReleaseIP(context.Background(), ips...)
+	_ = h.dhcp.ReleaseIP(context.Background(), ips...)
 
 	// 3) rent new ip
 	var v4, v6 *net.IPNet
-	v4, v6, err = manager.RentIP(context.Background())
+	v4, v6, err = h.dhcp.RentIP(context.Background())
 	if err != nil {
 		plog.G(context.Background()).Errorf("Rent IP random failed: %v", err)
 		return toV1AdmissionResponse(err)
@@ -185,8 +182,7 @@ func (h *admissionReviewHandler) handleDelete(ar v1.AdmissionReview) *v1.Admissi
 	if len(ips) != 0 {
 		h.Lock()
 		defer h.Unlock()
-		mapInterface := h.clientset.CoreV1().ConfigMaps(util.If(h.ns != "", h.ns, ar.Request.Namespace))
-		err := dhcp.NewDHCPManager(mapInterface, util.If(h.ns != "", h.ns, ar.Request.Namespace)).ReleaseIP(context.Background(), ips...)
+		err := h.dhcp.ReleaseIP(context.Background(), ips...)
 		if err != nil {
 			plog.G(context.Background()).Errorf("Failed to release IP %v to DHCP server: %v", ips, err)
 		} else {
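With the manager injected once from CmdWebhook, handleCreate and handleDelete now rent and release IPs against a single dhcp.Manager guarded by the handler's embedded mutex, instead of constructing a throwaway manager per admission request. Condensed into one hypothetical helper (error handling trimmed; names mirror the diff):

    // reassignIPs releases a pod's old leases and rents a fresh v4/v6 pair,
    // serialized by the handler's sync.Mutex.
    func (h *admissionReviewHandler) reassignIPs(ctx context.Context, old []net.IP) (v4, v6 *net.IPNet, err error) {
        h.Lock()
        defer h.Unlock()
        _ = h.dhcp.ReleaseIP(ctx, old...)
        return h.dhcp.RentIP(ctx)
    }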