feat: add mode connect-mode

Author:    fengcaiwen
Date:      2023-03-22 20:31:28 +08:00
Committer: wencaiwulue
Parent:    2ccf5776a8
Commit:    593f42aeca
7 changed files with 461 additions and 161 deletions

View File

@@ -1,16 +1,10 @@
package cmds package cmds
import ( import (
"context"
"fmt"
"os" "os"
"syscall"
"github.com/docker/cli/cli" "github.com/docker/cli/cli"
"github.com/docker/cli/opts" "github.com/docker/cli/opts"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra" "github.com/spf13/cobra"
cmdutil "k8s.io/kubectl/pkg/cmd/util" cmdutil "k8s.io/kubectl/pkg/cmd/util"
"k8s.io/kubectl/pkg/util/completion" "k8s.io/kubectl/pkg/util/completion"
@@ -25,18 +19,15 @@ import (
func CmdDev(f cmdutil.Factory) *cobra.Command { func CmdDev(f cmdutil.Factory) *cobra.Command {
var devOptions = dev.Options{ var devOptions = dev.Options{
Factory: f, Factory: f,
Entrypoint: "", Entrypoint: "",
Publish: opts.NewListOpts(nil), Publish: opts.NewListOpts(nil),
Expose: opts.NewListOpts(nil), Expose: opts.NewListOpts(nil),
Env: opts.NewListOpts(nil), Env: opts.NewListOpts(nil),
Volumes: opts.NewListOpts(nil), Volumes: opts.NewListOpts(nil),
ExtraHosts: opts.NewListOpts(nil), ExtraHosts: opts.NewListOpts(nil),
Aliases: opts.NewListOpts(nil), NoProxy: false,
Links: opts.NewListOpts(nil), ExtraCIDR: []string{},
LinkLocalIPs: opts.NewListOpts(nil),
NoProxy: false,
ExtraCIDR: []string{},
} }
var sshConf = &util.SshConfig{} var sshConf = &util.SshConfig{}
cmd := &cobra.Command{ cmd := &cobra.Command{
@@ -85,69 +76,7 @@ Startup your kubernetes workloads in local Docker container with same volume、e
return handler.SshJump(sshConf, cmd.Flags()) return handler.SshJump(sshConf, cmd.Flags())
}, },
RunE: func(cmd *cobra.Command, args []string) error { RunE: func(cmd *cobra.Command, args []string) error {
connect := handler.ConnectOptions{ return dev.DoDev(devOptions, args, f)
Headers: devOptions.Headers,
Workloads: args,
ExtraCIDR: devOptions.ExtraCIDR,
ExtraDomain: devOptions.ExtraDomain,
}
mode := container.NetworkMode(devOptions.NetMode.NetworkMode())
if mode.IsContainer() {
client, _, err := dev.GetClient()
if err != nil {
return err
}
var inspect types.ContainerJSON
inspect, err = client.ContainerInspect(context.Background(), mode.ConnectedContainer())
if err != nil {
return err
}
if inspect.State == nil {
return fmt.Errorf("can not get container status, please make contianer name is valid")
}
if !inspect.State.Running {
return fmt.Errorf("container %s status is %s, expect is running, please make sure your outer docker name is correct", mode.ConnectedContainer(), inspect.State.Status)
}
}
if err := connect.InitClient(f); err != nil {
return err
}
err := connect.PreCheckResource()
if err != nil {
return err
}
if len(connect.Workloads) > 1 {
return fmt.Errorf("can only dev one workloads at same time, workloads: %v", connect.Workloads)
}
if len(connect.Workloads) < 1 {
return fmt.Errorf("you must provide resource to dev, workloads : %v is invaild", connect.Workloads)
}
devOptions.Workload = connect.Workloads[0]
// if no-proxy is true, not needs to intercept traffic
if devOptions.NoProxy {
if len(connect.Headers) != 0 {
return fmt.Errorf("not needs to provide headers if is no-proxy mode")
}
connect.Workloads = []string{}
}
defer func() {
handler.Cleanup(syscall.SIGQUIT)
select {}
}()
if err = connect.DoConnect(); err != nil {
log.Errorln(err)
return err
}
devOptions.Namespace = connect.Namespace
err = devOptions.Main(context.Background())
if err != nil {
log.Errorln(err)
}
return err
}, },
} }
cmd.Flags().StringToStringVarP(&devOptions.Headers, "headers", "H", map[string]string{}, "Traffic with special headers with reverse it to local PC, you should startup your service after reverse workloads successfully, If not special, redirect all traffic to local PC, format is k=v, like: k1=v1,k2=v2") cmd.Flags().StringToStringVarP(&devOptions.Headers, "headers", "H", map[string]string{}, "Traffic with special headers with reverse it to local PC, you should startup your service after reverse workloads successfully, If not special, redirect all traffic to local PC, format is k=v, like: k1=v1,k2=v2")
@@ -158,6 +87,7 @@ Startup your kubernetes workloads in local Docker container with same volume、e
cmdutil.CheckErr(cmd.RegisterFlagCompletionFunc("container", completion.ContainerCompletionFunc(f))) cmdutil.CheckErr(cmd.RegisterFlagCompletionFunc("container", completion.ContainerCompletionFunc(f)))
cmd.Flags().StringArrayVar(&devOptions.ExtraCIDR, "extra-cidr", []string{}, "Extra cidr string, eg: --extra-cidr 192.168.0.159/24 --extra-cidr 192.168.1.160/32") cmd.Flags().StringArrayVar(&devOptions.ExtraCIDR, "extra-cidr", []string{}, "Extra cidr string, eg: --extra-cidr 192.168.0.159/24 --extra-cidr 192.168.1.160/32")
cmd.Flags().StringArrayVar(&devOptions.ExtraDomain, "extra-domain", []string{}, "Extra domain string, the resolved ip will add to route table, eg: --extra-domain test.abc.com --extra-domain foo.test.com") cmd.Flags().StringArrayVar(&devOptions.ExtraDomain, "extra-domain", []string{}, "Extra domain string, the resolved ip will add to route table, eg: --extra-domain test.abc.com --extra-domain foo.test.com")
cmd.Flags().StringVar((*string)(&devOptions.ConnectMode), "connect-mode", string(dev.ConnectModeHost), "Connect to kubernetes network in container or in host, eg: ["+string(dev.ConnectModeContainer)+"|"+string(dev.ConnectModeHost)+"]")
// docker options // docker options
cmd.Flags().Var(&devOptions.ExtraHosts, "add-host", "Add a custom host-to-IP mapping (host:ip)") cmd.Flags().Var(&devOptions.ExtraHosts, "add-host", "Add a custom host-to-IP mapping (host:ip)")
@@ -165,12 +95,6 @@ Startup your kubernetes workloads in local Docker container with same volume、e
cmd.Flags().Var(&devOptions.NetMode, "net", "Connect a container to a network, eg: [default|bridge|host|none|container:$CONTAINER_ID]") cmd.Flags().Var(&devOptions.NetMode, "net", "Connect a container to a network, eg: [default|bridge|host|none|container:$CONTAINER_ID]")
cmd.Flags().Var(&devOptions.NetMode, "network", "Connect a container to a network") cmd.Flags().Var(&devOptions.NetMode, "network", "Connect a container to a network")
cmd.Flags().MarkHidden("net") cmd.Flags().MarkHidden("net")
cmd.Flags().Var(&devOptions.Links, "link", "Add link to another container")
cmd.Flags().Var(&devOptions.LinkLocalIPs, "link-local-ip", "Container IPv4/IPv6 link-local addresses")
// We allow for both "--net-alias" and "--network-alias", although the latter is the recommended way.
cmd.Flags().Var(&devOptions.Aliases, "net-alias", "Add network-scoped alias for the container")
cmd.Flags().Var(&devOptions.Aliases, "network-alias", "Add network-scoped alias for the container")
cmd.Flags().MarkHidden("net-alias")
cmd.Flags().VarP(&devOptions.Volumes, "volume", "v", "Bind mount a volume") cmd.Flags().VarP(&devOptions.Volumes, "volume", "v", "Bind mount a volume")
cmd.Flags().Var(&devOptions.Mounts, "mount", "Attach a filesystem mount to the container") cmd.Flags().Var(&devOptions.Mounts, "mount", "Attach a filesystem mount to the container")
cmd.Flags().Var(&devOptions.Expose, "expose", "Expose a port or a range of ports") cmd.Flags().Var(&devOptions.Expose, "expose", "Expose a port or a range of ports")
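
For reference, the new flag registered above would presumably be used like this (the workload name and header value are made up for illustration; the flag name and its host/container values come from the help text added in this hunk):

    kubevpn dev deployment/authors --connect-mode host
    kubevpn dev deployment/authors --connect-mode container --headers foo=bar

With host (the default), the Kubernetes network is joined from the local machine as before; with container, DoDev first starts a kubevpn helper container and then attaches the dev container to its network namespace via --network container:&lt;id&gt;.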

View File

@@ -32,6 +32,22 @@ const (
VolumeEnvoyConfig = "envoy-config" VolumeEnvoyConfig = "envoy-config"
innerIPv4Pool = "223.254.0.100/16" innerIPv4Pool = "223.254.0.100/16"
// Reason: in the Docker environment, the Docker network's gateway and subnet must not
// conflict with the inner pool, and must not conflict with Docker's default 172.17 range either,
// otherwise requests will not get through.
// Problem this solves: in k8s, the Service named kubernetes has an IP such as
// ➜  ~ kubectl get service kubernetes
// NAME         TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
// kubernetes   ClusterIP   172.17.0.1   <none>        443/TCP   190d
//
// ➜  ~ docker network inspect bridge | jq '.[0].IPAM.Config'
// [
//   {
//     "Subnet": "172.17.0.0/16",
//     "Gateway": "172.17.0.1"
//   }
// ]
// Without creating a dedicated network, that kubernetes Service cannot be reached.
dockerInnerIPv4Pool = "223.255.0.100/16"
DefaultNetDir = "/etc/cni/net.d" DefaultNetDir = "/etc/cni/net.d"
@@ -61,6 +77,9 @@ const (
// pprof port // pprof port
PProfPort = 32345 PProfPort = 32345
// startup by KubeVPN
EnvStartSudoKubeVPNByKubeVPN = "DEPTH_SIGNED_BY_NAISON"
) )
var ( var (
@@ -68,12 +87,18 @@ var (
Image = "docker.io/naison/kubevpn:latest" Image = "docker.io/naison/kubevpn:latest"
) )
var CIDR *net.IPNet var (
CIDR *net.IPNet
RouterIP net.IP
var RouterIP net.IP // for creating docker network
DockerCIDR *net.IPNet
DockerRouterIP net.IP
)
func init() { func init() {
RouterIP, CIDR, _ = net.ParseCIDR(innerIPv4Pool) RouterIP, CIDR, _ = net.ParseCIDR(innerIPv4Pool)
DockerRouterIP, DockerCIDR, _ = net.ParseCIDR(dockerInnerIPv4Pool)
} }
var Debug bool var Debug bool
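
To illustrate the comment above about the two address pools, here is a minimal, standalone sketch (not part of this commit) that parses the pools and checks that they do not overlap with each other or with Docker's usual default bridge subnet, 172.17.0.0/16, which is used here purely for illustration:

    package main

    import (
        "fmt"
        "net"
    )

    // overlaps reports whether two networks share any addresses.
    func overlaps(a, b *net.IPNet) bool {
        return a.Contains(b.IP) || b.Contains(a.IP)
    }

    func main() {
        cidrs := map[string]string{
            "innerIPv4Pool":         "223.254.0.100/16",
            "dockerInnerIPv4Pool":   "223.255.0.100/16",
            "docker default bridge": "172.17.0.0/16",
        }
        nets := map[string]*net.IPNet{}
        for name, cidr := range cidrs {
            _, n, err := net.ParseCIDR(cidr)
            if err != nil {
                panic(err)
            }
            nets[name] = n
        }
        for a, na := range nets {
            for b, nb := range nets {
                if a < b {
                    fmt.Printf("%-21s vs %-21s overlap: %v\n", a, b, overlaps(na, nb))
                }
            }
        }
    }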

View File

@@ -1,14 +1,18 @@
package dev package dev
import ( import (
"bytes"
"context" "context"
"errors"
"fmt" "fmt"
"io"
"math/rand" "math/rand"
"net"
"os" "os"
"path/filepath" "path/filepath"
"reflect" "reflect"
"sort" "sort"
"strings"
"syscall"
"time" "time"
"github.com/docker/cli/cli/command" "github.com/docker/cli/cli/command"
@@ -19,9 +23,12 @@ import (
containertypes "github.com/docker/docker/api/types/container" containertypes "github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/mount" "github.com/docker/docker/api/types/mount"
"github.com/docker/docker/api/types/network" "github.com/docker/docker/api/types/network"
"github.com/docker/docker/api/types/strslice"
"github.com/docker/docker/client" "github.com/docker/docker/client"
"github.com/docker/docker/errdefs" "github.com/docker/docker/errdefs"
"github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/stdcopy"
"github.com/google/uuid"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
@@ -34,10 +41,16 @@ import (
"github.com/wencaiwulue/kubevpn/pkg/config" "github.com/wencaiwulue/kubevpn/pkg/config"
"github.com/wencaiwulue/kubevpn/pkg/handler" "github.com/wencaiwulue/kubevpn/pkg/handler"
"github.com/wencaiwulue/kubevpn/pkg/mesh" "github.com/wencaiwulue/kubevpn/pkg/mesh"
"github.com/wencaiwulue/kubevpn/pkg/tun"
"github.com/wencaiwulue/kubevpn/pkg/util" "github.com/wencaiwulue/kubevpn/pkg/util"
) )
type ConnectMode string
const (
ConnectModeContainer ConnectMode = "container"
ConnectModeHost ConnectMode = "host"
)
type Options struct { type Options struct {
Headers map[string]string Headers map[string]string
Namespace string Namespace string
@@ -47,6 +60,7 @@ type Options struct {
NoProxy bool NoProxy bool
ExtraCIDR []string ExtraCIDR []string
ExtraDomain []string ExtraDomain []string
ConnectMode ConnectMode
// docker options // docker options
Platform string Platform string
@@ -58,9 +72,6 @@ type Options struct {
Expose opts.ListOpts Expose opts.ListOpts
ExtraHosts opts.ListOpts ExtraHosts opts.ListOpts
NetMode opts.NetworkOpt NetMode opts.NetworkOpt
Aliases opts.ListOpts
Links opts.ListOpts
LinkLocalIPs opts.ListOpts
Env opts.ListOpts Env opts.ListOpts
Mounts opts.MountOpt Mounts opts.MountOpt
Volumes opts.ListOpts Volumes opts.ListOpts
@@ -136,7 +147,7 @@ func (d Options) Main(ctx context.Context) error {
return fmt.Errorf("your pod resource request is bigger than docker-desktop resource, please adjust your docker-desktop resource") return fmt.Errorf("your pod resource request is bigger than docker-desktop resource, please adjust your docker-desktop resource")
} }
mode := container.NetworkMode(d.NetMode.NetworkMode()) mode := container.NetworkMode(d.NetMode.NetworkMode())
if mode.IsBridge() || mode.IsHost() || mode.IsContainer() || mode.IsNone() { if len(d.NetMode.Value()) != 0 {
for _, runConfig := range list[:] { for _, runConfig := range list[:] {
// remove expose port // remove expose port
runConfig.config.ExposedPorts = nil runConfig.config.ExposedPorts = nil
@@ -155,7 +166,7 @@ func (d Options) Main(ctx context.Context) error {
} }
} else { } else {
var networkID string var networkID string
networkID, err = createNetwork(ctx, cli, list[0].containerName) networkID, err = createKubevpnNetwork(ctx, cli, list[0].containerName)
if err != nil { if err != nil {
return err return err
} }
@@ -190,50 +201,6 @@ func (d Options) Main(ctx context.Context) error {
return terminal(list[0].containerName, dockerCli) return terminal(list[0].containerName, dockerCli)
} }
func createNetwork(ctx context.Context, cli *client.Client, networkName string) (string, error) {
getInterface, err := tun.GetInterface()
if err != nil {
return "", err
}
addrs, err := getInterface.Addrs()
if err != nil {
return "", err
}
cidr, _, err := net.ParseCIDR(addrs[0].String())
if err != nil {
return "", err
}
by := map[string]string{"created_by": config.ConfigMapPodTrafficManager}
create, err := cli.NetworkCreate(ctx, networkName, types.NetworkCreate{
Driver: "bridge",
Scope: "local",
IPAM: &network.IPAM{
Driver: "",
Options: nil,
Config: []network.IPAMConfig{
{
Subnet: config.CIDR.String(),
Gateway: cidr.String(),
},
},
},
Internal: true,
Labels: by,
})
if err != nil {
if errdefs.IsForbidden(err) {
list, _ := cli.NetworkList(ctx, types.NetworkListOptions{})
for _, resource := range list {
if reflect.DeepEqual(resource.Labels, by) {
return resource.ID, nil
}
}
}
return "", err
}
return create.ID, nil
}
type Run []*RunConfig type Run []*RunConfig
func (r Run) Remove(ctx context.Context) error { func (r Run) Remove(ctx context.Context) error {
@@ -396,3 +363,345 @@ func checkOutOfMemory(spec *v1.PodTemplateSpec, cli *client.Client) (outOfMemory
} }
return return
} }
func DoDev(devOptions Options, args []string, f cmdutil.Factory) error {
connect := handler.ConnectOptions{
Headers: devOptions.Headers,
Workloads: args,
ExtraCIDR: devOptions.ExtraCIDR,
ExtraDomain: devOptions.ExtraDomain,
}
mode := container.NetworkMode(devOptions.NetMode.NetworkMode())
if mode.IsContainer() {
client, _, err := GetClient()
if err != nil {
return err
}
var inspect types.ContainerJSON
inspect, err = client.ContainerInspect(context.Background(), mode.ConnectedContainer())
if err != nil {
return err
}
if inspect.State == nil {
return fmt.Errorf("can not get container status, please make contianer name is valid")
}
if !inspect.State.Running {
return fmt.Errorf("container %s status is %s, expect is running, please make sure your outer docker name is correct", mode.ConnectedContainer(), inspect.State.Status)
}
}
if err := connect.InitClient(f); err != nil {
return err
}
err := connect.PreCheckResource()
if err != nil {
return err
}
if len(connect.Workloads) > 1 {
return fmt.Errorf("can only dev one workloads at same time, workloads: %v", connect.Workloads)
}
if len(connect.Workloads) < 1 {
return fmt.Errorf("you must provide resource to dev, workloads : %v is invaild", connect.Workloads)
}
devOptions.Workload = connect.Workloads[0]
// if no-proxy is true, there is no need to intercept traffic
if devOptions.NoProxy {
if len(connect.Headers) != 0 {
return fmt.Errorf("no need to provide headers in no-proxy mode")
}
connect.Workloads = []string{}
}
path, err := connect.GetKubeconfigPath()
if err != nil {
return err
}
switch devOptions.ConnectMode {
case ConnectModeHost:
defer func() {
handler.Cleanup(syscall.SIGQUIT)
select {}
}()
if err = connect.DoConnect(); err != nil {
log.Errorln(err)
return err
}
case ConnectModeContainer:
var dockerCli *command.DockerCli
var cli *client.Client
cli, dockerCli, err = GetClient()
if err != nil {
return err
}
var entrypoint []string
if devOptions.NoProxy {
entrypoint = []string{"kubevpn", "connect", "-n", connect.Namespace, "--kubeconfig", "/root/.kube/config", "--image", config.Image}
for _, v := range connect.ExtraCIDR {
entrypoint = append(entrypoint, "--extra-cidr", v)
}
for _, v := range connect.ExtraDomain {
entrypoint = append(entrypoint, "--extra-domain", v)
}
} else {
entrypoint = []string{"kubevpn", "proxy", connect.Workloads[0], "-n", connect.Namespace, "--kubeconfig", "/root/.kube/config", "--image", config.Image}
for k, v := range connect.Headers {
entrypoint = append(entrypoint, "--headers", fmt.Sprintf("%s=%s", k, v))
}
for _, v := range connect.ExtraCIDR {
entrypoint = append(entrypoint, "--extra-cidr", v)
}
for _, v := range connect.ExtraDomain {
entrypoint = append(entrypoint, "--extra-domain", v)
}
}
runConfig := &container.Config{
User: "root",
AttachStdin: false,
AttachStdout: false,
AttachStderr: false,
ExposedPorts: nil,
StdinOnce: false,
Env: []string{fmt.Sprintf("%s=1", config.EnvStartSudoKubeVPNByKubeVPN)},
Cmd: []string{},
Healthcheck: nil,
ArgsEscaped: false,
Image: config.Image,
Volumes: nil,
Entrypoint: entrypoint,
NetworkDisabled: false,
MacAddress: "",
OnBuild: nil,
StopSignal: "",
StopTimeout: nil,
Shell: nil,
}
hostConfig := &container.HostConfig{
Binds: []string{fmt.Sprintf("%s:%s", path, "/root/.kube/config")},
LogConfig: container.LogConfig{},
PortBindings: nil,
RestartPolicy: container.RestartPolicy{},
AutoRemove: true,
VolumeDriver: "",
VolumesFrom: nil,
ConsoleSize: [2]uint{},
CapAdd: strslice.StrSlice{"SYS_PTRACE", "SYS_ADMIN"}, // for dlv
CgroupnsMode: "",
ExtraHosts: nil,
GroupAdd: nil,
IpcMode: "",
Cgroup: "",
Links: nil,
OomScoreAdj: 0,
PidMode: "",
Privileged: true,
PublishAllPorts: false,
ReadonlyRootfs: false,
SecurityOpt: []string{"apparmor=unconfined", "seccomp=unconfined"},
StorageOpt: nil,
Tmpfs: nil,
UTSMode: "",
UsernsMode: "",
ShmSize: 0,
Sysctls: nil,
Runtime: "",
Isolation: "",
Resources: container.Resources{},
MaskedPaths: nil,
ReadonlyPaths: nil,
Init: nil,
}
var suffix string
if newUUID, err := uuid.NewUUID(); err == nil {
suffix = strings.ReplaceAll(newUUID.String(), "-", "")[:5]
}
name := fmt.Sprintf("%s_%s_%s", "kubevpn", "local", suffix)
var kubevpnNetwork string
kubevpnNetwork, err = createKubevpnNetwork(context.Background(), cli, name)
if err != nil {
return err
}
c := &RunConfig{
config: runConfig,
hostConfig: hostConfig,
networkingConfig: &network.NetworkingConfig{
EndpointsConfig: map[string]*network.EndpointSettings{name: {
NetworkID: kubevpnNetwork,
}},
},
platform: nil,
containerName: name,
k8sContainerName: name,
}
ctx, cancel := context.WithCancel(context.Background())
handler.AddCleanUpResourceHandler(connect.GetClientset(), connect.Namespace, nil)
handler.RollbackFuncList = append(handler.RollbackFuncList, cancel)
var id string
if id, err = run(ctx, c, cli, dockerCli); err != nil {
return err
}
handler.RollbackFuncList = append(handler.RollbackFuncList, func() {
_ = cli.ContainerKill(context.Background(), id, "KILL")
})
if err = runLogs(dockerCli, id); err != nil {
return err
}
if err = devOptions.NetMode.Set("container:" + id); err != nil {
return err
}
default:
return fmt.Errorf("unsupport connect mode: %s", devOptions.ConnectMode)
}
defer func() {
handler.Cleanup(os.Kill)
select {}
}()
devOptions.Namespace = connect.Namespace
err = devOptions.Main(context.Background())
if err != nil {
log.Errorln(err)
}
return err
}
func runLogs(dockerCli command.Cli, container string) error {
ctx := context.Background()
c, err := dockerCli.Client().ContainerInspect(ctx, container)
if err != nil {
return err
}
options := types.ContainerLogsOptions{
ShowStdout: true,
ShowStderr: true,
Follow: true,
}
responseBody, err := dockerCli.Client().ContainerLogs(ctx, c.ID, options)
if err != nil {
return err
}
defer responseBody.Close()
buf := bytes.NewBuffer(nil)
writer := io.MultiWriter(buf, dockerCli.Out())
var errChan = make(chan error)
var stopChan = make(chan struct{})
go func() {
ticker := time.NewTicker(time.Second)
defer ticker.Stop()
for range ticker.C {
if strings.Contains(buf.String(), "enjoy it") {
close(stopChan)
return
}
}
}()
go func() {
var err error
if c.Config.Tty {
_, err = io.Copy(writer, responseBody)
} else {
_, err = stdcopy.StdCopy(writer, dockerCli.Err(), responseBody)
}
if err != nil {
errChan <- err
}
}()
select {
case err = <-errChan:
return err
case <-stopChan:
return nil
}
}
func runKill(dockerCli command.Cli, containers ...string) error {
var errs []string
ctx := context.Background()
errChan := parallelOperation(ctx, append([]string{}, containers...), func(ctx context.Context, container string) error {
return dockerCli.Client().ContainerKill(ctx, container, "KILL")
})
for _, name := range containers {
if err := <-errChan; err != nil {
errs = append(errs, err.Error())
} else {
fmt.Fprintln(dockerCli.Out(), name)
}
}
if len(errs) > 0 {
return errors.New(strings.Join(errs, "\n"))
}
return nil
}
func parallelOperation(ctx context.Context, containers []string, op func(ctx context.Context, container string) error) chan error {
if len(containers) == 0 {
return nil
}
const defaultParallel int = 50
sem := make(chan struct{}, defaultParallel)
errChan := make(chan error)
// make sure result is printed in correct order
output := map[string]chan error{}
for _, c := range containers {
output[c] = make(chan error, 1)
}
go func() {
for _, c := range containers {
err := <-output[c]
errChan <- err
}
}()
go func() {
for _, c := range containers {
sem <- struct{}{} // Wait for active queue sem to drain.
go func(container string) {
output[container] <- op(ctx, container)
<-sem
}(c)
}
}()
return errChan
}
func createKubevpnNetwork(ctx context.Context, cli *client.Client, networkName string) (string, error) {
by := map[string]string{"owner": config.ConfigMapPodTrafficManager}
create, err := cli.NetworkCreate(ctx, networkName, types.NetworkCreate{
Driver: "bridge",
Scope: "local",
IPAM: &network.IPAM{
Driver: "",
Options: nil,
Config: []network.IPAMConfig{
{
Subnet: config.DockerCIDR.String(),
Gateway: config.DockerRouterIP.String(),
},
},
},
//Options: map[string]string{"--icc": "", "--ip-masq": ""},
Labels: by,
})
if err != nil {
if errdefs.IsForbidden(err) {
list, _ := cli.NetworkList(ctx, types.NetworkListOptions{})
for _, resource := range list {
if reflect.DeepEqual(resource.Labels, by) {
return resource.ID, nil
}
}
}
return "", err
}
return create.ID, nil
}
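
To make the container mode above concrete: depending on --no-proxy, the entrypoint assembled in DoDev for the helper container would look roughly like one of the following (namespace, workload, header, and CIDR values are made up for illustration; the kubeconfig path and default image come from the code above):

    kubevpn connect -n default --kubeconfig /root/.kube/config --image docker.io/naison/kubevpn:latest --extra-cidr 192.168.1.0/24
    kubevpn proxy deployments/authors -n default --kubeconfig /root/.kube/config --image docker.io/naison/kubevpn:latest --headers foo=bar

The helper is started with the flattened kubeconfig bind-mounted at /root/.kube/config, its logs are streamed until the "enjoy it" marker appears (runLogs), and NetMode is then set to container:&lt;id&gt; so the dev container shares its network namespace.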

View File

@@ -201,14 +201,6 @@ func parseNetworkOpts(copts Options) (map[string]*network.EndpointSettings, erro
} else { } else {
hasNonUserDefined = true hasNonUserDefined = true
} }
if i == 0 {
// The first network corresponds with what was previously the "only"
// network, and what would be used when using the non-advanced syntax
// `--network-alias`, `--link`, `--ip`, `--ip6`, and `--link-local-ip`
// are set on this network, to preserve backward compatibility with
// the non-advanced notation
n.Aliases = copts.Aliases.GetAll()
}
ep, err := parseNetworkAttachmentOpt(n) ep, err := parseNetworkAttachmentOpt(n)
if err != nil { if err != nil {
return nil, err return nil, err
@@ -246,7 +238,9 @@ func parseNetworkAttachmentOpt(ep opts.NetworkAttachmentOpts) (*network.Endpoint
} }
} }
epConfig := &network.EndpointSettings{} epConfig := &network.EndpointSettings{
NetworkID: ep.Target,
}
epConfig.Aliases = append(epConfig.Aliases, ep.Aliases...) epConfig.Aliases = append(epConfig.Aliases, ep.Aliases...)
if len(ep.DriverOpts) > 0 { if len(ep.DriverOpts) > 0 {
epConfig.DriverOpts = make(map[string]string) epConfig.DriverOpts = make(map[string]string)

View File

@@ -3,6 +3,7 @@ package handler
import ( import (
"context" "context"
"fmt" "fmt"
"net"
"os" "os"
"os/signal" "os/signal"
"strconv" "strconv"
@@ -27,28 +28,29 @@ var stopChan = make(chan os.Signal)
var RollbackFuncList = make([]func(), 2) var RollbackFuncList = make([]func(), 2)
var ctx, cancel = context.WithCancel(context.Background()) var ctx, cancel = context.WithCancel(context.Background())
func (c *ConnectOptions) addCleanUpResourceHandler() { func AddCleanUpResourceHandler(clientset *kubernetes.Clientset, ns string, dhcp *DHCPManager, usedIPs ...*net.IPNet) {
signal.Notify(stopChan, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT, syscall.SIGKILL) signal.Notify(stopChan, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT, syscall.SIGKILL)
go func() { go func() {
<-stopChan <-stopChan
log.Info("prepare to exit, cleaning up") log.Info("prepare to exit, cleaning up")
err := c.dhcp.ReleaseIpToDHCP(c.usedIPs...) if dhcp != nil {
if err != nil { err := dhcp.ReleaseIpToDHCP(usedIPs...)
log.Errorf("failed to release ip to dhcp, err: %v", err) if err != nil {
log.Errorf("failed to release ip to dhcp, err: %v", err)
}
} }
for _, function := range RollbackFuncList { for _, function := range RollbackFuncList {
if function != nil { if function != nil {
function() function()
} }
} }
_ = c.clientset.CoreV1().Pods(c.Namespace).Delete(context.Background(), config.CniNetName, v1.DeleteOptions{GracePeriodSeconds: pointer.Int64(0)}) _ = clientset.CoreV1().Pods(ns).Delete(context.Background(), config.CniNetName, v1.DeleteOptions{GracePeriodSeconds: pointer.Int64(0)})
var count int count, err := updateRefCount(clientset.CoreV1().ConfigMaps(ns), config.ConfigMapPodTrafficManager, -1)
count, err = updateRefCount(c.clientset.CoreV1().ConfigMaps(c.Namespace), config.ConfigMapPodTrafficManager, -1)
if err == nil { if err == nil {
// if ref-count is less than zero or equals to zero, means nobody is using this traffic pod, so clean it // if ref-count is less than zero or equals to zero, means nobody is using this traffic pod, so clean it
if count <= 0 { if count <= 0 {
log.Info("ref-count is zero, prepare to clean up resource") log.Info("ref-count is zero, prepare to clean up resource")
cleanup(c.clientset, c.Namespace, config.ConfigMapPodTrafficManager, true) cleanup(clientset, ns, config.ConfigMapPodTrafficManager, true)
} }
} else { } else {
log.Error(err) log.Error(err)

View File

@@ -23,6 +23,7 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors" apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/fields"
pkgruntime "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/schema"
utilnet "k8s.io/apimachinery/pkg/util/net" utilnet "k8s.io/apimachinery/pkg/util/net"
"k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/runtime"
@@ -121,12 +122,12 @@ func Rollback(f cmdutil.Factory, ns, workload string) {
} }
func (c *ConnectOptions) DoConnect() (err error) { func (c *ConnectOptions) DoConnect() (err error) {
c.addCleanUpResourceHandler()
trafficMangerNet := net.IPNet{IP: config.RouterIP, Mask: config.CIDR.Mask} trafficMangerNet := net.IPNet{IP: config.RouterIP, Mask: config.CIDR.Mask}
c.dhcp = NewDHCPManager(c.clientset.CoreV1().ConfigMaps(c.Namespace), c.Namespace, &trafficMangerNet) c.dhcp = NewDHCPManager(c.clientset.CoreV1().ConfigMaps(c.Namespace), c.Namespace, &trafficMangerNet)
if err = c.dhcp.InitDHCP(ctx); err != nil { if err = c.dhcp.InitDHCP(ctx); err != nil {
return return
} }
AddCleanUpResourceHandler(c.clientset, c.Namespace, c.dhcp, c.usedIPs...)
err = c.GetCIDR(ctx) err = c.GetCIDR(ctx)
if err != nil { if err != nil {
return return
@@ -859,3 +860,45 @@ func (c *ConnectOptions) addExtraRoute(ctx context.Context) (err error) {
} }
return return
} }
func (c *ConnectOptions) GetKubeconfigPath() (string, error) {
rawConfig, err := c.factory.ToRawKubeConfigLoader().RawConfig()
if err != nil {
return "", err
}
err = api.FlattenConfig(&rawConfig)
if err != nil {
return "", err
}
rawConfig.SetGroupVersionKind(schema.GroupVersionKind{Version: clientcmdlatest.Version, Kind: "Config"})
var convertedObj pkgruntime.Object
convertedObj, err = latest.Scheme.ConvertToVersion(&rawConfig, latest.ExternalVersion)
if err != nil {
return "", err
}
var kubeconfigJsonBytes []byte
kubeconfigJsonBytes, err = json.Marshal(convertedObj)
if err != nil {
return "", err
}
temp, err := os.CreateTemp("", "*.kubeconfig")
if err != nil {
return "", err
}
temp.Close()
err = os.WriteFile(temp.Name(), kubeconfigJsonBytes, 0644)
if err != nil {
return "", err
}
err = os.Chmod(temp.Name(), 0644)
if err != nil {
return "", err
}
return temp.Name(), nil
}
func (c ConnectOptions) GetClientset() *kubernetes.Clientset {
return c.clientset
}

View File

@@ -6,17 +6,20 @@ package util
import ( import (
"flag" "flag"
"fmt" "fmt"
log "github.com/sirupsen/logrus"
"k8s.io/client-go/tools/clientcmd"
"os" "os"
"os/exec" "os/exec"
"os/signal" "os/signal"
"runtime" "runtime"
"syscall" "syscall"
"time" "time"
log "github.com/sirupsen/logrus"
"k8s.io/client-go/tools/clientcmd"
"github.com/wencaiwulue/kubevpn/pkg/config"
) )
const envStartSudoKubeVPNByKubeVPN = "DEPTH_SIGNED_BY_NAISON" const envStartSudoKubeVPNByKubeVPN = config.EnvStartSudoKubeVPNByKubeVPN
func RunWithElevated() { func RunWithElevated() {
// fix if startup with normal user, after elevated home dir will change to root user in linux // fix if startup with normal user, after elevated home dir will change to root user in linux