refactor: refactor command dev (#309)

This commit is contained in:
naison
2024-07-26 21:11:59 +08:00
committed by GitHub
parent 9283c2f8f7
commit 49adeac14c
16 changed files with 1011 additions and 2379 deletions

View File

@@ -400,7 +400,7 @@ Run the Kubernetes pod in the local Docker container, and cooperate with the ser
the specified header to the local, or all the traffic to the local. the specified header to the local, or all the traffic to the local.
```shell ```shell
➜ ~ kubevpn dev deployment/authors --headers a=1 -it --rm --entrypoint sh ➜ ~ kubevpn dev deployment/authors --headers a=1 --entrypoint sh
connectting to cluster connectting to cluster
start to connect start to connect
got cidr from cache got cidr from cache
@@ -500,13 +500,13 @@ docker logs $(docker ps --format '{{.Names}}' | grep nginx_default_kubevpn)
If you just want to start up a docker image, you can use a simple way like this: If you just want to start up a docker image, you can use a simple way like this:
```shell ```shell
kubevpn dev deployment/authors --no-proxy -it --rm kubevpn dev deployment/authors --no-proxy
``` ```
Example Example
```shell ```shell
➜ ~ kubevpn dev deployment/authors --no-proxy -it --rm ➜ ~ kubevpn dev deployment/authors --no-proxy
connectting to cluster connectting to cluster
start to connect start to connect
got cidr from cache got cidr from cache
@@ -567,14 +567,7 @@ e008f553422a: Pull complete
33f0298d1d4f: Pull complete 33f0298d1d4f: Pull complete
Digest: sha256:115b975a97edd0b41ce7a0bc1d8428e6b8569c91a72fe31ea0bada63c685742e Digest: sha256:115b975a97edd0b41ce7a0bc1d8428e6b8569c91a72fe31ea0bada63c685742e
Status: Downloaded newer image for naison/kubevpn:v2.0.0 Status: Downloaded newer image for naison/kubevpn:v2.0.0
root@d0b3dab8912a:/app# kubevpn dev deployment/authors --headers user=naison -it --entrypoint sh root@d0b3dab8912a:/app# kubevpn dev deployment/authors --headers user=naison --entrypoint sh
----------------------------------------------------------------------------------
Warn: Use sudo to execute command kubevpn can not use user env KUBECONFIG.
Because of sudo user env and user env are different.
Current env KUBECONFIG value:
----------------------------------------------------------------------------------
hostname is d0b3dab8912a hostname is d0b3dab8912a
connectting to cluster connectting to cluster
start to connect start to connect

View File

@@ -323,7 +323,7 @@ leave workload deployments/productpage successfully
Docker。 Docker。
```shell ```shell
➜ ~ kubevpn dev deployment/authors --headers a=1 -it --rm --entrypoint sh ➜ ~ kubevpn dev deployment/authors --headers a=1 --entrypoint sh
connectting to cluster connectting to cluster
start to connect start to connect
got cidr from cache got cidr from cache
@@ -406,13 +406,13 @@ fc04e42799a5 nginx:latest "/docker-entrypoint.…" 37 sec
如果你只是想在本地启动镜像,可以用一种简单的方式: 如果你只是想在本地启动镜像,可以用一种简单的方式:
```shell ```shell
kubevpn dev deployment/authors --no-proxy -it --rm kubevpn dev deployment/authors --no-proxy
``` ```
例如: 例如:
```shell ```shell
➜ ~ kubevpn dev deployment/authors --no-proxy -it --rm ➜ ~ kubevpn dev deployment/authors --no-proxy
connectting to cluster connectting to cluster
start to connect start to connect
got cidr from cache got cidr from cache
@@ -471,14 +471,7 @@ e008f553422a: Pull complete
33f0298d1d4f: Pull complete 33f0298d1d4f: Pull complete
Digest: sha256:115b975a97edd0b41ce7a0bc1d8428e6b8569c91a72fe31ea0bada63c685742e Digest: sha256:115b975a97edd0b41ce7a0bc1d8428e6b8569c91a72fe31ea0bada63c685742e
Status: Downloaded newer image for naison/kubevpn:v2.0.0 Status: Downloaded newer image for naison/kubevpn:v2.0.0
root@d0b3dab8912a:/app# kubevpn dev deployment/authors --headers user=naison -it --entrypoint sh root@d0b3dab8912a:/app# kubevpn dev deployment/authors --headers user=naison --entrypoint sh
----------------------------------------------------------------------------------
Warn: Use sudo to execute command kubevpn can not use user env KUBECONFIG.
Because of sudo user env and user env are different.
Current env KUBECONFIG value:
----------------------------------------------------------------------------------
hostname is d0b3dab8912a hostname is d0b3dab8912a
connectting to cluster connectting to cluster
start to connect start to connect

View File

@@ -4,7 +4,7 @@ import (
"fmt" "fmt"
"os" "os"
dockercomp "github.com/docker/cli/cli/command/completion" "github.com/containerd/containerd/platforms"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
"github.com/spf13/cobra" "github.com/spf13/cobra"
cmdutil "k8s.io/kubectl/pkg/cmd/util" cmdutil "k8s.io/kubectl/pkg/cmd/util"
@@ -20,15 +20,8 @@ import (
) )
func CmdDev(f cmdutil.Factory) *cobra.Command { func CmdDev(f cmdutil.Factory) *cobra.Command {
client, cli, err := util.GetClient()
if err != nil {
log.Fatal(err)
}
var options = &dev.Options{ var options = &dev.Options{
Factory: f,
NoProxy: false, NoProxy: false,
Cli: client,
DockerCli: cli,
ExtraRouteInfo: handler.ExtraRouteInfo{}, ExtraRouteInfo: handler.ExtraRouteInfo{},
} }
var sshConf = &util.SshConfig{} var sshConf = &util.SshConfig{}
@@ -67,21 +60,21 @@ func CmdDev(f cmdutil.Factory) *cobra.Command {
kubevpn dev deployment/productpage --ssh-alias <alias> kubevpn dev deployment/productpage --ssh-alias <alias>
# Switch to terminal mode; send stdin to 'bash' and sends stdout/stderror from 'bash' back to the client # Switch to terminal mode; send stdin to 'bash' and sends stdout/stderror from 'bash' back to the client
kubevpn dev deployment/authors -n default --kubeconfig ~/.kube/config --ssh-alias dev -i -t --entrypoint /bin/bash kubevpn dev deployment/authors -n default --kubeconfig ~/.kube/config --ssh-alias dev --entrypoint /bin/bash
or or
kubevpn dev deployment/authors -n default --kubeconfig ~/.kube/config --ssh-alias dev -it --entrypoint /bin/bash kubevpn dev deployment/authors -n default --kubeconfig ~/.kube/config --ssh-alias dev --entrypoint /bin/bash
# Support ssh auth GSSAPI # Support ssh auth GSSAPI
kubevpn dev deployment/authors -n default --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-keytab /path/to/keytab -it --entrypoint /bin/bash kubevpn dev deployment/authors -n default --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-keytab /path/to/keytab --entrypoint /bin/bash
kubevpn dev deployment/authors -n default --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-cache /path/to/cache -it --entrypoint /bin/bash kubevpn dev deployment/authors -n default --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-cache /path/to/cache --entrypoint /bin/bash
kubevpn dev deployment/authors -n default --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-password <PASSWORD> -it --entrypoint /bin/bash kubevpn dev deployment/authors -n default --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-password <PASSWORD> --entrypoint /bin/bash
`)), `)),
ValidArgsFunction: completion.ResourceTypeAndNameCompletionFunc(f), ValidArgsFunction: completion.ResourceTypeAndNameCompletionFunc(f),
Args: cobra.MatchAll(cobra.OnlyValidArgs), Args: cobra.MatchAll(cobra.OnlyValidArgs),
DisableFlagsInUseLine: true, DisableFlagsInUseLine: true,
PreRunE: func(cmd *cobra.Command, args []string) error { PreRunE: func(cmd *cobra.Command, args []string) error {
if len(args) == 0 { if len(args) == 0 {
fmt.Fprintf(os.Stdout, "You must specify the type of resource to proxy. %s\n\n", cmdutil.SuggestAPIResources("kubevpn")) _, _ = fmt.Fprintf(os.Stdout, "You must specify the type of resource to proxy. %s\n\n", cmdutil.SuggestAPIResources("kubevpn"))
fullCmdName := cmd.Parent().CommandPath() fullCmdName := cmd.Parent().CommandPath()
usageString := "Required resource not specified." usageString := "Required resource not specified."
if len(fullCmdName) > 0 && cmdutil.IsSiblingCommandExists(cmd, "explain") { if len(fullCmdName) > 0 && cmdutil.IsSiblingCommandExists(cmd, "explain") {
@@ -89,7 +82,7 @@ func CmdDev(f cmdutil.Factory) *cobra.Command {
} }
return cmdutil.UsageErrorf(cmd, usageString) return cmdutil.UsageErrorf(cmd, usageString)
} }
err = cmd.Flags().Parse(args[1:]) err := cmd.Flags().Parse(args[1:])
if err != nil { if err != nil {
return err return err
} }
@@ -99,6 +92,15 @@ func CmdDev(f cmdutil.Factory) *cobra.Command {
return fmt.Errorf(`not support type engine: %s, support ("%s"|"%s")`, config.EngineGvisor, config.EngineMix, config.EngineRaw) return fmt.Errorf(`not support type engine: %s, support ("%s"|"%s")`, config.EngineGvisor, config.EngineMix, config.EngineRaw)
} }
if p := options.RunOptions.Platform; p != "" {
if _, err = platforms.Parse(p); err != nil {
return fmt.Errorf("error parsing specified platform: %v", err)
}
}
if err = validatePullOpt(options.RunOptions.Pull); err != nil {
return err
}
err = daemon.StartupDaemon(cmd.Context()) err = daemon.StartupDaemon(cmd.Context())
if err != nil { if err != nil {
return err return err
@@ -109,7 +111,7 @@ func CmdDev(f cmdutil.Factory) *cobra.Command {
options.Workload = args[0] options.Workload = args[0]
for i, arg := range args { for i, arg := range args {
if arg == "--" && i != len(args)-1 { if arg == "--" && i != len(args)-1 {
options.Copts.Args = args[i+1:] options.ContainerOptions.Args = args[i+1:]
break break
} }
} }
@@ -123,7 +125,12 @@ func CmdDev(f cmdutil.Factory) *cobra.Command {
} }
} }
}() }()
err = dev.DoDev(cmd.Context(), options, sshConf, cmd.Flags(), f, transferImage)
if err := options.InitClient(f); err != nil {
return err
}
err := options.Main(cmd.Context(), sshConf, cmd.Flags(), transferImage)
return err return err
}, },
} }
@@ -141,26 +148,25 @@ func CmdDev(f cmdutil.Factory) *cobra.Command {
// diy docker options // diy docker options
cmd.Flags().StringVar(&options.DevImage, "dev-image", "", "Use to startup docker container, Default is pod image") cmd.Flags().StringVar(&options.DevImage, "dev-image", "", "Use to startup docker container, Default is pod image")
// origin docker options // origin docker options
dev.AddDockerFlags(options, cmd.Flags(), cli) dev.AddDockerFlags(options, cmd.Flags())
_ = cmd.RegisterFlagCompletionFunc(
"env",
func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
return os.Environ(), cobra.ShellCompDirectiveNoFileComp
},
)
_ = cmd.RegisterFlagCompletionFunc(
"env-file",
func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
return nil, cobra.ShellCompDirectiveDefault
},
)
_ = cmd.RegisterFlagCompletionFunc(
"network",
dockercomp.NetworkNames(cli),
)
handler.AddExtraRoute(cmd.Flags(), &options.ExtraRouteInfo) handler.AddExtraRoute(cmd.Flags(), &options.ExtraRouteInfo)
util.AddSshFlags(cmd.Flags(), sshConf) util.AddSshFlags(cmd.Flags(), sshConf)
return cmd return cmd
} }
// validatePullOpt checks that val is one of the image-pull policies accepted
// by the underlying docker machinery ("always", "missing", "never") or empty.
// The constants are re-exported from the dev package.
func validatePullOpt(val string) error {
	switch val {
	case dev.PullImageAlways, dev.PullImageMissing, dev.PullImageNever, "":
		// valid option, but nothing to do yet
		return nil
	default:
		return fmt.Errorf(
			"invalid pull option: '%s': must be one of %q, %q or %q",
			val,
			dev.PullImageAlways,
			dev.PullImageMissing,
			dev.PullImageNever,
		)
	}
}

View File

@@ -1,328 +0,0 @@
package dev
import (
"context"
"fmt"
"io"
"os"
"regexp"
"github.com/containerd/containerd/platforms"
"github.com/distribution/reference"
"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command"
"github.com/docker/cli/cli/command/completion"
"github.com/docker/cli/cli/command/image"
"github.com/docker/cli/cli/streams"
"github.com/docker/cli/opts"
"github.com/docker/docker/api/types/container"
imagetypes "github.com/docker/docker/api/types/image"
"github.com/docker/docker/api/types/versions"
"github.com/docker/docker/errdefs"
"github.com/docker/docker/pkg/jsonmessage"
specs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
// Pull constants enumerate the valid values for the --pull flag; they mirror
// the docker CLI's image-pull policies.
const (
	PullImageAlways  = "always"
	PullImageMissing = "missing" // Default (matches previous behavior)
	PullImageNever   = "never"
)
// createOptions holds the flag values specific to `docker create` (and shared
// by `docker run`) that are not part of the container configuration itself.
type createOptions struct {
	name      string // --name: optional container name
	platform  string // --platform: target platform, e.g. "linux/amd64"
	untrusted bool   // when true, skip content-trust verification of the image tag
	pull      string // always, missing, never
	quiet     bool   // -q/--quiet: suppress pull progress output
}
// NewCreateCommand creates a new cobra.Command for `docker create`.
//
// The command wires all container-related flags into a containerOptions
// (via addFlags) plus the createOptions above, then delegates to runCreate.
func NewCreateCommand(dockerCli command.Cli) *cobra.Command {
	var options createOptions
	var copts *containerOptions

	cmd := &cobra.Command{
		Use:   "create [OPTIONS] IMAGE [COMMAND] [ARG...]",
		Short: "Create a new container",
		Args:  cli.RequiresMinArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			copts.Image = args[0]
			if len(args) > 1 {
				copts.Args = args[1:]
			}
			return runCreate(cmd.Context(), dockerCli, cmd.Flags(), &options, copts)
		},
		Annotations: map[string]string{
			"aliases": "docker container create, docker create",
		},
		ValidArgsFunction: completion.ImageNames(dockerCli),
	}

	flags := cmd.Flags()
	flags.SetInterspersed(false)

	flags.StringVar(&options.name, "name", "", "Assign a name to the container")
	// BUGFIX: the help text previously contained a stray '|' before the
	// "missing" option; fixed to match the equivalent flag in NewRunCommand.
	flags.StringVar(&options.pull, "pull", PullImageMissing, `Pull image before creating ("`+PullImageAlways+`", "`+PullImageMissing+`", "`+PullImageNever+`")`)
	flags.BoolVarP(&options.quiet, "quiet", "q", false, "Suppress the pull output")

	// Add an explicit help that doesn't have a `-h` to prevent the conflict
	// with hostname
	flags.Bool("help", false, "Print usage")

	command.AddPlatformFlag(flags, &options.platform)
	command.AddTrustVerificationFlags(flags, &options.untrusted, dockerCli.ContentTrustEnabled())
	copts = addFlags(flags)
	return cmd
}
// runCreate validates the --pull policy, folds the CLI's proxy configuration
// into the container environment, parses the flag set into a full container
// config, validates it against the server API version, and finally creates
// the container, printing its ID on success.
//
// Parse/validation failures are reported to stderr and surface as exit
// status 125, matching the docker CLI convention.
func runCreate(ctx context.Context, dockerCli command.Cli, flags *pflag.FlagSet, options *createOptions, copts *containerOptions) error {
	if err := validatePullOpt(options.pull); err != nil {
		reportError(dockerCli.Err(), "create", err.Error(), true)
		return cli.StatusError{StatusCode: 125}
	}
	// Merge proxy settings from the CLI config file into the user-supplied
	// environment; a nil value means the variable is passed through unset.
	proxyConfig := dockerCli.ConfigFile().ParseProxyConfig(dockerCli.Client().DaemonHost(), opts.ConvertKVStringsToMapWithNil(copts.env.GetAll()))
	newEnv := []string{}
	for k, v := range proxyConfig {
		if v == nil {
			newEnv = append(newEnv, k)
		} else {
			newEnv = append(newEnv, k+"="+*v)
		}
	}
	copts.env = *opts.NewListOptsRef(&newEnv, nil)
	containerCfg, err := parse(flags, copts, dockerCli.ServerInfo().OSType)
	if err != nil {
		reportError(dockerCli.Err(), "create", err.Error(), true)
		return cli.StatusError{StatusCode: 125}
	}
	if err = validateAPIVersion(containerCfg, dockerCli.Client().ClientVersion()); err != nil {
		reportError(dockerCli.Err(), "create", err.Error(), true)
		return cli.StatusError{StatusCode: 125}
	}
	id, err := createContainer(ctx, dockerCli, containerCfg, options)
	if err != nil {
		return err
	}
	_, _ = fmt.Fprintln(dockerCli.Out(), id)
	return nil
}
// FIXME(thaJeztah): this is the only code-path that uses APIClient.ImageCreate. Rewrite this to use the regular "pull" code (or vice-versa).

// pullImage pulls img (optionally for options.platform) using registry
// credentials resolved from the CLI config file, streaming progress output
// to stderr unless options.quiet is set.
func pullImage(ctx context.Context, dockerCli command.Cli, img string, options *createOptions) error {
	encodedAuth, err := command.RetrieveAuthTokenFromImage(dockerCli.ConfigFile(), img)
	if err != nil {
		return err
	}
	responseBody, err := dockerCli.Client().ImageCreate(ctx, img, imagetypes.CreateOptions{
		RegistryAuth: encodedAuth,
		Platform:     options.platform,
	})
	if err != nil {
		return err
	}
	defer responseBody.Close()

	// Progress goes to stderr so stdout stays reserved for the container ID.
	out := dockerCli.Err()
	if options.quiet {
		out = io.Discard
	}
	return jsonmessage.DisplayJSONMessagesToStream(responseBody, streams.NewOut(out), nil)
}
// cidFile manages the optional container-ID file requested via --cidfile:
// the file is removed on Close unless an ID was successfully written.
type cidFile struct {
	path    string   // destination path given by the user
	file    *os.File // nil when no --cidfile was requested
	written bool     // set once the container ID has been persisted
}
// Close closes the underlying file handle and, when no container ID was ever
// written, removes the file so a stale path does not linger on disk.
func (cid *cidFile) Close() error {
	if cid.file == nil {
		// Nothing was opened (no --cidfile requested).
		return nil
	}
	// Best-effort close; the interesting failure mode is the removal below.
	_ = cid.file.Close()
	if cid.written {
		return nil
	}
	err := os.Remove(cid.path)
	if err != nil {
		return errors.Wrapf(err, "failed to remove the CID file '%s'", cid.path)
	}
	return nil
}
// Write persists the container ID to the file (if one was requested) and
// marks it written so Close will keep the file around.
func (cid *cidFile) Write(id string) error {
	if cid.file == nil {
		// --cidfile not in use; silently succeed.
		return nil
	}
	_, writeErr := cid.file.Write([]byte(id))
	if writeErr != nil {
		return errors.Wrap(writeErr, "failed to write the container ID to the file")
	}
	cid.written = true
	return nil
}
// newCIDFile prepares a cidFile for path. An empty path yields an inert
// cidFile whose Close/Write are no-ops. A pre-existing file is rejected so
// two containers can never share one CID file.
func newCIDFile(path string) (*cidFile, error) {
	if path == "" {
		// No --cidfile requested; return a no-op handle.
		return &cidFile{}, nil
	}
	_, statErr := os.Stat(path)
	if statErr == nil {
		return nil, errors.Errorf("container ID file found, make sure the other container isn't running or delete %s", path)
	}
	f, createErr := os.Create(path)
	if createErr != nil {
		return nil, errors.Wrap(createErr, "failed to create the container ID file")
	}
	return &cidFile{path: path, file: f}, nil
}
//nolint:gocyclo

// createContainer resolves (and, per the --pull policy, pulls) the image
// referenced by containerCfg, then creates the container, honoring content
// trust, --platform and --cidfile. It returns the new container's ID;
// engine warnings are echoed to stderr.
func createContainer(ctx context.Context, dockerCli command.Cli, containerCfg *containerConfig, options *createOptions) (containerID string, err error) {
	config := containerCfg.Config
	hostConfig := containerCfg.HostConfig
	networkingConfig := containerCfg.NetworkingConfig

	warnOnOomKillDisable(*hostConfig, dockerCli.Err())
	warnOnLocalhostDNS(*hostConfig, dockerCli.Err())

	var (
		trustedRef reference.Canonical
		namedRef   reference.Named
	)

	containerIDFile, err := newCIDFile(hostConfig.ContainerIDFile)
	if err != nil {
		return "", err
	}
	// The CID file is deleted on Close unless an ID gets written below.
	defer containerIDFile.Close()

	ref, err := reference.ParseAnyReference(config.Image)
	if err != nil {
		return "", err
	}
	if named, ok := ref.(reference.Named); ok {
		namedRef = reference.TagNameOnly(named)

		// With content trust enabled, resolve the tag to a trusted digest
		// and use that digest as the image to create from.
		if taggedRef, ok := namedRef.(reference.NamedTagged); ok && !options.untrusted {
			var err error
			trustedRef, err = image.TrustedReference(ctx, dockerCli, taggedRef)
			if err != nil {
				return "", err
			}
			config.Image = reference.FamiliarString(trustedRef)
		}
	}

	// pullAndTagImage pulls the image and, under content trust, re-tags the
	// pulled digest with the user's original tag.
	pullAndTagImage := func() error {
		if err := pullImage(ctx, dockerCli, config.Image, options); err != nil {
			return err
		}
		if taggedRef, ok := namedRef.(reference.NamedTagged); ok && trustedRef != nil {
			return image.TagTrusted(ctx, dockerCli, trustedRef, taggedRef)
		}
		return nil
	}

	var platform *specs.Platform
	// Engine API version 1.41 first introduced the option to specify platform on
	// create. It will produce an error if you try to set a platform on older API
	// versions, so check the API version here to maintain backwards
	// compatibility for CLI users.
	if options.platform != "" && versions.GreaterThanOrEqualTo(dockerCli.Client().ClientVersion(), "1.41") {
		p, err := platforms.Parse(options.platform)
		if err != nil {
			return "", errors.Wrap(err, "error parsing specified platform")
		}
		platform = &p
	}

	if options.pull == PullImageAlways {
		if err := pullAndTagImage(); err != nil {
			return "", err
		}
	}

	hostConfig.ConsoleSize[0], hostConfig.ConsoleSize[1] = dockerCli.Out().GetTtySize()

	response, err := dockerCli.Client().ContainerCreate(ctx, config, hostConfig, networkingConfig, platform, options.name)
	if err != nil {
		// Pull image if it does not exist locally and we have the PullImageMissing option. Default behavior.
		if errdefs.IsNotFound(err) && namedRef != nil && options.pull == PullImageMissing {
			if !options.quiet {
				// we don't want to write to stdout anything apart from container.ID
				fmt.Fprintf(dockerCli.Err(), "Unable to find image '%s' locally\n", reference.FamiliarString(namedRef))
			}
			if err := pullAndTagImage(); err != nil {
				return "", err
			}

			// Retry the create exactly once after a successful pull.
			var retryErr error
			response, retryErr = dockerCli.Client().ContainerCreate(ctx, config, hostConfig, networkingConfig, platform, options.name)
			if retryErr != nil {
				return "", retryErr
			}
		} else {
			return "", err
		}
	}

	// Surface any non-fatal warnings the engine attached to the response.
	for _, w := range response.Warnings {
		_, _ = fmt.Fprintf(dockerCli.Err(), "WARNING: %s\n", w)
	}
	err = containerIDFile.Write(response.ID)
	return response.ID, err
}
// warnOnOomKillDisable prints a warning to stderr when the OOM killer is
// disabled without a memory limit, a combination that can endanger the host.
func warnOnOomKillDisable(hostConfig container.HostConfig, stderr io.Writer) {
	oomDisabled := hostConfig.OomKillDisable != nil && *hostConfig.OomKillDisable
	if oomDisabled && hostConfig.Memory == 0 {
		fmt.Fprintln(stderr, "WARNING: Disabling the OOM killer on containers without setting a '-m/--memory' limit may be dangerous.")
	}
}
// warnOnLocalhostDNS checks the DNS servers passed via --dns against the
// localhost regexp and warns (once, for the first match) if any of them is a
// localhost address, since such resolvers are typically unreachable from
// inside a container.
func warnOnLocalhostDNS(hostConfig container.HostConfig, stderr io.Writer) {
	for _, dnsIP := range hostConfig.DNS {
		if !isLocalhost(dnsIP) {
			continue
		}
		fmt.Fprintf(stderr, "WARNING: Localhost DNS setting (--dns=%s) may fail in containers.\n", dnsIP)
		return
	}
}
// ipLocalhost is a regex pattern for the IPv4 loopback range (127.x.x.x) or
// the IPv6 loopback address (::1).
const ipLocalhost = `((127\.([0-9]{1,3}\.){2}[0-9]{1,3})|(::1)$)`

// localhostIPRegexp is compiled once at package init from ipLocalhost.
var localhostIPRegexp = regexp.MustCompile(ipLocalhost)

// isLocalhost returns true if ip matches the localhost IP regular expression.
// Used for determining if nameserver settings are being passed which are
// localhost addresses.
func isLocalhost(ip string) bool {
	return localhostIPRegexp.MatchString(ip)
}
// validatePullOpt checks that val is one of the accepted --pull policies
// ("always", "missing", "never") or empty.
func validatePullOpt(val string) error {
	switch val {
	case "", PullImageAlways, PullImageMissing, PullImageNever:
		// Accepted policy; nothing further to do.
		return nil
	}
	return fmt.Errorf(
		"invalid pull option: '%s': must be one of %q, %q or %q",
		val,
		PullImageAlways,
		PullImageMissing,
		PullImageNever,
	)
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,315 +0,0 @@
package dev
import (
"context"
"fmt"
"io"
"os"
"strings"
"syscall"
"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command"
"github.com/docker/cli/cli/command/completion"
"github.com/docker/cli/opts"
"github.com/docker/docker/api/types/container"
"github.com/moby/sys/signal"
"github.com/moby/term"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
// runOptions extends createOptions with the flags that only make sense for
// `docker run` (as opposed to `docker create`).
type runOptions struct {
	createOptions
	detach     bool   // -d: run in background and print the container ID
	sigProxy   bool   // --sig-proxy: forward received signals to the container process
	detachKeys string // --detach-keys: override the detach key sequence
}
// NewRunCommand create a new `docker run` command.
//
// The command wires all container-related flags into a containerOptions
// (via addFlags) plus runOptions, registers shell-completion helpers for
// --env/--env-file/--network, and delegates to runRun.
func NewRunCommand(dockerCli command.Cli) *cobra.Command {
	var options runOptions
	var copts *containerOptions

	cmd := &cobra.Command{
		Use:   "run [OPTIONS] IMAGE [COMMAND] [ARG...]",
		Short: "Create and run a new container from an image",
		Args:  cli.RequiresMinArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			copts.Image = args[0]
			if len(args) > 1 {
				copts.Args = args[1:]
			}
			return runRun(cmd.Context(), dockerCli, cmd.Flags(), &options, copts)
		},
		ValidArgsFunction: completion.ImageNames(dockerCli),
		Annotations: map[string]string{
			"category-top": "1",
			"aliases":      "docker container run, docker run",
		},
	}

	flags := cmd.Flags()
	flags.SetInterspersed(false)

	// These are flags not stored in Config/HostConfig
	flags.BoolVarP(&options.detach, "detach", "d", false, "Run container in background and print container ID")
	flags.BoolVar(&options.sigProxy, "sig-proxy", true, "Proxy received signals to the process")
	flags.StringVar(&options.name, "name", "", "Assign a name to the container")
	flags.StringVar(&options.detachKeys, "detach-keys", "", "Override the key sequence for detaching a container")
	flags.StringVar(&options.pull, "pull", PullImageMissing, `Pull image before running ("`+PullImageAlways+`", "`+PullImageMissing+`", "`+PullImageNever+`")`)
	flags.BoolVarP(&options.quiet, "quiet", "q", false, "Suppress the pull output")

	// Add an explicit help that doesn't have a `-h` to prevent the conflict
	// with hostname
	flags.Bool("help", false, "Print usage")

	command.AddPlatformFlag(flags, &options.platform)
	command.AddTrustVerificationFlags(flags, &options.untrusted, dockerCli.ContentTrustEnabled())
	copts = addFlags(flags)

	// Registration can only fail for an unknown flag name, which would be a
	// programming error here; the error is therefore explicitly discarded
	// (keeps errcheck/staticcheck clean, consistent with the cmd package).
	_ = cmd.RegisterFlagCompletionFunc(
		"env",
		func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
			return os.Environ(), cobra.ShellCompDirectiveNoFileComp
		},
	)
	_ = cmd.RegisterFlagCompletionFunc(
		"env-file",
		func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
			return nil, cobra.ShellCompDirectiveDefault
		},
	)
	_ = cmd.RegisterFlagCompletionFunc(
		"network",
		completion.NetworkNames(dockerCli),
	)
	return cmd
}
// runRun validates the --pull policy, folds the CLI's proxy configuration
// into the container environment, parses the flag set into a full container
// config, validates it against the daemon API version, and finally hands off
// to runContainer. Parse/validation failures exit with status 125.
func runRun(ctx context.Context, dockerCli command.Cli, flags *pflag.FlagSet, ropts *runOptions, copts *containerOptions) error {
	if err := validatePullOpt(ropts.pull); err != nil {
		reportError(dockerCli.Err(), "run", err.Error(), true)
		return cli.StatusError{StatusCode: 125}
	}
	// Merge proxy variables from the config file with the user-specified
	// environment; a nil value means the variable is passed through unset.
	proxyConfig := dockerCli.ConfigFile().ParseProxyConfig(dockerCli.Client().DaemonHost(), opts.ConvertKVStringsToMapWithNil(copts.env.GetAll()))
	newEnv := []string{}
	for k, v := range proxyConfig {
		if v == nil {
			newEnv = append(newEnv, k)
		} else {
			newEnv = append(newEnv, k+"="+*v)
		}
	}
	copts.env = *opts.NewListOptsRef(&newEnv, nil)
	containerCfg, err := parse(flags, copts, dockerCli.ServerInfo().OSType)
	// just in case the parse does not exit
	if err != nil {
		reportError(dockerCli.Err(), "run", err.Error(), true)
		return cli.StatusError{StatusCode: 125}
	}
	if err = validateAPIVersion(containerCfg, dockerCli.CurrentVersion()); err != nil {
		reportError(dockerCli.Err(), "run", err.Error(), true)
		return cli.StatusError{StatusCode: 125}
	}
	return runContainer(ctx, dockerCli, ropts, copts, containerCfg)
}
//nolint:gocyclo

// runContainer creates the container, optionally attaches the CLI's stdio,
// starts it, and (unless detached) waits for it to exit, translating the
// container's exit status into a cli.StatusError. Signal proxying and TTY
// size monitoring are handled here as well.
func runContainer(ctx context.Context, dockerCli command.Cli, runOpts *runOptions, copts *containerOptions, containerCfg *containerConfig) error {
	config := containerCfg.Config
	stdout, stderr := dockerCli.Out(), dockerCli.Err()
	apiClient := dockerCli.Client()

	config.ArgsEscaped = false

	if !runOpts.detach {
		if err := dockerCli.In().CheckTty(config.AttachStdin, config.Tty); err != nil {
			return err
		}
	} else {
		if copts.attach.Len() != 0 {
			return errors.New("Conflicting options: -a and -d")
		}
		// Detached containers never attach any stdio stream.
		config.AttachStdin = false
		config.AttachStdout = false
		config.AttachStderr = false
		config.StdinOnce = false
	}

	ctx, cancelFun := context.WithCancel(ctx)
	defer cancelFun()

	containerID, err := createContainer(ctx, dockerCli, containerCfg, &runOpts.createOptions)
	if err != nil {
		reportError(stderr, "run", err.Error(), true)
		return runStartContainerErr(err)
	}
	if runOpts.sigProxy {
		sigc := notifyAllSignals()
		go ForwardAllSignals(ctx, apiClient, containerID, sigc)
		defer signal.StopCatch(sigc)
	}

	var (
		waitDisplayID chan struct{}
		errCh         chan error
	)
	if !config.AttachStdout && !config.AttachStderr {
		// Make this asynchronous to allow the client to write to stdin before having to read the ID
		waitDisplayID = make(chan struct{})
		go func() {
			defer close(waitDisplayID)
			_, _ = fmt.Fprintln(stdout, containerID)
		}()
	}
	attach := config.AttachStdin || config.AttachStdout || config.AttachStderr
	if attach {
		detachKeys := dockerCli.ConfigFile().DetachKeys
		if runOpts.detachKeys != "" {
			detachKeys = runOpts.detachKeys
		}

		closeFn, err := attachContainer(ctx, dockerCli, containerID, &errCh, config, container.AttachOptions{
			Stream:     true,
			Stdin:      config.AttachStdin,
			Stdout:     config.AttachStdout,
			Stderr:     config.AttachStderr,
			DetachKeys: detachKeys,
		})
		if err != nil {
			return err
		}
		defer closeFn()
	}

	// Must be registered before ContainerStart so the exit/removal event
	// cannot be missed.
	statusChan := waitExitOrRemoved(ctx, apiClient, containerID, copts.autoRemove)

	// start the container
	if err := apiClient.ContainerStart(ctx, containerID, container.StartOptions{}); err != nil {
		// If we have hijackedIOStreamer, we should notify
		// hijackedIOStreamer we are going to exit and wait
		// to avoid the terminal are not restored.
		if attach {
			cancelFun()
			<-errCh
		}

		reportError(stderr, "run", err.Error(), false)
		if copts.autoRemove {
			// wait container to be removed
			<-statusChan
		}
		return runStartContainerErr(err)
	}

	if (config.AttachStdin || config.AttachStdout || config.AttachStderr) && config.Tty && dockerCli.Out().IsTerminal() {
		if err := MonitorTtySize(ctx, dockerCli, containerID, false); err != nil {
			_, _ = fmt.Fprintln(stderr, "Error monitoring TTY size:", err)
		}
	}

	if errCh != nil {
		if err := <-errCh; err != nil {
			if _, ok := err.(term.EscapeError); ok {
				// The user entered the detach escape sequence.
				return nil
			}

			logrus.Debugf("Error hijack: %s", err)
			return err
		}
	}

	// Detached mode: wait for the id to be displayed and return.
	if !config.AttachStdout && !config.AttachStderr {
		// Detached mode
		<-waitDisplayID
		return nil
	}

	status := <-statusChan
	if status != 0 {
		return cli.StatusError{StatusCode: status}
	}
	return nil
}
// attachContainer attaches the CLI's stdio streams to containerID according
// to options and starts the hijacked stream copy in a background goroutine.
// The goroutine's result is delivered on a fresh channel stored into *errCh;
// the returned func closes the hijacked connection.
func attachContainer(ctx context.Context, dockerCli command.Cli, containerID string, errCh *chan error, config *container.Config, options container.AttachOptions) (func(), error) {
	resp, errAttach := dockerCli.Client().ContainerAttach(ctx, containerID, options)
	if errAttach != nil {
		return nil, errAttach
	}

	var (
		out, cerr io.Writer
		in        io.ReadCloser
	)
	if options.Stdin {
		in = dockerCli.In()
	}
	if options.Stdout {
		out = dockerCli.Out()
	}
	if options.Stderr {
		// With a TTY, stdout and stderr are multiplexed onto one stream.
		if config.Tty {
			cerr = dockerCli.Out()
		} else {
			cerr = dockerCli.Err()
		}
	}

	// Buffered so the goroutine can finish even if nobody reads the result.
	ch := make(chan error, 1)
	*errCh = ch

	go func() {
		ch <- func() error {
			streamer := hijackedIOStreamer{
				streams:      dockerCli,
				inputStream:  in,
				outputStream: out,
				errorStream:  cerr,
				resp:         resp,
				tty:          config.Tty,
				detachKeys:   options.DetachKeys,
			}

			if errHijack := streamer.stream(ctx); errHijack != nil {
				return errHijack
			}
			return errAttach
		}()
	}()
	return resp.Close, nil
}
// reportError is a utility method that prints a user-friendly message
// containing the error that occurred during parsing and a suggestion to get help
func reportError(stderr io.Writer, name string, str string, withHelp bool) {
str = strings.TrimSuffix(str, ".") + "."
if withHelp {
str += "\nSee 'docker " + name + " --help'."
}
_, _ = fmt.Fprintln(stderr, "docker:", str)
}
// runStartContainerErr maps a container-start failure onto the exit codes
// the docker CLI promises: 127 when the error looks like 'not found'/'no
// such file', 126 for permission-style errors, and 125 for any other daemon
// failure.
func runStartContainerErr(err error) error {
	msg := strings.TrimPrefix(err.Error(), "Error response from daemon: ")
	code := 125 // generic daemon failure
	switch {
	case strings.Contains(msg, "executable file not found"),
		strings.Contains(msg, "no such file or directory"),
		strings.Contains(msg, "system cannot find the file specified"):
		code = 127
	case strings.Contains(msg, syscall.EACCES.Error()),
		strings.Contains(msg, syscall.EISDIR.Error()):
		code = 126
	}
	return cli.StatusError{StatusCode: code}
}

View File

@@ -1,16 +1,41 @@
package dev package dev
import ( import (
"bytes"
"context" "context"
"errors"
"fmt"
"io"
"math/rand"
"reflect"
"strconv" "strconv"
"strings"
"syscall"
"time"
"github.com/distribution/reference"
"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command"
image2 "github.com/docker/cli/cli/command/image"
"github.com/docker/cli/cli/streams"
"github.com/docker/docker/api/types" "github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/events" "github.com/docker/docker/api/types/events"
"github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/image"
"github.com/docker/docker/api/types/network"
"github.com/docker/docker/api/types/versions" "github.com/docker/docker/api/types/versions"
"github.com/docker/docker/client" "github.com/docker/docker/client"
"github.com/docker/docker/errdefs"
"github.com/docker/docker/pkg/jsonmessage"
"github.com/docker/docker/pkg/stdcopy"
"github.com/moby/sys/signal"
"github.com/moby/term"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/util/wait"
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
) )
func waitExitOrRemoved(ctx context.Context, apiClient client.APIClient, containerID string, waitRemove bool) <-chan int { func waitExitOrRemoved(ctx context.Context, apiClient client.APIClient, containerID string, waitRemove bool) <-chan int {
@@ -127,34 +152,478 @@ func legacyWaitExitOrRemoved(ctx context.Context, apiClient client.APIClient, co
return statusChan return statusChan
} }
func parallelOperation(ctx context.Context, containers []string, op func(ctx context.Context, containerID string) error) chan error { func runLogsWaitRunning(ctx context.Context, dockerCli command.Cli, id string) error {
if len(containers) == 0 { c, err := dockerCli.Client().ContainerInspect(ctx, id)
return nil if err != nil {
return err
} }
const defaultParallel int = 50
sem := make(chan struct{}, defaultParallel)
errChan := make(chan error)
// make sure result is printed in correct order options := container.LogsOptions{
output := map[string]chan error{} ShowStdout: true,
for _, c := range containers { ShowStderr: true,
output[c] = make(chan error, 1) Follow: true,
} }
logStream, err := dockerCli.Client().ContainerLogs(ctx, c.ID, options)
if err != nil {
return err
}
defer logStream.Close()
buf := bytes.NewBuffer(nil)
w := io.MultiWriter(buf, dockerCli.Out())
cancel, cancelFunc := context.WithCancel(ctx)
defer cancelFunc()
go func() { go func() {
for _, c := range containers { t := time.NewTicker(time.Second)
err := <-output[c] defer t.Stop()
for range t.C {
// keyword, maybe can find another way more elegant
if strings.Contains(buf.String(), "Now you can access resources in the kubernetes cluster, enjoy it :)") {
cancelFunc()
return
}
}
}()
var errChan = make(chan error)
go func() {
var err error
if c.Config.Tty {
_, err = io.Copy(w, logStream)
} else {
_, err = stdcopy.StdCopy(w, dockerCli.Err(), logStream)
}
if err != nil {
errChan <- err errChan <- err
} }
}() }()
select {
case err = <-errChan:
return err
case <-cancel.Done():
return nil
}
}
// runLogsSinceNow dumps the container's logs from the current moment
// ("0m") to the CLI's output streams. When follow is true it keeps
// streaming until the log stream closes. TTY containers are copied
// verbatim; non-TTY streams are demultiplexed via stdcopy.
func runLogsSinceNow(dockerCli command.Cli, id string, follow bool) error {
	ctx := context.Background()
	inspect, err := dockerCli.Client().ContainerInspect(ctx, id)
	if err != nil {
		return err
	}
	reader, err := dockerCli.Client().ContainerLogs(ctx, inspect.ID, container.LogsOptions{
		ShowStdout: true,
		ShowStderr: true,
		Since:      "0m",
		Follow:     follow,
	})
	if err != nil {
		return err
	}
	defer reader.Close()
	// A TTY multiplexes stdout/stderr into a single stream; otherwise demux.
	if inspect.Config.Tty {
		_, err = io.Copy(dockerCli.Out(), reader)
		return err
	}
	_, err = stdcopy.StdCopy(dockerCli.Out(), dockerCli.Err(), reader)
	return err
}
// createNetwork returns the ID of the docker bridge network owned by
// kubevpn, creating it when absent. Ownership is identified by the
// label set {"owner": config.ConfigMapPodTrafficManager}; the network
// uses the fixed subnet/gateway from the kubevpn config.
func createNetwork(ctx context.Context, cli *client.Client) (string, error) {
	ownerLabels := map[string]string{"owner": config.ConfigMapPodTrafficManager}
	// findExisting scans all local networks for one carrying exactly our labels.
	findExisting := func() (string, bool) {
		networks, _ := cli.NetworkList(ctx, types.NetworkListOptions{})
		for _, item := range networks {
			if reflect.DeepEqual(item.Labels, ownerLabels) {
				return item.ID, true
			}
		}
		return "", false
	}
	if id, ok := findExisting(); ok {
		return id, nil
	}
	created, err := cli.NetworkCreate(ctx, config.ConfigMapPodTrafficManager, types.NetworkCreate{
		Driver: "bridge",
		Scope:  "local",
		IPAM: &network.IPAM{
			Driver:  "",
			Options: nil,
			Config: []network.IPAMConfig{
				{
					Subnet:  config.DockerCIDR.String(),
					Gateway: config.DockerRouterIP.String(),
				},
			},
		},
		//Options: map[string]string{"--icc": "", "--ip-masq": ""},
		Labels: ownerLabels,
	})
	if err == nil {
		return created.ID, nil
	}
	// A forbidden error can mean a concurrent creation won the race;
	// re-scan before giving up.
	if errdefs.IsForbidden(err) {
		if id, ok := findExisting(); ok {
			return id, nil
		}
	}
	return "", err
}
// Pull policy constants accepted by the --pull option; they control
// whether the image is pulled before the container is created.
const (
	// PullImageAlways pulls the image even when a local copy exists.
	PullImageAlways  = "always"
	// PullImageMissing pulls only when the image is absent locally.
	PullImageMissing = "missing" // Default (matches previous behavior)
	// PullImageNever never pulls; creation fails if the image is missing.
	PullImageNever   = "never"
)
// pullImage fetches img from its registry using credentials resolved
// from the CLI config file, honoring the platform requested in options.
// Pull progress is rendered as JSON messages on the CLI's error stream.
func pullImage(ctx context.Context, dockerCli command.Cli, img string, options RunOptions) error {
	auth, err := command.RetrieveAuthTokenFromImage(dockerCli.ConfigFile(), img)
	if err != nil {
		return err
	}
	createOptions := image.CreateOptions{
		RegistryAuth: auth,
		Platform:     options.Platform,
	}
	progress, err := dockerCli.Client().ImageCreate(ctx, img, createOptions)
	if err != nil {
		return err
	}
	defer progress.Close()
	// Progress goes to stderr so stdout stays reserved for machine-readable output.
	return jsonmessage.DisplayJSONMessagesToStream(progress, streams.NewOut(dockerCli.Err()), nil)
}
// createContainer creates (but does not start) a container from runConfig
// and returns the new container's ID.
//
// Flow:
//   - parse config.Image; with content trust enabled, resolve the tagged
//     reference to a trusted canonical digest and substitute it;
//   - with Pull == PullImageAlways, pull (and re-tag the trusted ref) first;
//   - attempt ContainerCreate; on a not-found error with the default
//     PullImageMissing policy, pull once and retry the create;
//   - echo any daemon warnings to the CLI error stream.
//nolint:gocyclo
func createContainer(ctx context.Context, dockerCli command.Cli, runConfig *RunConfig) (string, error) {
	config := runConfig.config
	hostConfig := runConfig.hostConfig
	networkingConfig := runConfig.networkingConfig
	var (
		trustedRef reference.Canonical
		namedRef   reference.Named
	)
	ref, err := reference.ParseAnyReference(config.Image)
	if err != nil {
		return "", err
	}
	if named, ok := ref.(reference.Named); ok {
		// TagNameOnly adds ":latest" when no tag/digest is present.
		namedRef = reference.TagNameOnly(named)
		if taggedRef, ok := namedRef.(reference.NamedTagged); ok && dockerCli.ContentTrustEnabled() {
			var err error
			trustedRef, err = image2.TrustedReference(ctx, dockerCli, taggedRef)
			if err != nil {
				return "", err
			}
			// Create from the pinned digest, not the mutable tag.
			config.Image = reference.FamiliarString(trustedRef)
		}
	}
	// pullAndTagImage pulls config.Image and, under content trust,
	// re-applies the familiar tag to the trusted digest.
	pullAndTagImage := func() error {
		if err = pullImage(ctx, dockerCli, config.Image, runConfig.Options); err != nil {
			return err
		}
		if taggedRef, ok := namedRef.(reference.NamedTagged); ok && trustedRef != nil {
			return image2.TagTrusted(ctx, dockerCli, trustedRef, taggedRef)
		}
		return nil
	}
	if runConfig.Options.Pull == PullImageAlways {
		if err = pullAndTagImage(); err != nil {
			return "", err
		}
	}
	hostConfig.ConsoleSize[0], hostConfig.ConsoleSize[1] = dockerCli.Out().GetTtySize()
	response, err := dockerCli.Client().ContainerCreate(ctx, config, hostConfig, networkingConfig, runConfig.platform, runConfig.name)
	if err != nil {
		// Pull image if it does not exist locally and we have the PullImageMissing option. Default behavior.
		if errdefs.IsNotFound(err) && namedRef != nil && runConfig.Options.Pull == PullImageMissing {
			// we don't want to write to stdout anything apart from container.ID
			_, _ = fmt.Fprintf(dockerCli.Err(), "Unable to find image '%s' locally\n", reference.FamiliarString(namedRef))
			if err = pullAndTagImage(); err != nil {
				return "", err
			}
			var retryErr error
			response, retryErr = dockerCli.Client().ContainerCreate(ctx, config, hostConfig, networkingConfig, runConfig.platform, runConfig.name)
			if retryErr != nil {
				return "", retryErr
			}
		} else {
			return "", err
		}
	}
	for _, w := range response.Warnings {
		_, _ = fmt.Fprintf(dockerCli.Err(), "WARNING: %s\n", w)
	}
	return response.ID, err
}
// runContainer creates, attaches to, and starts the container described by
// runConfig, then blocks until it exits (attached mode) or until the ID has
// been printed (detached mode). Start failures are mapped to CLI exit codes
// via runStartContainerErr; a non-zero container exit status becomes a
// cli.StatusError with that status code.
func runContainer(ctx context.Context, dockerCli command.Cli, runConfig *RunConfig) error {
	config := runConfig.config
	stdout, stderr := dockerCli.Out(), dockerCli.Err()
	apiClient := dockerCli.Client()
	// Always re-escape args on the client side.
	config.ArgsEscaped = false
	if err := dockerCli.In().CheckTty(config.AttachStdin, config.Tty); err != nil {
		return err
	}
	ctx, cancelFun := context.WithCancel(ctx)
	defer cancelFun()
	containerID, err := createContainer(ctx, dockerCli, runConfig)
	if err != nil {
		reportError(stderr, err.Error())
		return runStartContainerErr(err)
	}
	if runConfig.Options.SigProxy {
		// Forward signals received by this process to the container.
		sigc := notifyAllSignals()
		go ForwardAllSignals(ctx, apiClient, containerID, sigc)
		defer signal.StopCatch(sigc)
	}
	var (
		waitDisplayID chan struct{}
		errCh         chan error
	)
	if !config.AttachStdout && !config.AttachStderr {
		// Make this asynchronous to allow the client to write to stdin before having to read the ID
		waitDisplayID = make(chan struct{})
		go func() {
			defer close(waitDisplayID)
			_, _ = fmt.Fprintln(stdout, containerID)
		}()
	}
	attach := config.AttachStdin || config.AttachStdout || config.AttachStderr
	if attach {
		// attachContainer stores the stream goroutine's error channel in errCh.
		closeFn, err := attachContainer(ctx, dockerCli, containerID, &errCh, config, container.AttachOptions{
			Stream:     true,
			Stdin:      config.AttachStdin,
			Stdout:     config.AttachStdout,
			Stderr:     config.AttachStderr,
			DetachKeys: dockerCli.ConfigFile().DetachKeys,
		})
		if err != nil {
			return err
		}
		defer closeFn()
	}
	// Register the exit/removal watcher before starting, so the status
	// cannot be missed.
	statusChan := waitExitOrRemoved(ctx, apiClient, containerID, runConfig.hostConfig.AutoRemove)
	// start the container
	if err := apiClient.ContainerStart(ctx, containerID, container.StartOptions{}); err != nil {
		// If we have hijackedIOStreamer, we should notify
		// hijackedIOStreamer we are going to exit and wait
		// to avoid the terminal are not restored.
		if attach {
			cancelFun()
			<-errCh
		}
		reportError(stderr, err.Error())
		if runConfig.hostConfig.AutoRemove {
			// wait container to be removed
			<-statusChan
		}
		return runStartContainerErr(err)
	}
	if (config.AttachStdin || config.AttachStdout || config.AttachStderr) && config.Tty && dockerCli.Out().IsTerminal() {
		// Keep the container's TTY size in sync with the local terminal.
		if err := MonitorTtySize(ctx, dockerCli, containerID, false); err != nil {
			_, _ = fmt.Fprintln(stderr, "Error monitoring TTY size:", err)
		}
	}
	if errCh != nil {
		if err := <-errCh; err != nil {
			if _, ok := err.(term.EscapeError); ok {
				// The user entered the detach escape sequence.
				return nil
			}
			logrus.Debugf("Error hijack: %s", err)
			return err
		}
	}
	// Detached mode: wait for the id to be displayed and return.
	if !config.AttachStdout && !config.AttachStderr {
		// Detached mode
		<-waitDisplayID
		return nil
	}
	status := <-statusChan
	if status != 0 {
		return cli.StatusError{StatusCode: status}
	}
	return nil
}
// attachContainer hijacks the container's stdio and starts streaming it
// to/from the CLI in a background goroutine. The goroutine's terminal
// error is delivered on a buffered channel stored into *errCh. The
// returned function closes the hijacked connection.
func attachContainer(ctx context.Context, dockerCli command.Cli, containerID string, errCh *chan error, config *container.Config, options container.AttachOptions) (func(), error) {
	resp, errAttach := dockerCli.Client().ContainerAttach(ctx, containerID, options)
	if errAttach != nil {
		return nil, errAttach
	}
	var stdin io.ReadCloser
	var stdout, stderrStream io.Writer
	if options.Stdin {
		stdin = dockerCli.In()
	}
	if options.Stdout {
		stdout = dockerCli.Out()
	}
	if options.Stderr {
		// With a TTY both output streams are multiplexed onto stdout.
		if config.Tty {
			stderrStream = dockerCli.Out()
		} else {
			stderrStream = dockerCli.Err()
		}
	}
	result := make(chan error, 1)
	*errCh = result
	go func() {
		streamer := hijackedIOStreamer{
			streams:      dockerCli,
			inputStream:  stdin,
			outputStream: stdout,
			errorStream:  stderrStream,
			resp:         resp,
			tty:          config.Tty,
			detachKeys:   options.DetachKeys,
		}
		if errHijack := streamer.stream(ctx); errHijack != nil {
			result <- errHijack
			return
		}
		// errAttach is nil here (checked above); preserved for parity.
		result <- errAttach
	}()
	return resp.Close, nil
}
// reportError is a utility method that prints a user-friendly message
// containing the error that occurred during parsing and a suggestion to get help
func reportError(stderr io.Writer, str string) {
str = strings.TrimSuffix(str, ".") + "."
_, _ = fmt.Fprintln(stderr, "docker:", str)
}
// runStartContainerErr maps a container start failure onto a CLI exit code:
// 127 when the executable/file was not found, 126 for permission or
// is-a-directory errors, and 125 for any other docker daemon failure.
func runStartContainerErr(err error) error {
	msg := strings.TrimPrefix(err.Error(), "Error response from daemon: ")
	code := 125
	switch {
	case strings.Contains(msg, "executable file not found"),
		strings.Contains(msg, "no such file or directory"),
		strings.Contains(msg, "system cannot find the file specified"):
		code = 127
	case strings.Contains(msg, syscall.EACCES.Error()),
		strings.Contains(msg, syscall.EISDIR.Error()):
		code = 126
	}
	return cli.StatusError{StatusCode: code, Status: msg}
}
func run(ctx context.Context, cli *client.Client, dockerCli *command.DockerCli, runConfig *RunConfig) (id string, err error) {
rand.New(rand.NewSource(time.Now().UnixNano()))
var config = runConfig.config
var hostConfig = runConfig.hostConfig
var platform = runConfig.platform
var networkConfig = runConfig.networkingConfig
var name = runConfig.name
var needPull bool
var img types.ImageInspect
img, _, err = cli.ImageInspectWithRaw(ctx, config.Image)
if errdefs.IsNotFound(err) {
logrus.Infof("needs to pull image %s", config.Image)
needPull = true
err = nil
} else if err != nil {
logrus.Errorf("image inspect failed: %v", err)
return
}
if platform != nil && platform.Architecture != "" && platform.OS != "" {
if img.Os != platform.OS || img.Architecture != platform.Architecture {
needPull = true
}
}
if needPull {
err = util.PullImage(ctx, runConfig.platform, cli, dockerCli, config.Image, nil)
if err != nil {
logrus.Errorf("Failed to pull image: %s, err: %s", config.Image, err)
return
}
}
var create container.CreateResponse
create, err = cli.ContainerCreate(ctx, config, hostConfig, networkConfig, platform, name)
if err != nil {
logrus.Errorf("Failed to create container: %s, err: %s", name, err)
return
}
id = create.ID
logrus.Infof("Created container: %s", name)
err = cli.ContainerStart(ctx, create.ID, container.StartOptions{})
if err != nil {
logrus.Errorf("failed to startup container %s: %v", name, err)
return
}
logrus.Infof("Wait container %s to be running...", name)
var inspect types.ContainerJSON
ctx2, cancelFunc := context.WithCancel(ctx)
wait.UntilWithContext(ctx2, func(ctx context.Context) {
inspect, err = cli.ContainerInspect(ctx, create.ID)
if errdefs.IsNotFound(err) {
cancelFunc()
return
} else if err != nil {
cancelFunc()
return
}
if inspect.State != nil && (inspect.State.Status == "exited" || inspect.State.Status == "dead" || inspect.State.Dead) {
cancelFunc()
err = errors.New(fmt.Sprintf("container status: %s", inspect.State.Status))
return
}
if inspect.State != nil && inspect.State.Running {
cancelFunc()
return
}
}, time.Second)
if err != nil {
logrus.Errorf("failed to wait container to be ready: %v", err)
_ = runLogsSinceNow(dockerCli, id, false)
return
}
logrus.Infof("Container %s is running now", name)
return
} }

View File

@@ -1,96 +0,0 @@
package dev
import (
"context"
"fmt"
"os"
"github.com/containerd/containerd/platforms"
"github.com/docker/cli/opts"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/sirupsen/logrus"
"github.com/spf13/pflag"
util2 "k8s.io/kubectl/pkg/cmd/util"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
// DoDev is the entry point of the dev command: it validates the requested
// platform and pull policy, normalizes the docker network mode, injects the
// docker proxy environment, parses the container flags, connects to the
// cluster (in-container or on the host), and finally hands off to
// option.Main to run the workload locally.
func DoDev(ctx context.Context, option *Options, conf *util.SshConfig, flags *pflag.FlagSet, f util2.Factory, transferImage bool) error {
	if p := option.Options.platform; p != "" {
		// Fail fast on an unparsable --platform value.
		_, err := platforms.Parse(p)
		if err != nil {
			return fmt.Errorf("error parsing specified platform: %v", err)
		}
	}
	client, cli, err := util.GetClient()
	if err != nil {
		return err
	}
	mode := container.NetworkMode(option.Copts.netMode.NetworkMode())
	if mode.IsContainer() {
		// "container:<name>" mode: the referenced container must exist and be running.
		logrus.Infof("network mode container is %s", mode.ConnectedContainer())
		var inspect types.ContainerJSON
		inspect, err = client.ContainerInspect(ctx, mode.ConnectedContainer())
		if err != nil {
			logrus.Errorf("can not inspect container %s, err: %v", mode.ConnectedContainer(), err)
			return err
		}
		if inspect.State == nil {
			return fmt.Errorf("can not get container status, please make container name is valid")
		}
		if !inspect.State.Running {
			return fmt.Errorf("container %s status is %s, expect is running, please make sure your outer docker name is correct", mode.ConnectedContainer(), inspect.State.Status)
		}
		logrus.Infof("container %s is running", mode.ConnectedContainer())
	} else if mode.IsDefault() && util.RunningInContainer() {
		// Running inside a container with default network mode: share this
		// container's network namespace (hostname == container ID/name).
		var hostname string
		if hostname, err = os.Hostname(); err != nil {
			return err
		}
		logrus.Infof("hostname is %s", hostname)
		err = option.Copts.netMode.Set(fmt.Sprintf("container:%s", hostname))
		if err != nil {
			return err
		}
	}
	err = validatePullOpt(option.Options.pull)
	if err != nil {
		return err
	}
	// Fold the docker client's proxy settings into the container environment.
	proxyConfig := cli.ConfigFile().ParseProxyConfig(cli.Client().DaemonHost(), opts.ConvertKVStringsToMapWithNil(option.Copts.env.GetAll()))
	var newEnv []string
	for k, v := range proxyConfig {
		if v == nil {
			newEnv = append(newEnv, k)
		} else {
			newEnv = append(newEnv, fmt.Sprintf("%s=%s", k, *v))
		}
	}
	option.Copts.env = *opts.NewListOptsRef(&newEnv, nil)
	var c *containerConfig
	c, err = parse(flags, option.Copts, cli.ServerInfo().OSType)
	// just in case the parse does not exit
	if err != nil {
		return err
	}
	err = validateAPIVersion(c, cli.Client().ClientVersion())
	if err != nil {
		return err
	}
	// connect to cluster, in container or host
	cancel, err := option.connect(ctx, f, conf, transferImage, c)
	defer func() {
		if cancel != nil {
			cancel()
		}
	}()
	if err != nil {
		logrus.Errorf("connect to cluster failed, err: %v", err)
		return err
	}
	return option.Main(ctx, c)
}

View File

@@ -1,109 +0,0 @@
package dev
import (
"fmt"
"net"
"github.com/containerd/containerd/platforms"
"github.com/docker/docker/api/types/network"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
// mergeDockerOptions merges the user's command-line settings
// (tempContainerConfig) into the container config derived from the k8s
// workload (r): it locates the requested container, lets the CLI-supplied
// values take precedence, then appends the declarations originally present
// in the deployment.
func mergeDockerOptions(r ConfigList, copts *Options, tempContainerConfig *containerConfig) {
	// If a container name was given, swap that container to the front so
	// the merge below operates on it.
	if copts.ContainerName != "" {
		var index = -1
		for i, config := range r {
			if config.k8sContainerName == copts.ContainerName {
				index = i
				break
			}
		}
		if index != -1 {
			r[0], r[index] = r[index], r[0]
		}
	}
	config := r[0]
	config.Options = copts.Options
	config.Copts = copts.Copts
	if copts.DevImage != "" {
		// --dev-image overrides the workload's image.
		config.config.Image = copts.DevImage
	}
	if copts.Options.name != "" {
		config.containerName = copts.Options.name
	} else {
		config.Options.name = config.containerName
	}
	if copts.Options.platform != "" {
		// Platform validity is checked earlier by the caller, so the parse
		// error is deliberately ignored here.
		p, _ := platforms.Parse(copts.Options.platform)
		config.platform = &p
	}
	// Host config: CLI values win; workload-derived entries are appended.
	tempContainerConfig.HostConfig.CapAdd = append(tempContainerConfig.HostConfig.CapAdd, config.hostConfig.CapAdd...)
	tempContainerConfig.HostConfig.SecurityOpt = append(tempContainerConfig.HostConfig.SecurityOpt, config.hostConfig.SecurityOpt...)
	tempContainerConfig.HostConfig.VolumesFrom = append(tempContainerConfig.HostConfig.VolumesFrom, config.hostConfig.VolumesFrom...)
	tempContainerConfig.HostConfig.DNS = append(tempContainerConfig.HostConfig.DNS, config.hostConfig.DNS...)
	tempContainerConfig.HostConfig.DNSOptions = append(tempContainerConfig.HostConfig.DNSOptions, config.hostConfig.DNSOptions...)
	tempContainerConfig.HostConfig.DNSSearch = append(tempContainerConfig.HostConfig.DNSSearch, config.hostConfig.DNSSearch...)
	tempContainerConfig.HostConfig.Mounts = append(tempContainerConfig.HostConfig.Mounts, config.hostConfig.Mounts...)
	// Port bindings are merged per container port.
	for port, bindings := range config.hostConfig.PortBindings {
		if v, ok := tempContainerConfig.HostConfig.PortBindings[port]; ok {
			tempContainerConfig.HostConfig.PortBindings[port] = append(v, bindings...)
		} else {
			tempContainerConfig.HostConfig.PortBindings[port] = bindings
		}
	}
	config.hostConfig = tempContainerConfig.HostConfig
	config.networkingConfig.EndpointsConfig = util.Merge[string, *network.EndpointSettings](tempContainerConfig.NetworkingConfig.EndpointsConfig, config.networkingConfig.EndpointsConfig)
	c := tempContainerConfig.Config
	var entrypoint = config.config.Entrypoint
	var args = config.config.Cmd
	// if special --entrypoint, then use it
	if len(c.Entrypoint) != 0 {
		entrypoint = c.Entrypoint
		args = c.Cmd
	}
	// NOTE(review): this repeats the c.Cmd assignment from the branch above;
	// it only matters when --entrypoint is unset but a command was given —
	// confirm this is intentional.
	if len(c.Cmd) != 0 {
		args = c.Cmd
	}
	c.Entrypoint = entrypoint
	c.Cmd = args
	// CLI env entries appended after workload env, so later entries win.
	c.Env = append(config.config.Env, c.Env...)
	c.Image = config.config.Image
	if c.User == "" {
		c.User = config.config.User
	}
	c.Labels = util.Merge[string, string](config.config.Labels, c.Labels)
	c.Volumes = util.Merge[string, struct{}](c.Volumes, config.config.Volumes)
	if c.WorkingDir == "" {
		c.WorkingDir = config.config.WorkingDir
	}
	for k, v := range config.config.ExposedPorts {
		if _, found := c.ExposedPorts[k]; !found {
			c.ExposedPorts[k] = v
		}
	}
	// Resolve --extra-domain entries to IPv4 addresses and pin them via
	// the container's /etc/hosts (one A record per domain).
	var hosts []string
	for _, domain := range copts.ExtraRouteInfo.ExtraDomain {
		ips, err := net.LookupIP(domain)
		if err != nil {
			continue
		}
		for _, ip := range ips {
			if ip.To4() != nil {
				hosts = append(hosts, fmt.Sprintf("%s:%s", domain, ip.To4().String()))
				break
			}
		}
	}
	config.hostConfig.ExtraHosts = hosts
	config.config = c
}

View File

@@ -1,14 +1,11 @@
package dev package dev
import ( import (
"bytes"
"context" "context"
"errors" "errors"
"fmt" "fmt"
"io" "io"
"math/rand"
"os" "os"
"reflect"
"sort" "sort"
"strconv" "strconv"
"strings" "strings"
@@ -16,26 +13,23 @@ import (
"github.com/containerd/containerd/platforms" "github.com/containerd/containerd/platforms"
"github.com/docker/cli/cli/command" "github.com/docker/cli/cli/command"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/container"
typescontainer "github.com/docker/docker/api/types/container" typescontainer "github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/network" "github.com/docker/docker/api/types/network"
"github.com/docker/docker/api/types/strslice" "github.com/docker/docker/api/types/strslice"
"github.com/docker/docker/client" "github.com/docker/docker/client"
"github.com/docker/docker/errdefs"
"github.com/docker/docker/pkg/stdcopy"
"github.com/docker/go-connections/nat" "github.com/docker/go-connections/nat"
"github.com/google/uuid" "github.com/google/uuid"
specs "github.com/opencontainers/image-spec/specs-go/v1" specs "github.com/opencontainers/image-spec/specs-go/v1"
pkgerr "github.com/pkg/errors"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
"github.com/spf13/pflag" "github.com/spf13/pflag"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
cmdutil "k8s.io/kubectl/pkg/cmd/util" cmdutil "k8s.io/kubectl/pkg/cmd/util"
"k8s.io/kubectl/pkg/polymorphichelpers" "k8s.io/kubectl/pkg/polymorphichelpers"
"k8s.io/kubectl/pkg/util/interrupt"
"k8s.io/kubectl/pkg/util/podutils" "k8s.io/kubectl/pkg/util/podutils"
"k8s.io/utils/ptr" "k8s.io/utils/ptr"
@@ -58,7 +52,6 @@ type Options struct {
Headers map[string]string Headers map[string]string
Namespace string Namespace string
Workload string Workload string
Factory cmdutil.Factory
ContainerName string ContainerName string
NoProxy bool NoProxy bool
ExtraRouteInfo handler.ExtraRouteInfo ExtraRouteInfo handler.ExtraRouteInfo
@@ -67,34 +60,159 @@ type Options struct {
// docker options // docker options
DevImage string DevImage string
Options runOptions
Copts *containerOptions RunOptions RunOptions
ContainerOptions *ContainerOptions
// inner // inner
Cli *client.Client cli *client.Client
DockerCli *command.DockerCli dockerCli *command.DockerCli
factory cmdutil.Factory
clientset *kubernetes.Clientset
restclient *rest.RESTClient
config *rest.Config
// rollback // rollback
rollbackFuncList []func() error rollbackFuncList []func() error
} }
func (option *Options) Main(ctx context.Context, c *containerConfig) error { func (option *Options) Main(ctx context.Context, sshConfig *util.SshConfig, flags *pflag.FlagSet, transferImage bool) error {
rand.NewSource(time.Now().UnixNano()) mode := typescontainer.NetworkMode(option.ContainerOptions.netMode.NetworkMode())
object, err := util.GetUnstructuredObject(option.Factory, option.Namespace, option.Workload) if mode.IsContainer() {
log.Infof("network mode container is %s", mode.ConnectedContainer())
inspect, err := option.cli.ContainerInspect(ctx, mode.ConnectedContainer())
if err != nil { if err != nil {
log.Errorf("get unstructured object error: %v", err) log.Errorf("can not inspect container %s, err: %v", mode.ConnectedContainer(), err)
return err return err
} }
if inspect.State == nil {
return fmt.Errorf("can not get container status, please make container name is valid")
}
if !inspect.State.Running {
return fmt.Errorf("container %s status is %s, expect is running, please make sure your outer docker name is correct", mode.ConnectedContainer(), inspect.State.Status)
}
log.Infof("container %s is running", mode.ConnectedContainer())
} else if mode.IsDefault() && util.RunningInContainer() {
hostname, err := os.Hostname()
if err != nil {
return err
}
log.Infof("hostname is %s", hostname)
err = option.ContainerOptions.netMode.Set(fmt.Sprintf("container:%s", hostname))
if err != nil {
return err
}
}
u := object.Object.(*unstructured.Unstructured) config, hostConfig, err := Parse(flags, option.ContainerOptions)
var templateSpec *v1.PodTemplateSpec // just in case the Parse does not exit
//var path []string
templateSpec, _, err = util.GetPodTemplateSpecPath(u)
if err != nil { if err != nil {
return err return err
} }
clientSet, err := option.Factory.KubernetesClientSet() // Connect to cluster, in container or host
err = option.Connect(ctx, sshConfig, transferImage, hostConfig.PortBindings)
if err != nil {
log.Errorf("Connect to cluster failed, err: %v", err)
return err
}
return option.Dev(ctx, config, hostConfig)
}
// Connect to cluster network on docker container or host
func (option *Options) Connect(ctx context.Context, sshConfig *util.SshConfig, transferImage bool, portBindings nat.PortMap) error {
switch option.ConnectMode {
case ConnectModeHost:
daemonCli := daemon.GetClient(false)
if daemonCli == nil {
return fmt.Errorf("get nil daemon client")
}
kubeConfigBytes, ns, err := util.ConvertToKubeConfigBytes(option.factory)
if err != nil {
return err
}
logLevel := log.ErrorLevel
if config.Debug {
logLevel = log.DebugLevel
}
// not needs to ssh jump in daemon, because dev mode will hang up until user exit,
// so just ssh jump in client is enough
req := &rpc.ConnectRequest{
KubeconfigBytes: string(kubeConfigBytes),
Namespace: ns,
Headers: option.Headers,
Workloads: []string{option.Workload},
ExtraRoute: option.ExtraRouteInfo.ToRPC(),
Engine: string(option.Engine),
OriginKubeconfigPath: util.GetKubeConfigPath(option.factory),
TransferImage: transferImage,
Image: config.Image,
Level: int32(logLevel),
SshJump: sshConfig.ToRPC(),
}
if option.NoProxy {
req.Workloads = nil
}
option.AddRollbackFunc(func() error {
_ = disconnect(ctx, daemonCli, &rpc.DisconnectRequest{
KubeconfigBytes: ptr.To(string(kubeConfigBytes)),
Namespace: ptr.To(ns),
SshJump: sshConfig.ToRPC(),
})
return nil
})
var resp rpc.Daemon_ConnectClient
resp, err = daemonCli.Proxy(ctx, req)
if err != nil {
log.Errorf("Connect to cluster error: %s", err.Error())
return err
}
for {
resp, err := resp.Recv()
if err == io.EOF {
return nil
} else if err != nil {
return err
}
_, _ = fmt.Fprint(os.Stdout, resp.Message)
}
case ConnectModeContainer:
runConfig, err := option.CreateConnectContainer(portBindings)
if err != nil {
return err
}
var id string
log.Infof("starting container connect to cluster")
id, err = run(ctx, option.cli, option.dockerCli, runConfig)
if err != nil {
return err
}
option.AddRollbackFunc(func() error {
_ = option.cli.ContainerKill(context.Background(), id, "SIGTERM")
_ = runLogsSinceNow(option.dockerCli, id, true)
return nil
})
err = runLogsWaitRunning(ctx, option.dockerCli, id)
if err != nil {
// interrupt by signal KILL
if errors.Is(err, context.Canceled) {
return nil
}
return err
}
log.Infof("container connect to cluster successfully")
err = option.ContainerOptions.netMode.Set(fmt.Sprintf("container:%s", id))
return err
default:
return fmt.Errorf("unsupport connect mode: %s", option.ConnectMode)
}
}
func (option *Options) Dev(ctx context.Context, cConfig *Config, hostConfig *HostConfig) error {
templateSpec, err := option.GetPodTemplateSpec()
if err != nil { if err != nil {
return err return err
} }
@@ -109,20 +227,18 @@ func (option *Options) Main(ctx context.Context, c *containerConfig) error {
return sort.Reverse(podutils.ActivePods(pods)) return sort.Reverse(podutils.ActivePods(pods))
} }
label := labels.SelectorFromSet(templateSpec.Labels).String() label := labels.SelectorFromSet(templateSpec.Labels).String()
firstPod, _, err := polymorphichelpers.GetFirstPod(clientSet.CoreV1(), option.Namespace, label, time.Second*5, sortBy) firstPod, _, err := polymorphichelpers.GetFirstPod(option.clientset.CoreV1(), option.Namespace, label, time.Second*5, sortBy)
if err != nil { if err != nil {
log.Errorf("get first running pod from k8s: %v", err) log.Errorf("get first running pod from k8s: %v", err)
return err return err
} }
pod := firstPod.Name env, err := util.GetEnv(ctx, option.clientset, option.config, option.Namespace, firstPod.Name)
env, err := util.GetEnv(ctx, option.Factory, option.Namespace, pod)
if err != nil { if err != nil {
log.Errorf("get env from k8s: %v", err) log.Errorf("get env from k8s: %v", err)
return err return err
} }
volume, err := util.GetVolume(ctx, option.Factory, option.Namespace, pod) volume, err := util.GetVolume(ctx, option.factory, option.Namespace, firstPod.Name)
if err != nil { if err != nil {
log.Errorf("get volume from k8s: %v", err) log.Errorf("get volume from k8s: %v", err)
return err return err
@@ -130,24 +246,37 @@ func (option *Options) Main(ctx context.Context, c *containerConfig) error {
option.AddRollbackFunc(func() error { option.AddRollbackFunc(func() error {
return util.RemoveDir(volume) return util.RemoveDir(volume)
}) })
dns, err := util.GetDNS(ctx, option.Factory, option.Namespace, pod) dns, err := util.GetDNS(ctx, option.clientset, option.config, option.Namespace, firstPod.Name)
if err != nil { if err != nil {
log.Errorf("get dns from k8s: %v", err) log.Errorf("get dns from k8s: %v", err)
return err return err
} }
inject.RemoveContainers(templateSpec) inject.RemoveContainers(templateSpec)
list := convertKubeResourceToContainer(option.Namespace, *templateSpec, env, volume, dns) if option.ContainerName != "" {
mergeDockerOptions(list, option, c) var index = -1
mode := container.NetworkMode(option.Copts.netMode.NetworkMode()) for i, c := range templateSpec.Spec.Containers {
if len(option.Copts.netMode.Value()) != 0 { if option.ContainerName == c.Name {
log.Infof("network mode is %s", option.Copts.netMode.NetworkMode()) index = i
for _, runConfig := range list[:] { break
}
}
if index != -1 {
templateSpec.Spec.Containers[0], templateSpec.Spec.Containers[index] = templateSpec.Spec.Containers[index], templateSpec.Spec.Containers[0]
}
}
configList := ConvertPodToContainer(option.Namespace, *templateSpec, env, volume, dns)
MergeDockerOptions(configList, option, cConfig, hostConfig)
mode := container.NetworkMode(option.ContainerOptions.netMode.NetworkMode())
if len(option.ContainerOptions.netMode.Value()) != 0 {
log.Infof("network mode is %s", option.ContainerOptions.netMode.NetworkMode())
for _, runConfig := range configList[:] {
// remove expose port // remove expose port
runConfig.config.ExposedPorts = nil runConfig.config.ExposedPorts = nil
runConfig.hostConfig.NetworkMode = mode runConfig.hostConfig.NetworkMode = mode
if mode.IsContainer() { if mode.IsContainer() {
runConfig.hostConfig.PidMode = typescontainer.PidMode(option.Copts.netMode.NetworkMode()) runConfig.hostConfig.PidMode = typescontainer.PidMode(option.ContainerOptions.netMode.NetworkMode())
} }
runConfig.hostConfig.PortBindings = nil runConfig.hostConfig.PortBindings = nil
@@ -160,19 +289,19 @@ func (option *Options) Main(ctx context.Context, c *containerConfig) error {
} }
} else { } else {
var networkID string var networkID string
networkID, err = createKubevpnNetwork(ctx, option.Cli) networkID, err = createNetwork(ctx, option.cli)
if err != nil { if err != nil {
log.Errorf("create network for %s: %v", option.Workload, err) log.Errorf("create network for %s: %v", option.Workload, err)
return err return err
} }
log.Infof("create docker network %s", networkID) log.Infof("create docker network %s", networkID)
list[len(list)-1].networkingConfig.EndpointsConfig = map[string]*network.EndpointSettings{ configList[len(configList)-1].networkingConfig.EndpointsConfig = map[string]*network.EndpointSettings{
list[len(list)-1].containerName: {NetworkID: networkID}, configList[len(configList)-1].name: {NetworkID: networkID},
} }
var portMap = nat.PortMap{} var portMap = nat.PortMap{}
var portSet = nat.PortSet{} var portSet = nat.PortSet{}
for _, runConfig := range list { for _, runConfig := range configList {
for k, v := range runConfig.hostConfig.PortBindings { for k, v := range runConfig.hostConfig.PortBindings {
if oldValue, ok := portMap[k]; ok { if oldValue, ok := portMap[k]; ok {
portMap[k] = append(oldValue, v...) portMap[k] = append(oldValue, v...)
@@ -184,15 +313,15 @@ func (option *Options) Main(ctx context.Context, c *containerConfig) error {
portSet[k] = v portSet[k] = v
} }
} }
list[len(list)-1].hostConfig.PortBindings = portMap configList[len(configList)-1].hostConfig.PortBindings = portMap
list[len(list)-1].config.ExposedPorts = portSet configList[len(configList)-1].config.ExposedPorts = portSet
// skip last, use last container network // skip last, use last container network
for _, runConfig := range list[:len(list)-1] { for _, runConfig := range configList[:len(configList)-1] {
// remove expose port // remove expose port
runConfig.config.ExposedPorts = nil runConfig.config.ExposedPorts = nil
runConfig.hostConfig.NetworkMode = typescontainer.NetworkMode("container:" + list[len(list)-1].containerName) runConfig.hostConfig.NetworkMode = typescontainer.NetworkMode("container:" + configList[len(configList)-1].name)
runConfig.hostConfig.PidMode = typescontainer.PidMode("container:" + list[len(list)-1].containerName) runConfig.hostConfig.PidMode = typescontainer.PidMode("container:" + configList[len(configList)-1].name)
runConfig.hostConfig.PortBindings = nil runConfig.hostConfig.PortBindings = nil
// remove dns // remove dns
@@ -205,435 +334,112 @@ func (option *Options) Main(ctx context.Context, c *containerConfig) error {
} }
option.AddRollbackFunc(func() error { option.AddRollbackFunc(func() error {
return list.Remove(ctx, option.Cli) return configList.Remove(ctx, option.cli)
}) })
return list.Run(ctx, volume, option.Cli, option.DockerCli) return configList.Run(ctx, volume, option.cli, option.dockerCli)
} }
// connect to cluster network on docker container or host func disconnect(ctx context.Context, daemonClient rpc.DaemonClient, req *rpc.DisconnectRequest) error {
func (option *Options) connect(ctx context.Context, f cmdutil.Factory, conf *util.SshConfig, transferImage bool, c *containerConfig) (func(), error) { resp, err := daemonClient.Disconnect(ctx, req)
connect := &handler.ConnectOptions{
Headers: option.Headers,
Workloads: []string{option.Workload},
ExtraRouteInfo: option.ExtraRouteInfo,
Engine: option.Engine,
OriginKubeconfigPath: util.GetKubeConfigPath(f),
}
if err := connect.InitClient(f); err != nil {
return nil, err
}
option.Namespace = connect.Namespace
if err := connect.PreCheckResource(); err != nil {
return nil, err
}
if len(connect.Workloads) > 1 {
return nil, fmt.Errorf("can only dev one workloads at same time, workloads: %v", connect.Workloads)
}
if len(connect.Workloads) < 1 {
return nil, fmt.Errorf("you must provide resource to dev, workloads : %v is invaild", connect.Workloads)
}
option.Workload = connect.Workloads[0]
// if no-proxy is true, not needs to intercept traffic
if option.NoProxy {
if len(connect.Headers) != 0 {
return nil, fmt.Errorf("not needs to provide headers if is no-proxy mode")
}
connect.Workloads = []string{}
}
switch option.ConnectMode {
case ConnectModeHost:
daemonCli := daemon.GetClient(false)
if daemonCli == nil {
return nil, fmt.Errorf("get nil daemon client")
}
kubeConfigBytes, ns, err := util.ConvertToKubeConfigBytes(f)
if err != nil { if err != nil {
return nil, err return err
}
logLevel := log.ErrorLevel
if config.Debug {
logLevel = log.DebugLevel
}
// not needs to ssh jump in daemon, because dev mode will hang up until user exit,
// so just ssh jump in client is enough
req := &rpc.ConnectRequest{
KubeconfigBytes: string(kubeConfigBytes),
Namespace: ns,
Headers: connect.Headers,
Workloads: connect.Workloads,
ExtraRoute: connect.ExtraRouteInfo.ToRPC(),
Engine: string(connect.Engine),
OriginKubeconfigPath: util.GetKubeConfigPath(f),
TransferImage: transferImage,
Image: config.Image,
Level: int32(logLevel),
SshJump: conf.ToRPC(),
}
cancel := disconnect(ctx, daemonCli, &rpc.LeaveRequest{Workloads: connect.Workloads}, &rpc.DisconnectRequest{
KubeconfigBytes: ptr.To(string(kubeConfigBytes)),
Namespace: ptr.To(ns),
SshJump: conf.ToRPC(),
})
var resp rpc.Daemon_ConnectClient
resp, err = daemonCli.Proxy(ctx, req)
if err != nil {
log.Errorf("connect to cluster error: %s", err.Error())
return cancel, err
} }
for { for {
response, err := resp.Recv() recv, err := resp.Recv()
if err == io.EOF { if err == io.EOF {
return cancel, nil
} else if err != nil {
return cancel, err
}
fmt.Fprint(os.Stdout, response.Message)
}
case ConnectModeContainer:
port, set, err := option.GetExposePort(c)
if err != nil {
return nil, err
}
var path = os.Getenv(config.EnvSSHJump)
if path != "" {
path, err = util.ConvertK8sApiServerToDomain(path)
} else {
path, err = util.GetKubeconfigPath(connect.GetFactory())
}
if err != nil {
return nil, err
}
var platform specs.Platform
if option.Options.platform != "" {
platform, err = platforms.Parse(option.Options.platform)
if err != nil {
return nil, pkgerr.Wrap(err, "error parsing specified platform")
}
}
var connectContainer *RunConfig
connectContainer, err = createConnectContainer(option.NoProxy, *connect, path, option.Cli, &platform, port, set)
if err != nil {
return nil, err
}
cancelCtx, cancelFunc := context.WithCancel(ctx)
defer cancelFunc()
var id string
log.Infof("starting container connect to cluster")
id, err = run(cancelCtx, connectContainer, option.Cli, option.DockerCli)
if err != nil {
return nil, err
}
h := interrupt.New(
func(signal os.Signal) { return },
func() {
cancelFunc()
_ = option.Cli.ContainerKill(context.Background(), id, "SIGTERM")
_ = runLogsSinceNow(option.DockerCli, id, true)
},
)
go h.Run(func() error { select {} })
option.AddRollbackFunc(func() error {
h.Close()
return nil return nil
}) } else if err != nil {
err = runLogsWaitRunning(cancelCtx, option.DockerCli, id) return err
if err != nil {
// interrupt by signal KILL
if errors.Is(err, context.Canceled) {
return nil, nil
} }
return nil, err _, _ = fmt.Fprint(os.Stdout, recv.Message)
}
log.Infof("container connect to cluster successfully")
err = option.Copts.netMode.Set(fmt.Sprintf("container:%s", id))
return nil, err
default:
return nil, fmt.Errorf("unsupport connect mode: %s", option.ConnectMode)
} }
} }
func disconnect(ctx context.Context, daemonClient rpc.DaemonClient, leaveReq *rpc.LeaveRequest, req *rpc.DisconnectRequest) func() { func (option *Options) CreateConnectContainer(portBindings nat.PortMap) (*RunConfig, error) {
return func() { portMap, portSet, err := option.GetExposePort(portBindings)
resp, err := daemonClient.Leave(ctx, leaveReq)
if err == nil {
for {
msg, err := resp.Recv()
if err == io.EOF {
break
} else if err != nil {
log.Errorf("leave resource %s error: %v", strings.Join(leaveReq.Workloads, " "), err)
break
}
fmt.Fprint(os.Stdout, msg.Message)
}
}
resp1, err := daemonClient.Disconnect(ctx, req)
if err != nil { if err != nil {
log.Errorf("disconnect error: %v", err) return nil, err
return
}
for {
msg, err := resp1.Recv()
if err == io.EOF {
return
} else if err != nil {
log.Errorf("disconnect error: %v", err)
return
}
fmt.Fprint(os.Stdout, msg.Message)
}
}
} }
func createConnectContainer(noProxy bool, connect handler.ConnectOptions, path string, cli *client.Client, platform *specs.Platform, port nat.PortMap, set nat.PortSet) (*RunConfig, error) { var kubeconfigPath = os.Getenv(config.EnvSSHJump)
var entrypoint []string if kubeconfigPath != "" {
if noProxy { kubeconfigPath, err = util.ConvertK8sApiServerToDomain(kubeconfigPath)
entrypoint = []string{"kubevpn", "connect", "--foreground", "-n", connect.Namespace, "--kubeconfig", "/root/.kube/config", "--image", config.Image, "--engine", string(connect.Engine)}
} else { } else {
entrypoint = []string{"kubevpn", "proxy", connect.Workloads[0], "--foreground", "-n", connect.Namespace, "--kubeconfig", "/root/.kube/config", "--image", config.Image, "--engine", string(connect.Engine)} kubeconfigPath, err = util.GetKubeconfigPath(option.factory)
for k, v := range connect.Headers { }
if err != nil {
return nil, err
}
var entrypoint []string
if option.NoProxy {
entrypoint = []string{"kubevpn", "connect", "--foreground", "-n", option.Namespace, "--kubeconfig", "/root/.kube/config", "--image", config.Image, "--engine", string(option.Engine)}
} else {
entrypoint = []string{"kubevpn", "proxy", option.Workload, "--foreground", "-n", option.Namespace, "--kubeconfig", "/root/.kube/config", "--image", config.Image, "--engine", string(option.Engine)}
for k, v := range option.Headers {
entrypoint = append(entrypoint, "--headers", fmt.Sprintf("%s=%s", k, v)) entrypoint = append(entrypoint, "--headers", fmt.Sprintf("%s=%s", k, v))
} }
} }
for _, v := range connect.ExtraRouteInfo.ExtraCIDR { for _, v := range option.ExtraRouteInfo.ExtraCIDR {
entrypoint = append(entrypoint, "--extra-cidr", v) entrypoint = append(entrypoint, "--extra-cidr", v)
} }
for _, v := range connect.ExtraRouteInfo.ExtraDomain { for _, v := range option.ExtraRouteInfo.ExtraDomain {
entrypoint = append(entrypoint, "--extra-domain", v) entrypoint = append(entrypoint, "--extra-domain", v)
} }
if connect.ExtraRouteInfo.ExtraNodeIP { if option.ExtraRouteInfo.ExtraNodeIP {
entrypoint = append(entrypoint, "--extra-node-ip") entrypoint = append(entrypoint, "--extra-node-ip")
} }
runConfig := &container.Config{ runConfig := &container.Config{
User: "root", User: "root",
AttachStdin: false, ExposedPorts: portSet,
AttachStdout: false,
AttachStderr: false,
ExposedPorts: set,
StdinOnce: false,
Env: []string{}, Env: []string{},
Cmd: []string{}, Cmd: []string{},
Healthcheck: nil, Healthcheck: nil,
ArgsEscaped: false,
Image: config.Image, Image: config.Image,
Volumes: nil,
Entrypoint: entrypoint, Entrypoint: entrypoint,
NetworkDisabled: false,
MacAddress: "",
OnBuild: nil,
StopSignal: "",
StopTimeout: nil,
Shell: nil,
} }
hostConfig := &container.HostConfig{ hostConfig := &container.HostConfig{
Binds: []string{fmt.Sprintf("%s:%s", path, "/root/.kube/config")}, Binds: []string{fmt.Sprintf("%s:%s", kubeconfigPath, "/root/.kube/config")},
LogConfig: container.LogConfig{}, LogConfig: container.LogConfig{},
PortBindings: port, PortBindings: portMap,
AutoRemove: true,
Privileged: true,
RestartPolicy: container.RestartPolicy{}, RestartPolicy: container.RestartPolicy{},
AutoRemove: false,
VolumeDriver: "",
VolumesFrom: nil,
ConsoleSize: [2]uint{},
CapAdd: strslice.StrSlice{"SYS_PTRACE", "SYS_ADMIN"}, // for dlv CapAdd: strslice.StrSlice{"SYS_PTRACE", "SYS_ADMIN"}, // for dlv
CgroupnsMode: "",
// https://stackoverflow.com/questions/24319662/from-inside-of-a-docker-container-how-do-i-connect-to-the-localhost-of-the-mach // https://stackoverflow.com/questions/24319662/from-inside-of-a-docker-container-how-do-i-connect-to-the-localhost-of-the-mach
// couldn't get current server API group list: Get "https://host.docker.internal:62844/api?timeout=32s": tls: failed to verify certificate: x509: certificate is valid for kubernetes.default.svc.cluster.local, kubernetes.default.svc, kubernetes.default, kubernetes, istio-sidecar-injector.istio-system.svc, proxy-exporter.kube-system.svc, not host.docker.internal // couldn't get current server API group list: Get "https://host.docker.internal:62844/api?timeout=32s": tls: failed to verify certificate: x509: certificate is valid for kubernetes.default.svc.cluster.local, kubernetes.default.svc, kubernetes.default, kubernetes, istio-sidecar-injector.istio-system.svc, proxy-exporter.kube-system.svc, not host.docker.internal
ExtraHosts: []string{"host.docker.internal:host-gateway", "kubernetes:host-gateway"}, ExtraHosts: []string{"host.docker.internal:host-gateway", "kubernetes:host-gateway"},
GroupAdd: nil,
IpcMode: "",
Cgroup: "",
Links: nil,
OomScoreAdj: 0,
PidMode: "",
Privileged: true,
PublishAllPorts: false,
ReadonlyRootfs: false,
SecurityOpt: []string{"apparmor=unconfined", "seccomp=unconfined"}, SecurityOpt: []string{"apparmor=unconfined", "seccomp=unconfined"},
StorageOpt: nil,
Tmpfs: nil,
UTSMode: "",
UsernsMode: "",
ShmSize: 0,
Sysctls: map[string]string{"net.ipv6.conf.all.disable_ipv6": strconv.Itoa(0)}, Sysctls: map[string]string{"net.ipv6.conf.all.disable_ipv6": strconv.Itoa(0)},
Runtime: "",
Isolation: "",
Resources: container.Resources{}, Resources: container.Resources{},
MaskedPaths: nil,
ReadonlyPaths: nil,
Init: nil,
} }
var suffix string newUUID, err := uuid.NewUUID()
if newUUID, err := uuid.NewUUID(); err == nil {
suffix = strings.ReplaceAll(newUUID.String(), "-", "")[:5]
}
networkID, err := createKubevpnNetwork(context.Background(), cli)
if err != nil { if err != nil {
return nil, err return nil, err
} }
name := fmt.Sprintf("%s_%s_%s", "kubevpn", "local", suffix) suffix := strings.ReplaceAll(newUUID.String(), "-", "")[:5]
name := util.Join(option.Namespace, "kubevpn", suffix)
networkID, err := createNetwork(context.Background(), option.cli)
if err != nil {
return nil, err
}
var platform *specs.Platform
if option.RunOptions.Platform != "" {
plat, _ := platforms.Parse(option.RunOptions.Platform)
platform = &plat
}
c := &RunConfig{ c := &RunConfig{
config: runConfig, config: runConfig,
hostConfig: hostConfig, hostConfig: hostConfig,
networkingConfig: &network.NetworkingConfig{ networkingConfig: &network.NetworkingConfig{EndpointsConfig: map[string]*network.EndpointSettings{name: {NetworkID: networkID}}},
EndpointsConfig: map[string]*network.EndpointSettings{name: {
NetworkID: networkID,
}},
},
platform: platform, platform: platform,
containerName: name, name: name,
k8sContainerName: name, Options: RunOptions{Pull: PullImageMissing},
} }
return c, nil return c, nil
} }
func runLogsWaitRunning(ctx context.Context, dockerCli command.Cli, container string) error {
c, err := dockerCli.Client().ContainerInspect(ctx, container)
if err != nil {
return err
}
options := typescontainer.LogsOptions{
ShowStdout: true,
ShowStderr: true,
Follow: true,
}
logStream, err := dockerCli.Client().ContainerLogs(ctx, c.ID, options)
if err != nil {
return err
}
defer logStream.Close()
buf := bytes.NewBuffer(nil)
w := io.MultiWriter(buf, dockerCli.Out())
cancel, cancelFunc := context.WithCancel(ctx)
defer cancelFunc()
go func() {
t := time.NewTicker(time.Second)
defer t.Stop()
for range t.C {
// keyword, maybe can find another way more elegant
if strings.Contains(buf.String(), "dns service ok") {
cancelFunc()
return
}
}
}()
var errChan = make(chan error)
go func() {
var err error
if c.Config.Tty {
_, err = io.Copy(w, logStream)
} else {
_, err = stdcopy.StdCopy(w, dockerCli.Err(), logStream)
}
if err != nil {
errChan <- err
}
}()
select {
case err = <-errChan:
return err
case <-cancel.Done():
return nil
}
}
func runLogsSinceNow(dockerCli command.Cli, container string, follow bool) error {
ctx := context.Background()
c, err := dockerCli.Client().ContainerInspect(ctx, container)
if err != nil {
return err
}
options := typescontainer.LogsOptions{
ShowStdout: true,
ShowStderr: true,
Since: "0m",
Follow: follow,
}
responseBody, err := dockerCli.Client().ContainerLogs(ctx, c.ID, options)
if err != nil {
return err
}
defer responseBody.Close()
if c.Config.Tty {
_, err = io.Copy(dockerCli.Out(), responseBody)
} else {
_, err = stdcopy.StdCopy(dockerCli.Out(), dockerCli.Err(), responseBody)
}
return err
}
func runKill(dockerCli command.Cli, containers ...string) error {
var errs []string
ctx := context.Background()
errChan := parallelOperation(ctx, append([]string{}, containers...), func(ctx context.Context, container string) error {
return dockerCli.Client().ContainerKill(ctx, container, "SIGTERM")
})
for _, name := range containers {
if err := <-errChan; err != nil {
errs = append(errs, err.Error())
} else {
fmt.Fprintln(dockerCli.Out(), name)
}
}
if len(errs) > 0 {
return errors.New(strings.Join(errs, "\n"))
}
return nil
}
func createKubevpnNetwork(ctx context.Context, cli *client.Client) (string, error) {
by := map[string]string{"owner": config.ConfigMapPodTrafficManager}
list, _ := cli.NetworkList(ctx, types.NetworkListOptions{})
for _, resource := range list {
if reflect.DeepEqual(resource.Labels, by) {
return resource.ID, nil
}
}
create, err := cli.NetworkCreate(ctx, config.ConfigMapPodTrafficManager, types.NetworkCreate{
Driver: "bridge",
Scope: "local",
IPAM: &network.IPAM{
Driver: "",
Options: nil,
Config: []network.IPAMConfig{
{
Subnet: config.DockerCIDR.String(),
Gateway: config.DockerRouterIP.String(),
},
},
},
//Options: map[string]string{"--icc": "", "--ip-masq": ""},
Labels: by,
})
if err != nil {
if errdefs.IsForbidden(err) {
list, _ = cli.NetworkList(ctx, types.NetworkListOptions{})
for _, resource := range list {
if reflect.DeepEqual(resource.Labels, by) {
return resource.ID, nil
}
}
}
return "", err
}
return create.ID, nil
}
func (option *Options) AddRollbackFunc(f func() error) { func (option *Options) AddRollbackFunc(f func() error) {
option.rollbackFuncList = append(option.rollbackFuncList, f) option.rollbackFuncList = append(option.rollbackFuncList, f)
} }
@@ -642,35 +448,23 @@ func (option *Options) GetRollbackFuncList() []func() error {
return option.rollbackFuncList return option.rollbackFuncList
} }
func AddDockerFlags(options *Options, p *pflag.FlagSet, cli *command.DockerCli) { func AddDockerFlags(options *Options, p *pflag.FlagSet) {
p.SetInterspersed(false) p.SetInterspersed(false)
// These are flags not stored in Config/HostConfig // These are flags not stored in Config/HostConfig
p.BoolVarP(&options.Options.detach, "detach", "d", false, "Run container in background and print container ID") p.StringVar(&options.RunOptions.Pull, "pull", PullImageMissing, `Pull image before running ("`+PullImageAlways+`"|"`+PullImageMissing+`"|"`+PullImageNever+`")`)
p.StringVar(&options.Options.name, "name", "", "Assign a name to the container") p.BoolVar(&options.RunOptions.SigProxy, "sig-proxy", true, "Proxy received signals to the process")
p.StringVar(&options.Options.pull, "pull", PullImageMissing, `Pull image before running ("`+PullImageAlways+`"|"`+PullImageMissing+`"|"`+PullImageNever+`")`)
p.BoolVarP(&options.Options.quiet, "quiet", "q", false, "Suppress the pull output")
// Add an explicit help that doesn't have a `-h` to prevent the conflict // Add an explicit help that doesn't have a `-h` to prevent the conflict
// with hostname // with hostname
p.Bool("help", false, "Print usage") p.Bool("help", false, "Print usage")
command.AddPlatformFlag(p, &options.Options.platform) command.AddPlatformFlag(p, &options.RunOptions.Platform)
command.AddTrustVerificationFlags(p, &options.Options.untrusted, cli.ContentTrustEnabled()) options.ContainerOptions = addFlags(p)
options.Copts = addFlags(p)
} }
func (option *Options) GetExposePort(containerCfg *containerConfig) (nat.PortMap, nat.PortSet, error) { func (option *Options) GetExposePort(portBinds nat.PortMap) (nat.PortMap, nat.PortSet, error) {
object, err := util.GetUnstructuredObject(option.Factory, option.Namespace, option.Workload) templateSpec, err := option.GetPodTemplateSpec()
if err != nil {
log.Errorf("get unstructured object error: %v", err)
return nil, nil, err
}
u := object.Object.(*unstructured.Unstructured)
var templateSpec *v1.PodTemplateSpec
templateSpec, _, err = util.GetPodTemplateSpecPath(u)
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
} }
@@ -691,10 +485,43 @@ func (option *Options) GetExposePort(containerCfg *containerConfig) (nat.PortMap
} }
} }
for port, bindings := range containerCfg.HostConfig.PortBindings { for port, bindings := range portBinds {
portMap[port] = bindings portMap[port] = bindings
portSet[port] = struct{}{} portSet[port] = struct{}{}
} }
return portMap, portSet, nil return portMap, portSet, nil
} }
func (option *Options) InitClient(f cmdutil.Factory) (err error) {
option.factory = f
if option.config, err = option.factory.ToRESTConfig(); err != nil {
return
}
if option.restclient, err = option.factory.RESTClient(); err != nil {
return
}
if option.clientset, err = option.factory.KubernetesClientSet(); err != nil {
return
}
if option.Namespace, _, err = option.factory.ToRawKubeConfigLoader().Namespace(); err != nil {
return
}
if option.cli, option.dockerCli, err = util.GetClient(); err != nil {
return err
}
return
}
func (option *Options) GetPodTemplateSpec() (*v1.PodTemplateSpec, error) {
object, err := util.GetUnstructuredObject(option.factory, option.Namespace, option.Workload)
if err != nil {
log.Errorf("get unstructured object error: %v", err)
return nil, err
}
u := object.Object.(*unstructured.Unstructured)
var templateSpec *v1.PodTemplateSpec
templateSpec, _, err = util.GetPodTemplateSpecPath(u)
return templateSpec, err
}

View File

@@ -2,46 +2,41 @@ package dev
import ( import (
"context" "context"
"errors"
"fmt" "fmt"
"math/rand" "net"
"strconv" "strconv"
"strings" "strings"
"time"
"unsafe" "unsafe"
"github.com/containerd/containerd/platforms"
"github.com/docker/cli/cli/command" "github.com/docker/cli/cli/command"
"github.com/docker/docker/api/types" "github.com/docker/docker/api/types"
typescommand "github.com/docker/docker/api/types/container"
typescontainer "github.com/docker/docker/api/types/container" typescontainer "github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/mount" "github.com/docker/docker/api/types/mount"
"github.com/docker/docker/api/types/network" "github.com/docker/docker/api/types/network"
"github.com/docker/docker/api/types/strslice" "github.com/docker/docker/api/types/strslice"
"github.com/docker/docker/client" "github.com/docker/docker/client"
"github.com/docker/docker/errdefs"
"github.com/docker/go-connections/nat" "github.com/docker/go-connections/nat"
"github.com/google/uuid"
"github.com/miekg/dns" "github.com/miekg/dns"
"github.com/opencontainers/image-spec/specs-go/v1" "github.com/opencontainers/image-spec/specs-go/v1"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
v12 "k8s.io/api/core/v1" v12 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/wait" "k8s.io/utils/ptr"
"github.com/wencaiwulue/kubevpn/v2/pkg/config" "github.com/wencaiwulue/kubevpn/v2/pkg/config"
"github.com/wencaiwulue/kubevpn/v2/pkg/util" "github.com/wencaiwulue/kubevpn/v2/pkg/util"
) )
type RunConfig struct { type RunConfig struct {
containerName string name string
k8sContainerName string
config *typescontainer.Config config *typescontainer.Config
hostConfig *typescontainer.HostConfig hostConfig *typescontainer.HostConfig
networkingConfig *network.NetworkingConfig networkingConfig *network.NetworkingConfig
platform *v1.Platform platform *v1.Platform
Options runOptions Options RunOptions
Copts *containerOptions Copts ContainerOptions
} }
type ConfigList []*RunConfig type ConfigList []*RunConfig
@@ -58,11 +53,11 @@ func (c ConfigList) Remove(ctx context.Context, cli *client.Client) error {
return nil return nil
} }
for _, runConfig := range c { for _, runConfig := range c {
err := cli.NetworkDisconnect(ctx, runConfig.containerName, runConfig.containerName, true) err := cli.NetworkDisconnect(ctx, runConfig.name, runConfig.name, true)
if err != nil { if err != nil {
log.Debug(err) log.Debug(err)
} }
err = cli.ContainerRemove(ctx, runConfig.containerName, typescontainer.RemoveOptions{Force: true}) err = cli.ContainerRemove(ctx, runConfig.name, typescontainer.RemoveOptions{Force: true})
if err != nil { if err != nil {
log.Debug(err) log.Debug(err)
} }
@@ -81,31 +76,30 @@ func (c ConfigList) Run(ctx context.Context, volume map[string][]mount.Mount, cl
for index := len(c) - 1; index >= 0; index-- { for index := len(c) - 1; index >= 0; index-- {
runConfig := c[index] runConfig := c[index]
if index == 0 { if index == 0 {
err := runAndAttach(ctx, runConfig, dockerCli) err := runContainer(ctx, dockerCli, runConfig)
if err != nil { if err != nil {
return err return err
} }
} else { }
id, err := run(ctx, runConfig, cli, dockerCli) _, err := run(ctx, cli, dockerCli, runConfig)
if err != nil { if err != nil {
// try another way to startup container // try to copy volume into container, why?
log.Infof("occur err: %v, try to use copy to startup container...", err)
runConfig.hostConfig.Mounts = nil runConfig.hostConfig.Mounts = nil
id, err = run(ctx, runConfig, cli, dockerCli) id, err1 := run(ctx, cli, dockerCli, runConfig)
if err != nil { if err1 != nil {
// return first error
return err return err
} }
err = util.CopyVolumeIntoContainer(ctx, volume[runConfig.k8sContainerName], cli, id) err = util.CopyVolumeIntoContainer(ctx, volume[runConfig.name], cli, id)
if err != nil { if err != nil {
return err return err
} }
} }
} }
}
return nil return nil
} }
func convertKubeResourceToContainer(ns string, temp v12.PodTemplateSpec, envMap map[string][]string, mountVolume map[string][]mount.Mount, dnsConfig *dns.ClientConfig) (list ConfigList) { func ConvertPodToContainer(ns string, temp v12.PodTemplateSpec, envMap map[string][]string, mountVolume map[string][]mount.Mount, dnsConfig *dns.ClientConfig) (list ConfigList) {
getHostname := func(containerName string) string { getHostname := func(containerName string) string {
for _, envEntry := range envMap[containerName] { for _, envEntry := range envMap[containerName] {
env := strings.Split(envEntry, "=") env := strings.Split(envEntry, "=")
@@ -118,17 +112,17 @@ func convertKubeResourceToContainer(ns string, temp v12.PodTemplateSpec, envMap
for _, c := range temp.Spec.Containers { for _, c := range temp.Spec.Containers {
containerConf := &typescontainer.Config{ containerConf := &typescontainer.Config{
Hostname: getHostname(c.Name), Hostname: getHostname(util.Join(ns, c.Name)),
Domainname: temp.Spec.Subdomain, Domainname: temp.Spec.Subdomain,
User: "root", User: "root",
AttachStdin: false, AttachStdin: c.Stdin,
AttachStdout: false, AttachStdout: false,
AttachStderr: false, AttachStderr: false,
ExposedPorts: nil, ExposedPorts: nil,
Tty: c.TTY, Tty: c.TTY,
OpenStdin: c.Stdin, OpenStdin: c.Stdin,
StdinOnce: false, StdinOnce: c.StdinOnce,
Env: envMap[c.Name], Env: envMap[util.Join(ns, c.Name)],
Cmd: c.Args, Cmd: c.Args,
Healthcheck: nil, Healthcheck: nil,
ArgsEscaped: false, ArgsEscaped: false,
@@ -169,7 +163,7 @@ func convertKubeResourceToContainer(ns string, temp v12.PodTemplateSpec, envMap
Links: nil, Links: nil,
OomScoreAdj: 0, OomScoreAdj: 0,
PidMode: "", PidMode: "",
Privileged: true, Privileged: ptr.Deref(ptr.Deref(c.SecurityContext, v12.SecurityContext{}).Privileged, false),
PublishAllPorts: false, PublishAllPorts: false,
ReadonlyRootfs: false, ReadonlyRootfs: false,
SecurityOpt: []string{"apparmor=unconfined", "seccomp=unconfined"}, SecurityOpt: []string{"apparmor=unconfined", "seccomp=unconfined"},
@@ -182,7 +176,7 @@ func convertKubeResourceToContainer(ns string, temp v12.PodTemplateSpec, envMap
Runtime: "", Runtime: "",
Isolation: "", Isolation: "",
Resources: typescontainer.Resources{}, Resources: typescontainer.Resources{},
Mounts: mountVolume[c.Name], Mounts: mountVolume[util.Join(ns, c.Name)],
MaskedPaths: nil, MaskedPaths: nil,
ReadonlyPaths: nil, ReadonlyPaths: nil,
Init: nil, Init: nil,
@@ -206,113 +200,86 @@ func convertKubeResourceToContainer(ns string, temp v12.PodTemplateSpec, envMap
hostConfig.CapAdd = append(hostConfig.CapAdd, *(*strslice.StrSlice)(unsafe.Pointer(&c.SecurityContext.Capabilities.Add))...) hostConfig.CapAdd = append(hostConfig.CapAdd, *(*strslice.StrSlice)(unsafe.Pointer(&c.SecurityContext.Capabilities.Add))...)
hostConfig.CapDrop = *(*strslice.StrSlice)(unsafe.Pointer(&c.SecurityContext.Capabilities.Drop)) hostConfig.CapDrop = *(*strslice.StrSlice)(unsafe.Pointer(&c.SecurityContext.Capabilities.Drop))
} }
var suffix string
newUUID, err := uuid.NewUUID() var r = RunConfig{
if err == nil { name: util.Join(ns, c.Name),
suffix = strings.ReplaceAll(newUUID.String(), "-", "")[:5] config: containerConf,
hostConfig: hostConfig,
networkingConfig: &network.NetworkingConfig{EndpointsConfig: make(map[string]*network.EndpointSettings)},
platform: nil,
Options: RunOptions{Pull: PullImageMissing},
} }
var r RunConfig
r.containerName = fmt.Sprintf("%s_%s_%s_%s", c.Name, ns, "kubevpn", suffix)
r.k8sContainerName = c.Name
r.config = containerConf
r.hostConfig = hostConfig
r.networkingConfig = &network.NetworkingConfig{EndpointsConfig: make(map[string]*network.EndpointSettings)}
r.platform = nil
list = append(list, &r) list = append(list, &r)
} }
return list return list
} }
func run(ctx context.Context, runConfig *RunConfig, cli *client.Client, c *command.DockerCli) (id string, err error) { func MergeDockerOptions(list ConfigList, options *Options, config *Config, hostConfig *HostConfig) {
rand.New(rand.NewSource(time.Now().UnixNano())) conf := list[0]
conf.Options = options.RunOptions
conf.Copts = *options.ContainerOptions
var config = runConfig.config if options.RunOptions.Platform != "" {
var hostConfig = runConfig.hostConfig p, _ := platforms.Parse(options.RunOptions.Platform)
var platform = runConfig.platform conf.platform = &p
var networkConfig = runConfig.networkingConfig }
var name = runConfig.containerName
var needPull bool // container config
var img types.ImageInspect var entrypoint = conf.config.Entrypoint
img, _, err = cli.ImageInspectWithRaw(ctx, config.Image) var args = conf.config.Cmd
if errdefs.IsNotFound(err) { // if special --entrypoint, then use it
log.Infof("needs to pull image %s", config.Image) if len(config.Entrypoint) != 0 {
needPull = true entrypoint = config.Entrypoint
err = nil args = config.Cmd
} else if err != nil {
log.Errorf("image inspect failed: %v", err)
return
} }
if platform != nil && platform.Architecture != "" && platform.OS != "" { if len(config.Cmd) != 0 {
if img.Os != platform.OS || img.Architecture != platform.Architecture { args = config.Cmd
needPull = true }
conf.config.Entrypoint = entrypoint
conf.config.Cmd = args
if options.DevImage != "" {
conf.config.Image = options.DevImage
}
conf.config.Volumes = util.Merge[string, struct{}](conf.config.Volumes, config.Volumes)
for k, v := range config.ExposedPorts {
if _, found := conf.config.ExposedPorts[k]; !found {
conf.config.ExposedPorts[k] = v
} }
} }
if needPull { conf.config.StdinOnce = config.StdinOnce
err = util.PullImage(ctx, runConfig.platform, cli, c, config.Image, nil) conf.config.AttachStdin = config.AttachStdin
conf.config.AttachStdout = config.AttachStdout
conf.config.AttachStderr = config.AttachStderr
conf.config.Tty = config.Tty
conf.config.OpenStdin = config.OpenStdin
// host config
var hosts []string
for _, domain := range options.ExtraRouteInfo.ExtraDomain {
ips, err := net.LookupIP(domain)
if err != nil { if err != nil {
log.Errorf("Failed to pull image: %s, err: %s", config.Image, err) continue
return }
for _, ip := range ips {
if ip.To4() != nil {
hosts = append(hosts, fmt.Sprintf("%s:%s", domain, ip.To4().String()))
break
} }
} }
var create typescommand.CreateResponse
create, err = cli.ContainerCreate(ctx, config, hostConfig, networkConfig, platform, name)
if err != nil {
log.Errorf("Failed to create container: %s, err: %s", name, err)
return
} }
id = create.ID conf.hostConfig.ExtraHosts = hosts
log.Infof("Created container: %s", name) conf.hostConfig.AutoRemove = hostConfig.AutoRemove
defer func() { conf.hostConfig.Privileged = hostConfig.Privileged
if err != nil && runConfig.hostConfig.AutoRemove { conf.hostConfig.PublishAllPorts = hostConfig.PublishAllPorts
_ = cli.ContainerRemove(ctx, id, typescontainer.RemoveOptions{Force: true}) conf.hostConfig.Mounts = append(conf.hostConfig.Mounts, hostConfig.Mounts...)
conf.hostConfig.Binds = append(conf.hostConfig.Binds, hostConfig.Binds...)
for port, bindings := range hostConfig.PortBindings {
if v, ok := conf.hostConfig.PortBindings[port]; ok {
conf.hostConfig.PortBindings[port] = append(v, bindings...)
} else {
conf.hostConfig.PortBindings[port] = bindings
} }
}()
err = cli.ContainerStart(ctx, create.ID, typescontainer.StartOptions{})
if err != nil {
log.Errorf("failed to startup container %s: %v", name, err)
return
} }
log.Infof("Wait container %s to be running...", name)
var inspect types.ContainerJSON
ctx2, cancelFunc := context.WithTimeout(ctx, time.Minute*5)
wait.UntilWithContext(ctx2, func(ctx context.Context) {
inspect, err = cli.ContainerInspect(ctx, create.ID)
if errdefs.IsNotFound(err) {
cancelFunc()
return
} else if err != nil {
cancelFunc()
return
}
if inspect.State != nil && (inspect.State.Status == "exited" || inspect.State.Status == "dead" || inspect.State.Dead) {
cancelFunc()
err = errors.New(fmt.Sprintf("container status: %s", inspect.State.Status))
return
}
if inspect.State != nil && inspect.State.Running {
cancelFunc()
return
}
}, time.Second)
if err != nil {
log.Errorf("failed to wait container to be ready: %v", err)
_ = runLogsSinceNow(c, id, false)
return
}
log.Infof("Container %s is running now", name)
return
}
func runAndAttach(ctx context.Context, runConfig *RunConfig, cli *command.DockerCli) error {
c := &containerConfig{
Config: runConfig.config,
HostConfig: runConfig.hostConfig,
NetworkingConfig: runConfig.networkingConfig,
}
return runContainer(ctx, cli, &runConfig.Options, runConfig.Copts, c)
} }

View File

@@ -12,7 +12,6 @@ import (
"k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/fields"
"k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest" "k8s.io/client-go/rest"
"k8s.io/kubectl/pkg/cmd/util"
"github.com/wencaiwulue/kubevpn/v2/pkg/config" "github.com/wencaiwulue/kubevpn/v2/pkg/config"
) )
@@ -75,16 +74,8 @@ func GetDNSIPFromDnsPod(ctx context.Context, clientset *kubernetes.Clientset) (i
return return
} }
func GetDNS(ctx context.Context, f util.Factory, ns, pod string) (*dns.ClientConfig, error) { func GetDNS(ctx context.Context, clientSet *kubernetes.Clientset, restConfig *rest.Config, ns, pod string) (*dns.ClientConfig, error) {
clientSet, err := f.KubernetesClientSet() _, err := clientSet.CoreV1().Pods(ns).Get(ctx, pod, v12.GetOptions{})
if err != nil {
return nil, err
}
_, err = clientSet.CoreV1().Pods(ns).Get(ctx, pod, v12.GetOptions{})
if err != nil {
return nil, err
}
restConfig, err := f.ToRESTConfig()
if err != nil { if err != nil {
return nil, err return nil, err
} }

View File

@@ -31,23 +31,23 @@ import (
) )
func GetClient() (*client.Client, *command.DockerCli, error) { func GetClient() (*client.Client, *command.DockerCli, error) {
client, err := client.NewClientWithOpts( cli, err := client.NewClientWithOpts(
client.FromEnv, client.FromEnv,
client.WithAPIVersionNegotiation(), client.WithAPIVersionNegotiation(),
) )
if err != nil { if err != nil {
return nil, nil, fmt.Errorf("can not create docker client from env, err: %v", err) return nil, nil, err
} }
var cli *command.DockerCli var dockerCli *command.DockerCli
cli, err = command.NewDockerCli(command.WithAPIClient(client)) dockerCli, err = command.NewDockerCli(command.WithAPIClient(cli))
if err != nil { if err != nil {
return nil, nil, fmt.Errorf("can not create docker client from env, err: %v", err) return nil, nil, err
} }
err = cli.Initialize(flags.NewClientOptions()) err = dockerCli.Initialize(flags.NewClientOptions())
if err != nil { if err != nil {
return nil, nil, fmt.Errorf("can not init docker client, err: %v", err) return nil, nil, err
} }
return client, cli, nil return cli, dockerCli, nil
} }
// TransferImage // TransferImage
@@ -160,7 +160,7 @@ func TransferImage(ctx context.Context, conf *SshConfig, imageSource, imageTarge
} }
// PullImage image.RunPull(ctx, c, image.PullOptions{}) // PullImage image.RunPull(ctx, c, image.PullOptions{})
func PullImage(ctx context.Context, platform *v1.Platform, cli *client.Client, c *command.DockerCli, img string, out io.Writer) error { func PullImage(ctx context.Context, platform *v1.Platform, cli *client.Client, dockerCli *command.DockerCli, img string, out io.Writer) error {
var readCloser io.ReadCloser var readCloser io.ReadCloser
var plat string var plat string
if platform != nil && platform.Architecture != "" && platform.OS != "" { if platform != nil && platform.Architecture != "" && platform.OS != "" {
@@ -172,7 +172,7 @@ func PullImage(ctx context.Context, platform *v1.Platform, cli *client.Client, c
return err return err
} }
var imgRefAndAuth trust.ImageRefAndAuth var imgRefAndAuth trust.ImageRefAndAuth
imgRefAndAuth, err = trust.GetImageReferencesAndAuth(ctx, image.AuthResolver(c), distributionRef.String()) imgRefAndAuth, err = trust.GetImageReferencesAndAuth(ctx, image.AuthResolver(dockerCli), distributionRef.String())
if err != nil { if err != nil {
log.Errorf("can not get image auth: %v", err) log.Errorf("can not get image auth: %v", err)
return err return err
@@ -183,7 +183,7 @@ func PullImage(ctx context.Context, platform *v1.Platform, cli *client.Client, c
log.Errorf("can not encode auth config to base64: %v", err) log.Errorf("can not encode auth config to base64: %v", err)
return err return err
} }
requestPrivilege := command.RegistryAuthenticationPrivilegedFunc(c, imgRefAndAuth.RepoInfo().Index, "pull") requestPrivilege := command.RegistryAuthenticationPrivilegedFunc(dockerCli, imgRefAndAuth.RepoInfo().Index, "pull")
readCloser, err = cli.ImagePull(ctx, img, typesimage.PullOptions{ readCloser, err = cli.ImagePull(ctx, img, typesimage.PullOptions{
All: false, All: false,
RegistryAuth: encodedAuth, RegistryAuth: encodedAuth,

7
pkg/util/name.go Normal file
View File

@@ -0,0 +1,7 @@
package util
import "strings"
// Join concatenates the given name parts with an underscore separator.
// Join() returns "", Join("a") returns "a", Join("a", "b") returns "a_b".
func Join(names ...string) string {
	var b strings.Builder
	for i, name := range names {
		if i > 0 {
			b.WriteByte('_')
		}
		b.WriteString(name)
	}
	return b.String()
}

View File

@@ -101,27 +101,19 @@ func max[T constraints.Ordered](a T, b T) T {
return b return b
} }
func GetEnv(ctx context.Context, f util.Factory, ns, pod string) (map[string][]string, error) { func GetEnv(ctx context.Context, set *kubernetes.Clientset, config *rest.Config, ns, podName string) (map[string][]string, error) {
set, err2 := f.KubernetesClientSet() pod, err := set.CoreV1().Pods(ns).Get(ctx, podName, v1.GetOptions{})
if err2 != nil {
return nil, err2
}
config, err2 := f.ToRESTConfig()
if err2 != nil {
return nil, err2
}
get, err := set.CoreV1().Pods(ns).Get(ctx, pod, v1.GetOptions{})
if err != nil { if err != nil {
return nil, err return nil, err
} }
result := map[string][]string{} result := map[string][]string{}
for _, c := range get.Spec.Containers { for _, c := range pod.Spec.Containers {
env, err := Shell(ctx, set, config, pod, c.Name, ns, []string{"env"}) env, err := Shell(ctx, set, config, podName, c.Name, ns, []string{"env"})
if err != nil { if err != nil {
return nil, err return nil, err
} }
split := strings.Split(env, "\n") split := strings.Split(env, "\n")
result[c.Name] = split result[Join(ns, c.Name)] = split
} }
return result, nil return result, nil
} }

View File

@@ -79,7 +79,7 @@ func GetVolume(ctx context.Context, f util.Factory, ns, podName string) (map[str
}) })
logrus.Infof("%s:%s", localPath, volumeMount.MountPath) logrus.Infof("%s:%s", localPath, volumeMount.MountPath)
} }
result[container.Name] = m result[Join(ns, container.Name)] = m
} }
return result, nil return result, nil
} }