mirror of https://github.com/kubenetworks/kubevpn.git (synced 2025-09-26 19:31:17 +08:00)

refactor: refactor command dev (#309)

README.md (15 changed lines)
@@ -400,7 +400,7 @@ Run the Kubernetes pod in the local Docker container, and cooperate with the ser
the specified header to the local, or all the traffic to the local.

```shell
➜  ~ kubevpn dev deployment/authors --headers a=1 -it --rm --entrypoint sh
➜  ~ kubevpn dev deployment/authors --headers a=1 --entrypoint sh
connectting to cluster
start to connect
got cidr from cache
@@ -500,13 +500,13 @@ docker logs $(docker ps --format '{{.Names}}' | grep nginx_default_kubevpn)
If you just want to start up a docker image, you can use a simple way like this:

```shell
kubevpn dev deployment/authors --no-proxy -it --rm
kubevpn dev deployment/authors --no-proxy
```

Example:

```shell
➜  ~ kubevpn dev deployment/authors --no-proxy -it --rm
➜  ~ kubevpn dev deployment/authors --no-proxy
connectting to cluster
start to connect
got cidr from cache
@@ -567,14 +567,7 @@ e008f553422a: Pull complete
33f0298d1d4f: Pull complete
Digest: sha256:115b975a97edd0b41ce7a0bc1d8428e6b8569c91a72fe31ea0bada63c685742e
Status: Downloaded newer image for naison/kubevpn:v2.0.0
root@d0b3dab8912a:/app# kubevpn dev deployment/authors --headers user=naison -it --entrypoint sh

----------------------------------------------------------------------------------
Warn: Use sudo to execute command kubevpn can not use user env KUBECONFIG.
Because of sudo user env and user env are different.
Current env KUBECONFIG value:
----------------------------------------------------------------------------------

root@d0b3dab8912a:/app# kubevpn dev deployment/authors --headers user=naison --entrypoint sh
hostname is d0b3dab8912a
connectting to cluster
start to connect
README_ZH.md (15 changed lines)
@@ -323,7 +323,7 @@ leave workload deployments/productpage successfully
Docker.

```shell
➜  ~ kubevpn dev deployment/authors --headers a=1 -it --rm --entrypoint sh
➜  ~ kubevpn dev deployment/authors --headers a=1 --entrypoint sh
connectting to cluster
start to connect
got cidr from cache
@@ -406,13 +406,13 @@ fc04e42799a5 nginx:latest "/docker-entrypoint.…" 37 sec
If you just want to start up an image locally, there is a simpler way:

```shell
kubevpn dev deployment/authors --no-proxy -it --rm
kubevpn dev deployment/authors --no-proxy
```

For example:

```shell
➜  ~ kubevpn dev deployment/authors --no-proxy -it --rm
➜  ~ kubevpn dev deployment/authors --no-proxy
connectting to cluster
start to connect
got cidr from cache
@@ -471,14 +471,7 @@ e008f553422a: Pull complete
33f0298d1d4f: Pull complete
Digest: sha256:115b975a97edd0b41ce7a0bc1d8428e6b8569c91a72fe31ea0bada63c685742e
Status: Downloaded newer image for naison/kubevpn:v2.0.0
root@d0b3dab8912a:/app# kubevpn dev deployment/authors --headers user=naison -it --entrypoint sh

----------------------------------------------------------------------------------
Warn: Use sudo to execute command kubevpn can not use user env KUBECONFIG.
Because of sudo user env and user env are different.
Current env KUBECONFIG value:
----------------------------------------------------------------------------------

root@d0b3dab8912a:/app# kubevpn dev deployment/authors --headers user=naison --entrypoint sh
hostname is d0b3dab8912a
connectting to cluster
start to connect
@@ -4,7 +4,7 @@ import (
    "fmt"
    "os"

    dockercomp "github.com/docker/cli/cli/command/completion"
    "github.com/containerd/containerd/platforms"
    log "github.com/sirupsen/logrus"
    "github.com/spf13/cobra"
    cmdutil "k8s.io/kubectl/pkg/cmd/util"
@@ -20,15 +20,8 @@ import (
)

func CmdDev(f cmdutil.Factory) *cobra.Command {
    client, cli, err := util.GetClient()
    if err != nil {
        log.Fatal(err)
    }
    var options = &dev.Options{
        Factory:        f,
        NoProxy:        false,
        Cli:            client,
        DockerCli:      cli,
        ExtraRouteInfo: handler.ExtraRouteInfo{},
    }
    var sshConf = &util.SshConfig{}
@@ -67,21 +60,21 @@ func CmdDev(f cmdutil.Factory) *cobra.Command {
        kubevpn dev deployment/productpage --ssh-alias <alias>

        # Switch to terminal mode; send stdin to 'bash' and sends stdout/stderror from 'bash' back to the client
        kubevpn dev deployment/authors -n default --kubeconfig ~/.kube/config --ssh-alias dev -i -t --entrypoint /bin/bash
        kubevpn dev deployment/authors -n default --kubeconfig ~/.kube/config --ssh-alias dev --entrypoint /bin/bash
        or
        kubevpn dev deployment/authors -n default --kubeconfig ~/.kube/config --ssh-alias dev -it --entrypoint /bin/bash
        kubevpn dev deployment/authors -n default --kubeconfig ~/.kube/config --ssh-alias dev --entrypoint /bin/bash

        # Support ssh auth GSSAPI
        kubevpn dev deployment/authors -n default --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-keytab /path/to/keytab -it --entrypoint /bin/bash
        kubevpn dev deployment/authors -n default --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-cache /path/to/cache -it --entrypoint /bin/bash
        kubevpn dev deployment/authors -n default --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-password <PASSWORD> -it --entrypoint /bin/bash
        kubevpn dev deployment/authors -n default --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-keytab /path/to/keytab --entrypoint /bin/bash
        kubevpn dev deployment/authors -n default --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-cache /path/to/cache --entrypoint /bin/bash
        kubevpn dev deployment/authors -n default --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-password <PASSWORD> --entrypoint /bin/bash
        `)),
    ValidArgsFunction:     completion.ResourceTypeAndNameCompletionFunc(f),
    Args:                  cobra.MatchAll(cobra.OnlyValidArgs),
    DisableFlagsInUseLine: true,
    PreRunE: func(cmd *cobra.Command, args []string) error {
        if len(args) == 0 {
            fmt.Fprintf(os.Stdout, "You must specify the type of resource to proxy. %s\n\n", cmdutil.SuggestAPIResources("kubevpn"))
            _, _ = fmt.Fprintf(os.Stdout, "You must specify the type of resource to proxy. %s\n\n", cmdutil.SuggestAPIResources("kubevpn"))
            fullCmdName := cmd.Parent().CommandPath()
            usageString := "Required resource not specified."
            if len(fullCmdName) > 0 && cmdutil.IsSiblingCommandExists(cmd, "explain") {
@@ -89,7 +82,7 @@ func CmdDev(f cmdutil.Factory) *cobra.Command {
            }
            return cmdutil.UsageErrorf(cmd, usageString)
        }
        err = cmd.Flags().Parse(args[1:])
        err := cmd.Flags().Parse(args[1:])
        if err != nil {
            return err
        }
@@ -99,6 +92,15 @@ func CmdDev(f cmdutil.Factory) *cobra.Command {
            return fmt.Errorf(`not support type engine: %s, support ("%s"|"%s")`, config.EngineGvisor, config.EngineMix, config.EngineRaw)
        }

        if p := options.RunOptions.Platform; p != "" {
            if _, err = platforms.Parse(p); err != nil {
                return fmt.Errorf("error parsing specified platform: %v", err)
            }
        }
        if err = validatePullOpt(options.RunOptions.Pull); err != nil {
            return err
        }

        err = daemon.StartupDaemon(cmd.Context())
        if err != nil {
            return err
@@ -109,7 +111,7 @@ func CmdDev(f cmdutil.Factory) *cobra.Command {
        options.Workload = args[0]
        for i, arg := range args {
            if arg == "--" && i != len(args)-1 {
                options.Copts.Args = args[i+1:]
                options.ContainerOptions.Args = args[i+1:]
                break
            }
        }
@@ -123,7 +125,12 @@ func CmdDev(f cmdutil.Factory) *cobra.Command {
                }
            }
        }()
        err = dev.DoDev(cmd.Context(), options, sshConf, cmd.Flags(), f, transferImage)

        if err := options.InitClient(f); err != nil {
            return err
        }

        err := options.Main(cmd.Context(), sshConf, cmd.Flags(), transferImage)
        return err
    },
}
@@ -141,26 +148,25 @@ func CmdDev(f cmdutil.Factory) *cobra.Command {
    // diy docker options
    cmd.Flags().StringVar(&options.DevImage, "dev-image", "", "Use to startup docker container, Default is pod image")
    // origin docker options
    dev.AddDockerFlags(options, cmd.Flags(), cli)

    _ = cmd.RegisterFlagCompletionFunc(
        "env",
        func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
            return os.Environ(), cobra.ShellCompDirectiveNoFileComp
        },
    )
    _ = cmd.RegisterFlagCompletionFunc(
        "env-file",
        func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
            return nil, cobra.ShellCompDirectiveDefault
        },
    )
    _ = cmd.RegisterFlagCompletionFunc(
        "network",
        dockercomp.NetworkNames(cli),
    )
    dev.AddDockerFlags(options, cmd.Flags())

    handler.AddExtraRoute(cmd.Flags(), &options.ExtraRouteInfo)
    util.AddSshFlags(cmd.Flags(), sshConf)
    return cmd
}

func validatePullOpt(val string) error {
    switch val {
    case dev.PullImageAlways, dev.PullImageMissing, dev.PullImageNever, "":
        // valid option, but nothing to do yet
        return nil
    default:
        return fmt.Errorf(
            "invalid pull option: '%s': must be one of %q, %q or %q",
            val,
            dev.PullImageAlways,
            dev.PullImageMissing,
            dev.PullImageNever,
        )
    }
}
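The refactored command keeps one small but load-bearing rule in its `RunE`: everything after the first `--` is peeled off and handed to the container options as the container command. A minimal, self-contained sketch of that splitting rule (the function name is illustrative, not kubevpn's API):

```go
package main

import "fmt"

// splitAtDashDash mirrors the loop in CmdDev's RunE: everything after the
// first "--" (when it is not the final token) becomes the container command.
func splitAtDashDash(args []string) (own, rest []string) {
	for i, arg := range args {
		if arg == "--" && i != len(args)-1 {
			return args[:i], args[i+1:]
		}
	}
	return args, nil
}

func main() {
	own, rest := splitAtDashDash([]string{"deployment/authors", "--no-proxy", "--", "sh", "-c", "env"})
	fmt.Println(own)  // [deployment/authors --no-proxy]
	fmt.Println(rest) // [sh -c env]
}
```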
@@ -1,328 +0,0 @@
package dev

import (
    "context"
    "fmt"
    "io"
    "os"
    "regexp"

    "github.com/containerd/containerd/platforms"
    "github.com/distribution/reference"
    "github.com/docker/cli/cli"
    "github.com/docker/cli/cli/command"
    "github.com/docker/cli/cli/command/completion"
    "github.com/docker/cli/cli/command/image"
    "github.com/docker/cli/cli/streams"
    "github.com/docker/cli/opts"
    "github.com/docker/docker/api/types/container"
    imagetypes "github.com/docker/docker/api/types/image"
    "github.com/docker/docker/api/types/versions"
    "github.com/docker/docker/errdefs"
    "github.com/docker/docker/pkg/jsonmessage"
    specs "github.com/opencontainers/image-spec/specs-go/v1"
    "github.com/pkg/errors"
    "github.com/spf13/cobra"
    "github.com/spf13/pflag"
)

// Pull constants
const (
    PullImageAlways  = "always"
    PullImageMissing = "missing" // Default (matches previous behavior)
    PullImageNever   = "never"
)

type createOptions struct {
    name      string
    platform  string
    untrusted bool
    pull      string // always, missing, never
    quiet     bool
}

// NewCreateCommand creates a new cobra.Command for `docker create`
func NewCreateCommand(dockerCli command.Cli) *cobra.Command {
    var options createOptions
    var copts *containerOptions

    cmd := &cobra.Command{
        Use:   "create [OPTIONS] IMAGE [COMMAND] [ARG...]",
        Short: "Create a new container",
        Args:  cli.RequiresMinArgs(1),
        RunE: func(cmd *cobra.Command, args []string) error {
            copts.Image = args[0]
            if len(args) > 1 {
                copts.Args = args[1:]
            }
            return runCreate(cmd.Context(), dockerCli, cmd.Flags(), &options, copts)
        },
        Annotations: map[string]string{
            "aliases": "docker container create, docker create",
        },
        ValidArgsFunction: completion.ImageNames(dockerCli),
    }

    flags := cmd.Flags()
    flags.SetInterspersed(false)

    flags.StringVar(&options.name, "name", "", "Assign a name to the container")
    flags.StringVar(&options.pull, "pull", PullImageMissing, `Pull image before creating ("`+PullImageAlways+`", "`+PullImageMissing+`", "`+PullImageNever+`")`)
    flags.BoolVarP(&options.quiet, "quiet", "q", false, "Suppress the pull output")

    // Add an explicit help that doesn't have a `-h` to prevent the conflict
    // with hostname
    flags.Bool("help", false, "Print usage")

    command.AddPlatformFlag(flags, &options.platform)
    command.AddTrustVerificationFlags(flags, &options.untrusted, dockerCli.ContentTrustEnabled())
    copts = addFlags(flags)
    return cmd
}

func runCreate(ctx context.Context, dockerCli command.Cli, flags *pflag.FlagSet, options *createOptions, copts *containerOptions) error {
    if err := validatePullOpt(options.pull); err != nil {
        reportError(dockerCli.Err(), "create", err.Error(), true)
        return cli.StatusError{StatusCode: 125}
    }
    proxyConfig := dockerCli.ConfigFile().ParseProxyConfig(dockerCli.Client().DaemonHost(), opts.ConvertKVStringsToMapWithNil(copts.env.GetAll()))
    newEnv := []string{}
    for k, v := range proxyConfig {
        if v == nil {
            newEnv = append(newEnv, k)
        } else {
            newEnv = append(newEnv, k+"="+*v)
        }
    }
    copts.env = *opts.NewListOptsRef(&newEnv, nil)
    containerCfg, err := parse(flags, copts, dockerCli.ServerInfo().OSType)
    if err != nil {
        reportError(dockerCli.Err(), "create", err.Error(), true)
        return cli.StatusError{StatusCode: 125}
    }
    if err = validateAPIVersion(containerCfg, dockerCli.Client().ClientVersion()); err != nil {
        reportError(dockerCli.Err(), "create", err.Error(), true)
        return cli.StatusError{StatusCode: 125}
    }
    id, err := createContainer(ctx, dockerCli, containerCfg, options)
    if err != nil {
        return err
    }
    _, _ = fmt.Fprintln(dockerCli.Out(), id)
    return nil
}

// FIXME(thaJeztah): this is the only code-path that uses APIClient.ImageCreate. Rewrite this to use the regular "pull" code (or vice-versa).
func pullImage(ctx context.Context, dockerCli command.Cli, img string, options *createOptions) error {
    encodedAuth, err := command.RetrieveAuthTokenFromImage(dockerCli.ConfigFile(), img)
    if err != nil {
        return err
    }

    responseBody, err := dockerCli.Client().ImageCreate(ctx, img, imagetypes.CreateOptions{
        RegistryAuth: encodedAuth,
        Platform:     options.platform,
    })
    if err != nil {
        return err
    }
    defer responseBody.Close()

    out := dockerCli.Err()
    if options.quiet {
        out = io.Discard
    }
    return jsonmessage.DisplayJSONMessagesToStream(responseBody, streams.NewOut(out), nil)
}

type cidFile struct {
    path    string
    file    *os.File
    written bool
}

func (cid *cidFile) Close() error {
    if cid.file == nil {
        return nil
    }
    cid.file.Close()

    if cid.written {
        return nil
    }
    if err := os.Remove(cid.path); err != nil {
        return errors.Wrapf(err, "failed to remove the CID file '%s'", cid.path)
    }

    return nil
}

func (cid *cidFile) Write(id string) error {
    if cid.file == nil {
        return nil
    }
    if _, err := cid.file.Write([]byte(id)); err != nil {
        return errors.Wrap(err, "failed to write the container ID to the file")
    }
    cid.written = true
    return nil
}

func newCIDFile(path string) (*cidFile, error) {
    if path == "" {
        return &cidFile{}, nil
    }
    if _, err := os.Stat(path); err == nil {
        return nil, errors.Errorf("container ID file found, make sure the other container isn't running or delete %s", path)
    }

    f, err := os.Create(path)
    if err != nil {
        return nil, errors.Wrap(err, "failed to create the container ID file")
    }

    return &cidFile{path: path, file: f}, nil
}

//nolint:gocyclo
func createContainer(ctx context.Context, dockerCli command.Cli, containerCfg *containerConfig, options *createOptions) (containerID string, err error) {
    config := containerCfg.Config
    hostConfig := containerCfg.HostConfig
    networkingConfig := containerCfg.NetworkingConfig

    warnOnOomKillDisable(*hostConfig, dockerCli.Err())
    warnOnLocalhostDNS(*hostConfig, dockerCli.Err())

    var (
        trustedRef reference.Canonical
        namedRef   reference.Named
    )

    containerIDFile, err := newCIDFile(hostConfig.ContainerIDFile)
    if err != nil {
        return "", err
    }
    defer containerIDFile.Close()

    ref, err := reference.ParseAnyReference(config.Image)
    if err != nil {
        return "", err
    }
    if named, ok := ref.(reference.Named); ok {
        namedRef = reference.TagNameOnly(named)

        if taggedRef, ok := namedRef.(reference.NamedTagged); ok && !options.untrusted {
            var err error
            trustedRef, err = image.TrustedReference(ctx, dockerCli, taggedRef)
            if err != nil {
                return "", err
            }
            config.Image = reference.FamiliarString(trustedRef)
        }
    }

    pullAndTagImage := func() error {
        if err := pullImage(ctx, dockerCli, config.Image, options); err != nil {
            return err
        }
        if taggedRef, ok := namedRef.(reference.NamedTagged); ok && trustedRef != nil {
            return image.TagTrusted(ctx, dockerCli, trustedRef, taggedRef)
        }
        return nil
    }

    var platform *specs.Platform
    // Engine API version 1.41 first introduced the option to specify platform on
    // create. It will produce an error if you try to set a platform on older API
    // versions, so check the API version here to maintain backwards
    // compatibility for CLI users.
    if options.platform != "" && versions.GreaterThanOrEqualTo(dockerCli.Client().ClientVersion(), "1.41") {
        p, err := platforms.Parse(options.platform)
        if err != nil {
            return "", errors.Wrap(err, "error parsing specified platform")
        }
        platform = &p
    }

    if options.pull == PullImageAlways {
        if err := pullAndTagImage(); err != nil {
            return "", err
        }
    }

    hostConfig.ConsoleSize[0], hostConfig.ConsoleSize[1] = dockerCli.Out().GetTtySize()

    response, err := dockerCli.Client().ContainerCreate(ctx, config, hostConfig, networkingConfig, platform, options.name)
    if err != nil {
        // Pull image if it does not exist locally and we have the PullImageMissing option. Default behavior.
        if errdefs.IsNotFound(err) && namedRef != nil && options.pull == PullImageMissing {
            if !options.quiet {
                // we don't want to write to stdout anything apart from container.ID
                fmt.Fprintf(dockerCli.Err(), "Unable to find image '%s' locally\n", reference.FamiliarString(namedRef))
            }

            if err := pullAndTagImage(); err != nil {
                return "", err
            }

            var retryErr error
            response, retryErr = dockerCli.Client().ContainerCreate(ctx, config, hostConfig, networkingConfig, platform, options.name)
            if retryErr != nil {
                return "", retryErr
            }
        } else {
            return "", err
        }
    }

    for _, w := range response.Warnings {
        _, _ = fmt.Fprintf(dockerCli.Err(), "WARNING: %s\n", w)
    }
    err = containerIDFile.Write(response.ID)
    return response.ID, err
}

func warnOnOomKillDisable(hostConfig container.HostConfig, stderr io.Writer) {
    if hostConfig.OomKillDisable != nil && *hostConfig.OomKillDisable && hostConfig.Memory == 0 {
        fmt.Fprintln(stderr, "WARNING: Disabling the OOM killer on containers without setting a '-m/--memory' limit may be dangerous.")
    }
}

// check the DNS settings passed via --dns against localhost regexp to warn if
// they are trying to set a DNS to a localhost address
func warnOnLocalhostDNS(hostConfig container.HostConfig, stderr io.Writer) {
    for _, dnsIP := range hostConfig.DNS {
        if isLocalhost(dnsIP) {
            fmt.Fprintf(stderr, "WARNING: Localhost DNS setting (--dns=%s) may fail in containers.\n", dnsIP)
            return
        }
    }
}

// IPLocalhost is a regex pattern for IPv4 or IPv6 loopback range.
const ipLocalhost = `((127\.([0-9]{1,3}\.){2}[0-9]{1,3})|(::1)$)`

var localhostIPRegexp = regexp.MustCompile(ipLocalhost)

// IsLocalhost returns true if ip matches the localhost IP regular expression.
// Used for determining if nameserver settings are being passed which are
// localhost addresses
func isLocalhost(ip string) bool {
    return localhostIPRegexp.MatchString(ip)
}

func validatePullOpt(val string) error {
    switch val {
    case PullImageAlways, PullImageMissing, PullImageNever, "":
        // valid option, but nothing to do yet
        return nil
    default:
        return fmt.Errorf(
            "invalid pull option: '%s': must be one of %q, %q or %q",
            val,
            PullImageAlways,
            PullImageMissing,
            PullImageNever,
        )
    }
}
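The deleted file above is kubevpn's vendored copy of docker's `create` path, and its core control flow is "create, pull on not-found, retry once". A stripped-down sketch of that flow against the Docker Go SDK, under stated assumptions: error handling is shortened, the image and container names are examples, and `image.PullOptions` assumes a recent SDK version:

```go
package main

import (
	"context"
	"io"
	"os"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/image"
	"github.com/docker/docker/client"
	"github.com/docker/docker/errdefs"
)

// createWithPullOnMissing mimics the "missing" pull policy: try to create,
// pull only if the daemon reports the image as not found, then retry once.
func createWithPullOnMissing(ctx context.Context, cli *client.Client, img, name string) (string, error) {
	cfg := &container.Config{Image: img}
	created, err := cli.ContainerCreate(ctx, cfg, nil, nil, nil, name)
	if errdefs.IsNotFound(err) {
		rc, perr := cli.ImagePull(ctx, img, image.PullOptions{})
		if perr != nil {
			return "", perr
		}
		_, _ = io.Copy(os.Stderr, rc) // progress goes to stderr, like the CLI
		_ = rc.Close()
		created, err = cli.ContainerCreate(ctx, cfg, nil, nil, nil, name)
	}
	return created.ID, err
}

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	id, err := createWithPullOnMissing(context.Background(), cli, "nginx:latest", "demo")
	if err != nil {
		panic(err)
	}
	println(id)
}
```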
File diff suppressed because it is too large
@@ -1,315 +0,0 @@
package dev

import (
    "context"
    "fmt"
    "io"
    "os"
    "strings"
    "syscall"

    "github.com/docker/cli/cli"
    "github.com/docker/cli/cli/command"
    "github.com/docker/cli/cli/command/completion"
    "github.com/docker/cli/opts"
    "github.com/docker/docker/api/types/container"
    "github.com/moby/sys/signal"
    "github.com/moby/term"
    "github.com/pkg/errors"
    "github.com/sirupsen/logrus"
    "github.com/spf13/cobra"
    "github.com/spf13/pflag"
)

type runOptions struct {
    createOptions
    detach     bool
    sigProxy   bool
    detachKeys string
}

// NewRunCommand create a new `docker run` command
func NewRunCommand(dockerCli command.Cli) *cobra.Command {
    var options runOptions
    var copts *containerOptions

    cmd := &cobra.Command{
        Use:   "run [OPTIONS] IMAGE [COMMAND] [ARG...]",
        Short: "Create and run a new container from an image",
        Args:  cli.RequiresMinArgs(1),
        RunE: func(cmd *cobra.Command, args []string) error {
            copts.Image = args[0]
            if len(args) > 1 {
                copts.Args = args[1:]
            }
            return runRun(cmd.Context(), dockerCli, cmd.Flags(), &options, copts)
        },
        ValidArgsFunction: completion.ImageNames(dockerCli),
        Annotations: map[string]string{
            "category-top": "1",
            "aliases":      "docker container run, docker run",
        },
    }

    flags := cmd.Flags()
    flags.SetInterspersed(false)

    // These are flags not stored in Config/HostConfig
    flags.BoolVarP(&options.detach, "detach", "d", false, "Run container in background and print container ID")
    flags.BoolVar(&options.sigProxy, "sig-proxy", true, "Proxy received signals to the process")
    flags.StringVar(&options.name, "name", "", "Assign a name to the container")
    flags.StringVar(&options.detachKeys, "detach-keys", "", "Override the key sequence for detaching a container")
    flags.StringVar(&options.pull, "pull", PullImageMissing, `Pull image before running ("`+PullImageAlways+`", "`+PullImageMissing+`", "`+PullImageNever+`")`)
    flags.BoolVarP(&options.quiet, "quiet", "q", false, "Suppress the pull output")

    // Add an explicit help that doesn't have a `-h` to prevent the conflict
    // with hostname
    flags.Bool("help", false, "Print usage")

    command.AddPlatformFlag(flags, &options.platform)
    command.AddTrustVerificationFlags(flags, &options.untrusted, dockerCli.ContentTrustEnabled())
    copts = addFlags(flags)

    cmd.RegisterFlagCompletionFunc(
        "env",
        func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
            return os.Environ(), cobra.ShellCompDirectiveNoFileComp
        },
    )
    cmd.RegisterFlagCompletionFunc(
        "env-file",
        func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
            return nil, cobra.ShellCompDirectiveDefault
        },
    )
    cmd.RegisterFlagCompletionFunc(
        "network",
        completion.NetworkNames(dockerCli),
    )
    return cmd
}

func runRun(ctx context.Context, dockerCli command.Cli, flags *pflag.FlagSet, ropts *runOptions, copts *containerOptions) error {
    if err := validatePullOpt(ropts.pull); err != nil {
        reportError(dockerCli.Err(), "run", err.Error(), true)
        return cli.StatusError{StatusCode: 125}
    }
    proxyConfig := dockerCli.ConfigFile().ParseProxyConfig(dockerCli.Client().DaemonHost(), opts.ConvertKVStringsToMapWithNil(copts.env.GetAll()))
    newEnv := []string{}
    for k, v := range proxyConfig {
        if v == nil {
            newEnv = append(newEnv, k)
        } else {
            newEnv = append(newEnv, k+"="+*v)
        }
    }
    copts.env = *opts.NewListOptsRef(&newEnv, nil)
    containerCfg, err := parse(flags, copts, dockerCli.ServerInfo().OSType)
    // just in case the parse does not exit
    if err != nil {
        reportError(dockerCli.Err(), "run", err.Error(), true)
        return cli.StatusError{StatusCode: 125}
    }
    if err = validateAPIVersion(containerCfg, dockerCli.CurrentVersion()); err != nil {
        reportError(dockerCli.Err(), "run", err.Error(), true)
        return cli.StatusError{StatusCode: 125}
    }
    return runContainer(ctx, dockerCli, ropts, copts, containerCfg)
}

//nolint:gocyclo
func runContainer(ctx context.Context, dockerCli command.Cli, runOpts *runOptions, copts *containerOptions, containerCfg *containerConfig) error {
    config := containerCfg.Config
    stdout, stderr := dockerCli.Out(), dockerCli.Err()
    apiClient := dockerCli.Client()

    config.ArgsEscaped = false

    if !runOpts.detach {
        if err := dockerCli.In().CheckTty(config.AttachStdin, config.Tty); err != nil {
            return err
        }
    } else {
        if copts.attach.Len() != 0 {
            return errors.New("Conflicting options: -a and -d")
        }

        config.AttachStdin = false
        config.AttachStdout = false
        config.AttachStderr = false
        config.StdinOnce = false
    }

    ctx, cancelFun := context.WithCancel(ctx)
    defer cancelFun()

    containerID, err := createContainer(ctx, dockerCli, containerCfg, &runOpts.createOptions)
    if err != nil {
        reportError(stderr, "run", err.Error(), true)
        return runStartContainerErr(err)
    }
    if runOpts.sigProxy {
        sigc := notifyAllSignals()
        go ForwardAllSignals(ctx, apiClient, containerID, sigc)
        defer signal.StopCatch(sigc)
    }

    var (
        waitDisplayID chan struct{}
        errCh         chan error
    )
    if !config.AttachStdout && !config.AttachStderr {
        // Make this asynchronous to allow the client to write to stdin before having to read the ID
        waitDisplayID = make(chan struct{})
        go func() {
            defer close(waitDisplayID)
            _, _ = fmt.Fprintln(stdout, containerID)
        }()
    }
    attach := config.AttachStdin || config.AttachStdout || config.AttachStderr
    if attach {
        detachKeys := dockerCli.ConfigFile().DetachKeys
        if runOpts.detachKeys != "" {
            detachKeys = runOpts.detachKeys
        }

        closeFn, err := attachContainer(ctx, dockerCli, containerID, &errCh, config, container.AttachOptions{
            Stream:     true,
            Stdin:      config.AttachStdin,
            Stdout:     config.AttachStdout,
            Stderr:     config.AttachStderr,
            DetachKeys: detachKeys,
        })
        if err != nil {
            return err
        }
        defer closeFn()
    }

    statusChan := waitExitOrRemoved(ctx, apiClient, containerID, copts.autoRemove)

    // start the container
    if err := apiClient.ContainerStart(ctx, containerID, container.StartOptions{}); err != nil {
        // If we have hijackedIOStreamer, we should notify
        // hijackedIOStreamer we are going to exit and wait
        // to avoid the terminal are not restored.
        if attach {
            cancelFun()
            <-errCh
        }

        reportError(stderr, "run", err.Error(), false)
        if copts.autoRemove {
            // wait container to be removed
            <-statusChan
        }
        return runStartContainerErr(err)
    }

    if (config.AttachStdin || config.AttachStdout || config.AttachStderr) && config.Tty && dockerCli.Out().IsTerminal() {
        if err := MonitorTtySize(ctx, dockerCli, containerID, false); err != nil {
            _, _ = fmt.Fprintln(stderr, "Error monitoring TTY size:", err)
        }
    }

    if errCh != nil {
        if err := <-errCh; err != nil {
            if _, ok := err.(term.EscapeError); ok {
                // The user entered the detach escape sequence.
                return nil
            }

            logrus.Debugf("Error hijack: %s", err)
            return err
        }
    }

    // Detached mode: wait for the id to be displayed and return.
    if !config.AttachStdout && !config.AttachStderr {
        // Detached mode
        <-waitDisplayID
        return nil
    }

    status := <-statusChan
    if status != 0 {
        return cli.StatusError{StatusCode: status}
    }
    return nil
}

func attachContainer(ctx context.Context, dockerCli command.Cli, containerID string, errCh *chan error, config *container.Config, options container.AttachOptions) (func(), error) {
    resp, errAttach := dockerCli.Client().ContainerAttach(ctx, containerID, options)
    if errAttach != nil {
        return nil, errAttach
    }

    var (
        out, cerr io.Writer
        in        io.ReadCloser
    )
    if options.Stdin {
        in = dockerCli.In()
    }
    if options.Stdout {
        out = dockerCli.Out()
    }
    if options.Stderr {
        if config.Tty {
            cerr = dockerCli.Out()
        } else {
            cerr = dockerCli.Err()
        }
    }

    ch := make(chan error, 1)
    *errCh = ch

    go func() {
        ch <- func() error {
            streamer := hijackedIOStreamer{
                streams:      dockerCli,
                inputStream:  in,
                outputStream: out,
                errorStream:  cerr,
                resp:         resp,
                tty:          config.Tty,
                detachKeys:   options.DetachKeys,
            }

            if errHijack := streamer.stream(ctx); errHijack != nil {
                return errHijack
            }
            return errAttach
        }()
    }()
    return resp.Close, nil
}

// reportError is a utility method that prints a user-friendly message
// containing the error that occurred during parsing and a suggestion to get help
func reportError(stderr io.Writer, name string, str string, withHelp bool) {
    str = strings.TrimSuffix(str, ".") + "."
    if withHelp {
        str += "\nSee 'docker " + name + " --help'."
    }
    _, _ = fmt.Fprintln(stderr, "docker:", str)
}

// if container start fails with 'not found'/'no such' error, return 127
// if container start fails with 'permission denied' error, return 126
// return 125 for generic docker daemon failures
func runStartContainerErr(err error) error {
    trimmedErr := strings.TrimPrefix(err.Error(), "Error response from daemon: ")
    statusError := cli.StatusError{StatusCode: 125}
    if strings.Contains(trimmedErr, "executable file not found") ||
        strings.Contains(trimmedErr, "no such file or directory") ||
        strings.Contains(trimmedErr, "system cannot find the file specified") {
        statusError = cli.StatusError{StatusCode: 127}
    } else if strings.Contains(trimmedErr, syscall.EACCES.Error()) ||
        strings.Contains(trimmedErr, syscall.EISDIR.Error()) {
        statusError = cli.StatusError{StatusCode: 126}
    }

    return statusError
}
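`runStartContainerErr` above encodes docker's exit-code convention (127 for "not found", 126 for permission problems, 125 otherwise). A tiny stdlib-only sketch of the same mapping, using a plain "permission denied" substring where the original compares against `syscall` error strings:

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

// exitCodeFor reproduces the convention used above: 127 for "not found"
// style errors, 126 for permission problems, 125 for any other daemon error.
func exitCodeFor(err error) int {
	msg := strings.TrimPrefix(err.Error(), "Error response from daemon: ")
	switch {
	case strings.Contains(msg, "executable file not found"),
		strings.Contains(msg, "no such file or directory"):
		return 127
	case strings.Contains(msg, "permission denied"):
		return 126
	default:
		return 125
	}
}

func main() {
	err := errors.New("Error response from daemon: executable file not found in $PATH")
	fmt.Println(exitCodeFor(err)) // 127
}
```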
@@ -1,16 +1,41 @@
package dev

import (
    "bytes"
    "context"
    "errors"
    "fmt"
    "io"
    "math/rand"
    "reflect"
    "strconv"
    "strings"
    "syscall"
    "time"

    "github.com/distribution/reference"
    "github.com/docker/cli/cli"
    "github.com/docker/cli/cli/command"
    image2 "github.com/docker/cli/cli/command/image"
    "github.com/docker/cli/cli/streams"
    "github.com/docker/docker/api/types"
    "github.com/docker/docker/api/types/container"
    "github.com/docker/docker/api/types/events"
    "github.com/docker/docker/api/types/filters"
    "github.com/docker/docker/api/types/image"
    "github.com/docker/docker/api/types/network"
    "github.com/docker/docker/api/types/versions"
    "github.com/docker/docker/client"
    "github.com/docker/docker/errdefs"
    "github.com/docker/docker/pkg/jsonmessage"
    "github.com/docker/docker/pkg/stdcopy"
    "github.com/moby/sys/signal"
    "github.com/moby/term"
    "github.com/sirupsen/logrus"
    "k8s.io/apimachinery/pkg/util/wait"

    "github.com/wencaiwulue/kubevpn/v2/pkg/config"
    "github.com/wencaiwulue/kubevpn/v2/pkg/util"
)

func waitExitOrRemoved(ctx context.Context, apiClient client.APIClient, containerID string, waitRemove bool) <-chan int {
@@ -127,34 +152,478 @@ func legacyWaitExitOrRemoved(ctx context.Context, apiClient client.APIClient, co
    return statusChan
}

func parallelOperation(ctx context.Context, containers []string, op func(ctx context.Context, containerID string) error) chan error {
    if len(containers) == 0 {
        return nil
func runLogsWaitRunning(ctx context.Context, dockerCli command.Cli, id string) error {
    c, err := dockerCli.Client().ContainerInspect(ctx, id)
    if err != nil {
        return err
    }
    const defaultParallel int = 50
    sem := make(chan struct{}, defaultParallel)
    errChan := make(chan error)

    // make sure result is printed in correct order
    output := map[string]chan error{}
    for _, c := range containers {
        output[c] = make(chan error, 1)
    options := container.LogsOptions{
        ShowStdout: true,
        ShowStderr: true,
        Follow:     true,
    }
    logStream, err := dockerCli.Client().ContainerLogs(ctx, c.ID, options)
    if err != nil {
        return err
    }
    defer logStream.Close()

    buf := bytes.NewBuffer(nil)
    w := io.MultiWriter(buf, dockerCli.Out())

    cancel, cancelFunc := context.WithCancel(ctx)
    defer cancelFunc()

    go func() {
        for _, c := range containers {
            err := <-output[c]
        t := time.NewTicker(time.Second)
        defer t.Stop()
        for range t.C {
            // keyword, maybe can find another way more elegant
            if strings.Contains(buf.String(), "Now you can access resources in the kubernetes cluster, enjoy it :)") {
                cancelFunc()
                return
            }
        }
    }()

    var errChan = make(chan error)
    go func() {
        var err error
        if c.Config.Tty {
            _, err = io.Copy(w, logStream)
        } else {
            _, err = stdcopy.StdCopy(w, dockerCli.Err(), logStream)
        }
        if err != nil {
            errChan <- err
        }
    }()

    go func() {
        for _, c := range containers {
            sem <- struct{}{} // Wait for active queue sem to drain.
            go func(container string) {
                output[container] <- op(ctx, container)
                <-sem
            }(c)
        }
    }()
    return errChan
    select {
    case err = <-errChan:
        return err
    case <-cancel.Done():
        return nil
    }
}

func runLogsSinceNow(dockerCli command.Cli, id string, follow bool) error {
    ctx := context.Background()

    c, err := dockerCli.Client().ContainerInspect(ctx, id)
    if err != nil {
        return err
    }

    options := container.LogsOptions{
        ShowStdout: true,
        ShowStderr: true,
        Since:      "0m",
        Follow:     follow,
    }
    responseBody, err := dockerCli.Client().ContainerLogs(ctx, c.ID, options)
    if err != nil {
        return err
    }
    defer responseBody.Close()

    if c.Config.Tty {
        _, err = io.Copy(dockerCli.Out(), responseBody)
    } else {
        _, err = stdcopy.StdCopy(dockerCli.Out(), dockerCli.Err(), responseBody)
    }
    return err
}

func createNetwork(ctx context.Context, cli *client.Client) (string, error) {
    by := map[string]string{"owner": config.ConfigMapPodTrafficManager}
    list, _ := cli.NetworkList(ctx, types.NetworkListOptions{})
    for _, resource := range list {
        if reflect.DeepEqual(resource.Labels, by) {
            return resource.ID, nil
        }
    }

    create, err := cli.NetworkCreate(ctx, config.ConfigMapPodTrafficManager, types.NetworkCreate{
        Driver: "bridge",
        Scope:  "local",
        IPAM: &network.IPAM{
            Driver:  "",
            Options: nil,
            Config: []network.IPAMConfig{
                {
                    Subnet:  config.DockerCIDR.String(),
                    Gateway: config.DockerRouterIP.String(),
                },
            },
        },
        //Options: map[string]string{"--icc": "", "--ip-masq": ""},
        Labels: by,
    })
    if err != nil {
        if errdefs.IsForbidden(err) {
            list, _ = cli.NetworkList(ctx, types.NetworkListOptions{})
            for _, resource := range list {
                if reflect.DeepEqual(resource.Labels, by) {
                    return resource.ID, nil
                }
            }
        }
        return "", err
    }
    return create.ID, nil
}

// Pull constants
const (
    PullImageAlways  = "always"
    PullImageMissing = "missing" // Default (matches previous behavior)
    PullImageNever   = "never"
)

func pullImage(ctx context.Context, dockerCli command.Cli, img string, options RunOptions) error {
    encodedAuth, err := command.RetrieveAuthTokenFromImage(dockerCli.ConfigFile(), img)
    if err != nil {
        return err
    }

    responseBody, err := dockerCli.Client().ImageCreate(ctx, img, image.CreateOptions{
        RegistryAuth: encodedAuth,
        Platform:     options.Platform,
    })
    if err != nil {
        return err
    }
    defer responseBody.Close()

    out := dockerCli.Err()
    return jsonmessage.DisplayJSONMessagesToStream(responseBody, streams.NewOut(out), nil)
}

//nolint:gocyclo
func createContainer(ctx context.Context, dockerCli command.Cli, runConfig *RunConfig) (string, error) {
    config := runConfig.config
    hostConfig := runConfig.hostConfig
    networkingConfig := runConfig.networkingConfig
    var (
        trustedRef reference.Canonical
        namedRef   reference.Named
    )

    ref, err := reference.ParseAnyReference(config.Image)
    if err != nil {
        return "", err
    }
    if named, ok := ref.(reference.Named); ok {
        namedRef = reference.TagNameOnly(named)

        if taggedRef, ok := namedRef.(reference.NamedTagged); ok && dockerCli.ContentTrustEnabled() {
            var err error
            trustedRef, err = image2.TrustedReference(ctx, dockerCli, taggedRef)
            if err != nil {
                return "", err
            }
            config.Image = reference.FamiliarString(trustedRef)
        }
    }

    pullAndTagImage := func() error {
        if err = pullImage(ctx, dockerCli, config.Image, runConfig.Options); err != nil {
            return err
        }
        if taggedRef, ok := namedRef.(reference.NamedTagged); ok && trustedRef != nil {
            return image2.TagTrusted(ctx, dockerCli, trustedRef, taggedRef)
        }
        return nil
    }

    if runConfig.Options.Pull == PullImageAlways {
        if err = pullAndTagImage(); err != nil {
            return "", err
        }
    }

    hostConfig.ConsoleSize[0], hostConfig.ConsoleSize[1] = dockerCli.Out().GetTtySize()

    response, err := dockerCli.Client().ContainerCreate(ctx, config, hostConfig, networkingConfig, runConfig.platform, runConfig.name)
    if err != nil {
        // Pull image if it does not exist locally and we have the PullImageMissing option. Default behavior.
        if errdefs.IsNotFound(err) && namedRef != nil && runConfig.Options.Pull == PullImageMissing {
            // we don't want to write to stdout anything apart from container.ID
            _, _ = fmt.Fprintf(dockerCli.Err(), "Unable to find image '%s' locally\n", reference.FamiliarString(namedRef))

            if err = pullAndTagImage(); err != nil {
                return "", err
            }

            var retryErr error
            response, retryErr = dockerCli.Client().ContainerCreate(ctx, config, hostConfig, networkingConfig, runConfig.platform, runConfig.name)
            if retryErr != nil {
                return "", retryErr
            }
        } else {
            return "", err
        }
    }

    for _, w := range response.Warnings {
        _, _ = fmt.Fprintf(dockerCli.Err(), "WARNING: %s\n", w)
    }
    return response.ID, err
}

func runContainer(ctx context.Context, dockerCli command.Cli, runConfig *RunConfig) error {
    config := runConfig.config
    stdout, stderr := dockerCli.Out(), dockerCli.Err()
    apiClient := dockerCli.Client()

    config.ArgsEscaped = false

    if err := dockerCli.In().CheckTty(config.AttachStdin, config.Tty); err != nil {
        return err
    }

    ctx, cancelFun := context.WithCancel(ctx)
    defer cancelFun()

    containerID, err := createContainer(ctx, dockerCli, runConfig)
    if err != nil {
        reportError(stderr, err.Error())
        return runStartContainerErr(err)
    }
    if runConfig.Options.SigProxy {
        sigc := notifyAllSignals()
        go ForwardAllSignals(ctx, apiClient, containerID, sigc)
        defer signal.StopCatch(sigc)
    }

    var (
        waitDisplayID chan struct{}
        errCh         chan error
    )
    if !config.AttachStdout && !config.AttachStderr {
        // Make this asynchronous to allow the client to write to stdin before having to read the ID
        waitDisplayID = make(chan struct{})
        go func() {
            defer close(waitDisplayID)
            _, _ = fmt.Fprintln(stdout, containerID)
        }()
    }
    attach := config.AttachStdin || config.AttachStdout || config.AttachStderr
    if attach {
        closeFn, err := attachContainer(ctx, dockerCli, containerID, &errCh, config, container.AttachOptions{
            Stream:     true,
            Stdin:      config.AttachStdin,
            Stdout:     config.AttachStdout,
            Stderr:     config.AttachStderr,
            DetachKeys: dockerCli.ConfigFile().DetachKeys,
        })
        if err != nil {
            return err
        }
        defer closeFn()
    }

    statusChan := waitExitOrRemoved(ctx, apiClient, containerID, runConfig.hostConfig.AutoRemove)

    // start the container
    if err := apiClient.ContainerStart(ctx, containerID, container.StartOptions{}); err != nil {
        // If we have hijackedIOStreamer, we should notify
        // hijackedIOStreamer we are going to exit and wait
        // to avoid the terminal are not restored.
        if attach {
            cancelFun()
            <-errCh
        }

        reportError(stderr, err.Error())
        if runConfig.hostConfig.AutoRemove {
            // wait container to be removed
            <-statusChan
        }
        return runStartContainerErr(err)
    }

    if (config.AttachStdin || config.AttachStdout || config.AttachStderr) && config.Tty && dockerCli.Out().IsTerminal() {
        if err := MonitorTtySize(ctx, dockerCli, containerID, false); err != nil {
            _, _ = fmt.Fprintln(stderr, "Error monitoring TTY size:", err)
        }
    }

    if errCh != nil {
        if err := <-errCh; err != nil {
            if _, ok := err.(term.EscapeError); ok {
                // The user entered the detach escape sequence.
                return nil
            }

            logrus.Debugf("Error hijack: %s", err)
            return err
        }
    }

    // Detached mode: wait for the id to be displayed and return.
    if !config.AttachStdout && !config.AttachStderr {
        // Detached mode
        <-waitDisplayID
        return nil
    }

    status := <-statusChan
    if status != 0 {
        return cli.StatusError{StatusCode: status}
    }
    return nil
}

func attachContainer(ctx context.Context, dockerCli command.Cli, containerID string, errCh *chan error, config *container.Config, options container.AttachOptions) (func(), error) {
    resp, errAttach := dockerCli.Client().ContainerAttach(ctx, containerID, options)
    if errAttach != nil {
        return nil, errAttach
    }

    var (
        out, cerr io.Writer
        in        io.ReadCloser
    )
    if options.Stdin {
        in = dockerCli.In()
    }
    if options.Stdout {
        out = dockerCli.Out()
    }
    if options.Stderr {
        if config.Tty {
            cerr = dockerCli.Out()
        } else {
            cerr = dockerCli.Err()
        }
    }

    ch := make(chan error, 1)
    *errCh = ch

    go func() {
        ch <- func() error {
            streamer := hijackedIOStreamer{
                streams:      dockerCli,
                inputStream:  in,
                outputStream: out,
                errorStream:  cerr,
                resp:         resp,
                tty:          config.Tty,
                detachKeys:   options.DetachKeys,
            }

            if errHijack := streamer.stream(ctx); errHijack != nil {
                return errHijack
            }
            return errAttach
        }()
    }()
    return resp.Close, nil
}

// reportError is a utility method that prints a user-friendly message
// containing the error that occurred during parsing and a suggestion to get help
func reportError(stderr io.Writer, str string) {
    str = strings.TrimSuffix(str, ".") + "."
    _, _ = fmt.Fprintln(stderr, "docker:", str)
}

// if container start fails with 'not found'/'no such' error, return 127
// if container start fails with 'permission denied' error, return 126
// return 125 for generic docker daemon failures
func runStartContainerErr(err error) error {
    trimmedErr := strings.TrimPrefix(err.Error(), "Error response from daemon: ")
    statusError := cli.StatusError{StatusCode: 125, Status: trimmedErr}
    if strings.Contains(trimmedErr, "executable file not found") ||
        strings.Contains(trimmedErr, "no such file or directory") ||
        strings.Contains(trimmedErr, "system cannot find the file specified") {
        statusError = cli.StatusError{StatusCode: 127, Status: trimmedErr}
    } else if strings.Contains(trimmedErr, syscall.EACCES.Error()) ||
        strings.Contains(trimmedErr, syscall.EISDIR.Error()) {
        statusError = cli.StatusError{StatusCode: 126, Status: trimmedErr}
    }

    return statusError
}

func run(ctx context.Context, cli *client.Client, dockerCli *command.DockerCli, runConfig *RunConfig) (id string, err error) {
    rand.New(rand.NewSource(time.Now().UnixNano()))

    var config = runConfig.config
    var hostConfig = runConfig.hostConfig
    var platform = runConfig.platform
    var networkConfig = runConfig.networkingConfig
    var name = runConfig.name

    var needPull bool
    var img types.ImageInspect
    img, _, err = cli.ImageInspectWithRaw(ctx, config.Image)
    if errdefs.IsNotFound(err) {
        logrus.Infof("needs to pull image %s", config.Image)
        needPull = true
        err = nil
    } else if err != nil {
        logrus.Errorf("image inspect failed: %v", err)
        return
    }
    if platform != nil && platform.Architecture != "" && platform.OS != "" {
        if img.Os != platform.OS || img.Architecture != platform.Architecture {
            needPull = true
        }
    }
    if needPull {
        err = util.PullImage(ctx, runConfig.platform, cli, dockerCli, config.Image, nil)
        if err != nil {
            logrus.Errorf("Failed to pull image: %s, err: %s", config.Image, err)
            return
        }
    }

    var create container.CreateResponse
    create, err = cli.ContainerCreate(ctx, config, hostConfig, networkConfig, platform, name)
    if err != nil {
        logrus.Errorf("Failed to create container: %s, err: %s", name, err)
        return
    }
    id = create.ID
    logrus.Infof("Created container: %s", name)
    err = cli.ContainerStart(ctx, create.ID, container.StartOptions{})
    if err != nil {
        logrus.Errorf("failed to startup container %s: %v", name, err)
        return
    }
    logrus.Infof("Wait container %s to be running...", name)
    var inspect types.ContainerJSON
    ctx2, cancelFunc := context.WithCancel(ctx)
    wait.UntilWithContext(ctx2, func(ctx context.Context) {
        inspect, err = cli.ContainerInspect(ctx, create.ID)
        if errdefs.IsNotFound(err) {
            cancelFunc()
            return
        } else if err != nil {
            cancelFunc()
            return
        }
        if inspect.State != nil && (inspect.State.Status == "exited" || inspect.State.Status == "dead" || inspect.State.Dead) {
            cancelFunc()
            err = errors.New(fmt.Sprintf("container status: %s", inspect.State.Status))
            return
        }
        if inspect.State != nil && inspect.State.Running {
            cancelFunc()
            return
        }
    }, time.Second)
    if err != nil {
        logrus.Errorf("failed to wait container to be ready: %v", err)
        _ = runLogsSinceNow(dockerCli, id, false)
        return
    }

    logrus.Infof("Container %s is running now", name)
    return
}
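The new `runLogsWaitRunning` helper tees the container log into a buffer and polls it for a fixed marker line to decide readiness. A self-contained sketch of that pattern follows (with a mutex added around the shared buffer, which the original elides, and a termination path for streams that end without the marker):

```go
package main

import (
	"bytes"
	"context"
	"fmt"
	"io"
	"strings"
	"sync"
	"time"
)

// lockedBuffer makes the shared buffer safe to read while the copier writes.
type lockedBuffer struct {
	mu sync.Mutex
	b  bytes.Buffer
}

func (l *lockedBuffer) Write(p []byte) (int, error) {
	l.mu.Lock()
	defer l.mu.Unlock()
	return l.b.Write(p)
}

func (l *lockedBuffer) String() string {
	l.mu.Lock()
	defer l.mu.Unlock()
	return l.b.String()
}

// waitForMarker copies the log stream into a buffer while a ticker polls the
// buffer for the marker, the same shape as runLogsWaitRunning above.
func waitForMarker(ctx context.Context, logs io.Reader, marker string) error {
	buf := &lockedBuffer{}
	copyErr := make(chan error, 1)
	go func() {
		_, err := io.Copy(buf, logs)
		copyErr <- err
	}()

	t := time.NewTicker(50 * time.Millisecond)
	defer t.Stop()
	for {
		select {
		case <-t.C:
			if strings.Contains(buf.String(), marker) {
				return nil // marker seen: the container is considered ready
			}
		case err := <-copyErr:
			if err != nil {
				return err // log stream broke before the marker appeared
			}
			if strings.Contains(buf.String(), marker) {
				return nil
			}
			return fmt.Errorf("log stream ended before %q appeared", marker)
		case <-ctx.Done():
			return ctx.Err()
		}
	}
}

func main() {
	logs := strings.NewReader("start to connect\nNow you can access resources in the kubernetes cluster, enjoy it :)\n")
	fmt.Println(waitForMarker(context.Background(), logs, "enjoy it"))
}
```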
@@ -1,96 +0,0 @@
package dev

import (
    "context"
    "fmt"
    "os"

    "github.com/containerd/containerd/platforms"
    "github.com/docker/cli/opts"
    "github.com/docker/docker/api/types"
    "github.com/docker/docker/api/types/container"
    "github.com/sirupsen/logrus"
    "github.com/spf13/pflag"
    util2 "k8s.io/kubectl/pkg/cmd/util"

    "github.com/wencaiwulue/kubevpn/v2/pkg/util"
)

func DoDev(ctx context.Context, option *Options, conf *util.SshConfig, flags *pflag.FlagSet, f util2.Factory, transferImage bool) error {
    if p := option.Options.platform; p != "" {
        _, err := platforms.Parse(p)
        if err != nil {
            return fmt.Errorf("error parsing specified platform: %v", err)
        }
    }
    client, cli, err := util.GetClient()
    if err != nil {
        return err
    }
    mode := container.NetworkMode(option.Copts.netMode.NetworkMode())
    if mode.IsContainer() {
        logrus.Infof("network mode container is %s", mode.ConnectedContainer())
        var inspect types.ContainerJSON
        inspect, err = client.ContainerInspect(ctx, mode.ConnectedContainer())
        if err != nil {
            logrus.Errorf("can not inspect container %s, err: %v", mode.ConnectedContainer(), err)
            return err
        }
        if inspect.State == nil {
            return fmt.Errorf("can not get container status, please make container name is valid")
        }
        if !inspect.State.Running {
            return fmt.Errorf("container %s status is %s, expect is running, please make sure your outer docker name is correct", mode.ConnectedContainer(), inspect.State.Status)
        }
        logrus.Infof("container %s is running", mode.ConnectedContainer())
    } else if mode.IsDefault() && util.RunningInContainer() {
        var hostname string
        if hostname, err = os.Hostname(); err != nil {
            return err
        }
        logrus.Infof("hostname is %s", hostname)
        err = option.Copts.netMode.Set(fmt.Sprintf("container:%s", hostname))
        if err != nil {
            return err
        }
    }

    err = validatePullOpt(option.Options.pull)
    if err != nil {
        return err
    }
    proxyConfig := cli.ConfigFile().ParseProxyConfig(cli.Client().DaemonHost(), opts.ConvertKVStringsToMapWithNil(option.Copts.env.GetAll()))
    var newEnv []string
    for k, v := range proxyConfig {
        if v == nil {
            newEnv = append(newEnv, k)
        } else {
            newEnv = append(newEnv, fmt.Sprintf("%s=%s", k, *v))
        }
    }
    option.Copts.env = *opts.NewListOptsRef(&newEnv, nil)
    var c *containerConfig
    c, err = parse(flags, option.Copts, cli.ServerInfo().OSType)
    // just in case the parse does not exit
    if err != nil {
        return err
    }
    err = validateAPIVersion(c, cli.Client().ClientVersion())
    if err != nil {
        return err
    }

    // connect to cluster, in container or host
    cancel, err := option.connect(ctx, f, conf, transferImage, c)
    defer func() {
        if cancel != nil {
            cancel()
        }
    }()
    if err != nil {
        logrus.Errorf("connect to cluster failed, err: %v", err)
        return err
    }

    return option.Main(ctx, c)
}
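The deleted `DoDev` shows the networking trick the dev mode relies on: when kubevpn itself is already running inside a container, the new dev container joins that container's network namespace via docker's `container:<id>` network mode, using the hostname (which equals the container ID inside docker). A minimal sketch of that rule, where `runningInContainer` stands in for kubevpn's `util.RunningInContainer` helper:

```go
package main

import (
	"fmt"
	"os"
)

// containerNetMode builds the docker --network value used above: when the
// current process already runs inside a container, join that container's
// network namespace instead of using the default bridge.
func containerNetMode(runningInContainer bool) (string, error) {
	if !runningInContainer {
		return "default", nil
	}
	hostname, err := os.Hostname() // inside docker this is the container ID
	if err != nil {
		return "", err
	}
	return fmt.Sprintf("container:%s", hostname), nil
}

func main() {
	mode, err := containerNetMode(true)
	if err != nil {
		panic(err)
	}
	fmt.Println(mode) // e.g. container:d0b3dab8912a
}
```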
pkg/dev/merge.go (109 changed lines)
@@ -1,109 +0,0 @@
package dev

import (
	"fmt"
	"net"

	"github.com/containerd/containerd/platforms"
	"github.com/docker/docker/api/types/network"

	"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)

// The logic here: find the specified container, then treat the passed-in
// tempContainerConfig (i.e. the options the user gave on the command line)
// as authoritative, and append the original declarations from the deployment.
func mergeDockerOptions(r ConfigList, copts *Options, tempContainerConfig *containerConfig) {
	if copts.ContainerName != "" {
		var index = -1
		for i, config := range r {
			if config.k8sContainerName == copts.ContainerName {
				index = i
				break
			}
		}
		if index != -1 {
			r[0], r[index] = r[index], r[0]
		}
	}

	config := r[0]
	config.Options = copts.Options
	config.Copts = copts.Copts

	if copts.DevImage != "" {
		config.config.Image = copts.DevImage
	}
	if copts.Options.name != "" {
		config.containerName = copts.Options.name
	} else {
		config.Options.name = config.containerName
	}
	if copts.Options.platform != "" {
		p, _ := platforms.Parse(copts.Options.platform)
		config.platform = &p
	}

	tempContainerConfig.HostConfig.CapAdd = append(tempContainerConfig.HostConfig.CapAdd, config.hostConfig.CapAdd...)
	tempContainerConfig.HostConfig.SecurityOpt = append(tempContainerConfig.HostConfig.SecurityOpt, config.hostConfig.SecurityOpt...)
	tempContainerConfig.HostConfig.VolumesFrom = append(tempContainerConfig.HostConfig.VolumesFrom, config.hostConfig.VolumesFrom...)
	tempContainerConfig.HostConfig.DNS = append(tempContainerConfig.HostConfig.DNS, config.hostConfig.DNS...)
	tempContainerConfig.HostConfig.DNSOptions = append(tempContainerConfig.HostConfig.DNSOptions, config.hostConfig.DNSOptions...)
	tempContainerConfig.HostConfig.DNSSearch = append(tempContainerConfig.HostConfig.DNSSearch, config.hostConfig.DNSSearch...)
	tempContainerConfig.HostConfig.Mounts = append(tempContainerConfig.HostConfig.Mounts, config.hostConfig.Mounts...)
	for port, bindings := range config.hostConfig.PortBindings {
		if v, ok := tempContainerConfig.HostConfig.PortBindings[port]; ok {
			tempContainerConfig.HostConfig.PortBindings[port] = append(v, bindings...)
		} else {
			tempContainerConfig.HostConfig.PortBindings[port] = bindings
		}
	}

	config.hostConfig = tempContainerConfig.HostConfig
	config.networkingConfig.EndpointsConfig = util.Merge[string, *network.EndpointSettings](tempContainerConfig.NetworkingConfig.EndpointsConfig, config.networkingConfig.EndpointsConfig)

	c := tempContainerConfig.Config
	var entrypoint = config.config.Entrypoint
	var args = config.config.Cmd
	// if --entrypoint was specified, use it
	if len(c.Entrypoint) != 0 {
		entrypoint = c.Entrypoint
		args = c.Cmd
	}
	if len(c.Cmd) != 0 {
		args = c.Cmd
	}
	c.Entrypoint = entrypoint
	c.Cmd = args
	c.Env = append(config.config.Env, c.Env...)
	c.Image = config.config.Image
	if c.User == "" {
		c.User = config.config.User
	}
	c.Labels = util.Merge[string, string](config.config.Labels, c.Labels)
	c.Volumes = util.Merge[string, struct{}](c.Volumes, config.config.Volumes)
	if c.WorkingDir == "" {
		c.WorkingDir = config.config.WorkingDir
	}
	for k, v := range config.config.ExposedPorts {
		if _, found := c.ExposedPorts[k]; !found {
			c.ExposedPorts[k] = v
		}
	}

	var hosts []string
	for _, domain := range copts.ExtraRouteInfo.ExtraDomain {
		ips, err := net.LookupIP(domain)
		if err != nil {
			continue
		}
		for _, ip := range ips {
			if ip.To4() != nil {
				hosts = append(hosts, fmt.Sprintf("%s:%s", domain, ip.To4().String()))
				break
			}
		}
	}
	config.hostConfig.ExtraHosts = hosts

	config.config = c
}
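The precedence rule encoded by the deleted `mergeDockerOptions` (and preserved by the new `MergeDockerOptions` further down) is easy to get wrong: `--entrypoint` overrides both entrypoint and args, a bare command overrides only args, and everything else falls back to the workload's pod spec. A minimal sketch of that rule; the `spec` type and its fields are illustrative, not part of the kubevpn codebase:

```go
package main

import "fmt"

// spec models the fields whose precedence the merge resolves: values
// given on the command line win; the workload's original declaration
// is only a fallback.
type spec struct {
	Entrypoint []string
	Cmd        []string
	WorkingDir string
}

// merge applies the same rule as the code above: a user --entrypoint
// replaces both entrypoint and cmd, a user cmd alone replaces only
// cmd, and empty fields fall back to the deployment's values.
func merge(fromDeployment, fromCLI spec) spec {
	out := fromDeployment
	if len(fromCLI.Entrypoint) != 0 {
		out.Entrypoint = fromCLI.Entrypoint
		out.Cmd = fromCLI.Cmd
	}
	if len(fromCLI.Cmd) != 0 {
		out.Cmd = fromCLI.Cmd
	}
	if fromCLI.WorkingDir != "" {
		out.WorkingDir = fromCLI.WorkingDir
	}
	return out
}

func main() {
	dep := spec{Entrypoint: []string{"/app/server"}, Cmd: []string{"--port=9080"}, WorkingDir: "/app"}
	cli := spec{Entrypoint: []string{"sh"}}
	// Mirrors `kubevpn dev ... --entrypoint sh`: the original args are dropped.
	fmt.Printf("%+v\n", merge(dep, cli)) // {Entrypoint:[sh] Cmd:[] WorkingDir:/app}
}
```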
@@ -1,14 +1,11 @@
package dev

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"io"
	"math/rand"
	"os"
	"reflect"
	"sort"
	"strconv"
	"strings"
@@ -16,26 +13,23 @@ import (

	"github.com/containerd/containerd/platforms"
	"github.com/docker/cli/cli/command"
	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/container"
	typescontainer "github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/network"
	"github.com/docker/docker/api/types/strslice"
	"github.com/docker/docker/client"
	"github.com/docker/docker/errdefs"
	"github.com/docker/docker/pkg/stdcopy"
	"github.com/docker/go-connections/nat"
	"github.com/google/uuid"
	specs "github.com/opencontainers/image-spec/specs-go/v1"
	pkgerr "github.com/pkg/errors"
	log "github.com/sirupsen/logrus"
	"github.com/spf13/pflag"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	cmdutil "k8s.io/kubectl/pkg/cmd/util"
	"k8s.io/kubectl/pkg/polymorphichelpers"
	"k8s.io/kubectl/pkg/util/interrupt"
	"k8s.io/kubectl/pkg/util/podutils"
	"k8s.io/utils/ptr"

@@ -58,7 +52,6 @@ type Options struct {
	Headers        map[string]string
	Namespace      string
	Workload       string
	Factory        cmdutil.Factory
	ContainerName  string
	NoProxy        bool
	ExtraRouteInfo handler.ExtraRouteInfo
@@ -67,34 +60,159 @@ type Options struct {

	// docker options
	DevImage string
	Options  runOptions
	Copts    *containerOptions

	RunOptions       RunOptions
	ContainerOptions *ContainerOptions

	// inner
	Cli       *client.Client
	DockerCli *command.DockerCli
	cli       *client.Client
	dockerCli *command.DockerCli

	factory    cmdutil.Factory
	clientset  *kubernetes.Clientset
	restclient *rest.RESTClient
	config     *rest.Config

	// rollback
	rollbackFuncList []func() error
}

func (option *Options) Main(ctx context.Context, c *containerConfig) error {
	rand.NewSource(time.Now().UnixNano())
	object, err := util.GetUnstructuredObject(option.Factory, option.Namespace, option.Workload)
	if err != nil {
		log.Errorf("get unstructured object error: %v", err)
		return err
func (option *Options) Main(ctx context.Context, sshConfig *util.SshConfig, flags *pflag.FlagSet, transferImage bool) error {
	mode := typescontainer.NetworkMode(option.ContainerOptions.netMode.NetworkMode())
	if mode.IsContainer() {
		log.Infof("network mode container is %s", mode.ConnectedContainer())
		inspect, err := option.cli.ContainerInspect(ctx, mode.ConnectedContainer())
		if err != nil {
			log.Errorf("can not inspect container %s, err: %v", mode.ConnectedContainer(), err)
			return err
		}
		if inspect.State == nil {
			return fmt.Errorf("can not get container status, please make container name is valid")
		}
		if !inspect.State.Running {
			return fmt.Errorf("container %s status is %s, expect is running, please make sure your outer docker name is correct", mode.ConnectedContainer(), inspect.State.Status)
		}
		log.Infof("container %s is running", mode.ConnectedContainer())
	} else if mode.IsDefault() && util.RunningInContainer() {
		hostname, err := os.Hostname()
		if err != nil {
			return err
		}
		log.Infof("hostname is %s", hostname)
		err = option.ContainerOptions.netMode.Set(fmt.Sprintf("container:%s", hostname))
		if err != nil {
			return err
		}
	}

	u := object.Object.(*unstructured.Unstructured)
	var templateSpec *v1.PodTemplateSpec
	//var path []string
	templateSpec, _, err = util.GetPodTemplateSpecPath(u)
	config, hostConfig, err := Parse(flags, option.ContainerOptions)
	// just in case the Parse does not exit
	if err != nil {
		return err
	}

	clientSet, err := option.Factory.KubernetesClientSet()
	// Connect to cluster, in container or host
	err = option.Connect(ctx, sshConfig, transferImage, hostConfig.PortBindings)
	if err != nil {
		log.Errorf("Connect to cluster failed, err: %v", err)
		return err
	}

	return option.Dev(ctx, config, hostConfig)
}

// Connect to cluster network on docker container or host
func (option *Options) Connect(ctx context.Context, sshConfig *util.SshConfig, transferImage bool, portBindings nat.PortMap) error {
	switch option.ConnectMode {
	case ConnectModeHost:
		daemonCli := daemon.GetClient(false)
		if daemonCli == nil {
			return fmt.Errorf("get nil daemon client")
		}
		kubeConfigBytes, ns, err := util.ConvertToKubeConfigBytes(option.factory)
		if err != nil {
			return err
		}
		logLevel := log.ErrorLevel
		if config.Debug {
			logLevel = log.DebugLevel
		}
		// no need to ssh jump in the daemon, because dev mode hangs until the user exits,
		// so an ssh jump in the client is enough
		req := &rpc.ConnectRequest{
			KubeconfigBytes:      string(kubeConfigBytes),
			Namespace:            ns,
			Headers:              option.Headers,
			Workloads:            []string{option.Workload},
			ExtraRoute:           option.ExtraRouteInfo.ToRPC(),
			Engine:               string(option.Engine),
			OriginKubeconfigPath: util.GetKubeConfigPath(option.factory),
			TransferImage:        transferImage,
			Image:                config.Image,
			Level:                int32(logLevel),
			SshJump:              sshConfig.ToRPC(),
		}
		if option.NoProxy {
			req.Workloads = nil
		}
		option.AddRollbackFunc(func() error {
			_ = disconnect(ctx, daemonCli, &rpc.DisconnectRequest{
				KubeconfigBytes: ptr.To(string(kubeConfigBytes)),
				Namespace:       ptr.To(ns),
				SshJump:         sshConfig.ToRPC(),
			})
			return nil
		})
		var resp rpc.Daemon_ConnectClient
		resp, err = daemonCli.Proxy(ctx, req)
		if err != nil {
			log.Errorf("Connect to cluster error: %s", err.Error())
			return err
		}
		for {
			resp, err := resp.Recv()
			if err == io.EOF {
				return nil
			} else if err != nil {
				return err
			}
			_, _ = fmt.Fprint(os.Stdout, resp.Message)
		}

	case ConnectModeContainer:
		runConfig, err := option.CreateConnectContainer(portBindings)
		if err != nil {
			return err
		}
		var id string
		log.Infof("starting container connect to cluster")
		id, err = run(ctx, option.cli, option.dockerCli, runConfig)
		if err != nil {
			return err
		}
		option.AddRollbackFunc(func() error {
			_ = option.cli.ContainerKill(context.Background(), id, "SIGTERM")
			_ = runLogsSinceNow(option.dockerCli, id, true)
			return nil
		})
		err = runLogsWaitRunning(ctx, option.dockerCli, id)
		if err != nil {
			// interrupted by signal KILL
			if errors.Is(err, context.Canceled) {
				return nil
			}
			return err
		}
		log.Infof("container connect to cluster successfully")
		err = option.ContainerOptions.netMode.Set(fmt.Sprintf("container:%s", id))
		return err
	default:
		return fmt.Errorf("unsupport connect mode: %s", option.ConnectMode)
	}
}
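`ConnectModeContainer` ends by calling `netMode.Set("container:" + id)`, so the dev container later joins the network namespace of the kubevpn connect container. A minimal standalone sketch of that namespace-sharing trick with the Docker SDK; `connectID` is assumed to name an already-running container, and the image is arbitrary:

```go
package example

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

// joinNetNS creates and starts a container that shares the network and
// PID namespaces of an already-running container, which is exactly what
// ConnectModeContainer arranges for the dev container.
func joinNetNS(ctx context.Context, cli *client.Client, connectID, image string) (string, error) {
	resp, err := cli.ContainerCreate(ctx,
		&container.Config{Image: image, Cmd: []string{"sh"}, Tty: true, OpenStdin: true},
		&container.HostConfig{
			// "container:<id>" means: reuse that container's namespaces
			NetworkMode: container.NetworkMode("container:" + connectID),
			PidMode:     container.PidMode("container:" + connectID),
		},
		nil, nil, "")
	if err != nil {
		return "", err
	}
	if err = cli.ContainerStart(ctx, resp.ID, container.StartOptions{}); err != nil {
		return "", err
	}
	fmt.Println("started", resp.ID)
	return resp.ID, nil
}
```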

func (option *Options) Dev(ctx context.Context, cConfig *Config, hostConfig *HostConfig) error {
	templateSpec, err := option.GetPodTemplateSpec()
	if err != nil {
		return err
	}
@@ -109,20 +227,18 @@ func (option *Options) Main(ctx context.Context, c *containerConfig) error {
		return sort.Reverse(podutils.ActivePods(pods))
	}
	label := labels.SelectorFromSet(templateSpec.Labels).String()
	firstPod, _, err := polymorphichelpers.GetFirstPod(clientSet.CoreV1(), option.Namespace, label, time.Second*5, sortBy)
	firstPod, _, err := polymorphichelpers.GetFirstPod(option.clientset.CoreV1(), option.Namespace, label, time.Second*5, sortBy)
	if err != nil {
		log.Errorf("get first running pod from k8s: %v", err)
		return err
	}

	pod := firstPod.Name

	env, err := util.GetEnv(ctx, option.Factory, option.Namespace, pod)
	env, err := util.GetEnv(ctx, option.clientset, option.config, option.Namespace, firstPod.Name)
	if err != nil {
		log.Errorf("get env from k8s: %v", err)
		return err
	}
	volume, err := util.GetVolume(ctx, option.Factory, option.Namespace, pod)
	volume, err := util.GetVolume(ctx, option.factory, option.Namespace, firstPod.Name)
	if err != nil {
		log.Errorf("get volume from k8s: %v", err)
		return err
@@ -130,24 +246,37 @@ func (option *Options) Main(ctx context.Context, c *containerConfig) error {
	option.AddRollbackFunc(func() error {
		return util.RemoveDir(volume)
	})
	dns, err := util.GetDNS(ctx, option.Factory, option.Namespace, pod)
	dns, err := util.GetDNS(ctx, option.clientset, option.config, option.Namespace, firstPod.Name)
	if err != nil {
		log.Errorf("get dns from k8s: %v", err)
		return err
	}

	inject.RemoveContainers(templateSpec)
	list := convertKubeResourceToContainer(option.Namespace, *templateSpec, env, volume, dns)
	mergeDockerOptions(list, option, c)
	mode := container.NetworkMode(option.Copts.netMode.NetworkMode())
	if len(option.Copts.netMode.Value()) != 0 {
		log.Infof("network mode is %s", option.Copts.netMode.NetworkMode())
		for _, runConfig := range list[:] {
	if option.ContainerName != "" {
		var index = -1
		for i, c := range templateSpec.Spec.Containers {
			if option.ContainerName == c.Name {
				index = i
				break
			}
		}
		if index != -1 {
			templateSpec.Spec.Containers[0], templateSpec.Spec.Containers[index] = templateSpec.Spec.Containers[index], templateSpec.Spec.Containers[0]
		}
	}
	configList := ConvertPodToContainer(option.Namespace, *templateSpec, env, volume, dns)
	MergeDockerOptions(configList, option, cConfig, hostConfig)

	mode := container.NetworkMode(option.ContainerOptions.netMode.NetworkMode())
	if len(option.ContainerOptions.netMode.Value()) != 0 {
		log.Infof("network mode is %s", option.ContainerOptions.netMode.NetworkMode())
		for _, runConfig := range configList[:] {
			// remove expose port
			runConfig.config.ExposedPorts = nil
			runConfig.hostConfig.NetworkMode = mode
			if mode.IsContainer() {
				runConfig.hostConfig.PidMode = typescontainer.PidMode(option.Copts.netMode.NetworkMode())
				runConfig.hostConfig.PidMode = typescontainer.PidMode(option.ContainerOptions.netMode.NetworkMode())
			}
			runConfig.hostConfig.PortBindings = nil

@@ -160,19 +289,19 @@ func (option *Options) Main(ctx context.Context, c *containerConfig) error {
		}
	} else {
		var networkID string
		networkID, err = createKubevpnNetwork(ctx, option.Cli)
		networkID, err = createNetwork(ctx, option.cli)
		if err != nil {
			log.Errorf("create network for %s: %v", option.Workload, err)
			return err
		}
		log.Infof("create docker network %s", networkID)

		list[len(list)-1].networkingConfig.EndpointsConfig = map[string]*network.EndpointSettings{
			list[len(list)-1].containerName: {NetworkID: networkID},
		configList[len(configList)-1].networkingConfig.EndpointsConfig = map[string]*network.EndpointSettings{
			configList[len(configList)-1].name: {NetworkID: networkID},
		}
		var portMap = nat.PortMap{}
		var portSet = nat.PortSet{}
		for _, runConfig := range list {
		for _, runConfig := range configList {
			for k, v := range runConfig.hostConfig.PortBindings {
				if oldValue, ok := portMap[k]; ok {
					portMap[k] = append(oldValue, v...)
@@ -184,15 +313,15 @@ func (option *Options) Main(ctx context.Context, c *containerConfig) error {
				portSet[k] = v
			}
		}
		list[len(list)-1].hostConfig.PortBindings = portMap
		list[len(list)-1].config.ExposedPorts = portSet
		configList[len(configList)-1].hostConfig.PortBindings = portMap
		configList[len(configList)-1].config.ExposedPorts = portSet

		// skip last, use last container network
		for _, runConfig := range list[:len(list)-1] {
		for _, runConfig := range configList[:len(configList)-1] {
			// remove expose port
			runConfig.config.ExposedPorts = nil
			runConfig.hostConfig.NetworkMode = typescontainer.NetworkMode("container:" + list[len(list)-1].containerName)
			runConfig.hostConfig.PidMode = typescontainer.PidMode("container:" + list[len(list)-1].containerName)
			runConfig.hostConfig.NetworkMode = typescontainer.NetworkMode("container:" + configList[len(configList)-1].name)
			runConfig.hostConfig.PidMode = typescontainer.PidMode("container:" + configList[len(configList)-1].name)
			runConfig.hostConfig.PortBindings = nil

			// remove dns
@@ -205,435 +334,112 @@ func (option *Options) Main(ctx context.Context, c *containerConfig) error {
		}

	option.AddRollbackFunc(func() error {
		return list.Remove(ctx, option.Cli)
		return configList.Remove(ctx, option.cli)
	})
	return list.Run(ctx, volume, option.Cli, option.DockerCli)
	return configList.Run(ctx, volume, option.cli, option.dockerCli)
}

// connect to cluster network on docker container or host
func (option *Options) connect(ctx context.Context, f cmdutil.Factory, conf *util.SshConfig, transferImage bool, c *containerConfig) (func(), error) {
	connect := &handler.ConnectOptions{
		Headers:              option.Headers,
		Workloads:            []string{option.Workload},
		ExtraRouteInfo:       option.ExtraRouteInfo,
		Engine:               option.Engine,
		OriginKubeconfigPath: util.GetKubeConfigPath(f),
func disconnect(ctx context.Context, daemonClient rpc.DaemonClient, req *rpc.DisconnectRequest) error {
	resp, err := daemonClient.Disconnect(ctx, req)
	if err != nil {
		return err
	}
	if err := connect.InitClient(f); err != nil {
		return nil, err
	}
	option.Namespace = connect.Namespace

	if err := connect.PreCheckResource(); err != nil {
		return nil, err
	}
	if len(connect.Workloads) > 1 {
		return nil, fmt.Errorf("can only dev one workloads at same time, workloads: %v", connect.Workloads)
	}
	if len(connect.Workloads) < 1 {
		return nil, fmt.Errorf("you must provide resource to dev, workloads : %v is invaild", connect.Workloads)
	}
	option.Workload = connect.Workloads[0]

	// if no-proxy is true, there is no need to intercept traffic
	if option.NoProxy {
		if len(connect.Headers) != 0 {
			return nil, fmt.Errorf("not needs to provide headers if is no-proxy mode")
		}
		connect.Workloads = []string{}
	}

	switch option.ConnectMode {
	case ConnectModeHost:
		daemonCli := daemon.GetClient(false)
		if daemonCli == nil {
			return nil, fmt.Errorf("get nil daemon client")
		}
		kubeConfigBytes, ns, err := util.ConvertToKubeConfigBytes(f)
		if err != nil {
			return nil, err
		}
		logLevel := log.ErrorLevel
		if config.Debug {
			logLevel = log.DebugLevel
		}
		// no need to ssh jump in the daemon, because dev mode hangs until the user exits,
		// so an ssh jump in the client is enough
		req := &rpc.ConnectRequest{
			KubeconfigBytes:      string(kubeConfigBytes),
			Namespace:            ns,
			Headers:              connect.Headers,
			Workloads:            connect.Workloads,
			ExtraRoute:           connect.ExtraRouteInfo.ToRPC(),
			Engine:               string(connect.Engine),
			OriginKubeconfigPath: util.GetKubeConfigPath(f),
			TransferImage:        transferImage,
			Image:                config.Image,
			Level:                int32(logLevel),
			SshJump:              conf.ToRPC(),
		}
		cancel := disconnect(ctx, daemonCli, &rpc.LeaveRequest{Workloads: connect.Workloads}, &rpc.DisconnectRequest{
			KubeconfigBytes: ptr.To(string(kubeConfigBytes)),
			Namespace:       ptr.To(ns),
			SshJump:         conf.ToRPC(),
		})
		var resp rpc.Daemon_ConnectClient
		resp, err = daemonCli.Proxy(ctx, req)
		if err != nil {
			log.Errorf("connect to cluster error: %s", err.Error())
			return cancel, err
		}
		for {
			response, err := resp.Recv()
			if err == io.EOF {
				return cancel, nil
			} else if err != nil {
				return cancel, err
			}
			fmt.Fprint(os.Stdout, response.Message)
		}

	case ConnectModeContainer:
		port, set, err := option.GetExposePort(c)
		if err != nil {
			return nil, err
		}
		var path = os.Getenv(config.EnvSSHJump)
		if path != "" {
			path, err = util.ConvertK8sApiServerToDomain(path)
		} else {
			path, err = util.GetKubeconfigPath(connect.GetFactory())
		}
		if err != nil {
			return nil, err
		}
		var platform specs.Platform
		if option.Options.platform != "" {
			platform, err = platforms.Parse(option.Options.platform)
			if err != nil {
				return nil, pkgerr.Wrap(err, "error parsing specified platform")
			}
		}

		var connectContainer *RunConfig
		connectContainer, err = createConnectContainer(option.NoProxy, *connect, path, option.Cli, &platform, port, set)
		if err != nil {
			return nil, err
		}
		cancelCtx, cancelFunc := context.WithCancel(ctx)
		defer cancelFunc()
		var id string
		log.Infof("starting container connect to cluster")
		id, err = run(cancelCtx, connectContainer, option.Cli, option.DockerCli)
		if err != nil {
			return nil, err
		}
		h := interrupt.New(
			func(signal os.Signal) { return },
			func() {
				cancelFunc()
				_ = option.Cli.ContainerKill(context.Background(), id, "SIGTERM")
				_ = runLogsSinceNow(option.DockerCli, id, true)
			},
		)
		go h.Run(func() error { select {} })
		option.AddRollbackFunc(func() error {
			h.Close()
	for {
		recv, err := resp.Recv()
		if err == io.EOF {
			return nil
		})
		err = runLogsWaitRunning(cancelCtx, option.DockerCli, id)
		if err != nil {
			// interrupted by signal KILL
			if errors.Is(err, context.Canceled) {
				return nil, nil
			}
			return nil, err
		} else if err != nil {
			return err
		}
		log.Infof("container connect to cluster successfully")
		err = option.Copts.netMode.Set(fmt.Sprintf("container:%s", id))
		_, _ = fmt.Fprint(os.Stdout, recv.Message)
	}
}

func (option *Options) CreateConnectContainer(portBindings nat.PortMap) (*RunConfig, error) {
	portMap, portSet, err := option.GetExposePort(portBindings)
	if err != nil {
		return nil, err
	default:
		return nil, fmt.Errorf("unsupport connect mode: %s", option.ConnectMode)
	}
}

func disconnect(ctx context.Context, daemonClient rpc.DaemonClient, leaveReq *rpc.LeaveRequest, req *rpc.DisconnectRequest) func() {
	return func() {
		resp, err := daemonClient.Leave(ctx, leaveReq)
		if err == nil {
			for {
				msg, err := resp.Recv()
				if err == io.EOF {
					break
				} else if err != nil {
					log.Errorf("leave resource %s error: %v", strings.Join(leaveReq.Workloads, " "), err)
					break
				}
				fmt.Fprint(os.Stdout, msg.Message)
			}
		}
		resp1, err := daemonClient.Disconnect(ctx, req)
		if err != nil {
			log.Errorf("disconnect error: %v", err)
			return
		}
		for {
			msg, err := resp1.Recv()
			if err == io.EOF {
				return
			} else if err != nil {
				log.Errorf("disconnect error: %v", err)
				return
			}
			fmt.Fprint(os.Stdout, msg.Message)
		}
	}
}

func createConnectContainer(noProxy bool, connect handler.ConnectOptions, path string, cli *client.Client, platform *specs.Platform, port nat.PortMap, set nat.PortSet) (*RunConfig, error) {
	var entrypoint []string
	if noProxy {
		entrypoint = []string{"kubevpn", "connect", "--foreground", "-n", connect.Namespace, "--kubeconfig", "/root/.kube/config", "--image", config.Image, "--engine", string(connect.Engine)}
	var kubeconfigPath = os.Getenv(config.EnvSSHJump)
	if kubeconfigPath != "" {
		kubeconfigPath, err = util.ConvertK8sApiServerToDomain(kubeconfigPath)
	} else {
		entrypoint = []string{"kubevpn", "proxy", connect.Workloads[0], "--foreground", "-n", connect.Namespace, "--kubeconfig", "/root/.kube/config", "--image", config.Image, "--engine", string(connect.Engine)}
		for k, v := range connect.Headers {
		kubeconfigPath, err = util.GetKubeconfigPath(option.factory)
	}
	if err != nil {
		return nil, err
	}

	var entrypoint []string
	if option.NoProxy {
		entrypoint = []string{"kubevpn", "connect", "--foreground", "-n", option.Namespace, "--kubeconfig", "/root/.kube/config", "--image", config.Image, "--engine", string(option.Engine)}
	} else {
		entrypoint = []string{"kubevpn", "proxy", option.Workload, "--foreground", "-n", option.Namespace, "--kubeconfig", "/root/.kube/config", "--image", config.Image, "--engine", string(option.Engine)}
		for k, v := range option.Headers {
			entrypoint = append(entrypoint, "--headers", fmt.Sprintf("%s=%s", k, v))
		}
	}
	for _, v := range connect.ExtraRouteInfo.ExtraCIDR {
	for _, v := range option.ExtraRouteInfo.ExtraCIDR {
		entrypoint = append(entrypoint, "--extra-cidr", v)
	}
	for _, v := range connect.ExtraRouteInfo.ExtraDomain {
	for _, v := range option.ExtraRouteInfo.ExtraDomain {
		entrypoint = append(entrypoint, "--extra-domain", v)
	}
	if connect.ExtraRouteInfo.ExtraNodeIP {
	if option.ExtraRouteInfo.ExtraNodeIP {
		entrypoint = append(entrypoint, "--extra-node-ip")
	}

	runConfig := &container.Config{
		User:            "root",
		AttachStdin:     false,
		AttachStdout:    false,
		AttachStderr:    false,
		ExposedPorts:    set,
		StdinOnce:       false,
		Env:             []string{},
		Cmd:             []string{},
		Healthcheck:     nil,
		ArgsEscaped:     false,
		Image:           config.Image,
		Volumes:         nil,
		Entrypoint:      entrypoint,
		NetworkDisabled: false,
		MacAddress:      "",
		OnBuild:         nil,
		StopSignal:      "",
		StopTimeout:     nil,
		Shell:           nil,
		User:         "root",
		ExposedPorts: portSet,
		Env:          []string{},
		Cmd:          []string{},
		Healthcheck:  nil,
		Image:        config.Image,
		Entrypoint:   entrypoint,
	}
	hostConfig := &container.HostConfig{
		Binds:         []string{fmt.Sprintf("%s:%s", path, "/root/.kube/config")},
		Binds:         []string{fmt.Sprintf("%s:%s", kubeconfigPath, "/root/.kube/config")},
		LogConfig:     container.LogConfig{},
		PortBindings:  port,
		PortBindings:  portMap,
		AutoRemove:    true,
		Privileged:    true,
		RestartPolicy: container.RestartPolicy{},
		AutoRemove:    false,
		VolumeDriver:  "",
		VolumesFrom:   nil,
		ConsoleSize:   [2]uint{},
		CapAdd:        strslice.StrSlice{"SYS_PTRACE", "SYS_ADMIN"}, // for dlv
		CgroupnsMode:  "",
		// https://stackoverflow.com/questions/24319662/from-inside-of-a-docker-container-how-do-i-connect-to-the-localhost-of-the-mach
		// couldn't get current server API group list: Get "https://host.docker.internal:62844/api?timeout=32s": tls: failed to verify certificate: x509: certificate is valid for kubernetes.default.svc.cluster.local, kubernetes.default.svc, kubernetes.default, kubernetes, istio-sidecar-injector.istio-system.svc, proxy-exporter.kube-system.svc, not host.docker.internal
		ExtraHosts:      []string{"host.docker.internal:host-gateway", "kubernetes:host-gateway"},
		GroupAdd:        nil,
		IpcMode:         "",
		Cgroup:          "",
		Links:           nil,
		OomScoreAdj:     0,
		PidMode:         "",
		Privileged:      true,
		PublishAllPorts: false,
		ReadonlyRootfs:  false,
		SecurityOpt:     []string{"apparmor=unconfined", "seccomp=unconfined"},
		StorageOpt:      nil,
		Tmpfs:           nil,
		UTSMode:         "",
		UsernsMode:      "",
		ShmSize:         0,
		Sysctls:         map[string]string{"net.ipv6.conf.all.disable_ipv6": strconv.Itoa(0)},
		Runtime:         "",
		Isolation:       "",
		Resources:       container.Resources{},
		MaskedPaths:     nil,
		ReadonlyPaths:   nil,
		Init:            nil,
		ExtraHosts:  []string{"host.docker.internal:host-gateway", "kubernetes:host-gateway"},
		SecurityOpt: []string{"apparmor=unconfined", "seccomp=unconfined"},
		Sysctls:     map[string]string{"net.ipv6.conf.all.disable_ipv6": strconv.Itoa(0)},
		Resources:   container.Resources{},
	}
	var suffix string
	if newUUID, err := uuid.NewUUID(); err == nil {
		suffix = strings.ReplaceAll(newUUID.String(), "-", "")[:5]
	}
	networkID, err := createKubevpnNetwork(context.Background(), cli)
	newUUID, err := uuid.NewUUID()
	if err != nil {
		return nil, err
	}
	name := fmt.Sprintf("%s_%s_%s", "kubevpn", "local", suffix)
	suffix := strings.ReplaceAll(newUUID.String(), "-", "")[:5]
	name := util.Join(option.Namespace, "kubevpn", suffix)
	networkID, err := createNetwork(context.Background(), option.cli)
	if err != nil {
		return nil, err
	}
	var platform *specs.Platform
	if option.RunOptions.Platform != "" {
		plat, _ := platforms.Parse(option.RunOptions.Platform)
		platform = &plat
	}
	c := &RunConfig{
		config:     runConfig,
		hostConfig: hostConfig,
		networkingConfig: &network.NetworkingConfig{
			EndpointsConfig: map[string]*network.EndpointSettings{name: {
				NetworkID: networkID,
			}},
		},
		config:           runConfig,
		hostConfig:       hostConfig,
		networkingConfig: &network.NetworkingConfig{EndpointsConfig: map[string]*network.EndpointSettings{name: {NetworkID: networkID}}},
		platform:         platform,
		containerName:    name,
		k8sContainerName: name,
		name:             name,
		Options:          RunOptions{Pull: PullImageMissing},
	}
	return c, nil
}

func runLogsWaitRunning(ctx context.Context, dockerCli command.Cli, container string) error {
	c, err := dockerCli.Client().ContainerInspect(ctx, container)
	if err != nil {
		return err
	}

	options := typescontainer.LogsOptions{
		ShowStdout: true,
		ShowStderr: true,
		Follow:     true,
	}
	logStream, err := dockerCli.Client().ContainerLogs(ctx, c.ID, options)
	if err != nil {
		return err
	}
	defer logStream.Close()

	buf := bytes.NewBuffer(nil)
	w := io.MultiWriter(buf, dockerCli.Out())

	cancel, cancelFunc := context.WithCancel(ctx)
	defer cancelFunc()

	go func() {
		t := time.NewTicker(time.Second)
		defer t.Stop()
		for range t.C {
			// keyword match; maybe a more elegant way can be found
			if strings.Contains(buf.String(), "dns service ok") {
				cancelFunc()
				return
			}
		}
	}()

	var errChan = make(chan error)
	go func() {
		var err error
		if c.Config.Tty {
			_, err = io.Copy(w, logStream)
		} else {
			_, err = stdcopy.StdCopy(w, dockerCli.Err(), logStream)
		}
		if err != nil {
			errChan <- err
		}
	}()

	select {
	case err = <-errChan:
		return err
	case <-cancel.Done():
		return nil
	}
}
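`runLogsWaitRunning` treats the literal log line `dns service ok` as a readiness marker, polling an in-memory copy of the log stream once a second. A simpler line-oriented variant of the same idea (a sketch of the pattern, not the code used above, and it assumes the reader yields plain text lines):

```go
package example

import (
	"bufio"
	"context"
	"fmt"
	"io"
	"strings"
)

// waitForKeyword reads a log stream line by line and returns once the
// given marker appears, the stream ends, or ctx is cancelled.
func waitForKeyword(ctx context.Context, logs io.Reader, keyword string) error {
	found := make(chan struct{})
	errCh := make(chan error, 1)
	go func() {
		scanner := bufio.NewScanner(logs)
		for scanner.Scan() {
			if strings.Contains(scanner.Text(), keyword) {
				close(found)
				return
			}
		}
		if err := scanner.Err(); err != nil {
			errCh <- err
			return
		}
		errCh <- fmt.Errorf("log stream ended before %q was seen", keyword)
	}()
	select {
	case <-found:
		return nil
	case err := <-errCh:
		return err
	case <-ctx.Done():
		return ctx.Err()
	}
}
```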

func runLogsSinceNow(dockerCli command.Cli, container string, follow bool) error {
	ctx := context.Background()

	c, err := dockerCli.Client().ContainerInspect(ctx, container)
	if err != nil {
		return err
	}

	options := typescontainer.LogsOptions{
		ShowStdout: true,
		ShowStderr: true,
		Since:      "0m",
		Follow:     follow,
	}
	responseBody, err := dockerCli.Client().ContainerLogs(ctx, c.ID, options)
	if err != nil {
		return err
	}
	defer responseBody.Close()

	if c.Config.Tty {
		_, err = io.Copy(dockerCli.Out(), responseBody)
	} else {
		_, err = stdcopy.StdCopy(dockerCli.Out(), dockerCli.Err(), responseBody)
	}
	return err
}

func runKill(dockerCli command.Cli, containers ...string) error {
	var errs []string
	ctx := context.Background()
	errChan := parallelOperation(ctx, append([]string{}, containers...), func(ctx context.Context, container string) error {
		return dockerCli.Client().ContainerKill(ctx, container, "SIGTERM")
	})
	for _, name := range containers {
		if err := <-errChan; err != nil {
			errs = append(errs, err.Error())
		} else {
			fmt.Fprintln(dockerCli.Out(), name)
		}
	}
	if len(errs) > 0 {
		return errors.New(strings.Join(errs, "\n"))
	}
	return nil
}
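`runKill` relies on the docker CLI's `parallelOperation` helper: one goroutine per container, results reported over a channel in order of completion rather than item order (which is why an error here cannot be attributed to a specific container name). A distilled generic version of that fan-out pattern, for illustration only:

```go
package example

import "context"

// fanOut starts one goroutine per item and delivers each op's result
// over the returned channel in completion order. The channel is
// buffered so workers never block even if the caller stops reading.
func fanOut[T any](ctx context.Context, items []T, op func(context.Context, T) error) <-chan error {
	errCh := make(chan error, len(items))
	for _, item := range items {
		go func(item T) {
			errCh <- op(ctx, item)
		}(item)
	}
	return errCh
}
```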

func createKubevpnNetwork(ctx context.Context, cli *client.Client) (string, error) {
	by := map[string]string{"owner": config.ConfigMapPodTrafficManager}
	list, _ := cli.NetworkList(ctx, types.NetworkListOptions{})
	for _, resource := range list {
		if reflect.DeepEqual(resource.Labels, by) {
			return resource.ID, nil
		}
	}

	create, err := cli.NetworkCreate(ctx, config.ConfigMapPodTrafficManager, types.NetworkCreate{
		Driver: "bridge",
		Scope:  "local",
		IPAM: &network.IPAM{
			Driver:  "",
			Options: nil,
			Config: []network.IPAMConfig{
				{
					Subnet:  config.DockerCIDR.String(),
					Gateway: config.DockerRouterIP.String(),
				},
			},
		},
		//Options: map[string]string{"--icc": "", "--ip-masq": ""},
		Labels: by,
	})
	if err != nil {
		if errdefs.IsForbidden(err) {
			list, _ = cli.NetworkList(ctx, types.NetworkListOptions{})
			for _, resource := range list {
				if reflect.DeepEqual(resource.Labels, by) {
					return resource.ID, nil
				}
			}
		}
		return "", err
	}
	return create.ID, nil
}
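`createKubevpnNetwork` is effectively idempotent: look the network up by its `owner` label, try to create it, and on a forbidden error (another process won the race) list again. The label scan could also be pushed to the daemon with a server-side filter; a hedged sketch of that alternative, assuming the same label convention:

```go
package example

import (
	"context"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/client"
)

// findNetworkByLabel is an alternative to the reflect.DeepEqual scan
// above: the daemon filters networks by label server-side. It returns
// "" when no matching network exists yet.
func findNetworkByLabel(ctx context.Context, cli *client.Client, key, value string) (string, error) {
	args := filters.NewArgs(filters.Arg("label", key+"="+value))
	list, err := cli.NetworkList(ctx, types.NetworkListOptions{Filters: args})
	if err != nil {
		return "", err
	}
	if len(list) == 0 {
		return "", nil
	}
	return list[0].ID, nil
}
```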

func (option *Options) AddRollbackFunc(f func() error) {
	option.rollbackFuncList = append(option.rollbackFuncList, f)
}
@@ -642,35 +448,23 @@ func (option *Options) GetRollbackFuncList() []func() error {
	return option.rollbackFuncList
}

func AddDockerFlags(options *Options, p *pflag.FlagSet, cli *command.DockerCli) {
func AddDockerFlags(options *Options, p *pflag.FlagSet) {
	p.SetInterspersed(false)

	// These are flags not stored in Config/HostConfig
	p.BoolVarP(&options.Options.detach, "detach", "d", false, "Run container in background and print container ID")
	p.StringVar(&options.Options.name, "name", "", "Assign a name to the container")
	p.StringVar(&options.Options.pull, "pull", PullImageMissing, `Pull image before running ("`+PullImageAlways+`"|"`+PullImageMissing+`"|"`+PullImageNever+`")`)
	p.BoolVarP(&options.Options.quiet, "quiet", "q", false, "Suppress the pull output")
	p.StringVar(&options.RunOptions.Pull, "pull", PullImageMissing, `Pull image before running ("`+PullImageAlways+`"|"`+PullImageMissing+`"|"`+PullImageNever+`")`)
	p.BoolVar(&options.RunOptions.SigProxy, "sig-proxy", true, "Proxy received signals to the process")

	// Add an explicit help that doesn't have a `-h` to prevent the conflict
	// with hostname
	p.Bool("help", false, "Print usage")

	command.AddPlatformFlag(p, &options.Options.platform)
	command.AddTrustVerificationFlags(p, &options.Options.untrusted, cli.ContentTrustEnabled())

	options.Copts = addFlags(p)
	command.AddPlatformFlag(p, &options.RunOptions.Platform)
	options.ContainerOptions = addFlags(p)
}

func (option *Options) GetExposePort(containerCfg *containerConfig) (nat.PortMap, nat.PortSet, error) {
	object, err := util.GetUnstructuredObject(option.Factory, option.Namespace, option.Workload)
	if err != nil {
		log.Errorf("get unstructured object error: %v", err)
		return nil, nil, err
	}

	u := object.Object.(*unstructured.Unstructured)
	var templateSpec *v1.PodTemplateSpec
	templateSpec, _, err = util.GetPodTemplateSpecPath(u)
func (option *Options) GetExposePort(portBinds nat.PortMap) (nat.PortMap, nat.PortSet, error) {
	templateSpec, err := option.GetPodTemplateSpec()
	if err != nil {
		return nil, nil, err
	}
@@ -691,10 +485,43 @@ func (option *Options) GetExposePort(containerCfg *containerConfig) (nat.PortMap
		}
	}

	for port, bindings := range containerCfg.HostConfig.PortBindings {
	for port, bindings := range portBinds {
		portMap[port] = bindings
		portSet[port] = struct{}{}
	}

	return portMap, portSet, nil
}

func (option *Options) InitClient(f cmdutil.Factory) (err error) {
	option.factory = f
	if option.config, err = option.factory.ToRESTConfig(); err != nil {
		return
	}
	if option.restclient, err = option.factory.RESTClient(); err != nil {
		return
	}
	if option.clientset, err = option.factory.KubernetesClientSet(); err != nil {
		return
	}
	if option.Namespace, _, err = option.factory.ToRawKubeConfigLoader().Namespace(); err != nil {
		return
	}
	if option.cli, option.dockerCli, err = util.GetClient(); err != nil {
		return err
	}
	return
}

func (option *Options) GetPodTemplateSpec() (*v1.PodTemplateSpec, error) {
	object, err := util.GetUnstructuredObject(option.factory, option.Namespace, option.Workload)
	if err != nil {
		log.Errorf("get unstructured object error: %v", err)
		return nil, err
	}

	u := object.Object.(*unstructured.Unstructured)
	var templateSpec *v1.PodTemplateSpec
	templateSpec, _, err = util.GetPodTemplateSpecPath(u)
	return templateSpec, err
}
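With this refactor, all client construction lives in `InitClient` and the flow is `Main` → `Connect` → `Dev`. A hypothetical sketch of how a command entry point might wire these pieces together; the actual `cmd` wiring in this commit may differ, and the rollback-on-exit loop is an assumption:

```go
package example

import (
	"github.com/spf13/cobra"
	cmdutil "k8s.io/kubectl/pkg/cmd/util"

	"github.com/wencaiwulue/kubevpn/v2/pkg/dev"
	"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)

// runDev is a hypothetical entry point showing how the refactored
// pieces fit together.
func runDev(cmd *cobra.Command, f cmdutil.Factory, sshConfig *util.SshConfig, options *dev.Options, transferImage bool) error {
	// kube clients, namespace and docker clients are all resolved here
	if err := options.InitClient(f); err != nil {
		return err
	}
	defer func() {
		// run the registered rollback funcs on exit (order assumed)
		for _, fn := range options.GetRollbackFuncList() {
			_ = fn()
		}
	}()
	// Main parses the docker flags, connects (host or container mode),
	// then hands off to Dev to start the local containers.
	return options.Main(cmd.Context(), sshConfig, cmd.Flags(), transferImage)
}
```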
@@ -2,46 +2,41 @@ package dev

import (
	"context"
	"errors"
	"fmt"
	"math/rand"
	"net"
	"strconv"
	"strings"
	"time"
	"unsafe"

	"github.com/containerd/containerd/platforms"
	"github.com/docker/cli/cli/command"
	"github.com/docker/docker/api/types"
	typescommand "github.com/docker/docker/api/types/container"
	typescontainer "github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/mount"
	"github.com/docker/docker/api/types/network"
	"github.com/docker/docker/api/types/strslice"
	"github.com/docker/docker/client"
	"github.com/docker/docker/errdefs"
	"github.com/docker/go-connections/nat"
	"github.com/google/uuid"
	"github.com/miekg/dns"
	"github.com/opencontainers/image-spec/specs-go/v1"
	log "github.com/sirupsen/logrus"
	v12 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/utils/ptr"

	"github.com/wencaiwulue/kubevpn/v2/pkg/config"
	"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)

type RunConfig struct {
	containerName    string
	k8sContainerName string
	name string

	config           *typescontainer.Config
	hostConfig       *typescontainer.HostConfig
	networkingConfig *network.NetworkingConfig
	platform         *v1.Platform

	Options runOptions
	Copts   *containerOptions
	Options RunOptions
	Copts   ContainerOptions
}

type ConfigList []*RunConfig
@@ -58,11 +53,11 @@ func (c ConfigList) Remove(ctx context.Context, cli *client.Client) error {
		return nil
	}
	for _, runConfig := range c {
		err := cli.NetworkDisconnect(ctx, runConfig.containerName, runConfig.containerName, true)
		err := cli.NetworkDisconnect(ctx, runConfig.name, runConfig.name, true)
		if err != nil {
			log.Debug(err)
		}
		err = cli.ContainerRemove(ctx, runConfig.containerName, typescontainer.RemoveOptions{Force: true})
		err = cli.ContainerRemove(ctx, runConfig.name, typescontainer.RemoveOptions{Force: true})
		if err != nil {
			log.Debug(err)
		}
@@ -81,31 +76,30 @@ func (c ConfigList) Run(ctx context.Context, volume map[string][]mount.Mount, cl
	for index := len(c) - 1; index >= 0; index-- {
		runConfig := c[index]
		if index == 0 {
			err := runAndAttach(ctx, runConfig, dockerCli)
			err := runContainer(ctx, dockerCli, runConfig)
			if err != nil {
				return err
			}
		} else {
			id, err := run(ctx, runConfig, cli, dockerCli)
		}
		_, err := run(ctx, cli, dockerCli, runConfig)
		if err != nil {
			// try to copy volume into container, why?
			runConfig.hostConfig.Mounts = nil
			id, err1 := run(ctx, cli, dockerCli, runConfig)
			if err1 != nil {
				// return the first error
				return err
			}
			err = util.CopyVolumeIntoContainer(ctx, volume[runConfig.name], cli, id)
			if err != nil {
				// try another way to start up the container
				log.Infof("occur err: %v, try to use copy to startup container...", err)
				runConfig.hostConfig.Mounts = nil
				id, err = run(ctx, runConfig, cli, dockerCli)
				if err != nil {
					return err
				}
				err = util.CopyVolumeIntoContainer(ctx, volume[runConfig.k8sContainerName], cli, id)
				if err != nil {
					return err
				}
				return err
			}
		}
	}
	return nil
}

func convertKubeResourceToContainer(ns string, temp v12.PodTemplateSpec, envMap map[string][]string, mountVolume map[string][]mount.Mount, dnsConfig *dns.ClientConfig) (list ConfigList) {
func ConvertPodToContainer(ns string, temp v12.PodTemplateSpec, envMap map[string][]string, mountVolume map[string][]mount.Mount, dnsConfig *dns.ClientConfig) (list ConfigList) {
	getHostname := func(containerName string) string {
		for _, envEntry := range envMap[containerName] {
			env := strings.Split(envEntry, "=")
@@ -118,17 +112,17 @@ func convertKubeResourceToContainer(ns string, temp v12.PodTemplateSpec, envMap

	for _, c := range temp.Spec.Containers {
		containerConf := &typescontainer.Config{
			Hostname:     getHostname(c.Name),
			Hostname:     getHostname(util.Join(ns, c.Name)),
			Domainname:   temp.Spec.Subdomain,
			User:         "root",
			AttachStdin:  false,
			AttachStdin:  c.Stdin,
			AttachStdout: false,
			AttachStderr: false,
			ExposedPorts: nil,
			Tty:          c.TTY,
			OpenStdin:    c.Stdin,
			StdinOnce:    false,
			Env:          envMap[c.Name],
			StdinOnce:    c.StdinOnce,
			Env:          envMap[util.Join(ns, c.Name)],
			Cmd:          c.Args,
			Healthcheck:  nil,
			ArgsEscaped:  false,
@@ -169,7 +163,7 @@ func convertKubeResourceToContainer(ns string, temp v12.PodTemplateSpec, envMap
			Links:           nil,
			OomScoreAdj:     0,
			PidMode:         "",
			Privileged:      true,
			Privileged:      ptr.Deref(ptr.Deref(c.SecurityContext, v12.SecurityContext{}).Privileged, false),
			PublishAllPorts: false,
			ReadonlyRootfs:  false,
			SecurityOpt:     []string{"apparmor=unconfined", "seccomp=unconfined"},
@@ -182,7 +176,7 @@ func convertKubeResourceToContainer(ns string, temp v12.PodTemplateSpec, envMap
			Runtime:       "",
			Isolation:     "",
			Resources:     typescontainer.Resources{},
			Mounts:        mountVolume[c.Name],
			Mounts:        mountVolume[util.Join(ns, c.Name)],
			MaskedPaths:   nil,
			ReadonlyPaths: nil,
			Init:          nil,
@@ -206,113 +200,86 @@ func convertKubeResourceToContainer(ns string, temp v12.PodTemplateSpec, envMap
			hostConfig.CapAdd = append(hostConfig.CapAdd, *(*strslice.StrSlice)(unsafe.Pointer(&c.SecurityContext.Capabilities.Add))...)
			hostConfig.CapDrop = *(*strslice.StrSlice)(unsafe.Pointer(&c.SecurityContext.Capabilities.Drop))
		}
		var suffix string
		newUUID, err := uuid.NewUUID()
		if err == nil {
			suffix = strings.ReplaceAll(newUUID.String(), "-", "")[:5]

		var r = RunConfig{
			name:             util.Join(ns, c.Name),
			config:           containerConf,
			hostConfig:       hostConfig,
			networkingConfig: &network.NetworkingConfig{EndpointsConfig: make(map[string]*network.EndpointSettings)},
			platform:         nil,
			Options:          RunOptions{Pull: PullImageMissing},
		}

		var r RunConfig
		r.containerName = fmt.Sprintf("%s_%s_%s_%s", c.Name, ns, "kubevpn", suffix)
		r.k8sContainerName = c.Name
		r.config = containerConf
		r.hostConfig = hostConfig
		r.networkingConfig = &network.NetworkingConfig{EndpointsConfig: make(map[string]*network.EndpointSettings)}
		r.platform = nil
		list = append(list, &r)
	}

	return list
}

func run(ctx context.Context, runConfig *RunConfig, cli *client.Client, c *command.DockerCli) (id string, err error) {
	rand.New(rand.NewSource(time.Now().UnixNano()))
func MergeDockerOptions(list ConfigList, options *Options, config *Config, hostConfig *HostConfig) {
	conf := list[0]
	conf.Options = options.RunOptions
	conf.Copts = *options.ContainerOptions

	var config = runConfig.config
	var hostConfig = runConfig.hostConfig
	var platform = runConfig.platform
	var networkConfig = runConfig.networkingConfig
	var name = runConfig.containerName

	var needPull bool
	var img types.ImageInspect
	img, _, err = cli.ImageInspectWithRaw(ctx, config.Image)
	if errdefs.IsNotFound(err) {
		log.Infof("needs to pull image %s", config.Image)
		needPull = true
		err = nil
	} else if err != nil {
		log.Errorf("image inspect failed: %v", err)
		return
	if options.RunOptions.Platform != "" {
		p, _ := platforms.Parse(options.RunOptions.Platform)
		conf.platform = &p
	}
	if platform != nil && platform.Architecture != "" && platform.OS != "" {
		if img.Os != platform.OS || img.Architecture != platform.Architecture {
			needPull = true

	// container config
	var entrypoint = conf.config.Entrypoint
	var args = conf.config.Cmd
	// if --entrypoint was specified, use it
	if len(config.Entrypoint) != 0 {
		entrypoint = config.Entrypoint
		args = config.Cmd
	}
	if len(config.Cmd) != 0 {
		args = config.Cmd
	}
	conf.config.Entrypoint = entrypoint
	conf.config.Cmd = args
	if options.DevImage != "" {
		conf.config.Image = options.DevImage
	}
	conf.config.Volumes = util.Merge[string, struct{}](conf.config.Volumes, config.Volumes)
	for k, v := range config.ExposedPorts {
		if _, found := conf.config.ExposedPorts[k]; !found {
			conf.config.ExposedPorts[k] = v
		}
	}
	if needPull {
		err = util.PullImage(ctx, runConfig.platform, cli, c, config.Image, nil)
	conf.config.StdinOnce = config.StdinOnce
	conf.config.AttachStdin = config.AttachStdin
	conf.config.AttachStdout = config.AttachStdout
	conf.config.AttachStderr = config.AttachStderr
	conf.config.Tty = config.Tty
	conf.config.OpenStdin = config.OpenStdin

	// host config
	var hosts []string
	for _, domain := range options.ExtraRouteInfo.ExtraDomain {
		ips, err := net.LookupIP(domain)
		if err != nil {
			log.Errorf("Failed to pull image: %s, err: %s", config.Image, err)
			return
			continue
		}
		for _, ip := range ips {
			if ip.To4() != nil {
				hosts = append(hosts, fmt.Sprintf("%s:%s", domain, ip.To4().String()))
				break
			}
		}
	}

	var create typescommand.CreateResponse
	create, err = cli.ContainerCreate(ctx, config, hostConfig, networkConfig, platform, name)
	if err != nil {
		log.Errorf("Failed to create container: %s, err: %s", name, err)
		return
	conf.hostConfig.ExtraHosts = hosts
	conf.hostConfig.AutoRemove = hostConfig.AutoRemove
	conf.hostConfig.Privileged = hostConfig.Privileged
	conf.hostConfig.PublishAllPorts = hostConfig.PublishAllPorts
	conf.hostConfig.Mounts = append(conf.hostConfig.Mounts, hostConfig.Mounts...)
	conf.hostConfig.Binds = append(conf.hostConfig.Binds, hostConfig.Binds...)
	for port, bindings := range hostConfig.PortBindings {
		if v, ok := conf.hostConfig.PortBindings[port]; ok {
			conf.hostConfig.PortBindings[port] = append(v, bindings...)
		} else {
			conf.hostConfig.PortBindings[port] = bindings
		}
	}
	id = create.ID
	log.Infof("Created container: %s", name)
	defer func() {
		if err != nil && runConfig.hostConfig.AutoRemove {
			_ = cli.ContainerRemove(ctx, id, typescontainer.RemoveOptions{Force: true})
		}
	}()

	err = cli.ContainerStart(ctx, create.ID, typescontainer.StartOptions{})
	if err != nil {
		log.Errorf("failed to startup container %s: %v", name, err)
		return
	}
	log.Infof("Wait container %s to be running...", name)
	var inspect types.ContainerJSON
	ctx2, cancelFunc := context.WithTimeout(ctx, time.Minute*5)
	wait.UntilWithContext(ctx2, func(ctx context.Context) {
		inspect, err = cli.ContainerInspect(ctx, create.ID)
		if errdefs.IsNotFound(err) {
			cancelFunc()
			return
		} else if err != nil {
			cancelFunc()
			return
		}
		if inspect.State != nil && (inspect.State.Status == "exited" || inspect.State.Status == "dead" || inspect.State.Dead) {
			cancelFunc()
			err = errors.New(fmt.Sprintf("container status: %s", inspect.State.Status))
			return
		}
		if inspect.State != nil && inspect.State.Running {
			cancelFunc()
			return
		}
	}, time.Second)
	if err != nil {
		log.Errorf("failed to wait container to be ready: %v", err)
		_ = runLogsSinceNow(c, id, false)
		return
	}

	log.Infof("Container %s is running now", name)
	return
}

func runAndAttach(ctx context.Context, runConfig *RunConfig, cli *command.DockerCli) error {
	c := &containerConfig{
		Config:           runConfig.config,
		HostConfig:       runConfig.hostConfig,
		NetworkingConfig: runConfig.networkingConfig,
	}
	return runContainer(ctx, cli, &runConfig.Options, runConfig.Copts, c)
}
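Both log helpers in this file branch on `c.Config.Tty` before copying the stream. Without a TTY, the Docker daemon multiplexes stdout and stderr into a single stream that must be demultiplexed with `stdcopy.StdCopy`; with a TTY it is a raw byte stream. A minimal sketch of that pattern, assuming a running container id:

```go
package example

import (
	"context"
	"io"
	"os"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
	"github.com/docker/docker/pkg/stdcopy"
)

// streamLogs copies container logs to the terminal, demultiplexing
// the stream when the container was created without a TTY.
func streamLogs(ctx context.Context, cli *client.Client, id string, tty bool) error {
	rc, err := cli.ContainerLogs(ctx, id, container.LogsOptions{ShowStdout: true, ShowStderr: true, Follow: true})
	if err != nil {
		return err
	}
	defer rc.Close()
	if tty {
		// raw stream: stdout and stderr are already merged
		_, err = io.Copy(os.Stdout, rc)
		return err
	}
	// multiplexed stream: split back into stdout and stderr
	_, err = stdcopy.StdCopy(os.Stdout, os.Stderr, rc)
	return err
}
```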
@@ -12,7 +12,6 @@ import (
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/kubectl/pkg/cmd/util"

	"github.com/wencaiwulue/kubevpn/v2/pkg/config"
)
@@ -75,16 +74,8 @@ func GetDNSIPFromDnsPod(ctx context.Context, clientset *kubernetes.Clientset) (i
	return
}

func GetDNS(ctx context.Context, f util.Factory, ns, pod string) (*dns.ClientConfig, error) {
	clientSet, err := f.KubernetesClientSet()
	if err != nil {
		return nil, err
	}
	_, err = clientSet.CoreV1().Pods(ns).Get(ctx, pod, v12.GetOptions{})
	if err != nil {
		return nil, err
	}
	restConfig, err := f.ToRESTConfig()
func GetDNS(ctx context.Context, clientSet *kubernetes.Clientset, restConfig *rest.Config, ns, pod string) (*dns.ClientConfig, error) {
	_, err := clientSet.CoreV1().Pods(ns).Get(ctx, pod, v12.GetOptions{})
	if err != nil {
		return nil, err
	}
@@ -31,23 +31,23 @@ import (
)

func GetClient() (*client.Client, *command.DockerCli, error) {
	client, err := client.NewClientWithOpts(
	cli, err := client.NewClientWithOpts(
		client.FromEnv,
		client.WithAPIVersionNegotiation(),
	)
	if err != nil {
		return nil, nil, fmt.Errorf("can not create docker client from env, err: %v", err)
		return nil, nil, err
	}
	var cli *command.DockerCli
	cli, err = command.NewDockerCli(command.WithAPIClient(client))
	var dockerCli *command.DockerCli
	dockerCli, err = command.NewDockerCli(command.WithAPIClient(cli))
	if err != nil {
		return nil, nil, fmt.Errorf("can not create docker client from env, err: %v", err)
		return nil, nil, err
	}
	err = cli.Initialize(flags.NewClientOptions())
	err = dockerCli.Initialize(flags.NewClientOptions())
	if err != nil {
		return nil, nil, fmt.Errorf("can not init docker client, err: %v", err)
		return nil, nil, err
	}
	return client, cli, nil
	return cli, dockerCli, nil
}
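The intended call pattern for the two clients returned by `GetClient` — the SDK client for raw API calls, the `DockerCli` for registry credentials and output streams — looks roughly like this; the image name is arbitrary:

```go
package example

import (
	"context"
	"os"

	"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)

// pullExample pulls an image using both clients from GetClient.
func pullExample(ctx context.Context) error {
	cli, dockerCli, err := util.GetClient()
	if err != nil {
		return err
	}
	// nil platform means: accept whatever the daemon resolves
	return util.PullImage(ctx, nil, cli, dockerCli, "nginx:latest", os.Stdout)
}
```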

// TransferImage
@@ -160,7 +160,7 @@ func TransferImage(ctx context.Context, conf *SshConfig, imageSource, imageTarge
}

// PullImage image.RunPull(ctx, c, image.PullOptions{})
func PullImage(ctx context.Context, platform *v1.Platform, cli *client.Client, c *command.DockerCli, img string, out io.Writer) error {
func PullImage(ctx context.Context, platform *v1.Platform, cli *client.Client, dockerCli *command.DockerCli, img string, out io.Writer) error {
	var readCloser io.ReadCloser
	var plat string
	if platform != nil && platform.Architecture != "" && platform.OS != "" {
@@ -172,7 +172,7 @@ func PullImage(ctx context.Context, platform *v1.Platform, cli *client.Client, c
		return err
	}
	var imgRefAndAuth trust.ImageRefAndAuth
	imgRefAndAuth, err = trust.GetImageReferencesAndAuth(ctx, image.AuthResolver(c), distributionRef.String())
	imgRefAndAuth, err = trust.GetImageReferencesAndAuth(ctx, image.AuthResolver(dockerCli), distributionRef.String())
	if err != nil {
		log.Errorf("can not get image auth: %v", err)
		return err
@@ -183,7 +183,7 @@ func PullImage(ctx context.Context, platform *v1.Platform, cli *client.Client, c
		log.Errorf("can not encode auth config to base64: %v", err)
		return err
	}
	requestPrivilege := command.RegistryAuthenticationPrivilegedFunc(c, imgRefAndAuth.RepoInfo().Index, "pull")
	requestPrivilege := command.RegistryAuthenticationPrivilegedFunc(dockerCli, imgRefAndAuth.RepoInfo().Index, "pull")
	readCloser, err = cli.ImagePull(ctx, img, typesimage.PullOptions{
		All:          false,
		RegistryAuth: encodedAuth,
7
pkg/util/name.go
Normal file
@@ -0,0 +1,7 @@
package util

import "strings"

func Join(names ...string) string {
	return strings.Join(names, "_")
}
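`Join` is the naming convention used throughout this refactor: map keys and container names become namespace-qualified, so identically named containers in different namespaces no longer collide. For example:

```go
package example

import "github.com/wencaiwulue/kubevpn/v2/pkg/util"

// Two "authors" containers in different namespaces now get distinct keys.
var (
	key1 = util.Join("default", "authors") // "default_authors"
	key2 = util.Join("test", "authors")    // "test_authors"
)
```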
@@ -101,27 +101,19 @@ func max[T constraints.Ordered](a T, b T) T {
	return b
}

func GetEnv(ctx context.Context, f util.Factory, ns, pod string) (map[string][]string, error) {
	set, err2 := f.KubernetesClientSet()
	if err2 != nil {
		return nil, err2
	}
	config, err2 := f.ToRESTConfig()
	if err2 != nil {
		return nil, err2
	}
	get, err := set.CoreV1().Pods(ns).Get(ctx, pod, v1.GetOptions{})
func GetEnv(ctx context.Context, set *kubernetes.Clientset, config *rest.Config, ns, podName string) (map[string][]string, error) {
	pod, err := set.CoreV1().Pods(ns).Get(ctx, podName, v1.GetOptions{})
	if err != nil {
		return nil, err
	}
	result := map[string][]string{}
	for _, c := range get.Spec.Containers {
		env, err := Shell(ctx, set, config, pod, c.Name, ns, []string{"env"})
	for _, c := range pod.Spec.Containers {
		env, err := Shell(ctx, set, config, podName, c.Name, ns, []string{"env"})
		if err != nil {
			return nil, err
		}
		split := strings.Split(env, "\n")
		result[c.Name] = split
		result[Join(ns, c.Name)] = split
	}
	return result, nil
}
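A caller of the refactored `GetEnv` now passes the clientset and REST config explicitly instead of a `Factory`, and must look results up under the namespace-qualified key. A sketch:

```go
package example

import (
	"context"

	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"

	"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)

// envForContainer fetches the env of one container in a pod, using the
// new explicit-dependency signature and the Join-based map key.
func envForContainer(ctx context.Context, set *kubernetes.Clientset, cfg *rest.Config, ns, pod, container string) ([]string, error) {
	envMap, err := util.GetEnv(ctx, set, cfg, ns, pod)
	if err != nil {
		return nil, err
	}
	return envMap[util.Join(ns, container)], nil
}
```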
@@ -79,7 +79,7 @@ func GetVolume(ctx context.Context, f util.Factory, ns, podName string) (map[str
			})
			logrus.Infof("%s:%s", localPath, volumeMount.MountPath)
		}
		result[container.Name] = m
		result[Join(ns, container.Name)] = m
	}
	return result, nil
}